func (string, lengths 0–484k) | target (int64, 0–1) | cwe (list, lengths 0–4) | project (string, 799 classes) | commit_id (string, length 40) | hash (float64, ~1.2e24–3.4e29) | size (int64, 1–24k) | message (string, lengths 0–13.3k) |
---|---|---|---|---|---|---|---|
const char *v4l2_norm_to_name(v4l2_std_id id)
{
u32 myid = id;
int i;
/* HACK: ppc32 architecture doesn't have __ucmpdi2 function to handle
64 bit comparisons. So, on that architecture, with some gcc
variants, compilation fails. Currently, the max value is 30bit wide.
*/
BUG_ON(myid != id);
for (i = 0; standards[i].std; i++)
if (myid == standards[i].std)
break;
return standards[i].descr;
}
| 0 |
[
"CWE-401"
] |
linux
|
fb18802a338b36f675a388fc03d2aa504a0d0899
| 252,231,333,338,925,300,000,000,000,000,000,000,000 | 16 |
media: v4l: ioctl: Fix memory leak in video_usercopy
When an IOCTL with an argument size larger than 128 that also used array
arguments was handled, two memory allocations were made but alas, only
the latter one of them was released. This happened because there was only
a single local variable to hold such a temporary allocation.
Fix this by adding separate variables to hold the pointers to the
temporary allocations.
Reported-by: Arnd Bergmann <[email protected]>
Reported-by: [email protected]
Fixes: d14e6d76ebf7 ("[media] v4l: Add multi-planar ioctl handling code")
Cc: [email protected]
Signed-off-by: Sakari Ailus <[email protected]>
Acked-by: Arnd Bergmann <[email protected]>
Acked-by: Hans Verkuil <[email protected]>
Reviewed-by: Laurent Pinchart <[email protected]>
Signed-off-by: Mauro Carvalho Chehab <[email protected]>
|
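The leak pattern described in this commit message is easy to show in miniature. A minimal sketch, not the actual video_usercopy code and with hypothetical names: reusing one pointer variable for two temporary allocations leaks the first, and the fix is one pointer per allocation.

#include <stdlib.h>

/* Buggy shape: one variable holds both temporaries, so the first
 * allocation becomes unreachable (CWE-401). */
int handle_ioctl_buggy(size_t arg_size, size_t array_size)
{
	void *mbuf = malloc(arg_size);   /* allocation 1 */
	if (!mbuf)
		return -1;
	mbuf = malloc(array_size);       /* overwrites the only pointer */
	if (!mbuf)
		return -1;               /* allocation 1 leaks here too */
	free(mbuf);                      /* only allocation 2 is freed */
	return 0;
}

/* Fixed shape: separate variables, both released on every path. */
int handle_ioctl_fixed(size_t arg_size, size_t array_size)
{
	void *mbuf = malloc(arg_size);
	void *array_buf = malloc(array_size);
	int err = (!mbuf || !array_buf) ? -1 : 0;
	free(array_buf);                 /* free(NULL) is a no-op */
	free(mbuf);
	return err;
}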
iakerb_tkt_creds_ctx(iakerb_ctx_id_t ctx,
krb5_gss_cred_id_t cred,
krb5_gss_name_t name,
OM_uint32 time_req)
{
krb5_error_code code;
krb5_creds creds;
krb5_timestamp now;
assert(cred->name != NULL);
assert(cred->name->princ != NULL);
memset(&creds, 0, sizeof(creds));
creds.client = cred->name->princ;
creds.server = name->princ;
if (time_req != 0 && time_req != GSS_C_INDEFINITE) {
code = krb5_timeofday(ctx->k5c, &now);
if (code != 0)
goto cleanup;
creds.times.endtime = now + time_req;
}
if (cred->name->ad_context != NULL) {
code = krb5_authdata_export_authdata(ctx->k5c,
cred->name->ad_context,
AD_USAGE_TGS_REQ,
&creds.authdata);
if (code != 0)
goto cleanup;
}
code = krb5_tkt_creds_init(ctx->k5c, cred->ccache, &creds, 0, &ctx->tcc);
if (code != 0)
goto cleanup;
cleanup:
krb5_free_authdata(ctx->k5c, creds.authdata);
return code;
}
| 0 |
[
"CWE-18"
] |
krb5
|
e04f0283516e80d2f93366e0d479d13c9b5c8c2a
| 42,035,248,858,740,880,000,000,000,000,000,000,000 | 44 |
Fix IAKERB context aliasing bugs [CVE-2015-2696]
The IAKERB mechanism currently replaces its context handle with the
krb5 mechanism handle upon establishment, under the assumption that
most GSS functions are only called after context establishment. This
assumption is incorrect, and can lead to aliasing violations for some
programs. Maintain the IAKERB context structure after context
establishment and add new IAKERB entry points to refer to it with that
type. Add initiate and established flags to the IAKERB context
structure for use in gss_inquire_context() prior to context
establishment.
CVE-2015-2696:
In MIT krb5 1.9 and later, applications which call
gss_inquire_context() on a partially-established IAKERB context can
cause the GSS-API library to read from a pointer using the wrong type,
generally causing a process crash. Java server applications using the
native JGSS provider are vulnerable to this bug. A carefully crafted
IAKERB packet might allow the gss_inquire_context() call to succeed
with attacker-determined results, but applications should not make
access control decisions based on gss_inquire_context() results prior
to context establishment.
CVSSv2 Vector: AV:N/AC:M/Au:N/C:N/I:N/A:C/E:POC/RL:OF/RC:C
[[email protected]: several bugfixes, style changes, and edge-case
behavior changes; commit message and CVE description]
ticket: 8244
target_version: 1.14
tags: pullup
|
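The aliasing bug this message describes reduces to a handle being silently swapped for an object of a different type. A hedged sketch with hypothetical struct layouts, not MIT krb5's real ones:

/* The caller obtained an iakerb-typed handle, but after establishment
 * the library replaced it with a krb5-typed object. */
struct iakerb_ctx { int initiate; int established; void *inner; };
struct krb5_ctx   { void *key; long flags; };

int inquire(void *handle)
{
	/* Interprets krb5_ctx bytes with the iakerb_ctx layout: a
	 * wrong-type read, typically a crash, possibly attacker-shaped
	 * results, as in CVE-2015-2696. */
	struct iakerb_ctx *ctx = handle;
	return ctx->established;
}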
template<typename tc>
CImg<T>& draw_point(const int x0, const int y0, const int z0,
const tc *const color, const float opacity=1) {
if (is_empty()) return *this;
if (!color)
throw CImgArgumentException(_cimg_instance
"draw_point(): Specified color is (null).",
cimg_instance);
if (x0>=0 && y0>=0 && z0>=0 && x0<width() && y0<height() && z0<depth()) {
const ulongT whd = (ulongT)_width*_height*_depth;
const float nopacity = cimg::abs(opacity), copacity = 1 - std::max(opacity,0.f);
T *ptrd = data(x0,y0,z0,0);
const tc *col = color;
if (opacity>=1) cimg_forC(*this,c) { *ptrd = (T)*(col++); ptrd+=whd; }
else cimg_forC(*this,c) { *ptrd = (T)(*(col++)*nopacity + *ptrd*copacity); ptrd+=whd; }
}
return *this;
| 0 |
[
"CWE-119",
"CWE-787"
] |
CImg
|
ac8003393569aba51048c9d67e1491559877b1d1
| 243,601,289,372,952,450,000,000,000,000,000,000,000 | 17 |
.
|
flatpak_deploy_init (FlatpakDeploy *self)
{
}
| 0 |
[
"CWE-668"
] |
flatpak
|
cd2142888fc4c199723a0dfca1f15ea8788a5483
| 220,395,197,027,947,730,000,000,000,000,000,000,000 | 3 |
Don't expose /proc when running apply_extra
As shown by CVE-2019-5736, it is sometimes possible for the sandbox
app to access outside files using /proc/self/exe. This is not
typically an issue for flatpak as the sandbox runs as the user which
has no permissions to e.g. modify the host files.
However, when installing apps using extra-data into the system repo
we *do* actually run a sandbox as root. So, in this case we disable mounting
/proc in the sandbox, which will neuter attacks like this.
|
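A minimal sketch of the mitigation described above, with illustrative names rather than flatpak's actual code: when the apply_extra sandbox runs as root, skip the proc mount entirely so no /proc/self/exe exists to attack.

#include <stdio.h>
#include <sys/mount.h>

static int setup_sandbox_mounts(const char *root, int expose_proc)
{
	char path[4096];
	snprintf(path, sizeof path, "%s/proc", root);
	if (!expose_proc)
		return 0;   /* apply_extra as root: leave /proc unmounted */
	return mount("proc", path, "proc", 0, NULL);
}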
static char** read_string_array(lua_State *vm, int narg, int *len_out) {
int len;
char **buff;
if(ntop_lua_check(vm, __FUNCTION__, narg, LUA_TTABLE)) {
*len_out = 0;
return(NULL);
}
len = lua_objlen(vm, narg);
*len_out = len;
buff = (char**)malloc(len*sizeof(char*));
if(!buff) {
*len_out = 0;
return(NULL);
}
for(int i = 0; i < len; i++) {
lua_pushinteger(vm, i+1);
lua_gettable(vm, -2);
if(lua_isstring(vm, -1))
buff[i] = (char*)lua_tostring(vm, -1);
else
buff[i] = (char*)"";
lua_pop(vm, 1);
}
return(buff);
}
| 0 |
[
"CWE-254"
] |
ntopng
|
2e0620be3410f5e22c9aa47e261bc5a12be692c6
| 72,668,808,466,062,685,000,000,000,000,000,000,000 | 31 |
Added security fix to avoid escalating privileges to non-privileged users
Many thanks to Dolev Farhi for reporting it
|
parse_subexp(Node** top, OnigToken* tok, int term, UChar** src, UChar* end,
ScanEnv* env)
{
int r;
Node *node, **headp;
*top = NULL;
env->parse_depth++;
if (env->parse_depth > ParseDepthLimit)
return ONIGERR_PARSE_DEPTH_LIMIT_OVER;
r = parse_branch(&node, tok, term, src, end, env);
if (r < 0) {
onig_node_free(node);
return r;
}
if (r == term) {
*top = node;
}
else if (r == TK_ALT) {
*top = onig_node_new_alt(node, NULL);
headp = &(NODE_CDR(*top));
while (r == TK_ALT) {
r = fetch_token(tok, src, end, env);
if (r < 0) return r;
r = parse_branch(&node, tok, term, src, end, env);
if (r < 0) {
onig_node_free(node);
return r;
}
*headp = onig_node_new_alt(node, NULL);
headp = &(NODE_CDR(*headp));
}
if (tok->type != (enum TokenSyms )term)
goto err;
}
else {
onig_node_free(node);
err:
if (term == TK_SUBEXP_CLOSE)
return ONIGERR_END_PATTERN_WITH_UNMATCHED_PARENTHESIS;
else
return ONIGERR_PARSER_BUG;
}
env->parse_depth--;
return r;
}
| 0 |
[
"CWE-476"
] |
oniguruma
|
850bd9b0d8186eb1637722b46b12656814ab4ad2
| 156,143,419,138,029,850,000,000,000,000,000,000,000 | 49 |
fix #87: Read unknown address in onig_error_code_to_str()
|
Status AutoParallel::Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* output) {
TF_RETURN_IF_ERROR(Initialize(item));
BuildGraph(output);
return Status::OK();
}
| 0 |
[
"CWE-703",
"CWE-908"
] |
tensorflow
|
68867bf01239d9e1048f98cbad185bf4761bedd3
| 102,670,257,651,268,950,000,000,000,000,000,000,000 | 6 |
Prevent uninitialized variable use in grappler.
PiperOrigin-RevId: 399702928
Change-Id: Id7e75451fbff297692dfb687f60ea04b25c96b24
|
ins_up(
int startcol) // when TRUE move to Insstart.col
{
pos_T tpos;
linenr_T old_topline = curwin->w_topline;
#ifdef FEAT_DIFF
int old_topfill = curwin->w_topfill;
#endif
undisplay_dollar();
tpos = curwin->w_cursor;
if (cursor_up(1L, TRUE) == OK)
{
if (startcol)
coladvance(getvcol_nolist(&Insstart));
if (old_topline != curwin->w_topline
#ifdef FEAT_DIFF
|| old_topfill != curwin->w_topfill
#endif
)
redraw_later(UPD_VALID);
start_arrow(&tpos);
can_cindent = TRUE;
}
else
vim_beep(BO_CRSR);
}
| 0 |
[
"CWE-126",
"CWE-787"
] |
vim
|
e98c88c44c308edaea5994b8ad4363e65030968c
| 64,395,574,011,303,060,000,000,000,000,000,000,000 | 27 |
patch 9.0.0218: reading before the start of the line
Problem: Reading before the start of the line.
Solution: When displaying "$" check the column is not negative.
|
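The solution line above is a one-line clamp before indexing. A hedged sketch of the pattern, not vim's actual code:

/* Reading line[col] with col < 0 walks off the front of the buffer
 * (the CWE-126/787 labels on this row); clamp first. */
static char char_at(const char *line, long col)
{
	if (col < 0)
		col = 0;
	return line[col];
}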
set_hunkmax (void)
{
if (!p_line)
p_line = (char **) malloc (hunkmax * sizeof *p_line);
if (!p_len)
p_len = (size_t *) malloc (hunkmax * sizeof *p_len);
if (!p_Char)
p_Char = malloc (hunkmax * sizeof *p_Char);
}
| 1 |
[
"CWE-399"
] |
patch
|
0c08d7a902c6fdd49b704623a12d8d672ef18944
| 335,421,363,436,213,100,000,000,000,000,000,000,000 | 9 |
Fail when out of memory in set_hunkmax()
src/pch.c (another_hunk): Call set_hunkmax() from here to make sure it is
called even when falling back from plan A to plan B.
(open_patch_file): No need to call set_hunkmax() anymore.
src/pch.c (set_hunkmax): Fail when out of memory. Make static.
src/pch.h: Remove set_hunkmax() prototype.
|
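This row's code is the vulnerable version (target 1): every malloc result is left unchecked, so later code dereferences NULL rows. A hedged sketch of the fixed shape described in the message, where memory_fatal() stands in for GNU patch's real out-of-memory handling:

#include <stdlib.h>

static char  **p_line;
static size_t *p_len;
static char   *p_Char;
static size_t  hunkmax = 128;

static void memory_fatal(void) { exit(2); }   /* hypothetical handler */

static void set_hunkmax(void)
{
	if (!p_line)
		p_line = malloc(hunkmax * sizeof *p_line);
	if (!p_len)
		p_len = malloc(hunkmax * sizeof *p_len);
	if (!p_Char)
		p_Char = malloc(hunkmax * sizeof *p_Char);
	if (!p_line || !p_len || !p_Char)
		memory_fatal();   /* was: silently continue with NULL */
}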
c_valid_civil_p(int y, int m, int d, double sg,
int *rm, int *rd, int *rjd, int *ns)
{
int ry;
if (m < 0)
m += 13;
if (d < 0) {
if (!c_find_ldom(y, m, sg, rjd, ns))
return 0;
c_jd_to_civil(*rjd + d + 1, sg, &ry, rm, rd);
if (ry != y || *rm != m)
return 0;
d = *rd;
}
c_civil_to_jd(y, m, d, sg, rjd, ns);
c_jd_to_civil(*rjd, sg, &ry, rm, rd);
if (ry != y || *rm != m || *rd != d)
return 0;
return 1;
}
| 0 |
[] |
date
|
3959accef8da5c128f8a8e2fd54e932a4fb253b0
| 308,416,194,122,185,850,000,000,000,000,000,000,000 | 21 |
Add length limit option for methods that parses date strings
`Date.parse` now raises an ArgumentError when a given date string is
longer than 128. You can configure the limit by giving `limit` keyword
arguments like `Date.parse(str, limit: 1000)`. If you pass `limit: nil`,
the limit is disabled.
Not only `Date.parse` but also the following methods are changed.
* Date._parse
* Date.parse
* DateTime.parse
* Date._iso8601
* Date.iso8601
* DateTime.iso8601
* Date._rfc3339
* Date.rfc3339
* DateTime.rfc3339
* Date._xmlschema
* Date.xmlschema
* DateTime.xmlschema
* Date._rfc2822
* Date.rfc2822
* DateTime.rfc2822
* Date._rfc822
* Date.rfc822
* DateTime.rfc822
* Date._jisx0301
* Date.jisx0301
* DateTime.jisx0301
|
PJ_DEF(pj_status_t) pj_ssl_sock_start_recvfrom2 (pj_ssl_sock_t *ssock,
pj_pool_t *pool,
unsigned buff_size,
void *readbuf[],
pj_uint32_t flags)
{
PJ_UNUSED_ARG(ssock);
PJ_UNUSED_ARG(pool);
PJ_UNUSED_ARG(buff_size);
PJ_UNUSED_ARG(readbuf);
PJ_UNUSED_ARG(flags);
return PJ_ENOTSUP;
}
| 0 |
[
"CWE-362",
"CWE-703"
] |
pjproject
|
d5f95aa066f878b0aef6a64e60b61e8626e664cd
| 55,519,310,766,876,990,000,000,000,000,000,000,000 | 14 |
Merge pull request from GHSA-cv8x-p47p-99wr
* - Avoid SSL socket parent/listener getting destroyed during handshake by increasing parent's reference count.
- Add missing SSL socket close when the newly accepted SSL socket is discarded in SIP TLS transport.
* - Fix silly mistake: accepted active socket created without group lock in SSL socket.
- Replace assertion with normal validation check of SSL socket instance in OpenSSL verification callback (verify_cb()) to avoid crash, e.g: if somehow race condition with SSL socket destroy happens or OpenSSL application data index somehow gets corrupted.
|
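The first bullet of this merge message is the classic pin-by-refcount idiom against destroy-during-use races. A hedged sketch with generic helpers, not pjproject's group-lock API:

typedef struct listener { int refcnt; } listener_t;

static void obj_add_ref(listener_t *p) { p->refcnt++; }   /* hypothetical */
static void obj_dec_ref(listener_t *p) { if (--p->refcnt == 0) { /* free */ } }

static void accept_and_handshake(listener_t *parent)
{
	obj_add_ref(parent);   /* pin the parent across the handshake */
	/* ... asynchronous SSL handshake that still touches parent ... */
	obj_dec_ref(parent);   /* destruction can only happen here */
}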
static struct vrend_linked_shader_program *lookup_cs_shader_program(struct vrend_context *ctx,
GLuint cs_id)
{
struct vrend_linked_shader_program *ent;
LIST_FOR_EACH_ENTRY(ent, &ctx->sub->cs_programs, head) {
if (ent->ss[PIPE_SHADER_COMPUTE]->id == cs_id) {
list_del(&ent->head);
list_add(&ent->head, &ctx->sub->cs_programs);
return ent;
}
}
return NULL;
}
| 0 |
[
"CWE-787"
] |
virglrenderer
|
95e581fd181b213c2ed7cdc63f2abc03eaaa77ec
| 269,500,718,958,688,270,000,000,000,000,000,000,000 | 13 |
vrend: Add test to resource OOB write and fix it
v2: Also check that no depth != 1 has been sent when none is due
Closes: #250
Signed-off-by: Gert Wollny <[email protected]>
Reviewed-by: Chia-I Wu <[email protected]>
|
static int vidioc_try_fmt_cap(struct file *file, void *priv, struct v4l2_format *fmt)
{
struct v4l2_loopback_device *dev;
char buf[5];
dev = v4l2loopback_getdevice(file);
if (0 == dev->ready_for_capture) {
dprintk("setting fmt_cap not possible yet\n");
return -EBUSY;
}
if (fmt->fmt.pix.pixelformat != dev->pix_format.pixelformat)
return -EINVAL;
fmt->fmt.pix = dev->pix_format;
buf[4] = 0;
dprintk("capFOURCC=%s\n", fourcc2str(dev->pix_format.pixelformat, buf));
return 0;
}
| 0 |
[
"CWE-787"
] |
v4l2loopback
|
64a216af4c09c9ba9326057d7e78994271827eff
| 47,846,417,826,378,810,000,000,000,000,000,000,000 | 21 |
add explicit format specifier to printf() invocations
CWE-134
|
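CWE-134 on this row is the format-string bug the subject line refers to, and the fix is mechanical:

#include <stdio.h>

void log_name(const char *name)
{
	printf(name);        /* bug: name may contain %s/%n conversions */
	printf("%s", name);  /* fix: explicit format specifier */
}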
static int check_sock_access(struct bpf_verifier_env *env, u32 regno, int off,
int size, enum bpf_access_type t)
{
struct bpf_reg_state *regs = cur_regs(env);
struct bpf_reg_state *reg = &regs[regno];
struct bpf_insn_access_aux info;
if (reg->smin_value < 0) {
verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
regno);
return -EACCES;
}
if (!bpf_sock_is_valid_access(off, size, t, &info)) {
verbose(env, "invalid bpf_sock access off=%d size=%d\n",
off, size);
return -EACCES;
}
return 0;
}
| 0 |
[
"CWE-703",
"CWE-189"
] |
linux
|
979d63d50c0c0f7bc537bf821e056cc9fe5abd38
| 300,678,287,511,038,220,000,000,000,000,000,000,000 | 21 |
bpf: prevent out of bounds speculation on pointer arithmetic
Jann reported that the original commit back in b2157399cc98
("bpf: prevent out-of-bounds speculation") was not sufficient
to stop CPU from speculating out of bounds memory access:
While b2157399cc98 only focussed on masking array map access
for unprivileged users for tail calls and data access such
that the user provided index gets sanitized from BPF program
and syscall side, there is still a more generic form affected
from BPF programs that applies to most maps that hold user
data in relation to dynamic map access when dealing with
unknown scalars or "slow" known scalars as access offset, for
example:
- Load a map value pointer into R6
- Load an index into R7
- Do a slow computation (e.g. with a memory dependency) that
loads a limit into R8 (e.g. load the limit from a map for
high latency, then mask it to make the verifier happy)
- Exit if R7 >= R8 (mispredicted branch)
- Load R0 = R6[R7]
- Load R0 = R6[R0]
For unknown scalars there are two options in the BPF verifier
where we could derive knowledge from in order to guarantee
safe access to the memory: i) While </>/<=/>= variants won't
allow to derive any lower or upper bounds from the unknown
scalar where it would be safe to add it to the map value
pointer, it is possible through ==/!= test however. ii) another
option is to transform the unknown scalar into a known scalar,
for example, through ALU ops combination such as R &= <imm>
followed by R |= <imm> or any similar combination where the
original information from the unknown scalar would be destroyed
entirely leaving R with a constant. The initial slow load still
precedes the latter ALU ops on that register, so the CPU
executes speculatively from that point. Once we have the known
scalar, any compare operation would work then. A third option
only involving registers with known scalars could be crafted
as described in [0] where a CPU port (e.g. Slow Int unit)
would be filled with many dependent computations such that
the subsequent condition depending on its outcome has to wait
for evaluation on its execution port and thereby executing
speculatively if the speculated code can be scheduled on a
different execution port, or any other form of mistraining
as described in [1], for example. Given this is not limited
to only unknown scalars, not only map but also stack access
is affected since both is accessible for unprivileged users
and could potentially be used for out of bounds access under
speculation.
In order to prevent any of these cases, the verifier is now
sanitizing pointer arithmetic on the offset such that any
out of bounds speculation would be masked in a way where the
pointer arithmetic result in the destination register will
stay unchanged, meaning offset masked into zero similar as
in array_index_nospec() case. With regards to implementation,
there are three options that were considered: i) new insn
for sanitation, ii) push/pop insn and sanitation as inlined
BPF, iii) reuse of ax register and sanitation as inlined BPF.
Option i) has the downside that we end up using from reserved
bits in the opcode space, but also that we would require
each JIT to emit masking as native arch opcodes meaning
mitigation would have slow adoption till everyone implements
it eventually which is counter-productive. Option ii) and iii)
have both in common that a temporary register is needed in
order to implement the sanitation as inlined BPF since we
are not allowed to modify the source register. While a push /
pop insn in ii) would be useful to have in any case, it
requires once again that every JIT needs to implement it
first. While possible, amount of changes needed would also
be unsuitable for a -stable patch. Therefore, the path which
has fewer changes, less BPF instructions for the mitigation
and does not require anything to be changed in the JITs is
option iii) which this work is pursuing. The ax register is
already mapped to a register in all JITs (modulo arm32 where
it's mapped to stack as various other BPF registers there)
and used in constant blinding for JITs-only so far. It can
be reused for verifier rewrites under certain constraints.
The interpreter's tmp "register" has therefore been remapped
into extending the register set with hidden ax register and
reusing that for a number of instructions that needed the
prior temporary variable internally (e.g. div, mod). This
allows for zero increase in stack space usage in the interpreter,
and enables (restricted) generic use in rewrites otherwise as
long as such a patchlet does not make use of these instructions.
The sanitation mask is dynamic and relative to the offset the
map value or stack pointer currently holds.
There are various cases that need to be taken under consideration
for the masking, e.g. such operation could look as follows:
ptr += val or val += ptr or ptr -= val. Thus, the value to be
sanitized could reside either in source or in destination
register, and the limit is different depending on whether
the ALU op is addition or subtraction and depending on the
current known and bounded offset. The limit is derived as
follows: limit := max_value_size - (smin_value + off). For
subtraction: limit := umax_value + off. This holds because
we do not allow any pointer arithmetic that would
temporarily go out of bounds or would have an unknown
value with mixed signed bounds where it is unclear at
verification time whether the actual runtime value would
be either negative or positive. For example, we have a
derived map pointer value with constant offset and bounded
one, so limit based on smin_value works because the verifier
requires that statically analyzed arithmetic on the pointer
must be in bounds, and thus it checks if resulting
smin_value + off and umax_value + off is still within map
value bounds at time of arithmetic in addition to time of
access. Similarly, for the case of stack access we derive
the limit as follows: MAX_BPF_STACK + off for subtraction
and -off for the case of addition where off := ptr_reg->off +
ptr_reg->var_off.value. Subtraction is a special case for
the masking which can be in form of ptr += -val, ptr -= -val,
or ptr -= val. In the first two cases where we know that
the value is negative, we need to temporarily negate the
value in order to do the sanitation on a positive value
where we later swap the ALU op, and restore original source
register if the value was in source.
The sanitation of pointer arithmetic alone is still not fully
sufficient as is, since a scenario like the following could
happen ...
PTR += 0x1000 (e.g. K-based imm)
PTR -= BIG_NUMBER_WITH_SLOW_COMPARISON
PTR += 0x1000
PTR -= BIG_NUMBER_WITH_SLOW_COMPARISON
[...]
... which under speculation could end up as ...
PTR += 0x1000
PTR -= 0 [ truncated by mitigation ]
PTR += 0x1000
PTR -= 0 [ truncated by mitigation ]
[...]
... and therefore still access out of bounds. To prevent such
case, the verifier is also analyzing safety for potential out
of bounds access under speculative execution. Meaning, it is
also simulating pointer access under truncation. We therefore
"branch off" and push the current verification state after the
ALU operation with known 0 to the verification stack for later
analysis. Given the current path analysis succeeded it is
likely that the one under speculation can be pruned. In any
case, it is also subject to existing complexity limits and
therefore anything beyond this point will be rejected. In
terms of pruning, it needs to be ensured that the verification
state from speculative execution simulation must never prune
a non-speculative execution path, therefore, we mark verifier
state accordingly at the time of push_stack(). If verifier
detects out of bounds access under speculative execution from
one of the possible paths that includes a truncation, it will
reject such program.
Given we mask every reg-based pointer arithmetic for
unprivileged programs, we've been looking into how it could
affect real-world programs in terms of size increase. As the
majority of programs are targeted for privileged-only use
case, we've unconditionally enabled masking (with its alu
restrictions on top of it) for privileged programs for the
sake of testing in order to check i) whether they get rejected
in its current form, and ii) by how much the number of
instructions and size will increase. We've tested this by
using Katran, Cilium and test_l4lb from the kernel selftests.
For Katran we've evaluated balancer_kern.o, Cilium bpf_lxc.o
and an older test object bpf_lxc_opt_-DUNKNOWN.o and l4lb
we've used test_l4lb.o as well as test_l4lb_noinline.o. We
found that none of the programs got rejected by the verifier
with this change, and that impact is rather minimal to none.
balancer_kern.o had 13,904 bytes (1,738 insns) xlated and
7,797 bytes JITed before and after the change. Most complex
program in bpf_lxc.o had 30,544 bytes (3,817 insns) xlated
and 18,538 bytes JITed before and after and none of the other
tail call programs in bpf_lxc.o had any changes either. For
the older bpf_lxc_opt_-DUNKNOWN.o object we found a small
increase from 20,616 bytes (2,576 insns) and 12,536 bytes JITed
before to 20,664 bytes (2,582 insns) and 12,558 bytes JITed
after the change. Other programs from that object file had
similar small increase. Both test_l4lb.o had no change and
remained at 6,544 bytes (817 insns) xlated and 3,401 bytes
JITed and for test_l4lb_noinline.o constant at 5,080 bytes
(634 insns) xlated and 3,313 bytes JITed. This can be explained
in that LLVM typically optimizes stack based pointer arithmetic
by using K-based operations and that use of dynamic map access
is not overly frequent. However, in future we may decide to
optimize the algorithm further under known guarantees from
branch and value speculation. Latter seems also unclear in
terms of prediction heuristics that today's CPUs apply as well
as whether there could be collisions in e.g. the predictor's
Value History/Pattern Table for triggering out of bounds access,
thus masking is performed unconditionally at this point but could
be subject to relaxation later on. We were generally also
brainstorming various other approaches for mitigation, but the
blocker was always lack of available registers at runtime and/or
overhead for runtime tracking of limits belonging to a specific
pointer. Thus, we found this to be minimally intrusive under
given constraints.
With that in place, a simple example with sanitized access on
unprivileged load at post-verification time looks as follows:
# bpftool prog dump xlated id 282
[...]
28: (79) r1 = *(u64 *)(r7 +0)
29: (79) r2 = *(u64 *)(r7 +8)
30: (57) r1 &= 15
31: (79) r3 = *(u64 *)(r0 +4608)
32: (57) r3 &= 1
33: (47) r3 |= 1
34: (2d) if r2 > r3 goto pc+19
35: (b4) (u32) r11 = (u32) 20479 |
36: (1f) r11 -= r2 | Dynamic sanitation for pointer
37: (4f) r11 |= r2 | arithmetic with registers
38: (87) r11 = -r11 | containing bounded or known
39: (c7) r11 s>>= 63 | scalars in order to prevent
40: (5f) r11 &= r2 | out of bounds speculation.
41: (0f) r4 += r11 |
42: (71) r4 = *(u8 *)(r4 +0)
43: (6f) r4 <<= r1
[...]
For the case where the scalar sits in the destination register
as opposed to the source register, the following code is emitted
for the above example:
[...]
16: (b4) (u32) r11 = (u32) 20479
17: (1f) r11 -= r2
18: (4f) r11 |= r2
19: (87) r11 = -r11
20: (c7) r11 s>>= 63
21: (5f) r2 &= r11
22: (0f) r2 += r0
23: (61) r0 = *(u32 *)(r2 +0)
[...]
JIT blinding example with non-conflicting use of r10:
[...]
d5: je 0x0000000000000106 _
d7: mov 0x0(%rax),%edi |
da: mov $0xf153246,%r10d | Index load from map value and
e0: xor $0xf153259,%r10 | (const blinded) mask with 0x1f.
e7: and %r10,%rdi |_
ea: mov $0x2f,%r10d |
f0: sub %rdi,%r10 | Sanitized addition. Both use r10
f3: or %rdi,%r10 | but do not interfere with each
f6: neg %r10 | other. (Neither do these instructions
f9: sar $0x3f,%r10 | interfere with the use of ax as temp
fd: and %r10,%rdi | in interpreter.)
100: add %rax,%rdi |_
103: mov 0x0(%rdi),%eax
[...]
Tested that it fixes Jann's reproducer, and also checked that test_verifier
and test_progs suite with interpreter, JIT and JIT with hardening enabled
on x86-64 and arm64 runs successfully.
[0] Speculose: Analyzing the Security Implications of Speculative
Execution in CPUs, Giorgi Maisuradze and Christian Rossow,
https://arxiv.org/pdf/1801.04084.pdf
[1] A Systematic Evaluation of Transient Execution Attacks and
Defenses, Claudio Canella, Jo Van Bulck, Michael Schwarz,
Moritz Lipp, Benjamin von Berg, Philipp Ortner, Frank Piessens,
Dmitry Evtyushkin, Daniel Gruss,
https://arxiv.org/pdf/1811.05441.pdf
Fixes: b2157399cc98 ("bpf: prevent out-of-bounds speculation")
Reported-by: Jann Horn <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
Acked-by: Alexei Starovoitov <[email protected]>
Signed-off-by: Alexei Starovoitov <[email protected]>
|
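The xlated dump above (insns 35-41) is a branch-free mask: the attacker-influenced value survives only when it is within the limit and is forced to zero otherwise, before ever reaching the pointer arithmetic. A minimal C model of that exact instruction sequence, a sketch of the idea rather than the verifier's emitted code:

#include <stdint.h>

/* Mirrors: r11 = limit; r11 -= val; r11 |= val; r11 = -r11;
 * r11 s>>= 63; r11 &= val. */
static uint64_t sanitize_index(uint64_t val, uint64_t limit)
{
	uint64_t m = limit - val;   /* top bit set iff val > limit */
	m |= val;                   /* also catches a top-bit-set val */
	m = -m;                     /* in-bounds nonzero val: m goes negative */
	m = (uint64_t)((int64_t)m >> 63);   /* all-ones iff in bounds */
	return val & m;             /* out-of-bounds collapses to 0 */
}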
struct file_list *send_file_list(int f, int argc, char *argv[])
{
static const char *lastdir;
static int lastdir_len = -1;
int len, dirlen;
STRUCT_STAT st;
char *p, *dir;
struct file_list *flist;
struct timeval start_tv, end_tv;
int64 start_write;
int use_ff_fd = 0;
int disable_buffering, reenable_multiplex = -1;
int flags = recurse ? FLAG_CONTENT_DIR : 0;
int reading_remotely = filesfrom_host != NULL;
int rl_flags = (reading_remotely ? 0 : RL_DUMP_COMMENTS)
#ifdef ICONV_OPTION
| (filesfrom_convert ? RL_CONVERT : 0)
#endif
| (eol_nulls || reading_remotely ? RL_EOL_NULLS : 0);
int implied_dot_dir = 0;
rprintf(FLOG, "building file list\n");
if (show_filelist_p())
start_filelist_progress("building file list");
else if (inc_recurse && INFO_GTE(FLIST, 1) && !am_server)
rprintf(FCLIENT, "sending incremental file list\n");
start_write = stats.total_written;
gettimeofday(&start_tv, NULL);
if (relative_paths && protocol_version >= 30)
implied_dirs = 1; /* We send flagged implied dirs */
#ifdef SUPPORT_HARD_LINKS
if (preserve_hard_links && protocol_version >= 30 && !cur_flist)
init_hard_links();
#endif
flist = cur_flist = flist_new(0, "send_file_list");
if (inc_recurse) {
dir_flist = flist_new(FLIST_TEMP, "send_file_list");
flags |= FLAG_DIVERT_DIRS;
} else
dir_flist = cur_flist;
disable_buffering = io_start_buffering_out(f);
if (filesfrom_fd >= 0) {
if (argv[0] && !change_dir(argv[0], CD_NORMAL)) {
rsyserr(FERROR_XFER, errno, "change_dir %s failed",
full_fname(argv[0]));
exit_cleanup(RERR_FILESELECT);
}
if (protocol_version < 31) {
/* Older protocols send the files-from data w/o packaging
* it in multiplexed I/O packets, so temporarily switch
* to buffered I/O to match this behavior. */
reenable_multiplex = io_end_multiplex_in(MPLX_TO_BUFFERED);
}
use_ff_fd = 1;
}
if (!orig_dir)
orig_dir = strdup(curr_dir);
while (1) {
char fbuf[MAXPATHLEN], *fn, name_type;
if (use_ff_fd) {
if (read_line(filesfrom_fd, fbuf, sizeof fbuf, rl_flags) == 0)
break;
sanitize_path(fbuf, fbuf, "", 0, SP_KEEP_DOT_DIRS);
} else {
if (argc-- == 0)
break;
strlcpy(fbuf, *argv++, MAXPATHLEN);
if (sanitize_paths)
sanitize_path(fbuf, fbuf, "", 0, SP_KEEP_DOT_DIRS);
}
len = strlen(fbuf);
if (relative_paths) {
/* We clean up fbuf below. */
name_type = NORMAL_NAME;
} else if (!len || fbuf[len - 1] == '/') {
if (len == 2 && fbuf[0] == '.') {
/* Turn "./" into just "." rather than "./." */
fbuf[--len] = '\0';
} else {
if (len + 1 >= MAXPATHLEN)
overflow_exit("send_file_list");
fbuf[len++] = '.';
fbuf[len] = '\0';
}
name_type = DOTDIR_NAME;
} else if (len > 1 && fbuf[len-1] == '.' && fbuf[len-2] == '.'
&& (len == 2 || fbuf[len-3] == '/')) {
if (len + 2 >= MAXPATHLEN)
overflow_exit("send_file_list");
fbuf[len++] = '/';
fbuf[len++] = '.';
fbuf[len] = '\0';
name_type = DOTDIR_NAME;
} else if (fbuf[len-1] == '.' && (len == 1 || fbuf[len-2] == '/'))
name_type = DOTDIR_NAME;
else
name_type = NORMAL_NAME;
dir = NULL;
if (!relative_paths) {
p = strrchr(fbuf, '/');
if (p) {
*p = '\0';
if (p == fbuf)
dir = "/";
else
dir = fbuf;
len -= p - fbuf + 1;
fn = p + 1;
} else
fn = fbuf;
} else {
if ((p = strstr(fbuf, "/./")) != NULL) {
*p = '\0';
if (p == fbuf)
dir = "/";
else {
dir = fbuf;
clean_fname(dir, 0);
}
fn = p + 3;
while (*fn == '/')
fn++;
if (!*fn)
*--fn = '\0'; /* ensure room for '.' */
} else
fn = fbuf;
/* A leading ./ can be used in relative mode to affect
* the dest dir without its name being in the path. */
if (*fn == '.' && fn[1] == '/' && fn[2] && !implied_dot_dir)
implied_dot_dir = -1;
len = clean_fname(fn, CFN_KEEP_TRAILING_SLASH
| CFN_DROP_TRAILING_DOT_DIR);
if (len == 1) {
if (fn[0] == '/') {
fn = "/.";
len = 2;
name_type = DOTDIR_NAME;
} else if (fn[0] == '.')
name_type = DOTDIR_NAME;
} else if (fn[len-1] == '/') {
fn[--len] = '\0';
if (len == 1 && *fn == '.')
name_type = DOTDIR_NAME;
else
name_type = SLASH_ENDING_NAME;
}
/* Reject a ".." dir in the active part of the path. */
for (p = fn; (p = strstr(p, "..")) != NULL; p += 2) {
if ((p[2] == '/' || p[2] == '\0')
&& (p == fn || p[-1] == '/')) {
rprintf(FERROR,
"found \"..\" dir in relative path: %s\n",
fn);
exit_cleanup(RERR_SYNTAX);
}
}
}
if (!*fn) {
len = 1;
fn = ".";
name_type = DOTDIR_NAME;
}
dirlen = dir ? strlen(dir) : 0;
if (dirlen != lastdir_len || memcmp(lastdir, dir, dirlen) != 0) {
if (!change_pathname(NULL, dir, -dirlen))
goto bad_path;
lastdir = pathname;
lastdir_len = pathname_len;
} else if (!change_pathname(NULL, lastdir, lastdir_len)) {
bad_path:
if (implied_dot_dir < 0)
implied_dot_dir = 0;
continue;
}
if (implied_dot_dir < 0) {
implied_dot_dir = 1;
send_file_name(f, flist, ".", NULL, (flags | FLAG_IMPLIED_DIR) & ~FLAG_CONTENT_DIR, ALL_FILTERS);
}
if (fn != fbuf)
memmove(fbuf, fn, len + 1);
if (link_stat(fbuf, &st, copy_dirlinks || name_type != NORMAL_NAME) != 0
|| (name_type != DOTDIR_NAME && is_daemon_excluded(fbuf, S_ISDIR(st.st_mode)))
|| (relative_paths && path_is_daemon_excluded(fbuf, 1))) {
if (errno != ENOENT || missing_args == 0) {
/* This is a transfer error, but inhibit deletion
* only if we might be omitting an existing file. */
if (errno != ENOENT)
io_error |= IOERR_GENERAL;
rsyserr(FERROR_XFER, errno, "link_stat %s failed",
full_fname(fbuf));
continue;
} else if (missing_args == 1) {
/* Just ignore the arg. */
continue;
} else /* (missing_args == 2) */ {
/* Send the arg as a "missing" entry with
* mode 0, which tells the generator to delete it. */
memset(&st, 0, sizeof st);
}
}
/* A dot-dir should not be excluded! */
if (name_type != DOTDIR_NAME && st.st_mode != 0
&& is_excluded(fbuf, S_ISDIR(st.st_mode) != 0, ALL_FILTERS))
continue;
if (S_ISDIR(st.st_mode) && !xfer_dirs) {
rprintf(FINFO, "skipping directory %s\n", fbuf);
continue;
}
if (inc_recurse && relative_paths && *fbuf) {
if ((p = strchr(fbuf+1, '/')) != NULL) {
if (p - fbuf == 1 && *fbuf == '.') {
if ((fn = strchr(p+1, '/')) != NULL)
p = fn;
} else
fn = p;
send_implied_dirs(f, flist, fbuf, fbuf, p, flags,
IS_MISSING_FILE(st) ? MISSING_NAME : name_type);
if (fn == p)
continue;
}
} else if (implied_dirs && (p=strrchr(fbuf,'/')) && p != fbuf) {
/* Send the implied directories at the start of the
* source spec, so we get their permissions right. */
send_implied_dirs(f, flist, fbuf, fbuf, p, flags, 0);
}
if (one_file_system)
filesystem_dev = st.st_dev;
if (recurse || (xfer_dirs && name_type != NORMAL_NAME)) {
struct file_struct *file;
file = send_file_name(f, flist, fbuf, &st,
FLAG_TOP_DIR | FLAG_CONTENT_DIR | flags,
NO_FILTERS);
if (!file)
continue;
if (inc_recurse) {
if (name_type == DOTDIR_NAME) {
if (send_dir_depth < 0) {
send_dir_depth = 0;
change_local_filter_dir(fbuf, len, send_dir_depth);
}
send_directory(f, flist, fbuf, len, flags);
}
} else
send_if_directory(f, flist, file, fbuf, len, flags);
} else
send_file_name(f, flist, fbuf, &st, flags, NO_FILTERS);
}
if (reenable_multiplex >= 0)
io_start_multiplex_in(reenable_multiplex);
gettimeofday(&end_tv, NULL);
stats.flist_buildtime = (int64)(end_tv.tv_sec - start_tv.tv_sec) * 1000
+ (end_tv.tv_usec - start_tv.tv_usec) / 1000;
if (stats.flist_buildtime == 0)
stats.flist_buildtime = 1;
start_tv = end_tv;
/* Indicate end of file list */
if (io_error == 0 || ignore_errors)
write_byte(f, 0);
else if (use_safe_inc_flist) {
write_shortint(f, XMIT_EXTENDED_FLAGS|XMIT_IO_ERROR_ENDLIST);
write_varint(f, io_error);
} else {
if (delete_during && inc_recurse)
fatal_unsafe_io_error();
write_byte(f, 0);
}
#ifdef SUPPORT_HARD_LINKS
if (preserve_hard_links && protocol_version >= 30 && !inc_recurse)
idev_destroy();
#endif
if (show_filelist_p())
finish_filelist_progress(flist);
gettimeofday(&end_tv, NULL);
stats.flist_xfertime = (int64)(end_tv.tv_sec - start_tv.tv_sec) * 1000
+ (end_tv.tv_usec - start_tv.tv_usec) / 1000;
/* When converting names, both sides keep an unsorted file-list array
* because the names will differ on the sending and receiving sides
* (both sides will use the unsorted index number for each item). */
/* Sort the list without removing any duplicates. This allows the
* receiving side to ask for whatever name it kept. For incremental
* recursion mode, the sender marks duplicate dirs so that it can
* send them together in a single file-list. */
if (need_unsorted_flist) {
if (!(flist->sorted = new_array(struct file_struct *, flist->used)))
out_of_memory("send_file_list");
memcpy(flist->sorted, flist->files,
flist->used * sizeof (struct file_struct*));
} else
flist->sorted = flist->files;
flist_sort_and_clean(flist, 0);
file_total += flist->used;
file_old_total += flist->used;
if (numeric_ids <= 0 && !inc_recurse)
send_id_list(f);
/* send the io_error flag */
if (protocol_version < 30)
write_int(f, ignore_errors ? 0 : io_error);
else if (!use_safe_inc_flist && io_error && !ignore_errors)
send_msg_int(MSG_IO_ERROR, io_error);
if (disable_buffering)
io_end_buffering_out(IOBUF_FREE_BUFS);
stats.flist_size = stats.total_written - start_write;
stats.num_files = flist->used;
if (DEBUG_GTE(FLIST, 3))
output_flist(flist);
if (DEBUG_GTE(FLIST, 2))
rprintf(FINFO, "send_file_list done\n");
if (inc_recurse) {
send_dir_depth = 1;
add_dirs_to_tree(-1, flist, stats.num_dirs);
if (!file_total || strcmp(flist->sorted[flist->low]->basename, ".") != 0)
flist->parent_ndx = -1;
flist_done_allocating(flist);
if (send_dir_ndx < 0) {
write_ndx(f, NDX_FLIST_EOF);
flist_eof = 1;
if (DEBUG_GTE(FLIST, 3))
rprintf(FINFO, "[%s] flist_eof=1\n", who_am_i());
}
else if (file_total == 1) {
/* If we're creating incremental file-lists and there
* was just 1 item in the first file-list, send 1 more
* file-list to check if this is a 1-file xfer. */
send_extra_file_list(f, 1);
}
} else {
flist_eof = 1;
if (DEBUG_GTE(FLIST, 3))
rprintf(FINFO, "[%s] flist_eof=1\n", who_am_i());
}
return flist;
}
| 0 |
[
"CWE-59"
] |
rsync
|
962f8b90045ab331fc04c9e65f80f1a53e68243b
| 128,631,287,358,121,050,000,000,000,000,000,000,000 | 369 |
Complain if an inc-recursive path is not right for its dir.
This ensures that a malicious sender can't use a just-sent
symlink as a transfer path.
|
void CairoOutputDev::beginTransparencyGroup(GfxState * /*state*/, double * /*bbox*/,
GfxColorSpace * blendingColorSpace,
GBool /*isolated*/, GBool knockout,
GBool forSoftMask) {
/* push color space */
ColorSpaceStack* css = new ColorSpaceStack;
css->cs = blendingColorSpace;
css->knockout = knockout;
css->next = groupColorSpaceStack;
groupColorSpaceStack = css;
LOG(printf ("begin transparency group. knockout: %s\n", knockout ? "yes":"no"));
if (knockout) {
knockoutCount++;
if (!cairo_shape) {
/* create a surface for tracking the shape */
cairo_surface_t *cairo_shape_surface = cairo_surface_create_similar_clip (cairo, CAIRO_CONTENT_ALPHA);
cairo_shape = cairo_create (cairo_shape_surface);
cairo_surface_destroy (cairo_shape_surface);
/* the color doesn't matter as long as it is opaque */
cairo_set_source_rgb (cairo_shape, 0, 0, 0);
cairo_matrix_t matrix;
cairo_get_matrix (cairo, &matrix);
//printMatrix(&matrix);
cairo_set_matrix (cairo_shape, &matrix);
} else {
cairo_reference (cairo_shape);
}
}
if (groupColorSpaceStack->next && groupColorSpaceStack->next->knockout) {
/* we need to track the shape */
cairo_push_group (cairo_shape);
}
if (0 && forSoftMask)
cairo_push_group_with_content (cairo, CAIRO_CONTENT_ALPHA);
else
cairo_push_group (cairo);
/* push_group has an implicit cairo_save() */
if (knockout) {
/*XXX: let's hope this matches the semantics needed */
cairo_set_operator(cairo, CAIRO_OPERATOR_SOURCE);
} else {
cairo_set_operator(cairo, CAIRO_OPERATOR_OVER);
}
}
| 0 |
[] |
poppler
|
abf167af8b15e5f3b510275ce619e6fdb42edd40
| 12,986,880,276,537,710,000,000,000,000,000,000,000 | 48 |
Implement tiling/patterns in SplashOutputDev
Fixes bug 13518
|
njs_generate_variable_wo_dest(njs_vm_t *vm, njs_generator_t *generator,
njs_parser_node_t *node, njs_reference_type_t type, njs_variable_t **retvar)
{
njs_int_t ret;
njs_parser_scope_t *scope;
scope = njs_function_scope(node->scope);
scope->dest_disable = 1;
ret = njs_generate_variable(vm, generator, node, type, retvar);
scope->dest_disable = 0;
return ret;
}
| 0 |
[
"CWE-703",
"CWE-754"
] |
njs
|
404553896792b8f5f429dc8852d15784a59d8d3e
| 74,264,374,637,976,480,000,000,000,000,000,000,000 | 16 |
Fixed break instruction in a try-catch block.
Previously, the JUMP offset for a break instruction inside a try-catch
block was not set to the correct offset during code generation
when a return instruction was present in an inner try-catch block.
The fix is to update the JUMP offset appropriately.
This closes #553 issue on Github.
|
static int ahci_dma_prepare_buf(IDEDMA *dma, int is_write)
{
AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
IDEState *s = &ad->port.ifs[0];
ahci_populate_sglist(ad, &s->sg, s->io_buffer_offset);
s->io_buffer_size = s->sg.size;
DPRINTF(ad->port_no, "len=%#x\n", s->io_buffer_size);
return s->io_buffer_size != 0;
}
| 1 |
[
"CWE-399"
] |
qemu
|
3251bdcf1c67427d964517053c3d185b46e618e8
| 30,651,433,325,661,710,000,000,000,000,000,000,000 | 11 |
ide: Correct handling of malformed/short PRDTs
This impacts both BMDMA and AHCI HBA interfaces for IDE.
Currently, we confuse the difference between a PRDT having
"0 bytes" and a PRDT having "0 complete sectors."
When we receive an incomplete sector, inconsistent error checking
leads to an infinite loop wherein the call succeeds, but it
didn't give us enough bytes -- leading us to re-call the
DMA chain over and over again. This leads to, in the BMDMA case,
leaked memory for short PRDTs, and infinite loops and resource
usage in the AHCI case.
The .prepare_buf() callback is reworked to return the number of
bytes that it successfully prepared. 0 is a valid, non-error
answer that means the table was empty and described no bytes.
-1 indicates an error.
Our current implementation uses the io_buffer in IDEState to
ultimately describe the size of a prepared scatter-gather list.
Even though the AHCI PRDT/SGList can be as large as 256GiB, the
AHCI command header limits transactions to just 4GiB. ATA8-ACS3,
however, defines the largest transaction to be an LBA48 command
that transfers 65,536 sectors. With a 512 byte sector size, this
is just 32MiB.
Since our current state structures use the int type to describe
the size of the buffer, and this state is migrated as int32, we
are limited to describing 2GiB buffer sizes unless we change the
migration protocol.
For this reason, this patch begins to unify the assertions in the
IDE pathways that the scatter-gather list provided by either the
AHCI PRDT or the PCI BMDMA PRDs can only describe, at a maximum,
2GiB. This should be resilient enough unless we need a sector
size that exceeds 32KiB.
Further, the likelihood of any guest operating system actually
attempting to transfer this much data in a single operation is
very slim.
To this end, the IDEState variables have been updated to more
explicitly clarify our maximum supported size. Callers to the
prepare_buf callback have been reworked to understand the new
return code, and all versions of the prepare_buf callback have
been adjusted accordingly.
Lastly, the ahci_populate_sglist helper, relied upon by the
AHCI implementation of .prepare_buf() as well as the PCI
implementation of the callback have had overflow assertions
added to help make clear the reasonings behind the various
type changes.
[Added %d -> %"PRId64" fix John sent because off_pos changed from int to
int64_t.
--Stefan]
Signed-off-by: John Snow <[email protected]>
Reviewed-by: Paolo Bonzini <[email protected]>
Message-id: [email protected]
Signed-off-by: Stefan Hajnoczi <[email protected]>
|
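Per the message, this row shows the pre-fix callback (target 1): the boolean return cannot distinguish an empty PRDT from an error, which is what allows the endless re-driving of the DMA chain. A hedged sketch of the reworked contract; the error check on ahci_populate_sglist is an assumption, since the version above calls it without checking:

/* After the rework: return the prepared byte count, where 0 means an
 * empty table (valid, no progress possible) and -1 means error. */
if (ahci_populate_sglist(ad, &s->sg, s->io_buffer_offset) < 0)
	return -1;
s->io_buffer_size = s->sg.size;
return s->io_buffer_size;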
static int sctp_getsockopt_pr_streamstatus(struct sock *sk, int len,
char __user *optval,
int __user *optlen)
{
struct sctp_stream_out *streamout;
struct sctp_association *asoc;
struct sctp_prstatus params;
int retval = -EINVAL;
int policy;
if (len < sizeof(params))
goto out;
len = sizeof(params);
if (copy_from_user(&params, optval, len)) {
retval = -EFAULT;
goto out;
}
policy = params.sprstat_policy;
if (policy & ~SCTP_PR_SCTP_MASK)
goto out;
asoc = sctp_id2assoc(sk, params.sprstat_assoc_id);
if (!asoc || params.sprstat_sid >= asoc->stream.outcnt)
goto out;
streamout = &asoc->stream.out[params.sprstat_sid];
if (policy == SCTP_PR_SCTP_NONE) {
params.sprstat_abandoned_unsent = 0;
params.sprstat_abandoned_sent = 0;
for (policy = 0; policy <= SCTP_PR_INDEX(MAX); policy++) {
params.sprstat_abandoned_unsent +=
streamout->abandoned_unsent[policy];
params.sprstat_abandoned_sent +=
streamout->abandoned_sent[policy];
}
} else {
params.sprstat_abandoned_unsent =
streamout->abandoned_unsent[__SCTP_PR_INDEX(policy)];
params.sprstat_abandoned_sent =
streamout->abandoned_sent[__SCTP_PR_INDEX(policy)];
}
if (put_user(len, optlen) || copy_to_user(optval, &params, len)) {
retval = -EFAULT;
goto out;
}
retval = 0;
out:
return retval;
}
| 0 |
[
"CWE-416",
"CWE-787"
] |
linux
|
df80cd9b28b9ebaa284a41df611dbf3a2d05ca74
| 218,977,113,512,326,940,000,000,000,000,000,000,000 | 54 |
sctp: do not peel off an assoc from one netns to another one
Now, when peeling off an association to a sock in another netns, the
transports in this assoc are not rehashed and keep using the old
key in the hashtable.
As a transport uses sk->net as the hash key to insert into the hashtable,
removal of these transports from the hashtable would be missed, due to the
new netns, when closing the sock and freeing all transports; later a
use-after-free issue could then be caused when looking up an asoc
and dereferencing those transports.
This is a very old issue, present since the very beginning; ChunYu found
it with syzkaller fuzz testing using this series:
socket$inet6_sctp()
bind$inet6()
sendto$inet6()
unshare(0x40000000)
getsockopt$inet_sctp6_SCTP_GET_ASSOC_ID_LIST()
getsockopt$inet_sctp6_SCTP_SOCKOPT_PEELOFF()
This patch is to block this call when peeling one assoc off from one
netns to another one, so that the netns of all transport would not
go out-sync with the key in hashtable.
Note that this patch didn't fix it by rehashing transports, as it's
difficult to handle the situation when the tuple is already in use
in the new netns. Besides, no one would like to peel off one assoc
to another netns, considering ipaddrs, ifaces, etc. are usually
different.
Reported-by: ChunYu Wang <[email protected]>
Signed-off-by: Xin Long <[email protected]>
Acked-by: Marcelo Ricardo Leitner <[email protected]>
Acked-by: Neil Horman <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
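The block the message describes is a guard at peel-off time. A hedged sketch using the kernel's real netns helpers; the exact placement inside the peel-off path is an assumption:

/* Refuse to peel an association off into a different netns, so no
 * transport ends up hashed under a stale sk->net key. */
if (!net_eq(current->nsproxy->net_ns, sock_net(sk)))
	return -EINVAL;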
static inline int xattrs_differ(const char *fname, struct file_struct *file, stat_x *sxp)
{
if (preserve_xattrs) {
if (!XATTR_READY(*sxp))
get_xattr(fname, sxp);
if (xattr_diff(file, sxp, 0))
return 1;
}
return 0;
}
| 0 |
[
"CWE-59"
] |
rsync
|
e12a6c087ca1eecdb8eae5977be239c24f4dd3d9
| 173,685,715,853,996,240,000,000,000,000,000,000,000 | 11 |
Add parent-dir validation for --no-inc-recurse too.
|
static struct ip_sf_list *igmp_mcf_get_next(struct seq_file *seq, struct ip_sf_list *psf)
{
struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);
psf = psf->sf_next;
while (!psf) {
spin_unlock_bh(&state->im->lock);
state->im = state->im->next;
while (!state->im) {
state->dev = next_net_device_rcu(state->dev);
if (!state->dev) {
state->idev = NULL;
goto out;
}
state->idev = __in_dev_get_rcu(state->dev);
if (!state->idev)
continue;
state->im = rcu_dereference(state->idev->mc_list);
}
if (!state->im)
break;
spin_lock_bh(&state->im->lock);
psf = state->im->sources;
}
out:
return psf;
}
| 0 |
[
"CWE-362"
] |
linux
|
23d2b94043ca8835bd1e67749020e839f396a1c2
| 292,729,088,899,764,630,000,000,000,000,000,000,000 | 27 |
igmp: Add ip_mc_list lock in ip_check_mc_rcu
I got below panic when doing fuzz test:
Kernel panic - not syncing: panic_on_warn set ...
CPU: 0 PID: 4056 Comm: syz-executor.3 Tainted: G B 5.14.0-rc1-00195-gcff5c4254439-dirty #2
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.12.0-59-gc9ba5276e321-prebuilt.qemu.org 04/01/2014
Call Trace:
dump_stack_lvl+0x7a/0x9b
panic+0x2cd/0x5af
end_report.cold+0x5a/0x5a
kasan_report+0xec/0x110
ip_check_mc_rcu+0x556/0x5d0
__mkroute_output+0x895/0x1740
ip_route_output_key_hash_rcu+0x2d0/0x1050
ip_route_output_key_hash+0x182/0x2e0
ip_route_output_flow+0x28/0x130
udp_sendmsg+0x165d/0x2280
udpv6_sendmsg+0x121e/0x24f0
inet6_sendmsg+0xf7/0x140
sock_sendmsg+0xe9/0x180
____sys_sendmsg+0x2b8/0x7a0
___sys_sendmsg+0xf0/0x160
__sys_sendmmsg+0x17e/0x3c0
__x64_sys_sendmmsg+0x9e/0x100
do_syscall_64+0x3b/0x90
entry_SYSCALL_64_after_hwframe+0x44/0xae
RIP: 0033:0x462eb9
Code: f7 d8 64 89 02 b8 ff ff ff ff c3 66 0f 1f 44 00 00 48 89 f8
48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48>
3d 01 f0 ff ff 73 01 c3 48 c7 c1 bc ff ff ff f7 d8 64 89 01 48
RSP: 002b:00007f3df5af1c58 EFLAGS: 00000246 ORIG_RAX: 0000000000000133
RAX: ffffffffffffffda RBX: 000000000073bf00 RCX: 0000000000462eb9
RDX: 0000000000000312 RSI: 0000000020001700 RDI: 0000000000000007
RBP: 0000000000000004 R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000246 R12: 00007f3df5af26bc
R13: 00000000004c372d R14: 0000000000700b10 R15: 00000000ffffffff
It is one use-after-free in ip_check_mc_rcu.
In ip_mc_del_src, the ip_sf_list of pmc has been freed under pmc->lock protection.
But access to ip_sf_list in ip_check_mc_rcu is not protected by the lock.
Signed-off-by: Liu Jian <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
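The fix in the subject line is one lock pair: the free path already mutates ip_sf_list under im->lock, so the reader in ip_check_mc_rcu must hold the same lock. A hedged sketch of the locked traversal, not the exact upstream diff:

spin_lock_bh(&im->lock);
for (psf = im->sources; psf; psf = psf->sf_next) {
	/* ... inspect the source filter entry ... */
}
spin_unlock_bh(&im->lock);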
static int hid_scan_main(struct hid_parser *parser, struct hid_item *item)
{
__u32 data;
int i;
data = item_udata(item);
switch (item->tag) {
case HID_MAIN_ITEM_TAG_BEGIN_COLLECTION:
hid_scan_collection(parser, data & 0xff);
break;
case HID_MAIN_ITEM_TAG_END_COLLECTION:
break;
case HID_MAIN_ITEM_TAG_INPUT:
/* ignore constant inputs, they will be ignored by hid-input */
if (data & HID_MAIN_ITEM_CONSTANT)
break;
for (i = 0; i < parser->local.usage_index; i++)
hid_scan_input_usage(parser, parser->local.usage[i]);
break;
case HID_MAIN_ITEM_TAG_OUTPUT:
break;
case HID_MAIN_ITEM_TAG_FEATURE:
for (i = 0; i < parser->local.usage_index; i++)
hid_scan_feature_usage(parser, parser->local.usage[i]);
break;
}
/* Reset the local parser environment */
memset(&parser->local, 0, sizeof(parser->local));
return 0;
}
| 0 |
[
"CWE-125"
] |
linux
|
50220dead1650609206efe91f0cc116132d59b3f
| 339,174,287,232,598,900,000,000,000,000,000,000,000 | 33 |
HID: core: prevent out-of-bound readings
Plugging a Logitech DJ receiver with KASAN activated raises a bunch of
out-of-bound readings.
The fields are allocated up to MAX_USAGE, meaning that potentially, we do
not have enough fields to fit the incoming values.
Add checks and silence KASAN.
Signed-off-by: Benjamin Tissoires <[email protected]>
Signed-off-by: Jiri Kosina <[email protected]>
|
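The message reduces to a clamp: the usage arrays are allocated up to a fixed maximum, so indices fed by the device must be checked against it before the loops above walk parser->local.usage[]. A hedged sketch of the pattern; the exact placement and limit name in the upstream fix are assumptions:

/* When recording a usage, reject a device-controlled index past the
 * allocation instead of reading/writing out of bounds. */
if (parser->local.usage_index >= HID_MAX_USAGES)
	return -1;
parser->local.usage[parser->local.usage_index++] = usage;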
ImagingConvert2(Imaging imOut, Imaging imIn) {
return convert(imOut, imIn, imOut->mode, NULL, 0);
}
| 0 |
[
"CWE-120"
] |
Pillow
|
518ee3722a99d7f7d890db82a20bd81c1c0327fb
| 272,087,709,121,931,420,000,000,000,000,000,000,000 | 3 |
Use snprintf instead of sprintf
|
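The one-line commit message is the textbook CWE-120 fix. Illustration:

#include <stdio.h>

void format_size(char *buf, int w, int h)
{
	sprintf(buf, "%dx%d", w, h);      /* unbounded: can overflow buf */
}

void format_size_safe(char *buf, size_t n, int w, int h)
{
	snprintf(buf, n, "%dx%d", w, h);  /* bounded: truncates instead */
}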
void Compute(OpKernelContext* ctx) override {
const Tensor* inputs;
const Tensor* seq_len;
Tensor* log_prob = nullptr;
OpOutputList decoded_indices;
OpOutputList decoded_values;
OpOutputList decoded_shape;
OP_REQUIRES_OK(ctx, decode_helper_.ValidateInputsGenerateOutputs(
ctx, &inputs, &seq_len, &log_prob, &decoded_indices,
&decoded_values, &decoded_shape));
const TensorShape& inputs_shape = inputs->shape();
std::vector<typename TTypes<T>::UnalignedConstMatrix> input_list_t;
const int64 max_time = inputs_shape.dim_size(0);
const int64 batch_size = inputs_shape.dim_size(1);
const int64 num_classes_raw = inputs_shape.dim_size(2);
OP_REQUIRES(
ctx, FastBoundsCheck(num_classes_raw, std::numeric_limits<int>::max()),
errors::InvalidArgument("num_classes cannot exceed max int"));
const int num_classes = static_cast<const int>(num_classes_raw);
auto inputs_t = inputs->tensor<T, 3>();
input_list_t.reserve(max_time);
for (std::size_t t = 0; t < max_time; ++t) {
input_list_t.emplace_back(inputs_t.data() + t * batch_size * num_classes,
batch_size, num_classes);
}
auto seq_len_t = seq_len->vec<int32>();
auto log_prob_t = log_prob->matrix<T>();
log_prob_t.setZero();
// Assumption: the blank index is num_classes - 1
int blank_index = num_classes - 1;
// Perform best path decoding
std::vector<std::vector<std::vector<int> > > sequences(batch_size);
auto decode = [&](const int64 begin, const int64 end) {
for (int b = begin; b < end; ++b) {
sequences[b].resize(1);
auto &sequence = sequences[b][0];
int prev_indices = -1;
for (int t = 0; t < seq_len_t(b); ++t) {
int max_class_indices;
log_prob_t(b, 0) +=
-RowMax<T>(input_list_t[t], b, &max_class_indices);
if (max_class_indices != blank_index &&
!(merge_repeated_ && max_class_indices == prev_indices)) {
sequence.push_back(max_class_indices);
}
prev_indices = max_class_indices;
}
}
};
const int64 kCostPerUnit = 50 * max_time * num_classes;
const int64 total = batch_size;
const DeviceBase::CpuWorkerThreads& worker_threads =
*ctx->device()->tensorflow_cpu_worker_threads();
Shard(worker_threads.num_threads, worker_threads.workers, total,
kCostPerUnit, decode);
OP_REQUIRES_OK(
ctx, decode_helper_.StoreAllDecodedSequences(
sequences, &decoded_indices, &decoded_values, &decoded_shape));
}
| 1 |
[
"CWE-617",
"CWE-703"
] |
tensorflow
|
ea3b43e98c32c97b35d52b4c66f9107452ca8fb2
| 4,567,149,156,074,682,700,000,000,000,000,000,000 | 68 |
Fix `tf.raw_ops.CTCGreedyDecoder` CHECK failure.
PiperOrigin-RevId: 369960465
Change-Id: If0b8b3264d5a47a24ac0970ed7b81ce6b4921fae
|
cockpit_web_response_error (CockpitWebResponse *self,
guint code,
GHashTable *headers,
const gchar *format,
...)
{
va_list var_args;
gchar *reason = NULL;
gchar *escaped = NULL;
const gchar *message;
GBytes *input = NULL;
GList *output, *l;
GError *error = NULL;
g_return_if_fail (COCKPIT_IS_WEB_RESPONSE (self));
if (format)
{
va_start (var_args, format);
reason = g_strdup_vprintf (format, var_args);
va_end (var_args);
message = reason;
}
else
{
switch (code)
{
case 400:
message = "Bad request";
break;
case 401:
message = "Not Authorized";
break;
case 403:
message = "Forbidden";
break;
case 404:
message = "Not Found";
break;
case 405:
message = "Method Not Allowed";
break;
case 413:
message = "Request Entity Too Large";
break;
case 502:
message = "Remote Page is Unavailable";
break;
case 500:
message = "Internal Server Error";
break;
default:
if (code < 100)
reason = g_strdup_printf ("%u Continue", code);
else if (code < 200)
reason = g_strdup_printf ("%u OK", code);
else if (code < 300)
reason = g_strdup_printf ("%u Moved", code);
else
reason = g_strdup_printf ("%u Failed", code);
message = reason;
break;
}
}
g_debug ("%s: returning error: %u %s", self->logname, code, message);
if (cockpit_web_failure_resource)
{
input = g_resources_lookup_data (cockpit_web_failure_resource, G_RESOURCE_LOOKUP_FLAGS_NONE, &error);
if (input == NULL)
{
g_critical ("couldn't load: %s: %s", cockpit_web_failure_resource, error->message);
g_error_free (error);
}
}
if (!input)
input = g_bytes_new_static (default_failure_template, strlen (default_failure_template));
output = cockpit_template_expand (input, "@@", "@@", substitute_message, (gpointer) message);
g_bytes_unref (input);
/* If sending arbitrary messages, make sure they're escaped */
if (reason)
{
g_strstrip (reason);
escaped = g_uri_escape_string (reason, " :", FALSE);
message = escaped;
}
if (headers)
{
if (!g_hash_table_lookup (headers, "Content-Type"))
g_hash_table_replace (headers, g_strdup ("Content-Type"), g_strdup ("text/html; charset=utf8"));
cockpit_web_response_headers_full (self, code, message, -1, headers);
}
else
{
cockpit_web_response_headers (self, code, message, -1, "Content-Type", "text/html; charset=utf8", NULL);
}
for (l = output; l != NULL; l = g_list_next (l))
{
if (!cockpit_web_response_queue (self, l->data))
break;
}
if (l == NULL)
cockpit_web_response_complete (self);
g_list_free_full (output, (GDestroyNotify)g_bytes_unref);
g_free (reason);
g_free (escaped);
}
| 0 |
[
"CWE-1021"
] |
cockpit
|
8d9bc10d8128aae03dfde62fd00075fe492ead10
| 246,987,058,272,131,750,000,000,000,000,000,000,000 | 113 |
common: Restrict frame embedding to same origin
Declare `X-Frame-Options: sameorigin` [1] so that cockpit frames can
only be embedded into pages coming from the same origin. This is similar
to setting CORP in commit 2b38b8de92f9a (which applies to `<script>`,
`<img>`, etc.).
The main use case for embedding is to run cockpit-ws behind a reverse
proxy, while also serving other pages. Cross-origin embedding is
discouraged these days to prevent "clickjacking".
Cross-origin embedding already did not work in most cases: Frames would
always just show the login page. However, this looks confusing and is
unclean. With X-Frame-Options, the browser instead shows an explanatory
error page.
Mention the same origin requirement in the embedding documentation.
Fixes #16122
https://bugzilla.redhat.com/show_bug.cgi?id=1980688
CVE-2021-3660
[1] https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Frame-Options
|
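Given the GHashTable-based headers in the function above, the mitigation in this message is one extra response header. A hedged sketch in the same GLib style; where exactly cockpit adds it is an assumption:

if (!g_hash_table_lookup (headers, "X-Frame-Options"))
	g_hash_table_replace (headers, g_strdup ("X-Frame-Options"),
	                      g_strdup ("sameorigin"));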
CImg<T>& xyYtoRGB(const bool use_D65=true) {
return xyYtoXYZ().XYZtoRGB(use_D65);
}
| 0 |
[
"CWE-770"
] |
cimg
|
619cb58dd90b4e03ac68286c70ed98acbefd1c90
| 318,923,905,291,139,960,000,000,000,000,000,000,000 | 3 |
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file do not exceed file size.
|
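The check this message describes is plain arithmetic against the file length, done in an overflow-safe order. A hedged sketch with illustrative names, not CImg's actual code:

#include <stdint.h>

/* Reject a header whose claimed pixel data cannot fit in the file. */
static int dims_plausible(uint64_t w, uint64_t h, uint64_t bytes_pp,
                          uint64_t file_size)
{
	if (!w || !h || !bytes_pp)
		return 0;
	if (w > file_size / h)   /* w*h alone already exceeds the file */
		return 0;
	return w * h <= file_size / bytes_pp;
}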
bool SSL::isTLS() const
{
return secure_.get_connection().TLS_;
}
| 0 |
[
"CWE-254"
] |
mysql-server
|
e7061f7e5a96c66cb2e0bf46bec7f6ff35801a69
| 215,838,300,260,132,620,000,000,000,000,000,000,000 | 4 |
Bug #22738607: YASSL FUNCTION X509_NAME_GET_INDEX_BY_NID IS NOT WORKING AS EXPECTED.
|
XML_ExternalEntityParserCreate(XML_Parser oldParser,
const XML_Char *context,
const XML_Char *encodingName)
{
XML_Parser parser = oldParser;
DTD *newDtd = NULL;
DTD *oldDtd = _dtd;
XML_StartElementHandler oldStartElementHandler = startElementHandler;
XML_EndElementHandler oldEndElementHandler = endElementHandler;
XML_CharacterDataHandler oldCharacterDataHandler = characterDataHandler;
XML_ProcessingInstructionHandler oldProcessingInstructionHandler
= processingInstructionHandler;
XML_CommentHandler oldCommentHandler = commentHandler;
XML_StartCdataSectionHandler oldStartCdataSectionHandler
= startCdataSectionHandler;
XML_EndCdataSectionHandler oldEndCdataSectionHandler
= endCdataSectionHandler;
XML_DefaultHandler oldDefaultHandler = defaultHandler;
XML_UnparsedEntityDeclHandler oldUnparsedEntityDeclHandler
= unparsedEntityDeclHandler;
XML_NotationDeclHandler oldNotationDeclHandler = notationDeclHandler;
XML_StartNamespaceDeclHandler oldStartNamespaceDeclHandler
= startNamespaceDeclHandler;
XML_EndNamespaceDeclHandler oldEndNamespaceDeclHandler
= endNamespaceDeclHandler;
XML_NotStandaloneHandler oldNotStandaloneHandler = notStandaloneHandler;
XML_ExternalEntityRefHandler oldExternalEntityRefHandler
= externalEntityRefHandler;
XML_SkippedEntityHandler oldSkippedEntityHandler = skippedEntityHandler;
XML_UnknownEncodingHandler oldUnknownEncodingHandler
= unknownEncodingHandler;
XML_ElementDeclHandler oldElementDeclHandler = elementDeclHandler;
XML_AttlistDeclHandler oldAttlistDeclHandler = attlistDeclHandler;
XML_EntityDeclHandler oldEntityDeclHandler = entityDeclHandler;
XML_XmlDeclHandler oldXmlDeclHandler = xmlDeclHandler;
ELEMENT_TYPE * oldDeclElementType = declElementType;
void *oldUserData = userData;
void *oldHandlerArg = handlerArg;
XML_Bool oldDefaultExpandInternalEntities = defaultExpandInternalEntities;
XML_Parser oldExternalEntityRefHandlerArg = externalEntityRefHandlerArg;
#ifdef XML_DTD
enum XML_ParamEntityParsing oldParamEntityParsing = paramEntityParsing;
int oldInEntityValue = prologState.inEntityValue;
#endif
XML_Bool oldns_triplets = ns_triplets;
/* Note that the new parser shares the same hash secret as the old
parser, so that dtdCopy and copyEntityTable can lookup values
from hash tables associated with either parser without us having
to worry which hash secrets each table has.
*/
unsigned long oldhash_secret_salt = hash_secret_salt;
#ifdef XML_DTD
if (!context)
newDtd = oldDtd;
#endif /* XML_DTD */
/* Note that the magical uses of the pre-processor to make field
access look more like C++ require that `parser' be overwritten
here. This makes this function more painful to follow than it
would be otherwise.
*/
if (ns) {
XML_Char tmp[2];
*tmp = namespaceSeparator;
parser = parserCreate(encodingName, &parser->m_mem, tmp, newDtd);
}
else {
parser = parserCreate(encodingName, &parser->m_mem, NULL, newDtd);
}
if (!parser)
return NULL;
startElementHandler = oldStartElementHandler;
endElementHandler = oldEndElementHandler;
characterDataHandler = oldCharacterDataHandler;
processingInstructionHandler = oldProcessingInstructionHandler;
commentHandler = oldCommentHandler;
startCdataSectionHandler = oldStartCdataSectionHandler;
endCdataSectionHandler = oldEndCdataSectionHandler;
defaultHandler = oldDefaultHandler;
unparsedEntityDeclHandler = oldUnparsedEntityDeclHandler;
notationDeclHandler = oldNotationDeclHandler;
startNamespaceDeclHandler = oldStartNamespaceDeclHandler;
endNamespaceDeclHandler = oldEndNamespaceDeclHandler;
notStandaloneHandler = oldNotStandaloneHandler;
externalEntityRefHandler = oldExternalEntityRefHandler;
skippedEntityHandler = oldSkippedEntityHandler;
unknownEncodingHandler = oldUnknownEncodingHandler;
elementDeclHandler = oldElementDeclHandler;
attlistDeclHandler = oldAttlistDeclHandler;
entityDeclHandler = oldEntityDeclHandler;
xmlDeclHandler = oldXmlDeclHandler;
declElementType = oldDeclElementType;
userData = oldUserData;
if (oldUserData == oldHandlerArg)
handlerArg = userData;
else
handlerArg = parser;
if (oldExternalEntityRefHandlerArg != oldParser)
externalEntityRefHandlerArg = oldExternalEntityRefHandlerArg;
defaultExpandInternalEntities = oldDefaultExpandInternalEntities;
ns_triplets = oldns_triplets;
hash_secret_salt = oldhash_secret_salt;
parentParser = oldParser;
#ifdef XML_DTD
paramEntityParsing = oldParamEntityParsing;
prologState.inEntityValue = oldInEntityValue;
if (context) {
#endif /* XML_DTD */
if (!dtdCopy(oldParser, _dtd, oldDtd, &parser->m_mem)
|| !setContext(parser, context)) {
XML_ParserFree(parser);
return NULL;
}
processor = externalEntityInitProcessor;
#ifdef XML_DTD
}
else {
/* The DTD instance referenced by _dtd is shared between the document's
root parser and external PE parsers, therefore one does not need to
call setContext. In addition, one also *must* not call setContext,
because this would overwrite existing prefix->binding pointers in
_dtd with ones that get destroyed with the external PE parser.
This would leave those prefixes with dangling pointers.
*/
isParamEntity = XML_TRUE;
XmlPrologStateInitExternalEntity(&prologState);
processor = externalParEntInitProcessor;
}
#endif /* XML_DTD */
return parser;
}
| 0 |
[
"CWE-119"
] |
libexpat
|
ba0f9c3b40c264b8dd392e02a7a060a8fa54f032
| 139,739,746,221,415,620,000,000,000,000,000,000,000 | 135 |
CVE-2015-1283 Sanity check size calculations. r=peterv, a=abillings
https://sourceforge.net/p/expat/bugs/528/
|
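The expat message only says size calculations are sanity-checked; a minimal sketch of that kind of guard, with a hypothetical helper name rather than the actual expat diff, could look like this:
#include <limits.h>
#include <stddef.h>
/* Reject growth that would wrap the arithmetic before any allocation
 * happens (hypothetical helper; not the actual expat patch). */
static int
size_calc_is_sane(size_t current, size_t extra)
{
    return current <= (size_t)INT_MAX && extra <= (size_t)INT_MAX - current;
}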
directive_args(agooErr err, gqlDoc doc, gqlCobj obj, gqlField field, gqlSel sel, gqlValue result, int depth) {
gqlDir d = (gqlDir)obj->ptr;
const char *key = sel->name;
gqlArg a;
gqlValue list = gql_list_create(err, NULL);
gqlValue co;
struct _gqlField cf;
struct _gqlCobj child = { .clas = &input_value_class };
int d2 = depth + 1;
if (NULL != sel->alias) {
key = sel->alias;
}
if (NULL == list ||
AGOO_ERR_OK != gql_object_set(err, result, key, list)) {
return err->code;
}
memset(&cf, 0, sizeof(cf));
cf.type = sel->type->base;
for (a = d->args; NULL != a; a = a->next) {
if (NULL == (co = gql_object_create(err)) ||
AGOO_ERR_OK != gql_list_append(err, list, co)) {
return err->code;
}
child.ptr = a;
if (AGOO_ERR_OK != gql_eval_sels(err, doc, (gqlRef)&child, &cf, sel->sels, co, d2)) {
return err->code;
}
}
return AGOO_ERR_OK;
}
| 0 |
[
"CWE-703"
] |
agoo
|
ecb72fa66f3d846b724b4fa1d7648669cebabe0c
| 323,048,346,253,643,570,000,000,000,000,000,000,000 | 32 |
Protect against recursive fragment in schema
Really a error on the developers part but this catches the error.
|
static void prepare_dmar(struct dmar_drhd_rt *dmar_unit)
{
dev_dbg(DBG_LEVEL_IOMMU, "enable dmar uint [0x%x]", dmar_unit->drhd->reg_base_addr);
dmar_setup_interrupt(dmar_unit);
dmar_set_root_table(dmar_unit);
dmar_enable_qi(dmar_unit);
dmar_set_intr_remap_table(dmar_unit);
}
| 0 |
[
"CWE-120",
"CWE-787"
] |
acrn-hypervisor
|
25c0e3817eb332660dd63d1d4522e63dcc94e79a
| 120,458,011,876,852,480,000,000,000,000,000,000,000 | 8 |
hv: validate input for dmar_free_irte function
Malicious input 'index' may trigger buffer
overflow on array 'irte_alloc_bitmap[]'.
This patch validates that 'index' is
less than 'CONFIG_MAX_IR_ENTRIES' and also
removes an unnecessary check on 'index' in
the 'ptirq_free_irte()' function with this fix.
Tracked-On: #6132
Signed-off-by: Yonghua Huang <[email protected]>
|
geoArray *geoArrayCreate(void) {
geoArray *ga = zmalloc(sizeof(*ga));
/* It gets allocated on first geoArrayAppend() call. */
ga->array = NULL;
ga->buckets = 0;
ga->used = 0;
return ga;
}
| 0 |
[
"CWE-190"
] |
redis
|
f6a40570fa63d5afdd596c78083d754081d80ae3
| 31,725,093,490,406,370,000,000,000,000,000,000,000 | 8 |
Fix ziplist and listpack overflows and truncations (CVE-2021-32627, CVE-2021-32628)
- fix possible heap corruption in ziplist and listpack resulting from trying to
allocate more than the maximum size of 4GB.
- prevent ziplist (hash and zset) from reaching size of above 1GB, will be
converted to HT encoding, that's not a useful size.
- prevent listpack (stream) from reaching size of above 1GB.
- XADD will start a new listpack if the new record may cause the previous
listpack to grow over 1GB.
- XADD will respond with an error if a single stream record is over 1GB
- List type (ziplist in quicklist) was truncating strings that were over 4GB,
now it'll respond with an error.
|
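A sketch of the pre-allocation cap described in the message above, using a hypothetical helper; the 1GB threshold comes from the message itself.
#include <stddef.h>
#include <stdint.h>
#define SAFETY_LIMIT_BYTES (1024ULL * 1024 * 1024) /* 1GB, per the message */
/* Nonzero only if growing by `extra` neither wraps size_t nor crosses
 * the 1GB safety limit (hypothetical helper, not the redis diff). */
static int grow_is_safe(size_t cur, size_t extra)
{
    if (extra > SIZE_MAX - cur)
        return 0;
    return cur + extra <= SAFETY_LIMIT_BYTES;
}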
static int SetAltNamesFromDcert(Cert* cert, DecodedCert* decoded)
{
int ret = 0;
cert->altNamesSz = 0;
if (decoded->altNames) {
ret = FlattenAltNames(cert->altNames,
sizeof(cert->altNames), decoded->altNames);
if (ret >= 0) {
cert->altNamesSz = ret;
ret = 0;
}
}
return ret;
}
| 0 |
[
"CWE-125",
"CWE-345"
] |
wolfssl
|
f93083be72a3b3d956b52a7ec13f307a27b6e093
| 190,293,954,345,000,500,000,000,000,000,000,000,000 | 16 |
OCSP: improve handling of OCSP no check extension
|
Section* Binary::add_section(const Section& section) {
SegmentCommand* _TEXT_segment = get_segment("__TEXT");
if (_TEXT_segment == nullptr) {
LIEF_ERR("Unable to get '__TEXT' segment");
return nullptr;
}
return add_section(*_TEXT_segment, section);
}
| 0 |
[
"CWE-703"
] |
LIEF
|
7acf0bc4224081d4f425fcc8b2e361b95291d878
| 44,863,996,388,635,090,000,000,000,000,000,000,000 | 8 |
Resolve #764
|
*/
int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len)
{
unsigned int orig_len = dev->tx_queue_len;
int res;
if (new_len != (unsigned int)new_len)
return -ERANGE;
if (new_len != orig_len) {
dev->tx_queue_len = new_len;
res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev);
res = notifier_to_errno(res);
if (res)
goto err_rollback;
res = dev_qdisc_change_tx_queue_len(dev);
if (res)
goto err_rollback;
}
return 0;
err_rollback:
netdev_err(dev, "refused to change device tx_queue_len\n");
dev->tx_queue_len = orig_len;
return res;
| 0 |
[
"CWE-416"
] |
linux
|
a4270d6795b0580287453ea55974d948393e66ef
| 270,001,495,154,013,880,000,000,000,000,000,000,000 | 26 |
net-gro: fix use-after-free read in napi_gro_frags()
If a network driver provides napi_gro_frags() with an
skb with a page fragment of exactly 14 bytes, the call
to gro_pull_from_frag0() will 'consume' the fragment
by calling skb_frag_unref(skb, 0), and the page might
be freed and reused.
Reading eth->h_proto at the end of napi_frags_skb() might
read mangled data, or crash under specific debugging features.
BUG: KASAN: use-after-free in napi_frags_skb net/core/dev.c:5833 [inline]
BUG: KASAN: use-after-free in napi_gro_frags+0xc6f/0xd10 net/core/dev.c:5841
Read of size 2 at addr ffff88809366840c by task syz-executor599/8957
CPU: 1 PID: 8957 Comm: syz-executor599 Not tainted 5.2.0-rc1+ #32
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
Call Trace:
__dump_stack lib/dump_stack.c:77 [inline]
dump_stack+0x172/0x1f0 lib/dump_stack.c:113
print_address_description.cold+0x7c/0x20d mm/kasan/report.c:188
__kasan_report.cold+0x1b/0x40 mm/kasan/report.c:317
kasan_report+0x12/0x20 mm/kasan/common.c:614
__asan_report_load_n_noabort+0xf/0x20 mm/kasan/generic_report.c:142
napi_frags_skb net/core/dev.c:5833 [inline]
napi_gro_frags+0xc6f/0xd10 net/core/dev.c:5841
tun_get_user+0x2f3c/0x3ff0 drivers/net/tun.c:1991
tun_chr_write_iter+0xbd/0x156 drivers/net/tun.c:2037
call_write_iter include/linux/fs.h:1872 [inline]
do_iter_readv_writev+0x5f8/0x8f0 fs/read_write.c:693
do_iter_write fs/read_write.c:970 [inline]
do_iter_write+0x184/0x610 fs/read_write.c:951
vfs_writev+0x1b3/0x2f0 fs/read_write.c:1015
do_writev+0x15b/0x330 fs/read_write.c:1058
Fixes: a50e233c50db ("net-gro: restore frag0 optimization")
Signed-off-by: Eric Dumazet <[email protected]>
Reported-by: syzbot <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
void SSL_set_info_callback(SSL *ssl,
void (*cb)(const SSL *ssl,int type,int val))
{
ssl->info_callback=cb;
}
| 0 |
[] |
openssl
|
ee2ffc279417f15fef3b1073c7dc81a908991516
| 260,316,641,540,010,900,000,000,000,000,000,000,000 | 5 |
Add Next Protocol Negotiation.
|
pg_tzset(const char *name)
{
pg_tz_cache *tzp;
struct state tzstate;
char uppername[TZ_STRLEN_MAX + 1];
char canonname[TZ_STRLEN_MAX + 1];
char *p;
if (strlen(name) > TZ_STRLEN_MAX)
return NULL; /* not going to fit */
if (!timezone_cache)
if (!init_timezone_hashtable())
return NULL;
/*
* Upcase the given name to perform a case-insensitive hashtable search.
* (We could alternatively downcase it, but we prefer upcase so that we
* can get consistently upcased results from tzparse() in case the name is
* a POSIX-style timezone spec.)
*/
p = uppername;
while (*name)
*p++ = pg_toupper((unsigned char) *name++);
*p = '\0';
tzp = (pg_tz_cache *) hash_search(timezone_cache,
uppername,
HASH_FIND,
NULL);
if (tzp)
{
/* Timezone found in cache, nothing more to do */
return &tzp->tz;
}
/*
* "GMT" is always sent to tzparse(), as per discussion above.
*/
if (strcmp(uppername, "GMT") == 0)
{
if (tzparse(uppername, &tzstate, TRUE) != 0)
{
/* This really, really should not happen ... */
elog(ERROR, "could not initialize GMT time zone");
}
/* Use uppercase name as canonical */
strcpy(canonname, uppername);
}
else if (tzload(uppername, canonname, &tzstate, TRUE) != 0)
{
if (uppername[0] == ':' || tzparse(uppername, &tzstate, FALSE) != 0)
{
/* Unknown timezone. Fail our call instead of loading GMT! */
return NULL;
}
/* For POSIX timezone specs, use uppercase name as canonical */
strcpy(canonname, uppername);
}
/* Save timezone in the cache */
tzp = (pg_tz_cache *) hash_search(timezone_cache,
uppername,
HASH_ENTER,
NULL);
/* hash_search already copied uppername into the hash key */
strcpy(tzp->tz.TZname, canonname);
memcpy(&tzp->tz.state, &tzstate, sizeof(tzstate));
return &tzp->tz;
}
| 0 |
[
"CWE-119"
] |
postgres
|
01824385aead50e557ca1af28640460fa9877d51
| 308,637,342,185,944,220,000,000,000,000,000,000,000 | 72 |
Prevent potential overruns of fixed-size buffers.
Coverity identified a number of places in which it couldn't prove that a
string being copied into a fixed-size buffer would fit. We believe that
most, perhaps all of these are in fact safe, or are copying data that is
coming from a trusted source so that any overrun is not really a security
issue. Nonetheless it seems prudent to forestall any risk by using
strlcpy() and similar functions.
Fixes by Peter Eisentraut and Jozef Mlich based on Coverity reports.
In addition, fix a potential null-pointer-dereference crash in
contrib/chkpass. The crypt(3) function is defined to return NULL on
failure, but chkpass.c didn't check for that before using the result.
The main practical case in which this could be an issue is if libc is
configured to refuse to execute unapproved hashing algorithms (e.g.,
"FIPS mode"). This ideally should've been a separate commit, but
since it touches code adjacent to one of the buffer overrun changes,
I included it in this commit to avoid last-minute merge issues.
This issue was reported by Honza Horak.
Security: CVE-2014-0065 for buffer overruns, CVE-2014-0066 for crypt()
|
PosibErr<void> Config::replace(ParmStr key, ParmStr value)
{
Entry * entry = new Entry;
entry->key = key;
entry->value = value;
return set(entry);
}
| 0 |
[
"CWE-125"
] |
aspell
|
80fa26c74279fced8d778351cff19d1d8f44fe4e
| 197,723,795,243,106,300,000,000,000,000,000,000,000 | 7 |
Fix various bugs found by OSS-Fuzz.
|
RzAnalysisOp *op_cache_get(HtUP *cache, RzCore *core, ut64 addr) {
RzAnalysisOp *op = ht_up_find(cache, addr, NULL);
if (op) {
return op;
}
op = rz_core_analysis_op(core, addr, RZ_ANALYSIS_OP_MASK_BASIC | RZ_ANALYSIS_OP_MASK_VAL);
if (!ht_up_insert(cache, addr, op)) {
rz_analysis_op_free(op);
return NULL;
}
return op;
}
| 0 |
[
"CWE-703"
] |
rizin
|
6ce71d8aa3dafe3cdb52d5d72ae8f4b95916f939
| 14,852,682,081,186,584,000,000,000,000,000,000,000 | 12 |
Initialize retctx,ctx before freeing the inner elements
In rz_core_analysis_type_match the retctx structure was initialized on the
stack only after a "goto out_function", where a field of that structure
was freed. When the goto path is taken, the field is not properly
initialized, which can cause a crash of Rizin or have other effects.
Fixes: CVE-2021-4022
|
void OSD::handle_conf_change(const struct md_config_t *conf,
const std::set <std::string> &changed)
{
if (changed.count("osd_max_backfills")) {
service.local_reserver.set_max(cct->_conf->osd_max_backfills);
service.remote_reserver.set_max(cct->_conf->osd_max_backfills);
}
if (changed.count("osd_min_recovery_priority")) {
service.local_reserver.set_min_priority(cct->_conf->osd_min_recovery_priority);
service.remote_reserver.set_min_priority(cct->_conf->osd_min_recovery_priority);
}
if (changed.count("osd_max_trimming_pgs")) {
service.snap_reserver.set_max(cct->_conf->osd_max_trimming_pgs);
}
if (changed.count("osd_op_complaint_time") ||
changed.count("osd_op_log_threshold")) {
op_tracker.set_complaint_and_threshold(cct->_conf->osd_op_complaint_time,
cct->_conf->osd_op_log_threshold);
}
if (changed.count("osd_op_history_size") ||
changed.count("osd_op_history_duration")) {
op_tracker.set_history_size_and_duration(cct->_conf->osd_op_history_size,
cct->_conf->osd_op_history_duration);
}
if (changed.count("osd_op_history_slow_op_size") ||
changed.count("osd_op_history_slow_op_threshold")) {
op_tracker.set_history_slow_op_size_and_threshold(cct->_conf->osd_op_history_slow_op_size,
cct->_conf->osd_op_history_slow_op_threshold);
}
if (changed.count("osd_enable_op_tracker")) {
op_tracker.set_tracking(cct->_conf->osd_enable_op_tracker);
}
if (changed.count("osd_disk_thread_ioprio_class") ||
changed.count("osd_disk_thread_ioprio_priority")) {
set_disk_tp_priority();
}
if (changed.count("osd_map_cache_size")) {
service.map_cache.set_size(cct->_conf->osd_map_cache_size);
service.map_bl_cache.set_size(cct->_conf->osd_map_cache_size);
service.map_bl_inc_cache.set_size(cct->_conf->osd_map_cache_size);
}
if (changed.count("clog_to_monitors") ||
changed.count("clog_to_syslog") ||
changed.count("clog_to_syslog_level") ||
changed.count("clog_to_syslog_facility") ||
changed.count("clog_to_graylog") ||
changed.count("clog_to_graylog_host") ||
changed.count("clog_to_graylog_port") ||
changed.count("host") ||
changed.count("fsid")) {
update_log_config();
}
#ifdef HAVE_LIBFUSE
if (changed.count("osd_objectstore_fuse")) {
if (store) {
enable_disable_fuse(false);
}
}
#endif
if (changed.count("osd_recovery_delay_start")) {
service.defer_recovery(cct->_conf->osd_recovery_delay_start);
service.kick_recovery_queue();
}
if (changed.count("osd_client_message_cap")) {
uint64_t newval = cct->_conf->osd_client_message_cap;
Messenger::Policy pol = client_messenger->get_policy(entity_name_t::TYPE_CLIENT);
if (pol.throttler_messages && newval > 0) {
pol.throttler_messages->reset_max(newval);
}
}
if (changed.count("osd_client_message_size_cap")) {
uint64_t newval = cct->_conf->osd_client_message_size_cap;
Messenger::Policy pol = client_messenger->get_policy(entity_name_t::TYPE_CLIENT);
if (pol.throttler_bytes && newval > 0) {
pol.throttler_bytes->reset_max(newval);
}
}
check_config();
}
| 0 |
[
"CWE-287",
"CWE-284"
] |
ceph
|
5ead97120e07054d80623dada90a5cc764c28468
| 113,910,617,770,504,750,000,000,000,000,000,000,000 | 83 |
auth/cephx: add authorizer challenge
Allow the accepting side of a connection to reject an initial authorizer
with a random challenge. The connecting side then has to respond with an
updated authorizer proving they are able to decrypt the service's challenge
and that the new authorizer was produced for this specific connection
instance.
The accepting side requires this challenge and response unconditionally
if the client side advertises they have the feature bit. Servers wishing
to require this improved level of authentication simply have to require
the appropriate feature.
Signed-off-by: Sage Weil <[email protected]>
(cherry picked from commit f80b848d3f830eb6dba50123e04385173fa4540b)
# Conflicts:
# src/auth/Auth.h
# src/auth/cephx/CephxProtocol.cc
# src/auth/cephx/CephxProtocol.h
# src/auth/none/AuthNoneProtocol.h
# src/msg/Dispatcher.h
# src/msg/async/AsyncConnection.cc
- const_iterator
- ::decode vs decode
- AsyncConnection ctor arg noise
- get_random_bytes(), not cct->random()
|
void js_defproperty(js_State *J, int idx, const char *name, int atts)
{
jsR_defproperty(J, js_toobject(J, idx), name, atts, stackidx(J, -1), NULL, NULL);
js_pop(J, 1);
}
| 0 |
[
"CWE-476"
] |
mujs
|
77ab465f1c394bb77f00966cd950650f3f53cb24
| 267,612,571,937,703,300,000,000,000,000,000,000,000 | 5 |
Fix 697401: Error when dropping extra arguments to lightweight functions.
|
static uint16_t alloc_irtes(struct dmar_drhd_rt *dmar_unit, const uint16_t num)
{
uint16_t irte_idx;
uint64_t mask = (1UL << num) - 1U;
uint64_t test_mask;
ASSERT((bitmap_weight(num) == 1U) && (num <= 32U));
spinlock_obtain(&dmar_unit->lock);
for (irte_idx = 0U; irte_idx < CONFIG_MAX_IR_ENTRIES; irte_idx += num) {
test_mask = mask << (irte_idx & 0x3FU);
if ((dmar_unit->irte_alloc_bitmap[irte_idx >> 6U] & test_mask) == 0UL) {
dmar_unit->irte_alloc_bitmap[irte_idx >> 6U] |= test_mask;
break;
}
}
spinlock_release(&dmar_unit->lock);
return (irte_idx < CONFIG_MAX_IR_ENTRIES) ? irte_idx: INVALID_IRTE_ID;
}
| 0 |
[
"CWE-120",
"CWE-787"
] |
acrn-hypervisor
|
25c0e3817eb332660dd63d1d4522e63dcc94e79a
| 257,918,513,953,198,450,000,000,000,000,000,000,000 | 20 |
hv: validate input for dmar_free_irte function
Malicious input 'index' may trigger buffer
overflow on array 'irte_alloc_bitmap[]'.
This patch validates that 'index' is
less than 'CONFIG_MAX_IR_ENTRIES' and also
removes an unnecessary check on 'index' in
the 'ptirq_free_irte()' function with this fix.
Tracked-On: #6132
Signed-off-by: Yonghua Huang <[email protected]>
|
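The commit message implies a bounds check in dmar_free_irte() before the bitmap is touched; a simplified kernel-style sketch reusing the snippet's types (the function body is an assumption, not ACRN source):
static void free_irte_checked(struct dmar_drhd_rt *dmar_unit, uint16_t index)
{
    /* Reject out-of-range input before indexing irte_alloc_bitmap[]. */
    if (index >= CONFIG_MAX_IR_ENTRIES)
        return;
    spinlock_obtain(&dmar_unit->lock);
    dmar_unit->irte_alloc_bitmap[index >> 6U] &= ~(1UL << (index & 0x3FU));
    spinlock_release(&dmar_unit->lock);
}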
static char *parse_encoded_word(char *str, enum ContentEncoding *enc, char **charset,
size_t *charsetlen, char **text, size_t *textlen)
{
static struct Regex *re = NULL;
regmatch_t match[4];
size_t nmatch = 4;
if (re == NULL)
{
re = mutt_regex_compile("=\\?"
"([^][()<>@,;:\\\"/?. =]+)" /* charset */
"\\?"
"([qQbB])" /* encoding */
"\\?"
"([^?]+)" /* encoded text - we accept whitespace
as some mailers do that, see #1189. */
"\\?=",
REG_EXTENDED);
assert(re && "Something is wrong with your RE engine.");
}
int rc = regexec(re->regex, str, nmatch, match, 0);
if (rc != 0)
return NULL;
/* Charset */
*charset = str + match[1].rm_so;
*charsetlen = match[1].rm_eo - match[1].rm_so;
/* Encoding: either Q or B */
*enc = ((str[match[2].rm_so] == 'Q') || (str[match[2].rm_so] == 'q')) ?
ENCQUOTEDPRINTABLE :
ENCBASE64;
*text = str + match[3].rm_so;
*textlen = match[3].rm_eo - match[3].rm_so;
return (str + match[0].rm_so);
}
| 0 |
[
"CWE-120",
"CWE-119",
"CWE-787"
] |
neomutt
|
6f163e07ae68654d7ac5268cbb7565f6df79ad85
| 125,965,610,428,854,830,000,000,000,000,000,000,000 | 38 |
Check outbuf length in mutt_to_base64()
The obuf can be overflowed in auth_cram.c, and possibly auth_gss.c.
Thanks to Jeriko One for the bug report.
|
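The fix named in the message checks the output buffer length in mutt_to_base64(); a small sketch of the bound involved, with hypothetical helpers and assuming inlen stays far below SIZE_MAX:
#include <stddef.h>
/* Worst-case base64 output for inlen bytes, including the NUL. */
static size_t b64_bound(size_t inlen)
{
    return 4 * ((inlen + 2) / 3) + 1;
}
/* Encode only when the destination is provably large enough. */
static int b64_fits(size_t inlen, size_t outlen)
{
    return b64_bound(inlen) <= outlen;
}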
_pickle_Pickler___sizeof___impl(PicklerObject *self)
/*[clinic end generated code: output=106edb3123f332e1 input=8cbbec9bd5540d42]*/
{
Py_ssize_t res, s;
res = _PyObject_SIZE(Py_TYPE(self));
if (self->memo != NULL) {
res += sizeof(PyMemoTable);
res += self->memo->mt_allocated * sizeof(PyMemoEntry);
}
if (self->output_buffer != NULL) {
s = _PySys_GetSizeOf(self->output_buffer);
if (s == -1)
return -1;
res += s;
}
return res;
}
| 0 |
[
"CWE-190",
"CWE-369"
] |
cpython
|
a4ae828ee416a66d8c7bf5ee71d653c2cc6a26dd
| 56,811,668,722,626,940,000,000,000,000,000,000,000 | 18 |
closes bpo-34656: Avoid relying on signed overflow in _pickle memos. (GH-9261)
|
MOBI_RET mobi_parse_rawml_opt(MOBIRawml *rawml, const MOBIData *m, bool parse_toc, bool parse_dict, bool reconstruct) {
MOBI_RET ret;
if (m == NULL) {
debug_print("%s", "Mobi structure not initialized\n");
return MOBI_INIT_FAILED;
}
if (rawml == NULL) {
return MOBI_INIT_FAILED;
}
/* Get maximal size of text data */
const size_t maxlen = mobi_get_text_maxsize(m);
if (maxlen == MOBI_NOTSET) {
debug_print("%s", "Insane text length\n");
return MOBI_DATA_CORRUPT;
}
char *text = malloc(maxlen + 1);
if (text == NULL) {
debug_print("%s", "Memory allocation failed\n");
return MOBI_MALLOC_FAILED;
}
/* Extract text records, unpack, merge and copy it to text string */
size_t length = maxlen;
ret = mobi_get_rawml(m, text, &length);
if (ret != MOBI_SUCCESS) {
debug_print("%s", "Error parsing text\n");
free(text);
return ret;
}
if (mobi_exists_fdst(m)) {
/* Skip parsing if section count less or equal than 1 */
if (m->mh->fdst_section_count && *m->mh->fdst_section_count > 1) {
ret = mobi_parse_fdst(m, rawml);
if (ret != MOBI_SUCCESS) {
free(text);
return ret;
}
}
}
ret = mobi_reconstruct_flow(rawml, text, length);
free(text);
if (ret != MOBI_SUCCESS) {
return ret;
}
ret = mobi_reconstruct_resources(m, rawml);
if (ret != MOBI_SUCCESS) {
return ret;
}
const size_t offset = mobi_get_kf8offset(m);
/* skeleton index */
if (mobi_exists_skel_indx(m) && mobi_exists_frag_indx(m)) {
const size_t indx_record_number = *m->mh->skeleton_index + offset;
/* to be freed in mobi_free_rawml */
MOBIIndx *skel_meta = mobi_init_indx();
ret = mobi_parse_index(m, skel_meta, indx_record_number);
if (ret != MOBI_SUCCESS) {
return ret;
}
rawml->skel = skel_meta;
}
/* fragment index */
if (mobi_exists_frag_indx(m)) {
MOBIIndx *frag_meta = mobi_init_indx();
const size_t indx_record_number = *m->mh->fragment_index + offset;
ret = mobi_parse_index(m, frag_meta, indx_record_number);
if (ret != MOBI_SUCCESS) {
return ret;
}
rawml->frag = frag_meta;
}
if (parse_toc) {
/* guide index */
if (mobi_exists_guide_indx(m)) {
MOBIIndx *guide_meta = mobi_init_indx();
const size_t indx_record_number = *m->mh->guide_index + offset;
ret = mobi_parse_index(m, guide_meta, indx_record_number);
if (ret != MOBI_SUCCESS) {
return ret;
}
rawml->guide = guide_meta;
}
/* ncx index */
if (mobi_exists_ncx(m)) {
MOBIIndx *ncx_meta = mobi_init_indx();
const size_t indx_record_number = *m->mh->ncx_index + offset;
ret = mobi_parse_index(m, ncx_meta, indx_record_number);
if (ret != MOBI_SUCCESS) {
return ret;
}
rawml->ncx = ncx_meta;
}
}
if (parse_dict && mobi_is_dictionary(m)) {
/* orth */
MOBIIndx *orth_meta = mobi_init_indx();
size_t indx_record_number = *m->mh->orth_index + offset;
ret = mobi_parse_index(m, orth_meta, indx_record_number);
if (ret != MOBI_SUCCESS) {
return ret;
}
rawml->orth = orth_meta;
/* infl */
if (mobi_exists_infl(m)) {
MOBIIndx *infl_meta = mobi_init_indx();
indx_record_number = *m->mh->infl_index + offset;
ret = mobi_parse_index(m, infl_meta, indx_record_number);
if (ret != MOBI_SUCCESS) {
return ret;
}
rawml->infl = infl_meta;
}
}
ret = mobi_reconstruct_parts(rawml);
if (ret != MOBI_SUCCESS) {
return ret;
}
if (reconstruct) {
#ifdef USE_XMLWRITER
ret = mobi_build_opf(rawml, m);
if (ret != MOBI_SUCCESS) {
return ret;
}
#endif
ret = mobi_reconstruct_links(rawml);
if (ret != MOBI_SUCCESS) {
return ret;
}
if (mobi_is_kf8(m)) {
debug_print("Stripping unneeded tags%s", "\n");
ret = mobi_iterate_txtparts(rawml, mobi_strip_mobitags);
if (ret != MOBI_SUCCESS) {
return ret;
}
}
}
if (mobi_is_cp1252(m)) {
debug_print("Converting cp1252 to utf8%s", "\n");
ret = mobi_iterate_txtparts(rawml, mobi_markup_to_utf8);
if (ret != MOBI_SUCCESS) {
return ret;
}
}
return MOBI_SUCCESS;
}
| 0 |
[
"CWE-703",
"CWE-125"
] |
libmobi
|
fb1ab50e448ddbed746fd27ae07469bc506d838b
| 87,199,275,542,799,110,000,000,000,000,000,000,000 | 152 |
Fix array boundary check when parsing inflections which could result in buffer over-read with corrupt input
|
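A sketch of the array boundary check the message describes for inflection parsing, with hypothetical names rather than the libmobi diff:
#include <stddef.h>
/* True only if [offset, offset + entry_len) lies inside the record,
 * with no integer wrap (hypothetical helper). */
static int entry_in_bounds(size_t offset, size_t entry_len, size_t record_len)
{
    if (offset > record_len)
        return 0;
    return entry_len <= record_len - offset;
}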
int enc_untrusted_sigprocmask(int how, const sigset_t *set, sigset_t *oldset) {
klinux_sigset_t klinux_set;
if (!TokLinuxSigset(set, &klinux_set)) {
errno = EINVAL;
return -1;
}
int klinux_how = TokLinuxSigMaskAction(how);
if (klinux_how == -1) {
errno = EINVAL;
return -1;
}
MessageWriter input;
input.Push<int>(klinux_how);
input.Push<klinux_sigset_t>(klinux_set);
MessageReader output;
const auto status = NonSystemCallDispatcher(
::asylo::host_call::kSigprocmaskHandler, &input, &output);
CheckStatusAndParamCount(status, output, "enc_untrusted_sigprocmask", 3);
int result = output.next<int>();
int klinux_errno = output.next<int>();
// sigprocmask() returns -1 on failure, with errno set to indicate the cause
// of the error.
if (result == -1) {
errno = FromkLinuxErrorNumber(klinux_errno);
return result;
}
klinux_sigset_t klinux_oldset = output.next<klinux_sigset_t>();
if (oldset != nullptr) {
if (!FromkLinuxSigset(&klinux_oldset, oldset)) {
errno = EINVAL;
return -1;
}
}
return result;
}
| 0 |
[
"CWE-125"
] |
asylo
|
b1d120a2c7d7446d2cc58d517e20a1b184b82200
| 1,062,303,577,550,947,000,000,000,000,000,000,000 | 39 |
Check for return size in enc_untrusted_read
Check that the returned size does not exceed the requested size. The returned result and
content still cannot be trusted, but it's expected behavior when not
using a secure file system.
PiperOrigin-RevId: 333827386
Change-Id: I0bdec0aec9356ea333dc8c647eba5d2772875f29
|
static const EVP_MD *nid_to_evpmd(int nid)
{
switch (nid) {
case NID_X9_62_prime256v1:
return EVP_sha256();
case NID_secp384r1:
return EVP_sha384();
case NID_secp521r1:
return EVP_sha512();
default:
return NULL;
}
return NULL;
}
| 0 |
[
"CWE-310"
] |
libssh
|
e99246246b4061f7e71463f8806b9dcad65affa0
| 5,794,060,601,551,204,000,000,000,000,000,000,000 | 15 |
security: fix for vulnerability CVE-2014-0017
When accepting a new connection, a forking server based on libssh forks
and the child process handles the request. The RAND_bytes() function of
openssl doesn't reset its state after the fork, but simply adds the
current process id (getpid) to the PRNG state, which is not guaranteed
to be unique.
This can cause several children to end up with same PRNG state which is
a security issue.
|
outputBits (int nBits, Int64 bits, Int64 &c, int &lc, char *&out)
{
c <<= nBits;
lc += nBits;
c |= bits;
while (lc >= 8)
*out++ = (c >> (lc -= 8));
}
| 0 |
[
"CWE-125"
] |
openexr
|
e79d2296496a50826a15c667bf92bdc5a05518b4
| 281,129,906,196,380,000,000,000,000,000,000,000,000 | 10 |
fix memory leaks and invalid memory accesses
Signed-off-by: Peter Hillman <[email protected]>
|
static int already_uses(struct module *a, struct module *b)
{
struct module_use *use;
list_for_each_entry(use, &b->source_list, source_list) {
if (use->source == a) {
pr_debug("%s uses %s!\n", a->name, b->name);
return 1;
}
}
pr_debug("%s does not use %s!\n", a->name, b->name);
return 0;
}
| 0 |
[
"CWE-362",
"CWE-347"
] |
linux
|
0c18f29aae7ce3dadd26d8ee3505d07cc982df75
| 49,892,566,371,696,000,000,000,000,000,000,000,000 | 13 |
module: limit enabling module.sig_enforce
Irrespective as to whether CONFIG_MODULE_SIG is configured, specifying
"module.sig_enforce=1" on the boot command line sets "sig_enforce".
Only allow "sig_enforce" to be set when CONFIG_MODULE_SIG is configured.
This patch makes the presence of /sys/module/module/parameters/sig_enforce
dependent on CONFIG_MODULE_SIG=y.
Fixes: fda784e50aac ("module: export module signature enforcement status")
Reported-by: Nayna Jain <[email protected]>
Tested-by: Mimi Zohar <[email protected]>
Tested-by: Jessica Yu <[email protected]>
Signed-off-by: Mimi Zohar <[email protected]>
Signed-off-by: Jessica Yu <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
GF_Box *tsro_box_new()
{
ISOM_DECL_BOX_ALLOC(GF_TimeOffHintEntryBox, GF_ISOM_BOX_TYPE_TSRO);
return (GF_Box *)tmp;
}
| 0 |
[
"CWE-787"
] |
gpac
|
77510778516803b7f7402d7423c6d6bef50254c3
| 25,351,831,955,503,436,000,000,000,000,000,000,000 | 5 |
fixed #2255
|
//! Return specified range of image columns \inplace.
CImg<T> get_columns(const int x0, const int x1) const {
return get_crop(x0,0,0,0,x1,height() - 1,depth() - 1,spectrum() - 1);
| 0 |
[
"CWE-125"
] |
CImg
|
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
| 41,482,716,884,650,440,000,000,000,000,000,000,000 | 3 |
Fix other issues in 'CImg<T>::load_bmp()'.
|
int _q_unlink(const char *pathname)
{
#ifdef _WIN32
return _unlink(pathname);
#endif
return unlink(pathname);
}
| 0 |
[
"CWE-94"
] |
qdecoder
|
ce7c8a7ac450a823a11b06508ef1eb7441241f81
| 248,506,426,595,763,720,000,000,000,000,000,000,000 | 7 |
security update: add check on improperly encoded input
|
get_username(cupsd_client_t *con) /* I - Connection */
{
ipp_attribute_t *attr; /* Attribute */
if (con->username[0])
return (con->username);
else if ((attr = ippFindAttribute(con->request, "requesting-user-name",
IPP_TAG_NAME)) != NULL)
return (attr->values[0].string.text);
else
return ("anonymous");
}
| 0 |
[
"CWE-20"
] |
cups
|
49fa4983f25b64ec29d548ffa3b9782426007df3
| 189,695,535,784,735,400,000,000,000,000,000,000,000 | 13 |
DBUS notifications could crash the scheduler (Issue #5143)
- scheduler/ipp.c: Make sure requesting-user-name string is valid UTF-8.
|
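The fix summary says requesting-user-name must be valid UTF-8 before use; a minimal structural validator (hypothetical helper that skips overlong-form and surrogate rejection, not the actual CUPS code) could read:
/* Accept only well-formed UTF-8 lead/continuation byte sequences. */
static int is_valid_utf8(const unsigned char *s)
{
    while (*s) {
        int cont;
        if (*s < 0x80)
            cont = 0;
        else if ((*s & 0xE0) == 0xC0)
            cont = 1;
        else if ((*s & 0xF0) == 0xE0)
            cont = 2;
        else if ((*s & 0xF8) == 0xF0)
            cont = 3;
        else
            return 0; /* invalid lead byte */
        for (s++; cont > 0; cont--, s++)
            if ((*s & 0xC0) != 0x80)
                return 0; /* missing continuation byte */
    }
    return 1;
}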
static int hidpp20_battery_event(struct hidpp_device *hidpp,
u8 *data, int size)
{
struct hidpp_report *report = (struct hidpp_report *)data;
int status, capacity, next_capacity, level;
bool changed;
if (report->fap.feature_index != hidpp->battery.feature_index ||
report->fap.funcindex_clientid != EVENT_BATTERY_LEVEL_STATUS_BROADCAST)
return 0;
status = hidpp20_batterylevel_map_status_capacity(report->fap.params,
&capacity,
&next_capacity,
&level);
/* the capacity is only available when discharging or full */
hidpp->battery.online = status == POWER_SUPPLY_STATUS_DISCHARGING ||
status == POWER_SUPPLY_STATUS_FULL;
changed = capacity != hidpp->battery.capacity ||
level != hidpp->battery.level ||
status != hidpp->battery.status;
if (changed) {
hidpp->battery.level = level;
hidpp->battery.capacity = capacity;
hidpp->battery.status = status;
if (hidpp->battery.ps)
power_supply_changed(hidpp->battery.ps);
}
return 0;
}
| 0 |
[
"CWE-787"
] |
linux
|
d9d4b1e46d9543a82c23f6df03f4ad697dab361b
| 16,812,083,596,031,888,000,000,000,000,000,000,000 | 34 |
HID: Fix assumption that devices have inputs
The syzbot fuzzer found a slab-out-of-bounds write bug in the hid-gaff
driver. The problem is caused by the driver's assumption that the
device must have an input report. While this will be true for all
normal HID input devices, a suitably malicious device can violate the
assumption.
The same assumption is present in over a dozen other HID drivers.
This patch fixes them by checking that the list of hid_inputs for the
hid_device is nonempty before allowing it to be used.
Reported-and-tested-by: [email protected]
Signed-off-by: Alan Stern <[email protected]>
CC: <[email protected]>
Signed-off-by: Benjamin Tissoires <[email protected]>
|
xcf_init (Gimp *gimp)
{
GimpPlugInProcedure *proc;
GFile *file;
GimpProcedure *procedure;
g_return_if_fail (GIMP_IS_GIMP (gimp));
/* So this is sort of a hack, but it's better than it was before. To
* do this right there would be a file load-save handler type and
* the whole interface would change but there isn't, and currently
* the plug-in structure contains all the load-save info, so it
* makes sense to use that for the XCF load/save handlers, even
* though they are internal. The only thing it requires is using a
* PlugInProcDef struct. -josh
*/
/* gimp-xcf-save */
file = g_file_new_for_path ("gimp-xcf-save");
procedure = gimp_plug_in_procedure_new (GIMP_PLUGIN, file);
g_object_unref (file);
procedure->proc_type = GIMP_INTERNAL;
procedure->marshal_func = xcf_save_invoker;
proc = GIMP_PLUG_IN_PROCEDURE (procedure);
proc->menu_label = g_strdup (N_("GIMP XCF image"));
gimp_plug_in_procedure_set_icon (proc, GIMP_ICON_TYPE_ICON_NAME,
(const guint8 *) "gimp-wilber",
strlen ("gimp-wilber") + 1);
gimp_plug_in_procedure_set_image_types (proc, "RGB*, GRAY*, INDEXED*");
gimp_plug_in_procedure_set_file_proc (proc, "xcf", "", NULL);
gimp_plug_in_procedure_set_mime_types (proc, "image/x-xcf");
gimp_plug_in_procedure_set_handles_uri (proc);
gimp_object_set_static_name (GIMP_OBJECT (procedure), "gimp-xcf-save");
gimp_procedure_set_static_strings (procedure,
"gimp-xcf-save",
"Saves file in the .xcf file format",
"The XCF file format has been designed "
"specifically for loading and saving "
"tiled and layered images in GIMP. "
"This procedure will save the specified "
"image in the xcf file format.",
"Spencer Kimball & Peter Mattis",
"Spencer Kimball & Peter Mattis",
"1995-1996",
NULL);
gimp_procedure_add_argument (procedure,
gimp_param_spec_int32 ("dummy-param",
"Dummy Param",
"Dummy parameter",
G_MININT32, G_MAXINT32, 0,
GIMP_PARAM_READWRITE));
gimp_procedure_add_argument (procedure,
gimp_param_spec_image_id ("image",
"Image",
"Input image",
gimp, FALSE,
GIMP_PARAM_READWRITE));
gimp_procedure_add_argument (procedure,
gimp_param_spec_drawable_id ("drawable",
"Drawable",
"Active drawable of input image",
gimp, TRUE,
GIMP_PARAM_READWRITE));
gimp_procedure_add_argument (procedure,
gimp_param_spec_string ("filename",
"Filename",
"The name of the file "
"to save the image in, "
"in URI format and "
"UTF-8 encoding",
TRUE, FALSE, TRUE,
NULL,
GIMP_PARAM_READWRITE));
gimp_procedure_add_argument (procedure,
gimp_param_spec_string ("raw-filename",
"Raw filename",
"The basename of the "
"file, in UTF-8",
FALSE, FALSE, TRUE,
NULL,
GIMP_PARAM_READWRITE));
gimp_plug_in_manager_add_procedure (gimp->plug_in_manager, proc);
g_object_unref (procedure);
/* gimp-xcf-load */
file = g_file_new_for_path ("gimp-xcf-load");
procedure = gimp_plug_in_procedure_new (GIMP_PLUGIN, file);
g_object_unref (file);
procedure->proc_type = GIMP_INTERNAL;
procedure->marshal_func = xcf_load_invoker;
proc = GIMP_PLUG_IN_PROCEDURE (procedure);
proc->menu_label = g_strdup (N_("GIMP XCF image"));
gimp_plug_in_procedure_set_icon (proc, GIMP_ICON_TYPE_ICON_NAME,
(const guint8 *) "gimp-wilber",
strlen ("gimp-wilber") + 1);
gimp_plug_in_procedure_set_image_types (proc, NULL);
gimp_plug_in_procedure_set_file_proc (proc, "xcf", "",
"0,string,gimp\\040xcf\\040");
gimp_plug_in_procedure_set_mime_types (proc, "image/x-xcf");
gimp_plug_in_procedure_set_handles_uri (proc);
gimp_object_set_static_name (GIMP_OBJECT (procedure), "gimp-xcf-load");
gimp_procedure_set_static_strings (procedure,
"gimp-xcf-load",
"Loads file saved in the .xcf file format",
"The XCF file format has been designed "
"specifically for loading and saving "
"tiled and layered images in GIMP. "
"This procedure will load the specified "
"file.",
"Spencer Kimball & Peter Mattis",
"Spencer Kimball & Peter Mattis",
"1995-1996",
NULL);
gimp_procedure_add_argument (procedure,
gimp_param_spec_int32 ("dummy-param",
"Dummy Param",
"Dummy parameter",
G_MININT32, G_MAXINT32, 0,
GIMP_PARAM_READWRITE));
gimp_procedure_add_argument (procedure,
gimp_param_spec_string ("filename",
"Filename",
"The name of the file "
"to load, in the "
"on-disk character "
"set and encoding",
TRUE, FALSE, TRUE,
NULL,
GIMP_PARAM_READWRITE));
gimp_procedure_add_argument (procedure,
gimp_param_spec_string ("raw-filename",
"Raw filename",
"The basename of the "
"file, in UTF-8",
FALSE, FALSE, TRUE,
NULL,
GIMP_PARAM_READWRITE));
gimp_procedure_add_return_value (procedure,
gimp_param_spec_image_id ("image",
"Image",
"Output image",
gimp, FALSE,
GIMP_PARAM_READWRITE));
gimp_plug_in_manager_add_procedure (gimp->plug_in_manager, proc);
g_object_unref (procedure);
}
| 0 |
[
"CWE-125"
] |
gimp
|
702c4227e8b6169f781e4bb5ae4b5733f51ab126
| 35,012,588,253,195,755,000,000,000,000,000,000,000 | 155 |
790783 - buffer overread in XCF parser if version field has no null terminator
Check for the presence of '\0' before using atoi() on the version
string. Patch slightly modified (mitch).
|
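Per the message, the parser must verify the version field is NUL-terminated before atoi(); a hedged sketch with assumed names:
#include <stdlib.h>
#include <string.h>
/* Return -1 instead of letting atoi() read past an unterminated field. */
static int parse_xcf_version(const char *field, size_t field_len)
{
    if (memchr(field, '\0', field_len) == NULL)
        return -1;
    return atoi(field);
}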
TEST_F(QueryPlannerTest, SortLimit) {
// Negative limit indicates hard limit - see query_request.cpp
runQuerySortProjSkipNToReturn(BSONObj(), fromjson("{a: 1}"), BSONObj(), 0, -3);
assertNumSolutions(1U);
assertSolutionExists(
"{sort: {pattern: {a: 1}, limit: 3, node: {sortKeyGen: "
"{node: {cscan: {dir: 1}}}}}}");
}
| 0 |
[] |
mongo
|
ee97c0699fd55b498310996ee002328e533681a3
| 302,174,331,954,331,300,000,000,000,000,000,000,000 | 8 |
SERVER-36993 Fix crash due to incorrect $or pushdown for indexed $expr.
|
static inline void hid_map_usage_clear(struct hid_input *hidinput,
struct hid_usage *usage, unsigned long **bit, int *max,
__u8 type, __u16 c)
{
hid_map_usage(hidinput, usage, bit, max, type, c);
clear_bit(c, *bit);
}
| 1 |
[
"CWE-787"
] |
linux
|
35556bed836f8dc07ac55f69c8d17dce3e7f0e25
| 251,215,353,331,880,040,000,000,000,000,000,000,000 | 7 |
HID: core: Sanitize event code and type when mapping input
When calling into hid_map_usage(), the passed event code is
blindly stored as is, even if it doesn't fit in the associated bitmap.
This event code can come from a variety of sources, including devices
masquerading as input devices, only a bit more "programmable".
Instead of taking the event code at face value, check that it actually
fits the corresponding bitmap, and if it doesn't:
- spit out a warning so that we know which device is acting up
- NULLify the bitmap pointer so that we catch unexpected uses
Code paths that can make use of untrusted inputs can now check
that the mapping was indeed correct and bail out if not.
Cc: [email protected]
Signed-off-by: Marc Zyngier <[email protected]>
Signed-off-by: Benjamin Tissoires <[email protected]>
|
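Since this record is the vulnerable version (target = 1), the commit message outlines the fix: bound-check the code against the bitmap for its type, warn, and NULLify the bitmap pointer on failure. A simplified kernel-style sketch (standard input-event macros assumed; not the literal hid-core diff):
static void hid_map_usage_checked(struct hid_input *hidinput,
        struct hid_usage *usage, unsigned long **bit, int *max,
        __u8 type, unsigned int c)
{
    unsigned int limit;
    switch (type) {
    case EV_ABS: limit = ABS_MAX; break;
    case EV_REL: limit = REL_MAX; break;
    case EV_KEY: limit = KEY_MAX; break;
    case EV_LED: limit = LED_MAX; break;
    default:     limit = 0;       break;
    }
    if (c > limit) {
        /* spit out a warning so we know which device is acting up */
        pr_warn("%s: unsupported code %u for type %u\n", __func__, c, type);
        /* NULLify so unexpected later uses are caught, per the message */
        *bit = NULL;
        return;
    }
    hid_map_usage(hidinput, usage, bit, max, type, c);
}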
static void ax_bump(struct mkiss *ax)
{
struct sk_buff *skb;
int count;
spin_lock_bh(&ax->buflock);
if (ax->rbuff[0] > 0x0f) {
if (ax->rbuff[0] & 0x80) {
if (check_crc_16(ax->rbuff, ax->rcount) < 0) {
ax->dev->stats.rx_errors++;
spin_unlock_bh(&ax->buflock);
return;
}
if (ax->crcmode != CRC_MODE_SMACK && ax->crcauto) {
printk(KERN_INFO
"mkiss: %s: Switching to crc-smack\n",
ax->dev->name);
ax->crcmode = CRC_MODE_SMACK;
}
ax->rcount -= 2;
*ax->rbuff &= ~0x80;
} else if (ax->rbuff[0] & 0x20) {
if (check_crc_flex(ax->rbuff, ax->rcount) < 0) {
ax->dev->stats.rx_errors++;
spin_unlock_bh(&ax->buflock);
return;
}
if (ax->crcmode != CRC_MODE_FLEX && ax->crcauto) {
printk(KERN_INFO
"mkiss: %s: Switching to crc-flexnet\n",
ax->dev->name);
ax->crcmode = CRC_MODE_FLEX;
}
ax->rcount -= 2;
/*
* dl9sau bugfix: the trailing two bytes of flexnet crc
* will not be passed to the kernel. thus we have to
* correct the kissparm signature, because it indicates
* a crc but there's none
*/
*ax->rbuff &= ~0x20;
}
}
count = ax->rcount;
if ((skb = dev_alloc_skb(count)) == NULL) {
printk(KERN_ERR "mkiss: %s: memory squeeze, dropping packet.\n",
ax->dev->name);
ax->dev->stats.rx_dropped++;
spin_unlock_bh(&ax->buflock);
return;
}
skb_put_data(skb, ax->rbuff, count);
skb->protocol = ax25_type_trans(skb, ax->dev);
netif_rx(skb);
ax->dev->stats.rx_packets++;
ax->dev->stats.rx_bytes += count;
spin_unlock_bh(&ax->buflock);
}
| 0 |
[
"CWE-416"
] |
linux
|
b2f37aead1b82a770c48b5d583f35ec22aabb61e
| 243,371,297,393,827,780,000,000,000,000,000,000,000 | 63 |
hamradio: improve the incomplete fix to avoid NPD
The previous commit 3e0588c291d6 ("hamradio: defer ax25 kfree after
unregister_netdev") reordered the kfree operations and the unregister_netdev
operation to prevent a UAF.
This commit improves the previous one by also deferring the nullify of
the ax->tty pointer. Otherwise, a NULL pointer dereference bug occurs.
Partial of the stack trace is shown below.
BUG: kernel NULL pointer dereference, address: 0000000000000538
RIP: 0010:ax_xmit+0x1f9/0x400
...
Call Trace:
dev_hard_start_xmit+0xec/0x320
sch_direct_xmit+0xea/0x240
__qdisc_run+0x166/0x5c0
__dev_queue_xmit+0x2c7/0xaf0
ax25_std_establish_data_link+0x59/0x60
ax25_connect+0x3a0/0x500
? security_socket_connect+0x2b/0x40
__sys_connect+0x96/0xc0
? __hrtimer_init+0xc0/0xc0
? common_nsleep+0x2e/0x50
? switch_fpu_return+0x139/0x1a0
__x64_sys_connect+0x11/0x20
do_syscall_64+0x33/0x40
entry_SYSCALL_64_after_hwframe+0x44/0xa9
The crash point is shown below
static void ax_encaps(...) {
...
set_bit(TTY_DO_WRITE_WAKEUP, &ax->tty->flags); // ax->tty = NULL!
...
}
By placing the nullify action after the unregister_netdev, the ax->tty
pointer won't be assigned NULL until the net_device framework layer is fully
synchronized.
Signed-off-by: Lin Ma <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
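A fragment sketching the ordering the improved fix enforces: quiesce the transmit path via unregister_netdev() before clearing ax->tty (simplified teardown, not the literal mkiss.c diff):
/* Teardown order matters: ax_xmit() may still run until the device is
 * unregistered, so ax->tty must stay valid until then. */
unregister_netdev(ax->dev); /* synchronizes with in-flight ax_xmit() */
ax->tty = NULL;             /* now safe: no xmit can dereference it */
free_netdev(ax->dev);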
static void alg_do_release(const struct af_alg_type *type, void *private)
{
if (!type)
return;
type->release(private);
module_put(type->owner);
}
| 0 |
[
"CWE-416"
] |
linux
|
9060cb719e61b685ec0102574e10337fa5f445ea
| 257,864,738,880,712,970,000,000,000,000,000,000,000 | 8 |
net: crypto set sk to NULL when af_alg_release.
KASAN has found a use-after-free in sockfs_setattr.
The existing commit 6d8c50dcb029 ("socket: close race condition between sock_close()
and sockfs_setattr()") was meant to fix this similar issue, but it seems to ignore
that the crypto module forgets to set the sk to NULL after af_alg_release.
KASAN report details as below:
BUG: KASAN: use-after-free in sockfs_setattr+0x120/0x150
Write of size 4 at addr ffff88837b956128 by task syz-executor0/4186
CPU: 2 PID: 4186 Comm: syz-executor0 Not tainted xxx + #1
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS
1.10.2-1ubuntu1 04/01/2014
Call Trace:
dump_stack+0xca/0x13e
print_address_description+0x79/0x330
? vprintk_func+0x5e/0xf0
kasan_report+0x18a/0x2e0
? sockfs_setattr+0x120/0x150
sockfs_setattr+0x120/0x150
? sock_register+0x2d0/0x2d0
notify_change+0x90c/0xd40
? chown_common+0x2ef/0x510
chown_common+0x2ef/0x510
? chmod_common+0x3b0/0x3b0
? __lock_is_held+0xbc/0x160
? __sb_start_write+0x13d/0x2b0
? __mnt_want_write+0x19a/0x250
do_fchownat+0x15c/0x190
? __ia32_sys_chmod+0x80/0x80
? trace_hardirqs_on_thunk+0x1a/0x1c
__x64_sys_fchownat+0xbf/0x160
? lockdep_hardirqs_on+0x39a/0x5e0
do_syscall_64+0xc8/0x580
entry_SYSCALL_64_after_hwframe+0x49/0xbe
RIP: 0033:0x462589
Code: f7 d8 64 89 02 b8 ff ff ff ff c3 66 0f 1f 44 00 00 48 89 f8 48 89
f7 48 89 d6 48 89
ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3
48 c7 c1 bc ff ff
ff f7 d8 64 89 01 48
RSP: 002b:00007fb4b2c83c58 EFLAGS: 00000246 ORIG_RAX: 0000000000000104
RAX: ffffffffffffffda RBX: 000000000072bfa0 RCX: 0000000000462589
RDX: 0000000000000000 RSI: 00000000200000c0 RDI: 0000000000000007
RBP: 0000000000000005 R08: 0000000000001000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000246 R12: 00007fb4b2c846bc
R13: 00000000004bc733 R14: 00000000006f5138 R15: 00000000ffffffff
Allocated by task 4185:
kasan_kmalloc+0xa0/0xd0
__kmalloc+0x14a/0x350
sk_prot_alloc+0xf6/0x290
sk_alloc+0x3d/0xc00
af_alg_accept+0x9e/0x670
hash_accept+0x4a3/0x650
__sys_accept4+0x306/0x5c0
__x64_sys_accept4+0x98/0x100
do_syscall_64+0xc8/0x580
entry_SYSCALL_64_after_hwframe+0x49/0xbe
Freed by task 4184:
__kasan_slab_free+0x12e/0x180
kfree+0xeb/0x2f0
__sk_destruct+0x4e6/0x6a0
sk_destruct+0x48/0x70
__sk_free+0xa9/0x270
sk_free+0x2a/0x30
af_alg_release+0x5c/0x70
__sock_release+0xd3/0x280
sock_close+0x1a/0x20
__fput+0x27f/0x7f0
task_work_run+0x136/0x1b0
exit_to_usermode_loop+0x1a7/0x1d0
do_syscall_64+0x461/0x580
entry_SYSCALL_64_after_hwframe+0x49/0xbe
Syzkaller reproducer:
r0 = perf_event_open(&(0x7f0000000000)={0x0, 0x70, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, @perf_config_ext}, 0x0, 0x0,
0xffffffffffffffff, 0x0)
r1 = socket$alg(0x26, 0x5, 0x0)
getrusage(0x0, 0x0)
bind(r1, &(0x7f00000001c0)=@alg={0x26, 'hash\x00', 0x0, 0x0,
'sha256-ssse3\x00'}, 0x80)
r2 = accept(r1, 0x0, 0x0)
r3 = accept4$unix(r2, 0x0, 0x0, 0x0)
r4 = dup3(r3, r0, 0x0)
fchownat(r4, &(0x7f00000000c0)='\x00', 0x0, 0x0, 0x1000)
Fixes: 6d8c50dcb029 ("socket: close race condition between sock_close() and sockfs_setattr()")
Signed-off-by: Mao Wenan <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
*/
PHP_MINIT_FUNCTION(spl)
{
PHP_MINIT(spl_exceptions)(INIT_FUNC_ARGS_PASSTHRU);
PHP_MINIT(spl_iterators)(INIT_FUNC_ARGS_PASSTHRU);
PHP_MINIT(spl_array)(INIT_FUNC_ARGS_PASSTHRU);
PHP_MINIT(spl_directory)(INIT_FUNC_ARGS_PASSTHRU);
PHP_MINIT(spl_dllist)(INIT_FUNC_ARGS_PASSTHRU);
PHP_MINIT(spl_heap)(INIT_FUNC_ARGS_PASSTHRU);
PHP_MINIT(spl_fixedarray)(INIT_FUNC_ARGS_PASSTHRU);
PHP_MINIT(spl_observer)(INIT_FUNC_ARGS_PASSTHRU);
return SUCCESS;
| 0 |
[] |
php-src
|
b584b513983319be170f02828bc7c12850b40320
| 331,999,189,977,317,540,000,000,000,000,000,000,000 | 13 |
Fixed bug #70290 (Null pointer deref (segfault) in spl_autoload via ob_start)
|
void read_coding_quadtree(thread_context* tctx,
int x0, int y0,
int log2CbSize,
int ctDepth)
{
logtrace(LogSlice,"- read_coding_quadtree %d;%d cbsize:%d depth:%d POC:%d\n",x0,y0,1<<log2CbSize,ctDepth,tctx->img->PicOrderCntVal);
de265_image* img = tctx->img;
const seq_parameter_set& sps = img->get_sps();
const pic_parameter_set& pps = img->get_pps();
int split_flag;
// We only send a split flag if CU is larger than minimum size and
// completely contained within the image area.
// If it is partly outside the image area and not at minimum size,
// it is split. If already at minimum size, it is not split further.
if (x0+(1<<log2CbSize) <= sps.pic_width_in_luma_samples &&
y0+(1<<log2CbSize) <= sps.pic_height_in_luma_samples &&
log2CbSize > sps.Log2MinCbSizeY) {
split_flag = decode_split_cu_flag(tctx, x0,y0, ctDepth);
} else {
if (log2CbSize > sps.Log2MinCbSizeY) { split_flag=1; }
else { split_flag=0; }
}
if (pps.cu_qp_delta_enabled_flag &&
log2CbSize >= pps.Log2MinCuQpDeltaSize)
{
tctx->IsCuQpDeltaCoded = 0;
tctx->CuQpDelta = 0;
}
else
{
// shdr->CuQpDelta = 0; // TODO check: is this the right place to set to default value ?
}
if (tctx->shdr->cu_chroma_qp_offset_enabled_flag &&
log2CbSize >= pps.Log2MinCuChromaQpOffsetSize) {
tctx->IsCuChromaQpOffsetCoded = 0;
}
if (split_flag) {
int x1 = x0 + (1<<(log2CbSize-1));
int y1 = y0 + (1<<(log2CbSize-1));
read_coding_quadtree(tctx,x0,y0, log2CbSize-1, ctDepth+1);
if (x1<sps.pic_width_in_luma_samples)
read_coding_quadtree(tctx,x1,y0, log2CbSize-1, ctDepth+1);
if (y1<sps.pic_height_in_luma_samples)
read_coding_quadtree(tctx,x0,y1, log2CbSize-1, ctDepth+1);
if (x1<sps.pic_width_in_luma_samples &&
y1<sps.pic_height_in_luma_samples)
read_coding_quadtree(tctx,x1,y1, log2CbSize-1, ctDepth+1);
}
else {
// set ctDepth of this CU
img->set_ctDepth(x0,y0, log2CbSize, ctDepth);
read_coding_unit(tctx, x0,y0, log2CbSize, ctDepth);
}
logtrace(LogSlice,"-\n");
}
| 0 |
[] |
libde265
|
e83f3798dd904aa579425c53020c67e03735138d
| 226,272,050,263,880,330,000,000,000,000,000,000,000 | 70 |
fix check for valid PPS idx (#298)
|
void IntegrationCodecClient::sendTrailers(Http::StreamEncoder& encoder,
const Http::HeaderMap& trailers) {
encoder.encodeTrailers(trailers);
flushWrite();
}
| 0 |
[
"CWE-400",
"CWE-703"
] |
envoy
|
afc39bea36fd436e54262f150c009e8d72db5014
| 320,580,727,070,402,200,000,000,000,000,000,000,000 | 5 |
Track byteSize of HeaderMap internally.
Introduces a cached byte size updated internally in HeaderMap. The value
is stored as an optional, and is cleared whenever a non-const pointer or
reference to a HeaderEntry is accessed. The cached value can be set with
refreshByteSize() which performs an iteration over the HeaderMap to sum
the size of each key and value in the HeaderMap.
Signed-off-by: Asra Ali <[email protected]>
|
unhook_all ()
{
int type;
struct t_hook *ptr_hook, *next_hook;
for (type = 0; type < HOOK_NUM_TYPES; type++)
{
ptr_hook = weechat_hooks[type];
while (ptr_hook)
{
next_hook = ptr_hook->next_hook;
unhook (ptr_hook);
ptr_hook = next_hook;
}
}
}
| 0 |
[
"CWE-20"
] |
weechat
|
efb795c74fe954b9544074aafcebb1be4452b03a
| 11,720,487,959,191,864,000,000,000,000,000,000,000 | 16 |
core: do not call shell to execute command in hook_process (fixes a security problem when a plugin/script passes an untrusted command) (bug #37764)
|
TEST_F(RouterTest, PoolFailureDueToConnectTimeout) {
ON_CALL(callbacks_.route_->route_entry_, priority())
.WillByDefault(Return(Upstream::ResourcePriority::High));
EXPECT_CALL(cm_.thread_local_cluster_,
httpConnPool(Upstream::ResourcePriority::High, _, &router_));
EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _))
.WillOnce(Invoke([&](Http::StreamDecoder&, Http::ConnectionPool::Callbacks& callbacks)
-> Http::ConnectionPool::Cancellable* {
callbacks.onPoolFailure(ConnectionPool::PoolFailureReason::Timeout, "connect_timeout",
cm_.thread_local_cluster_.conn_pool_.host_);
return nullptr;
}));
Http::TestResponseHeaderMapImpl response_headers{
{":status", "503"}, {"content-length", "134"}, {"content-type", "text/plain"}};
EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), false));
EXPECT_CALL(callbacks_, encodeData(_, true));
EXPECT_CALL(callbacks_.stream_info_,
setResponseFlag(StreamInfo::ResponseFlag::UpstreamConnectionFailure));
Http::TestRequestHeaderMapImpl headers;
HttpTestUtility::addDefaultHeaders(headers);
router_.decodeHeaders(headers, true);
EXPECT_TRUE(verifyHostUpstreamStats(0, 1));
// Pool failure, so upstream request was not initiated.
EXPECT_EQ(0U,
callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
EXPECT_EQ(callbacks_.details(),
"upstream_reset_before_response_started{connection_failure,connect_timeout}");
}
| 0 |
[
"CWE-703"
] |
envoy
|
5bf9b0f1e7f247a4eee7180849cb0823926f7fff
| 133,509,792,965,789,060,000,000,000,000,000,000,000 | 30 |
[1.21] CVE-2022-21655
Signed-off-by: Otto van der Schaaf <[email protected]>
|
ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
{
struct ieee80211_local *local = rx->local;
struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
struct sk_buff *nskb;
struct ieee80211_sub_if_data *sdata = rx->sdata;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
if (!ieee80211_is_action(mgmt->frame_control))
return RX_CONTINUE;
/*
* For AP mode, hostapd is responsible for handling any action
* frames that we didn't handle, including returning unknown
* ones. For all other modes we will return them to the sender,
* setting the 0x80 bit in the action category, as required by
* 802.11-2012 9.24.4.
* Newer versions of hostapd shall also use the management frame
* registration mechanisms, but older ones still use cooked
* monitor interfaces so push all frames there.
*/
if (!(status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) &&
(sdata->vif.type == NL80211_IFTYPE_AP ||
sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
return RX_DROP_MONITOR;
if (is_multicast_ether_addr(mgmt->da))
return RX_DROP_MONITOR;
/* do not return rejected action frames */
if (mgmt->u.action.category & 0x80)
return RX_DROP_UNUSABLE;
nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0,
GFP_ATOMIC);
if (nskb) {
struct ieee80211_mgmt *nmgmt = (void *)nskb->data;
nmgmt->u.action.category |= 0x80;
memcpy(nmgmt->da, nmgmt->sa, ETH_ALEN);
memcpy(nmgmt->sa, rx->sdata->vif.addr, ETH_ALEN);
memset(nskb->cb, 0, sizeof(nskb->cb));
if (rx->sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) {
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(nskb);
info->flags = IEEE80211_TX_CTL_TX_OFFCHAN |
IEEE80211_TX_INTFL_OFFCHAN_TX_OK |
IEEE80211_TX_CTL_NO_CCK_RATE;
if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL))
info->hw_queue =
local->hw.offchannel_tx_hw_queue;
}
__ieee80211_tx_skb_tid_band(rx->sdata, nskb, 7,
status->band, 0);
}
dev_kfree_skb(rx->skb);
return RX_QUEUED;
}
| 0 |
[] |
linux
|
588f7d39b3592a36fb7702ae3b8bdd9be4621e2f
| 60,382,045,643,779,780,000,000,000,000,000,000,000 | 61 |
mac80211: drop robust management frames from unknown TA
When receiving a robust management frame, drop it if we don't have
rx->sta since then we don't have a security association and thus
couldn't possibly validate the frame.
Cc: [email protected]
Signed-off-by: Johannes Berg <[email protected]>
|
MOCK_IMPL(STATIC time_t,
randomize_time,(time_t now, time_t max_backdate))
{
tor_assert(max_backdate > 0);
time_t earliest = now - max_backdate;
time_t latest = now;
if (earliest <= 0)
earliest = 1;
if (latest <= earliest)
latest = earliest + 1;
return crypto_rand_time_range(earliest, latest);
}
| 0 |
[
"CWE-200"
] |
tor
|
665baf5ed5c6186d973c46cdea165c0548027350
| 6,273,334,187,629,144,000,000,000,000,000,000,000 | 14 |
Consider the exit family when applying guard restrictions.
When the new path selection logic went into place, I accidentally
dropped the code that considered the _family_ of the exit node when
deciding if the guard was usable, and we didn't catch that during
code review.
This patch makes the guard_restriction_t code consider the exit
family as well, and adds some (hopefully redundant) checks for the
case where we lack a node_t for a guard but we have a bridge_info_t
for it.
Fixes bug 22753; bugfix on 0.3.0.1-alpha. Tracked as TROVE-2016-006
and CVE-2017-0377.
|
static int __remove_section(struct zone *zone, struct mem_section *ms)
{
/*
* XXX: Freeing memmap with vmemmap is not implement yet.
* This should be removed later.
*/
return -EBUSY;
}
| 0 |
[] |
linux-2.6
|
08dff7b7d629807dbb1f398c68dd9cd58dd657a1
| 120,791,200,170,496,600,000,000,000,000,000,000,000 | 8 |
mm/hotplug: correctly add new zone to all other nodes' zone lists
When online_pages() is called to add new memory to an empty zone, it
rebuilds all zone lists by calling build_all_zonelists(). But there's a
bug which prevents the new zone to be added to other nodes' zone lists.
online_pages() {
build_all_zonelists()
.....
node_set_state(zone_to_nid(zone), N_HIGH_MEMORY)
}
Here the node of the zone is put into N_HIGH_MEMORY state after calling
build_all_zonelists(), but build_all_zonelists() only adds zones from
nodes in N_HIGH_MEMORY state to the fallback zone lists.
build_all_zonelists()
->__build_all_zonelists()
->build_zonelists()
->find_next_best_node()
->for_each_node_state(n, N_HIGH_MEMORY)
So memory in the new zone will never be used by other nodes, and it may
cause strange behavior when the system is under memory pressure. So put the node
into N_HIGH_MEMORY state before calling build_all_zonelists().
Signed-off-by: Jianguo Wu <[email protected]>
Signed-off-by: Jiang Liu <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Minchan Kim <[email protected]>
Cc: Rusty Russell <[email protected]>
Cc: Yinghai Lu <[email protected]>
Cc: Tony Luck <[email protected]>
Cc: KAMEZAWA Hiroyuki <[email protected]>
Cc: KOSAKI Motohiro <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Keping Chen <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
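As a sketch of the reordering that message calls for (not the full patch; the build_all_zonelists() argument list varies between kernel versions), the node is marked before the rebuild so find_next_best_node() can see it:
/* sketch: mark the node N_HIGH_MEMORY *before* rebuilding zonelists,
 * so other nodes' fallback lists pick up the newly populated zone */
if (need_zonelists_rebuild) {
        node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
        build_all_zonelists(NULL, zone);
}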
lyd_new_yangdata(const struct lys_module *module, const char *name_template, const char *name)
{
const struct lys_node *schema = NULL, *snode;
if (!module || !name_template || !name) {
LOGARG;
return NULL;
}
schema = lyp_get_yang_data_template(module, name_template, strlen(name_template));
if (!schema) {
LOGERR(module->ctx, LY_EINVAL, "Failed to find yang-data template \"%s\".", name_template);
return NULL;
}
if (lys_getnext_data(module, schema, name, strlen(name), LYS_CONTAINER, 0, &snode) || !snode) {
LOGERR(module->ctx, LY_EINVAL, "Failed to find \"%s\" as a container child of \"%s:%s\".",
name, module->name, schema->name);
return NULL;
}
return _lyd_new(NULL, snode, 0);
}
| 0 |
[
"CWE-119"
] |
libyang
|
32fb4993bc8bb49e93e84016af3c10ea53964be5
| 65,519,280,154,549,790,000,000,000,000,000,000,000 | 23 |
schema tree BUGFIX do not check features while still resolving schema
Fixes #723
|
std::string help() const override {
return "Adds a role to the system";
}
| 0 |
[
"CWE-613"
] |
mongo
|
e55d6e2292e5dbe2f97153251d8193d1cc89f5d7
| 221,701,680,948,466,020,000,000,000,000,000,000,000 | 3 |
SERVER-38984 Validate unique User ID on UserCache hit
|
void md5_update(MD5CTX c, const void *data, unsigned long len)
{
EVP_DigestUpdate(c, data, len);
}
| 0 |
[
"CWE-476"
] |
libssh
|
b36272eac1b36982598c10de7af0a501582de07a
| 177,191,595,221,869,600,000,000,000,000,000,000,000 | 4 |
CVE-2020-1730: Fix a possible segfault when zeroing AES-CTR key
Fixes T213
Signed-off-by: Andreas Schneider <[email protected]>
Reviewed-by: Anderson Toshiyuki Sasaki <[email protected]>
|
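The md5_update() wrapper above is only contextual; a hedged sketch of the CVE-2020-1730 fix itself (the aes_key field name is an assumption about the cipher structure, not libssh's exact layout) guards the zeroing with a NULL check:
/* sketch: only zero and free the AES-CTR key if it was allocated,
 * avoiding a segfault on a NULL pointer during cleanup */
if (cipher->aes_key != NULL) {
    explicit_bzero(cipher->aes_key, sizeof(*cipher->aes_key));
    free(cipher->aes_key);
    cipher->aes_key = NULL;
}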
virtual ~ExpectedResultBase() {}
| 0 |
[
"CWE-835"
] |
mongo
|
0a076417d1d7fba3632b73349a1fd29a83e68816
| 340,067,152,319,676,060,000,000,000,000,000,000,000 | 1 |
SERVER-38070 fix infinite loop in agg expression
|
void json_tokener_reset(struct json_tokener *tok)
{
int i;
if (!tok)
return;
for(i = tok->depth; i >= 0; i--)
json_tokener_reset_level(tok, i);
tok->depth = 0;
tok->err = json_tokener_success;
}
| 0 |
[
"CWE-119",
"CWE-310"
] |
json-c
|
64e36901a0614bf64a19bc3396469c66dcd0b015
| 179,445,907,806,056,680,000,000,000,000,000,000,000 | 11 |
Patch to address the following issues:
* CVE-2013-6371: hash collision denial of service
* CVE-2013-6370: buffer overflow if size_t is larger than int
|
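For the CVE-2013-6370 half of that patch (buffer overflow when size_t is wider than int), a minimal sketch of the kind of guard involved; the helper name is illustrative, not json-c's exact code:
#include <limits.h>
#include <stddef.h>

/* sketch: reject a size_t length that int-based buffer math can't hold,
 * so cur + extra never wraps when stored in an int */
static int length_fits(size_t cur, size_t extra)
{
    return extra <= (size_t)INT_MAX - cur;
}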
void avahi_server_enumerate_aux_records(AvahiServer *s, AvahiInterface *i, AvahiRecord *r, void (*callback)(AvahiServer *s, AvahiRecord *r, int flush_cache, void* userdata), void* userdata) {
assert(s);
assert(i);
assert(r);
assert(callback);
/* Call the specified callback for all records referenced by the one specified in *r */
if (r->key->clazz == AVAHI_DNS_CLASS_IN) {
if (r->key->type == AVAHI_DNS_TYPE_PTR) {
enum_aux_records(s, i, r->data.ptr.name, AVAHI_DNS_TYPE_SRV, callback, userdata);
enum_aux_records(s, i, r->data.ptr.name, AVAHI_DNS_TYPE_TXT, callback, userdata);
} else if (r->key->type == AVAHI_DNS_TYPE_SRV) {
enum_aux_records(s, i, r->data.srv.name, AVAHI_DNS_TYPE_A, callback, userdata);
enum_aux_records(s, i, r->data.srv.name, AVAHI_DNS_TYPE_AAAA, callback, userdata);
} else if (r->key->type == AVAHI_DNS_TYPE_CNAME)
enum_aux_records(s, i, r->data.cname.name, AVAHI_DNS_TYPE_ANY, callback, userdata);
}
}
| 0 |
[
"CWE-399"
] |
avahi
|
3093047f1aa36bed8a37fa79004bf0ee287929f4
| 294,964,401,481,877,300,000,000,000,000,000,000,000 | 19 |
Don't get confused by UDP packets with a source port that is zero
This is a fix for rhbz 475394.
Problem identified by Hugo Dias.
|
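The enumeration helper above is only contextual; a sketch of the check the message describes (variable names assumed), dropped into the packet receive path, simply ignores datagrams whose source port is zero:
/* sketch: a UDP source port of zero is never valid, so ignore the
 * packet instead of letting it confuse the responder state */
if (port == 0) {
    avahi_log_warn("Received packet from invalid source port 0.");
    return;
}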
determine_versionability (struct cgraph_node *node,
class ipa_node_params *info)
{
const char *reason = NULL;
/* There are a number of generic reasons functions cannot be versioned. We
also cannot remove parameters if there are type attributes such as fnspec
present. */
if (node->alias || node->thunk.thunk_p)
reason = "alias or thunk";
else if (!node->versionable)
reason = "not a tree_versionable_function";
else if (node->get_availability () <= AVAIL_INTERPOSABLE)
reason = "insufficient body availability";
else if (!opt_for_fn (node->decl, optimize)
|| !opt_for_fn (node->decl, flag_ipa_cp))
reason = "non-optimized function";
else if (lookup_attribute ("omp declare simd", DECL_ATTRIBUTES (node->decl)))
{
/* Ideally we should clone the SIMD clones themselves and create
vector copies of them, so IPA-cp and SIMD clones can happily
coexist, but that may not be worth the effort. */
reason = "function has SIMD clones";
}
else if (lookup_attribute ("target_clones", DECL_ATTRIBUTES (node->decl)))
{
/* Ideally we should clone the target clones themselves and create
copies of them, so IPA-cp and target clones can happily
coexist, but that may not be worth the effort. */
reason = "function target_clones attribute";
}
/* Don't clone decls local to a comdat group; it breaks, and for C++
decloned constructors inlining is always better anyway. */
else if (node->comdat_local_p ())
reason = "comdat-local function";
else if (node->calls_comdat_local)
{
/* TODO: call is versionable if we make sure that all
callers are inside of a comdat group. */
reason = "calls comdat-local function";
}
/* Functions calling BUILT_IN_VA_ARG_PACK and BUILT_IN_VA_ARG_PACK_LEN
work only when inlined. Cloning them may still lead to better code
because ipa-cp will not give up on cloning further. If the function is
external, however, this leads to wrong code, because we may end up producing
an offline copy of the function. */
if (DECL_EXTERNAL (node->decl))
for (cgraph_edge *edge = node->callees; !reason && edge;
edge = edge->next_callee)
if (fndecl_built_in_p (edge->callee->decl, BUILT_IN_NORMAL))
{
if (DECL_FUNCTION_CODE (edge->callee->decl) == BUILT_IN_VA_ARG_PACK)
reason = "external function which calls va_arg_pack";
if (DECL_FUNCTION_CODE (edge->callee->decl)
== BUILT_IN_VA_ARG_PACK_LEN)
reason = "external function which calls va_arg_pack_len";
}
if (reason && dump_file && !node->alias && !node->thunk.thunk_p)
fprintf (dump_file, "Function %s is not versionable, reason: %s.\n",
node->dump_name (), reason);
info->versionable = (reason == NULL);
}
| 0 |
[
"CWE-20"
] |
gcc
|
a09ccc22459c565814f79f96586fe4ad083fe4eb
| 338,130,583,500,935,580,000,000,000,000,000,000,000 | 65 |
Avoid segfault when doing IPA-VRP but not IPA-CP (PR 93015)
2019-12-21 Martin Jambor <[email protected]>
PR ipa/93015
* ipa-cp.c (ipcp_store_vr_results): Check that info exists
testsuite/
* gcc.dg/lto/pr93015_0.c: New test.
From-SVN: r279695
|
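The PR 93015 fix lands in ipcp_store_vr_results(), not in determine_versionability() above; a sketch of the guard the ChangeLog entry describes, using the same ipa_node_params type the snippet already declares:
/* sketch: with IPA-VRP enabled but IPA-CP disabled, no lattice info
 * was ever computed for the node, so skip it rather than segfault */
class ipa_node_params *info = IPA_NODE_REF (node);
if (!info)
  continue;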
PHP_MSHUTDOWN_FUNCTION(openssl)
{
EVP_cleanup();
#if OPENSSL_VERSION_NUMBER >= 0x00090805f
/* prevent accessing locking callback from unloaded extension */
CRYPTO_set_locking_callback(NULL);
/* free allocated error strings */
ERR_free_strings();
#endif
php_unregister_url_stream_wrapper("https" TSRMLS_CC);
php_unregister_url_stream_wrapper("ftps" TSRMLS_CC);
php_stream_xport_unregister("ssl" TSRMLS_CC);
#ifndef OPENSSL_NO_SSL2
php_stream_xport_unregister("sslv2" TSRMLS_CC);
#endif
php_stream_xport_unregister("sslv3" TSRMLS_CC);
php_stream_xport_unregister("tls" TSRMLS_CC);
php_stream_xport_unregister("tlsv1.0" TSRMLS_CC);
#if OPENSSL_VERSION_NUMBER >= 0x10001001L
php_stream_xport_unregister("tlsv1.1" TSRMLS_CC);
php_stream_xport_unregister("tlsv1.2" TSRMLS_CC);
#endif
/* reinstate the default tcp handler */
php_stream_xport_register("tcp", php_stream_generic_socket_factory TSRMLS_CC);
UNREGISTER_INI_ENTRIES();
return SUCCESS;
}
| 0 |
[
"CWE-754"
] |
php-src
|
89637c6b41b510c20d262c17483f582f115c66d6
| 72,871,155,123,185,940,000,000,000,000,000,000,000 | 33 |
Fix bug #74651 - check EVP_SealInit as it can return -1
|
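A sketch of the bug #74651 check (the PHP-side variable names are assumptions): EVP_SealInit() reports failure as 0, but it can also return -1, so the test must cover both:
/* sketch: treat both 0 and -1 from EVP_SealInit() as failure */
if (EVP_SealInit(ctx, cipher, ek, eksl, iv, pubkeys, nkeys) <= 0) {
    RETVAL_FALSE;
    goto clean_exit;
}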
static enum_func_status
php_mysqlnd_rowp_read(void * _packet, MYSQLND_CONN_DATA * conn TSRMLS_DC)
{
zend_uchar *p;
enum_func_status ret = PASS;
MYSQLND_PACKET_ROW *packet= (MYSQLND_PACKET_ROW *) _packet;
size_t post_alloc_for_bit_fields = 0;
size_t data_size = 0;
DBG_ENTER("php_mysqlnd_rowp_read");
if (!packet->binary_protocol && packet->bit_fields_count) {
/* For every field we need terminating \0 */
post_alloc_for_bit_fields = packet->bit_fields_total_len + packet->bit_fields_count;
}
ret = php_mysqlnd_read_row_ex(conn, packet->result_set_memory_pool, &packet->row_buffer, &data_size,
packet->persistent_alloc, post_alloc_for_bit_fields
TSRMLS_CC);
if (FAIL == ret) {
goto end;
}
MYSQLND_INC_CONN_STATISTIC_W_VALUE2(conn->stats, packet_type_to_statistic_byte_count[PROT_ROW_PACKET],
MYSQLND_HEADER_SIZE + packet->header.size,
packet_type_to_statistic_packet_count[PROT_ROW_PACKET],
1);
/* packet->row_buffer->ptr is of size 'data_size + 1' */
packet->header.size = data_size;
packet->row_buffer->app = data_size;
if (ERROR_MARKER == (*(p = packet->row_buffer->ptr))) {
/*
Error message as part of the result set,
not good but we should not hang. See:
Bug #27876 : SF with cyrillic variable name fails during execution
*/
ret = FAIL;
php_mysqlnd_read_error_from_line(p + 1, data_size - 1,
packet->error_info.error,
sizeof(packet->error_info.error),
&packet->error_info.error_no,
packet->error_info.sqlstate
TSRMLS_CC);
} else if (EODATA_MARKER == *p && data_size < 8) { /* EOF */
packet->eof = TRUE;
p++;
if (data_size > 1) {
packet->warning_count = uint2korr(p);
p += 2;
packet->server_status = uint2korr(p);
/* Seems we have 3 bytes reserved for future use */
DBG_INF_FMT("server_status=%u warning_count=%u", packet->server_status, packet->warning_count);
}
} else {
MYSQLND_INC_CONN_STATISTIC(conn->stats,
packet->binary_protocol? STAT_ROWS_FETCHED_FROM_SERVER_PS:
STAT_ROWS_FETCHED_FROM_SERVER_NORMAL);
packet->eof = FALSE;
/* packet->field_count is set by the user of the packet */
if (!packet->skip_extraction) {
if (!packet->fields) {
DBG_INF("Allocating packet->fields");
/*
old-API will probably set packet->fields to NULL every time, though for
unbuffered sets it does not make much sense, as the zvals in this buffer matter,
not the buffer itself. Constantly allocating and deallocating gains nothing.
For PS - if stmt_store() is performed, so that we don't have a cursor, it will
behave just like old-API buffered. Cursors will behave a bit differently,
but mostly like old-API unbuffered, and thus will populate this array with
values.
*/
packet->fields = (zval **) mnd_pecalloc(packet->field_count, sizeof(zval *),
packet->persistent_alloc);
}
} else {
MYSQLND_INC_CONN_STATISTIC(conn->stats,
packet->binary_protocol? STAT_ROWS_SKIPPED_PS:
STAT_ROWS_SKIPPED_NORMAL);
}
}
end:
DBG_RETURN(ret);
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
php-src
|
28f80baf3c53e267c9ce46a2a0fadbb981585132
| 214,642,112,435,050,150,000,000,000,000,000,000,000 | 87 |
Fix bug #72293 - Heap overflow in mysqlnd related to BIT fields
|
parse_reparse_posix(struct reparse_posix_data *symlink_buf,
u32 plen, char **target_path,
struct cifs_sb_info *cifs_sb)
{
unsigned int len;
/* See MS-FSCC 2.1.2.6 for the 'NFS' style reparse tags */
len = le16_to_cpu(symlink_buf->ReparseDataLength);
if (le64_to_cpu(symlink_buf->InodeType) != NFS_SPECFILE_LNK) {
cifs_dbg(VFS, "%lld not a supported symlink type\n",
le64_to_cpu(symlink_buf->InodeType));
return -EOPNOTSUPP;
}
*target_path = cifs_strndup_from_utf16(
symlink_buf->PathBuffer,
len, true, cifs_sb->local_nls);
if (!(*target_path))
return -ENOMEM;
convert_delimiter(*target_path, '/');
cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);
return 0;
}
| 0 |
[
"CWE-476"
] |
linux
|
d6f5e358452479fa8a773b5c6ccc9e4ec5a20880
| 160,145,054,573,324,980,000,000,000,000,000,000,000 | 26 |
cifs: fix NULL ptr dereference in smb2_ioctl_query_info()
When calling smb2_ioctl_query_info() with invalid
smb_query_info::flags, a NULL ptr dereference is triggered when trying
to kfree() an uninitialised rqst[n].rq_iov array.
This also fixes leaked paths that are created in SMB2_open_init()
which required SMB2_open_free() to properly free them.
Here is a small C reproducer that triggers it
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#define die(s) perror(s), exit(1)
#define QUERY_INFO 0xc018cf07
int main(int argc, char *argv[])
{
int fd;
if (argc < 2)
exit(1);
fd = open(argv[1], O_RDONLY);
if (fd == -1)
die("open");
if (ioctl(fd, QUERY_INFO, (uint32_t[]) { 0, 0, 0, 4, 0, 0}) == -1)
die("ioctl");
close(fd);
return 0;
}
mount.cifs //srv/share /mnt -o ...
gcc repro.c && ./a.out /mnt/f0
[ 1832.124468] CIFS: VFS: \\w22-dc.zelda.test\test Invalid passthru query flags: 0x4
[ 1832.125043] general protection fault, probably for non-canonical address 0xdffffc0000000000: 0000 [#1] PREEMPT SMP KASAN NOPTI
[ 1832.125764] KASAN: null-ptr-deref in range [0x0000000000000000-0x0000000000000007]
[ 1832.126241] CPU: 3 PID: 1133 Comm: a.out Not tainted 5.17.0-rc8 #2
[ 1832.126630] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS rel-1.15.0-0-g2dd4b9b-rebuilt.opensuse.org 04/01/2014
[ 1832.127322] RIP: 0010:smb2_ioctl_query_info+0x7a3/0xe30 [cifs]
[ 1832.127749] Code: 00 00 00 fc ff df 48 c1 ea 03 80 3c 02 00 0f 85 6c 05 00 00 48 b8 00 00 00 00 00 fc ff df 4d 8b 74 24 28 4c 89 f2 48 c1 ea 03 <80> 3c 02 00 0f 85 cb 04 00 00 49 8b 3e e8 bb fc fa ff 48 89 da 48
[ 1832.128911] RSP: 0018:ffffc90000957b08 EFLAGS: 00010256
[ 1832.129243] RAX: dffffc0000000000 RBX: ffff888117e9b850 RCX: ffffffffa020580d
[ 1832.129691] RDX: 0000000000000000 RSI: 0000000000000004 RDI: ffffffffa043a2c0
[ 1832.130137] RBP: ffff888117e9b878 R08: 0000000000000001 R09: 0000000000000003
[ 1832.130585] R10: fffffbfff4087458 R11: 0000000000000001 R12: ffff888117e9b800
[ 1832.131037] R13: 00000000ffffffea R14: 0000000000000000 R15: ffff888117e9b8a8
[ 1832.131485] FS: 00007fcee9900740(0000) GS:ffff888151a00000(0000) knlGS:0000000000000000
[ 1832.131993] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[ 1832.132354] CR2: 00007fcee9a1ef5e CR3: 0000000114cd2000 CR4: 0000000000350ee0
[ 1832.132801] Call Trace:
[ 1832.132962] <TASK>
[ 1832.133104] ? smb2_query_reparse_tag+0x890/0x890 [cifs]
[ 1832.133489] ? cifs_mapchar+0x460/0x460 [cifs]
[ 1832.133822] ? rcu_read_lock_sched_held+0x3f/0x70
[ 1832.134125] ? cifs_strndup_to_utf16+0x15b/0x250 [cifs]
[ 1832.134502] ? lock_downgrade+0x6f0/0x6f0
[ 1832.134760] ? cifs_convert_path_to_utf16+0x198/0x220 [cifs]
[ 1832.135170] ? smb2_check_message+0x1080/0x1080 [cifs]
[ 1832.135545] cifs_ioctl+0x1577/0x3320 [cifs]
[ 1832.135864] ? lock_downgrade+0x6f0/0x6f0
[ 1832.136125] ? cifs_readdir+0x2e60/0x2e60 [cifs]
[ 1832.136468] ? rcu_read_lock_sched_held+0x3f/0x70
[ 1832.136769] ? __rseq_handle_notify_resume+0x80b/0xbe0
[ 1832.137096] ? __up_read+0x192/0x710
[ 1832.137327] ? __ia32_sys_rseq+0xf0/0xf0
[ 1832.137578] ? __x64_sys_openat+0x11f/0x1d0
[ 1832.137850] __x64_sys_ioctl+0x127/0x190
[ 1832.138103] do_syscall_64+0x3b/0x90
[ 1832.138378] entry_SYSCALL_64_after_hwframe+0x44/0xae
[ 1832.138702] RIP: 0033:0x7fcee9a253df
[ 1832.138937] Code: 00 48 89 44 24 18 31 c0 48 8d 44 24 60 c7 04 24 10 00 00 00 48 89 44 24 08 48 8d 44 24 20 48 89 44 24 10 b8 10 00 00 00 0f 05 <41> 89 c0 3d 00 f0 ff ff 77 1f 48 8b 44 24 18 64 48 2b 04 25 28 00
[ 1832.140107] RSP: 002b:00007ffeba94a8a0 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
[ 1832.140606] RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007fcee9a253df
[ 1832.141058] RDX: 00007ffeba94a910 RSI: 00000000c018cf07 RDI: 0000000000000003
[ 1832.141503] RBP: 00007ffeba94a930 R08: 00007fcee9b24db0 R09: 00007fcee9b45c4e
[ 1832.141948] R10: 00007fcee9918d40 R11: 0000000000000246 R12: 00007ffeba94aa48
[ 1832.142396] R13: 0000000000401176 R14: 0000000000403df8 R15: 00007fcee9b78000
[ 1832.142851] </TASK>
[ 1832.142994] Modules linked in: cifs cifs_arc4 cifs_md4 bpf_preload [last unloaded: cifs]
Cc: [email protected]
Signed-off-by: Paulo Alcantara (SUSE) <[email protected]>
Signed-off-by: Steve French <[email protected]>
|
int print_deleg_lookup(RES* ssl, struct worker* worker, uint8_t* nm,
size_t nmlen, int ATTR_UNUSED(nmlabs))
{
/* deep links into the iterator module */
struct delegpt* dp;
struct dns_msg* msg;
struct regional* region = worker->scratchpad;
char b[260];
struct query_info qinfo;
struct iter_hints_stub* stub;
regional_free_all(region);
qinfo.qname = nm;
qinfo.qname_len = nmlen;
qinfo.qtype = LDNS_RR_TYPE_A;
qinfo.qclass = LDNS_RR_CLASS_IN;
qinfo.local_alias = NULL;
dname_str(nm, b);
if(!ssl_printf(ssl, "The following name servers are used for lookup "
"of %s\n", b))
return 0;
dp = forwards_lookup(worker->env.fwds, nm, qinfo.qclass);
if(dp) {
if(!ssl_printf(ssl, "forwarding request:\n"))
return 0;
print_dp_main(ssl, dp, NULL);
print_dp_details(ssl, worker, dp);
return 1;
}
while(1) {
dp = dns_cache_find_delegation(&worker->env, nm, nmlen,
qinfo.qtype, qinfo.qclass, region, &msg,
*worker->env.now);
if(!dp) {
return ssl_printf(ssl, "no delegation from "
"cache; goes to configured roots\n");
}
/* go up? */
if(iter_dp_is_useless(&qinfo, BIT_RD, dp,
(worker->env.cfg->do_ip4 && worker->back->num_ip4 != 0),
(worker->env.cfg->do_ip6 && worker->back->num_ip6 != 0))) {
print_dp_main(ssl, dp, msg);
print_dp_details(ssl, worker, dp);
if(!ssl_printf(ssl, "cache delegation was "
"useless (no IP addresses)\n"))
return 0;
if(dname_is_root(nm)) {
/* goes to root config */
return ssl_printf(ssl, "no delegation from "
"cache; goes to configured roots\n");
} else {
/* useless, goes up */
nm = dp->name;
nmlen = dp->namelen;
dname_remove_label(&nm, &nmlen);
dname_str(nm, b);
if(!ssl_printf(ssl, "going up, lookup %s\n", b))
return 0;
continue;
}
}
stub = hints_lookup_stub(worker->env.hints, nm, qinfo.qclass,
dp);
if(stub) {
if(stub->noprime) {
if(!ssl_printf(ssl, "The noprime stub servers "
"are used:\n"))
return 0;
} else {
if(!ssl_printf(ssl, "The stub is primed "
"with servers:\n"))
return 0;
}
print_dp_main(ssl, stub->dp, NULL);
print_dp_details(ssl, worker, stub->dp);
} else {
print_dp_main(ssl, dp, msg);
print_dp_details(ssl, worker, dp);
}
break;
}
return 1;
}
| 1 |
[
"CWE-613",
"CWE-703"
] |
unbound
|
f6753a0f1018133df552347a199e0362fc1dac68
| 132,475,581,811,875,140,000,000,000,000,000,000,000 | 86 |
- Fix the novel ghost domain issues CVE-2022-30698 and CVE-2022-30699.
|
void HGraphBuilder::GenerateMathLog(CallRuntime* call) {
ASSERT_EQ(1, call->arguments()->length());
CHECK_ALIVE(VisitArgumentList(call->arguments()));
HValue* context = environment()->LookupContext();
HCallStub* result =
new(zone()) HCallStub(context, CodeStub::TranscendentalCache, 1);
result->set_transcendental_type(TranscendentalCache::LOG);
Drop(1);
return ast_context()->ReturnInstruction(result, call->id());
}
| 0 |
[] |
node
|
fd80a31e0697d6317ce8c2d289575399f4e06d21
| 185,227,124,565,499,550,000,000,000,000,000,000,000 | 10 |
deps: backport 5f836c from v8 upstream
Original commit message:
Fix Hydrogen bounds check elimination
When combining bounds checks, they must all be moved before the first load/store
that they are guarding.
BUG=chromium:344186
LOG=y
[email protected]
Review URL: https://codereview.chromium.org/172093002
git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@19475 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
fix #8070
|
static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_poll_iocb *poll = &req->poll;
u32 flags;
if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
if (sqe->ioprio || sqe->buf_index || sqe->off || sqe->addr)
return -EINVAL;
flags = READ_ONCE(sqe->len);
if (flags & ~IORING_POLL_ADD_MULTI)
return -EINVAL;
if ((flags & IORING_POLL_ADD_MULTI) && (req->flags & REQ_F_CQE_SKIP))
return -EINVAL;
io_req_set_refcount(req);
req->cflags = poll->events = io_poll_parse_events(sqe, flags);
return 0;
}
| 0 |
[
"CWE-416"
] |
linux
|
e677edbcabee849bfdd43f1602bccbecf736a646
| 329,052,985,388,963,940,000,000,000,000,000,000,000 | 19 |
io_uring: fix race between timeout flush and removal
io_flush_timeouts() assumes the timeout isn't in the process of triggering
or being removed/canceled, so it unconditionally removes it from the
timeout list and attempts to cancel it.
Leave it on the list and let the normal timeout cancelation take care
of it.
Cc: [email protected] # 5.5+
Signed-off-by: Jens Axboe <[email protected]>
|
inline U32 readOpaque32() { check(4); U32 r; ((U8*)&r)[0] = *ptr++;
((U8*)&r)[1] = *ptr++; ((U8*)&r)[2] = *ptr++;
((U8*)&r)[3] = *ptr++; return r; }
| 0 |
[
"CWE-20",
"CWE-787"
] |
tigervnc
|
0943c006c7d900dfc0281639e992791d6c567438
| 131,024,751,553,163,060,000,000,000,000,000,000,000 | 3 |
Use size_t for lengths in stream objects
Provides safety against them accidentally becoming negative because
of bugs in the calculations.
Also does the same to CharArray and friends, as they were strongly
connected to the stream objects.
|
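Applied to readOpaque32() above, the size_t change reads roughly like this sketch (the member handling is simplified into a standalone helper): an unsigned length that goes "negative" wraps to a huge value and fails the bounds check instead of slipping past it:
#include <stddef.h>

/* sketch: with size_t, a miscomputed length cannot become negative;
 * it wraps to a huge value and the check below simply fails */
static int check(const unsigned char *ptr, const unsigned char *end,
                 size_t needed)
{
    return (size_t)(end - ptr) >= needed; /* caller bails out on 0 */
}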
static void ps_mm_destroy(ps_mm *data)
{
int h;
ps_sd *sd, *next;
/* This function is called during each module shutdown,
but we must not release the shared memory pool when
an Apache child dies! */
if (data->owner != getpid()) {
return;
}
for (h = 0; h < data->hash_max + 1; h++) {
for (sd = data->hash[h]; sd; sd = next) {
next = sd->next;
ps_sd_destroy(data, sd);
}
}
mm_free(data->mm, data->hash);
mm_destroy(data->mm);
free(data);
}
| 0 |
[
"CWE-264"
] |
php-src
|
25e8fcc88fa20dc9d4c47184471003f436927cde
| 7,156,559,296,232,536,000,000,000,000,000,000,000 | 23 |
Strict session
|
mb_replace_pop_ins(int cc)
{
int n;
char_u buf[MB_MAXBYTES + 1];
int i;
int c;
if (has_mbyte && (n = MB_BYTE2LEN(cc)) > 1)
{
buf[0] = cc;
for (i = 1; i < n; ++i)
buf[i] = replace_pop();
ins_bytes_len(buf, n);
}
else
ins_char(cc);
if (enc_utf8)
// Handle composing chars.
for (;;)
{
c = replace_pop();
if (c == -1) // stack empty
break;
if ((n = MB_BYTE2LEN(c)) == 1)
{
// Not a multi-byte char, put it back.
replace_push(c);
break;
}
else
{
buf[0] = c;
for (i = 1; i < n; ++i)
buf[i] = replace_pop();
if (utf_iscomposing(utf_ptr2char(buf)))
ins_bytes_len(buf, n);
else
{
// Not a composing char, put it back.
for (i = n - 1; i >= 0; --i)
replace_push(buf[i]);
break;
}
}
}
}
| 0 |
[] |
vim
|
98a336dd497d3422e7efeef9f24cc9e25aeb8a49
| 251,572,372,935,924,540,000,000,000,000,000,000,000 | 47 |
patch 8.2.0133: invalid memory access with search command
Problem: Invalid memory access with search command.
Solution: When :normal runs out of characters in bracketed paste mode, break
out of the loop. (closes #5511)
|
xmlValidatePushElement(xmlValidCtxtPtr ctxt, xmlDocPtr doc,
xmlNodePtr elem, const xmlChar *qname) {
int ret = 1;
xmlElementPtr eDecl;
int extsubset = 0;
if (ctxt == NULL)
return(0);
/* printf("PushElem %s\n", qname); */
if ((ctxt->vstateNr > 0) && (ctxt->vstate != NULL)) {
xmlValidStatePtr state = ctxt->vstate;
xmlElementPtr elemDecl;
/*
* Check the new element against the content model of the new elem.
*/
if (state->elemDecl != NULL) {
elemDecl = state->elemDecl;
switch(elemDecl->etype) {
case XML_ELEMENT_TYPE_UNDEFINED:
ret = 0;
break;
case XML_ELEMENT_TYPE_EMPTY:
xmlErrValidNode(ctxt, state->node,
XML_DTD_NOT_EMPTY,
"Element %s was declared EMPTY this one has content\n",
state->node->name, NULL, NULL);
ret = 0;
break;
case XML_ELEMENT_TYPE_ANY:
/* I don't think anything is required then */
break;
case XML_ELEMENT_TYPE_MIXED:
/* simple case of declared as #PCDATA */
if ((elemDecl->content != NULL) &&
(elemDecl->content->type ==
XML_ELEMENT_CONTENT_PCDATA)) {
xmlErrValidNode(ctxt, state->node,
XML_DTD_NOT_PCDATA,
"Element %s was declared #PCDATA but contains non text nodes\n",
state->node->name, NULL, NULL);
ret = 0;
} else {
ret = xmlValidateCheckMixed(ctxt, elemDecl->content,
qname);
if (ret != 1) {
xmlErrValidNode(ctxt, state->node,
XML_DTD_INVALID_CHILD,
"Element %s is not declared in %s list of possible children\n",
qname, state->node->name, NULL);
}
}
break;
case XML_ELEMENT_TYPE_ELEMENT:
/*
* TODO:
* VC: Standalone Document Declaration
* - element types with element content, if white space
* occurs directly within any instance of those types.
*/
if (state->exec != NULL) {
ret = xmlRegExecPushString(state->exec, qname, NULL);
if (ret < 0) {
xmlErrValidNode(ctxt, state->node,
XML_DTD_CONTENT_MODEL,
"Element %s content does not follow the DTD, Misplaced %s\n",
state->node->name, qname, NULL);
ret = 0;
} else {
ret = 1;
}
}
break;
}
}
}
eDecl = xmlValidGetElemDecl(ctxt, doc, elem, &extsubset);
vstateVPush(ctxt, eDecl, elem);
return(ret);
}
| 0 |
[
"CWE-416"
] |
libxml2
|
652dd12a858989b14eed4e84e453059cd3ba340e
| 84,982,435,194,051,060,000,000,000,000,000,000,000 | 81 |
[CVE-2022-23308] Use-after-free of ID and IDREF attributes
If a document is parsed with XML_PARSE_DTDVALID and without
XML_PARSE_NOENT, the value of ID attributes has to be normalized after
potentially expanding entities in xmlRemoveID. Otherwise, later calls
to xmlGetID can return a pointer to previously freed memory.
ID attributes which are empty or contain only whitespace after
entity expansion are affected in a similar way. This is fixed by
not storing such attributes in the ID table.
The test to detect streaming mode when validating against a DTD was
broken. In connection with the defects above, this could result in a
use-after-free when using the xmlReader interface with validation.
Fix detection of streaming mode to avoid similar issues. (This changes
the expected result of a test case. But as far as I can tell, using the
XML reader with XIncludes referencing the root document never worked
properly, anyway.)
All of these issues can result in denial of service. Using xmlReader
with validation could result in disclosure of memory via the error
channel, typically stderr. The security impact of xmlGetID returning
a pointer to freed memory depends on the application. The typical use
case of calling xmlGetID on an unmodified document is not affected.
|
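A sketch of the second half of that fix, refusing to store empty or whitespace-only IDs, as the message describes it (placement inside the ID registration path is assumed; IS_BLANK_CH is libxml2's whitespace test):
/* sketch: an ID that is empty or all-whitespace after entity
 * expansion must not enter the ID table, or later xmlGetID()
 * lookups can return pointers into freed memory */
const xmlChar *cur = value;
while (IS_BLANK_CH(*cur)) cur++;
if (*cur == 0)
    return (NULL); /* do not register this attribute as an ID */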
PostgresMain(int argc, char *argv[],
const char *dbname,
const char *username)
{
int firstchar;
StringInfoData input_message;
sigjmp_buf local_sigjmp_buf;
volatile bool send_ready_for_query = true;
/* Initialize startup process environment if necessary. */
if (!IsUnderPostmaster)
InitStandaloneProcess(argv[0]);
SetProcessingMode(InitProcessing);
/*
* Set default values for command-line options.
*/
if (!IsUnderPostmaster)
InitializeGUCOptions();
/*
* Parse command-line options.
*/
process_postgres_switches(argc, argv, PGC_POSTMASTER, &dbname);
/* Must have gotten a database name, or have a default (the username) */
if (dbname == NULL)
{
dbname = username;
if (dbname == NULL)
ereport(FATAL,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("%s: no database nor user name specified",
progname)));
}
/* Acquire configuration parameters, unless inherited from postmaster */
if (!IsUnderPostmaster)
{
if (!SelectConfigFiles(userDoption, progname))
proc_exit(1);
}
/*
* Set up signal handlers and masks.
*
* Note that postmaster blocked all signals before forking child process,
* so there is no race condition whereby we might receive a signal before
* we have set up the handler.
*
* Also note: it's best not to use any signals that are SIG_IGNored in the
* postmaster. If such a signal arrives before we are able to change the
* handler to non-SIG_IGN, it'll get dropped. Instead, make a dummy
* handler in the postmaster to reserve the signal. (Of course, this isn't
* an issue for signals that are locally generated, such as SIGALRM and
* SIGPIPE.)
*/
if (am_walsender)
WalSndSignals();
else
{
pqsignal(SIGHUP, SigHupHandler); /* set flag to read config
* file */
pqsignal(SIGINT, StatementCancelHandler); /* cancel current query */
pqsignal(SIGTERM, die); /* cancel current query and exit */
/*
* In a standalone backend, SIGQUIT can be generated from the keyboard
* easily, while SIGTERM cannot, so we make both signals do die()
* rather than quickdie().
*/
if (IsUnderPostmaster)
pqsignal(SIGQUIT, quickdie); /* hard crash time */
else
pqsignal(SIGQUIT, die); /* cancel current query and exit */
InitializeTimeouts(); /* establishes SIGALRM handler */
/*
* Ignore failure to write to frontend. Note: if frontend closes
* connection, we will notice it and exit cleanly when control next
* returns to outer loop. This seems safer than forcing exit in the
* midst of output during who-knows-what operation...
*/
pqsignal(SIGPIPE, SIG_IGN);
pqsignal(SIGUSR1, procsignal_sigusr1_handler);
pqsignal(SIGUSR2, SIG_IGN);
pqsignal(SIGFPE, FloatExceptionHandler);
/*
* Reset some signals that are accepted by postmaster but not by
* backend
*/
pqsignal(SIGCHLD, SIG_DFL); /* system() requires this on some
* platforms */
}
pqinitmask();
if (IsUnderPostmaster)
{
/* We allow SIGQUIT (quickdie) at all times */
sigdelset(&BlockSig, SIGQUIT);
}
PG_SETMASK(&BlockSig); /* block everything except SIGQUIT */
if (!IsUnderPostmaster)
{
/*
* Validate we have been given a reasonable-looking DataDir (if under
* postmaster, assume postmaster did this already).
*/
Assert(DataDir);
ValidatePgVersion(DataDir);
/* Change into DataDir (if under postmaster, was done already) */
ChangeToDataDir();
/*
* Create lockfile for data directory.
*/
CreateDataDirLockFile(false);
/* Initialize MaxBackends (if under postmaster, was done already) */
InitializeMaxBackends();
}
/* Early initialization */
BaseInit();
/*
* Create a per-backend PGPROC struct in shared memory, except in the
* EXEC_BACKEND case where this was done in SubPostmasterMain. We must do
* this before we can use LWLocks (and in the EXEC_BACKEND case we already
* had to do some stuff with LWLocks).
*/
#ifdef EXEC_BACKEND
if (!IsUnderPostmaster)
InitProcess();
#else
InitProcess();
#endif
/* We need to allow SIGINT, etc during the initial transaction */
PG_SETMASK(&UnBlockSig);
/*
* General initialization.
*
* NOTE: if you are tempted to add code in this vicinity, consider putting
* it inside InitPostgres() instead. In particular, anything that
* involves database access should be there, not here.
*/
InitPostgres(dbname, InvalidOid, username, NULL);
/*
* If the PostmasterContext is still around, recycle the space; we don't
* need it anymore after InitPostgres completes. Note this does not trash
* *MyProcPort, because ConnCreate() allocated that space with malloc()
* ... else we'd need to copy the Port data first. Also, subsidiary data
* such as the username isn't lost either; see ProcessStartupPacket().
*/
if (PostmasterContext)
{
MemoryContextDelete(PostmasterContext);
PostmasterContext = NULL;
}
SetProcessingMode(NormalProcessing);
/*
* Now all GUC states are fully set up. Report them to client if
* appropriate.
*/
BeginReportingGUCOptions();
/*
* Also set up handler to log session end; we have to wait till now to be
* sure Log_disconnections has its final value.
*/
if (IsUnderPostmaster && Log_disconnections)
on_proc_exit(log_disconnections, 0);
/* Perform initialization specific to a WAL sender process. */
if (am_walsender)
InitWalSender();
/*
* process any libraries that should be preloaded at backend start (this
* likewise can't be done until GUC settings are complete)
*/
process_session_preload_libraries();
/*
* Send this backend's cancellation info to the frontend.
*/
if (whereToSendOutput == DestRemote &&
PG_PROTOCOL_MAJOR(FrontendProtocol) >= 2)
{
StringInfoData buf;
pq_beginmessage(&buf, 'K');
pq_sendint(&buf, (int32) MyProcPid, sizeof(int32));
pq_sendint(&buf, (int32) MyCancelKey, sizeof(int32));
pq_endmessage(&buf);
/* Need not flush since ReadyForQuery will do it. */
}
/* Welcome banner for standalone case */
if (whereToSendOutput == DestDebug)
printf("\nPostgreSQL stand-alone backend %s\n", PG_VERSION);
/*
* Create the memory context we will use in the main loop.
*
* MessageContext is reset once per iteration of the main loop, ie, upon
* completion of processing of each command message from the client.
*/
MessageContext = AllocSetContextCreate(TopMemoryContext,
"MessageContext",
ALLOCSET_DEFAULT_MINSIZE,
ALLOCSET_DEFAULT_INITSIZE,
ALLOCSET_DEFAULT_MAXSIZE);
/*
* Remember stand-alone backend startup time
*/
if (!IsUnderPostmaster)
PgStartTime = GetCurrentTimestamp();
/*
* POSTGRES main processing loop begins here
*
* If an exception is encountered, processing resumes here so we abort the
* current transaction and start a new one.
*
* You might wonder why this isn't coded as an infinite loop around a
* PG_TRY construct. The reason is that this is the bottom of the
* exception stack, and so with PG_TRY there would be no exception handler
* in force at all during the CATCH part. By leaving the outermost setjmp
* always active, we have at least some chance of recovering from an error
* during error recovery. (If we get into an infinite loop thereby, it
* will soon be stopped by overflow of elog.c's internal state stack.)
*
* Note that we use sigsetjmp(..., 1), so that this function's signal mask
* (to wit, UnBlockSig) will be restored when longjmp'ing to here. This
* is essential in case we longjmp'd out of a signal handler on a platform
* where that leaves the signal blocked. It's not redundant with the
* unblock in AbortTransaction() because the latter is only called if we
* were inside a transaction.
*/
if (sigsetjmp(local_sigjmp_buf, 1) != 0)
{
/*
* NOTE: if you are tempted to add more code in this if-block,
* consider the high probability that it should be in
* AbortTransaction() instead. The only stuff done directly here
* should be stuff that is guaranteed to apply *only* for outer-level
* error recovery, such as adjusting the FE/BE protocol status.
*/
/* Since not using PG_TRY, must reset error stack by hand */
error_context_stack = NULL;
/* Prevent interrupts while cleaning up */
HOLD_INTERRUPTS();
/*
* Forget any pending QueryCancel request, since we're returning to
* the idle loop anyway, and cancel any active timeout requests. (In
* future we might want to allow some timeout requests to survive, but
* at minimum it'd be necessary to do reschedule_timeouts(), in case
* we got here because of a query cancel interrupting the SIGALRM
* interrupt handler.) Note in particular that we must clear the
* statement and lock timeout indicators, to prevent any future plain
* query cancels from being misreported as timeouts in case we're
* forgetting a timeout cancel.
*/
disable_all_timeouts(false);
QueryCancelPending = false; /* second to avoid race condition */
/*
* Turn off these interrupts too. This is only needed here and not in
* other exception-catching places since these interrupts are only
* enabled while we wait for client input.
*/
DoingCommandRead = false;
DisableNotifyInterrupt();
DisableCatchupInterrupt();
/* Make sure libpq is in a good state */
pq_comm_reset();
/* Report the error to the client and/or server log */
EmitErrorReport();
/*
* Make sure debug_query_string gets reset before we possibly clobber
* the storage it points at.
*/
debug_query_string = NULL;
/*
* Abort the current transaction in order to recover.
*/
AbortCurrentTransaction();
if (am_walsender)
WalSndErrorCleanup();
/*
* We can't release replication slots inside AbortTransaction() as we
* need to be able to start and abort transactions while having a slot
* acquired. But we never need to hold them across top level errors,
* so releasing here is fine. There's another cleanup in ProcKill()
* ensuring we'll correctly cleanup on FATAL errors as well.
*/
if (MyReplicationSlot != NULL)
ReplicationSlotRelease();
/*
* Now return to normal top-level context and clear ErrorContext for
* next time.
*/
MemoryContextSwitchTo(TopMemoryContext);
FlushErrorState();
/*
* If we were handling an extended-query-protocol message, initiate
* skip till next Sync. This also causes us not to issue
* ReadyForQuery (until we get Sync).
*/
if (doing_extended_query_message)
ignore_till_sync = true;
/* We don't have a transaction command open anymore */
xact_started = false;
/*
* If an error occurred while we were reading a message from the
* client, we have potentially lost track of where the previous
* message ends and the next one begins. Even though we have
* otherwise recovered from the error, we cannot safely read any more
* messages from the client, so there isn't much we can do with the
* connection anymore.
*/
if (pq_is_reading_msg())
ereport(FATAL,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("terminating connection because protocol sync was lost")));
/* Now we can allow interrupts again */
RESUME_INTERRUPTS();
}
/* We can now handle ereport(ERROR) */
PG_exception_stack = &local_sigjmp_buf;
if (!ignore_till_sync)
send_ready_for_query = true; /* initially, or after error */
/*
* Non-error queries loop here.
*/
for (;;)
{
/*
* At top of loop, reset extended-query-message flag, so that any
* errors encountered in "idle" state don't provoke skip.
*/
doing_extended_query_message = false;
/*
* Release storage left over from prior query cycle, and create a new
* query input buffer in the cleared MessageContext.
*/
MemoryContextSwitchTo(MessageContext);
MemoryContextResetAndDeleteChildren(MessageContext);
initStringInfo(&input_message);
/*
* (1) If we've reached idle state, tell the frontend we're ready for
* a new query.
*
* Note: this includes fflush()'ing the last of the prior output.
*
* This is also a good time to send collected statistics to the
* collector, and to update the PS stats display. We avoid doing
* those every time through the message loop because it'd slow down
* processing of batched messages, and because we don't want to report
* uncommitted updates (that confuses autovacuum). The notification
* processor wants a call too, if we are not in a transaction block.
*/
if (send_ready_for_query)
{
if (IsAbortedTransactionBlockState())
{
set_ps_display("idle in transaction (aborted)", false);
pgstat_report_activity(STATE_IDLEINTRANSACTION_ABORTED, NULL);
}
else if (IsTransactionOrTransactionBlock())
{
set_ps_display("idle in transaction", false);
pgstat_report_activity(STATE_IDLEINTRANSACTION, NULL);
}
else
{
ProcessCompletedNotifies();
pgstat_report_stat(false);
set_ps_display("idle", false);
pgstat_report_activity(STATE_IDLE, NULL);
}
ReadyForQuery(whereToSendOutput);
send_ready_for_query = false;
}
/*
* (2) Allow asynchronous signals to be executed immediately if they
* come in while we are waiting for client input. (This must be
* conditional since we don't want, say, reads on behalf of COPY FROM
* STDIN doing the same thing.)
*/
DoingCommandRead = true;
/*
* (3) read a command (loop blocks here)
*/
firstchar = ReadCommand(&input_message);
/*
* (4) disable async signal conditions again.
*
* Query cancel is supposed to be a no-op when there is no query in
* progress, so if a query cancel arrived while we were idle, just
* reset QueryCancelPending. ProcessInterrupts() has that effect when
* it's called when DoingCommandRead is set, so check for interrupts
* before resetting DoingCommandRead.
*/
CHECK_FOR_INTERRUPTS();
DoingCommandRead = false;
/*
* (5) check for any other interesting events that happened while we
* slept.
*/
if (got_SIGHUP)
{
got_SIGHUP = false;
ProcessConfigFile(PGC_SIGHUP);
}
/*
* (6) process the command. But ignore it if we're skipping till
* Sync.
*/
if (ignore_till_sync && firstchar != EOF)
continue;
switch (firstchar)
{
case 'Q': /* simple query */
{
const char *query_string;
/* Set statement_timestamp() */
SetCurrentStatementStartTimestamp();
query_string = pq_getmsgstring(&input_message);
pq_getmsgend(&input_message);
if (am_walsender)
exec_replication_command(query_string);
else
exec_simple_query(query_string);
send_ready_for_query = true;
}
break;
case 'P': /* parse */
{
const char *stmt_name;
const char *query_string;
int numParams;
Oid *paramTypes = NULL;
forbidden_in_wal_sender(firstchar);
/* Set statement_timestamp() */
SetCurrentStatementStartTimestamp();
stmt_name = pq_getmsgstring(&input_message);
query_string = pq_getmsgstring(&input_message);
numParams = pq_getmsgint(&input_message, 2);
if (numParams > 0)
{
int i;
paramTypes = (Oid *) palloc(numParams * sizeof(Oid));
for (i = 0; i < numParams; i++)
paramTypes[i] = pq_getmsgint(&input_message, 4);
}
pq_getmsgend(&input_message);
exec_parse_message(query_string, stmt_name,
paramTypes, numParams);
}
break;
case 'B': /* bind */
forbidden_in_wal_sender(firstchar);
/* Set statement_timestamp() */
SetCurrentStatementStartTimestamp();
/*
* this message is complex enough that it seems best to put
* the field extraction out-of-line
*/
exec_bind_message(&input_message);
break;
case 'E': /* execute */
{
const char *portal_name;
int max_rows;
forbidden_in_wal_sender(firstchar);
/* Set statement_timestamp() */
SetCurrentStatementStartTimestamp();
portal_name = pq_getmsgstring(&input_message);
max_rows = pq_getmsgint(&input_message, 4);
pq_getmsgend(&input_message);
exec_execute_message(portal_name, max_rows);
}
break;
case 'F': /* fastpath function call */
forbidden_in_wal_sender(firstchar);
/* Set statement_timestamp() */
SetCurrentStatementStartTimestamp();
/* Report query to various monitoring facilities. */
pgstat_report_activity(STATE_FASTPATH, NULL);
set_ps_display("<FASTPATH>", false);
/* start an xact for this function invocation */
start_xact_command();
/*
* Note: we may at this point be inside an aborted
* transaction. We can't throw error for that until we've
* finished reading the function-call message, so
* HandleFunctionRequest() must check for it after doing so.
* Be careful not to do anything that assumes we're inside a
* valid transaction here.
*/
/* switch back to message context */
MemoryContextSwitchTo(MessageContext);
if (HandleFunctionRequest(&input_message) == EOF)
{
/* lost frontend connection during F message input */
/*
* Reset whereToSendOutput to prevent ereport from
* attempting to send any more messages to client.
*/
if (whereToSendOutput == DestRemote)
whereToSendOutput = DestNone;
proc_exit(0);
}
/* commit the function-invocation transaction */
finish_xact_command();
send_ready_for_query = true;
break;
case 'C': /* close */
{
int close_type;
const char *close_target;
forbidden_in_wal_sender(firstchar);
close_type = pq_getmsgbyte(&input_message);
close_target = pq_getmsgstring(&input_message);
pq_getmsgend(&input_message);
switch (close_type)
{
case 'S':
if (close_target[0] != '\0')
DropPreparedStatement(close_target, false);
else
{
/* special-case the unnamed statement */
drop_unnamed_stmt();
}
break;
case 'P':
{
Portal portal;
portal = GetPortalByName(close_target);
if (PortalIsValid(portal))
PortalDrop(portal, false);
}
break;
default:
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("invalid CLOSE message subtype %d",
close_type)));
break;
}
if (whereToSendOutput == DestRemote)
pq_putemptymessage('3'); /* CloseComplete */
}
break;
case 'D': /* describe */
{
int describe_type;
const char *describe_target;
forbidden_in_wal_sender(firstchar);
/* Set statement_timestamp() (needed for xact) */
SetCurrentStatementStartTimestamp();
describe_type = pq_getmsgbyte(&input_message);
describe_target = pq_getmsgstring(&input_message);
pq_getmsgend(&input_message);
switch (describe_type)
{
case 'S':
exec_describe_statement_message(describe_target);
break;
case 'P':
exec_describe_portal_message(describe_target);
break;
default:
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("invalid DESCRIBE message subtype %d",
describe_type)));
break;
}
}
break;
case 'H': /* flush */
pq_getmsgend(&input_message);
if (whereToSendOutput == DestRemote)
pq_flush();
break;
case 'S': /* sync */
pq_getmsgend(&input_message);
finish_xact_command();
send_ready_for_query = true;
break;
/*
* 'X' means that the frontend is closing down the socket. EOF
* means unexpected loss of frontend connection. Either way,
* perform normal shutdown.
*/
case 'X':
case EOF:
/*
* Reset whereToSendOutput to prevent ereport from attempting
* to send any more messages to client.
*/
if (whereToSendOutput == DestRemote)
whereToSendOutput = DestNone;
/*
* NOTE: if you are tempted to add more code here, DON'T!
* Whatever you had in mind to do should be set up as an
* on_proc_exit or on_shmem_exit callback, instead. Otherwise
* it will fail to be called during other backend-shutdown
* scenarios.
*/
proc_exit(0);
case 'd': /* copy data */
case 'c': /* copy done */
case 'f': /* copy fail */
/*
* Accept but ignore these messages, per protocol spec; we
* probably got here because a COPY failed, and the frontend
* is still sending data.
*/
break;
default:
ereport(FATAL,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("invalid frontend message type %d",
firstchar)));
}
} /* end of input-reading loop */
}
| 0 |
[
"CWE-89"
] |
postgres
|
2b3a8b20c2da9f39ffecae25ab7c66974fbc0d3b
| 186,300,637,815,117,600,000,000,000,000,000,000,000 | 722 |
Be more careful to not lose sync in the FE/BE protocol.
If any error occurred while we were in the middle of reading a protocol
message from the client, we could lose sync, and incorrectly try to
interpret a part of another message as a new protocol message. That will
usually lead to an "invalid frontend message" error that terminates the
connection. However, this is a security issue because an attacker might
be able to deliberately cause an error, inject a Query message in what's
supposed to be just user data, and have the server execute it.
We were quite careful to not have CHECK_FOR_INTERRUPTS() calls or other
operations that could ereport(ERROR) in the middle of processing a message,
but a query cancel interrupt or statement timeout could nevertheless cause
it to happen. Also, the V2 fastpath and COPY handling were not so careful.
It's very difficult to recover in the V2 COPY protocol, so we will just
terminate the connection on error. In practice, that's what happened
previously anyway, as we lost protocol sync.
To fix, add a new variable in pqcomm.c, PqCommReadingMsg, that is set
whenever we're in the middle of reading a message. When it's set, we cannot
safely ERROR out and continue running, because we might've read only part
of a message. PqCommReadingMsg acts somewhat similarly to critical sections
in that if an error occurs while it's set, the error handler will force the
connection to be terminated, as if the error was FATAL. It's not
implemented by promoting ERROR to FATAL in elog.c, like ERROR is promoted
to PANIC in critical sections, because we want to be able to use
PG_TRY/CATCH to recover and regain protocol sync. pq_getmessage() takes
advantage of that to prevent an OOM error from terminating the connection.
To prevent unnecessary connection terminations, add a holdoff mechanism
similar to HOLD/RESUME_INTERRUPTS() that can be used hold off query cancel
interrupts, but still allow die interrupts. The rules on which interrupts
are processed when are now a bit more complicated, so refactor
ProcessInterrupts() and the calls to it in signal handlers so that the
signal handlers always call it if ImmediateInterruptOK is set, and
ProcessInterrupts() can decide to not do anything if the other conditions
are not met.
Reported by Emil Lenngren. Patch reviewed by Noah Misch and Andres Freund.
Backpatch to all supported versions.
Security: CVE-2015-0244
|
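The pq_is_reading_msg() test in the loop above rests on a small flag in pqcomm.c; a sketch of that mechanism as the commit message describes it, with the bodies reduced to the essentials:
#include <stdbool.h>  /* in-tree, postgres's c.h provides bool */

/* sketch of the PqCommReadingMsg guard described above */
static bool PqCommReadingMsg = false;

void
pq_startmsgread(void)
{
    PqCommReadingMsg = true;   /* an ERROR from here on loses sync, so
                                * the error handler must terminate */
}

void
pq_endmsgread(void)
{
    PqCommReadingMsg = false;  /* whole message consumed; ERROR is safe */
}

bool
pq_is_reading_msg(void)
{
    return PqCommReadingMsg;
}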
backref_match_at_nested_level(regex_t* reg,
StackType* top, StackType* stk_base,
int ignore_case, int case_fold_flag,
int nest, int mem_num, MemNumType* memp,
UChar** s, const UChar* send)
{
UChar *ss, *p, *pstart, *pend = NULL_UCHARP;
int level;
StackType* k;
level = 0;
k = top;
k--;
while (k >= stk_base) {
if (k->type == STK_CALL_FRAME) {
level--;
}
else if (k->type == STK_RETURN) {
level++;
}
else if (level == nest) {
if (k->type == STK_MEM_START) {
if (mem_is_in_memp(k->zid, mem_num, memp)) {
pstart = k->u.mem.pstr;
if (IS_NOT_NULL(pend)) {
if (pend - pstart > send - *s) return 0; /* or goto next_mem; */
p = pstart;
ss = *s;
if (ignore_case != 0) {
if (string_cmp_ic(reg->enc, case_fold_flag,
pstart, &ss, (int )(pend - pstart)) == 0)
return 0; /* or goto next_mem; */
}
else {
while (p < pend) {
if (*p++ != *ss++) return 0; /* or goto next_mem; */
}
}
*s = ss;
return 1;
}
}
}
else if (k->type == STK_MEM_END) {
if (mem_is_in_memp(k->zid, mem_num, memp)) {
pend = k->u.mem.pstr;
}
}
}
k--;
}
return 0;
}
| 0 |
[
"CWE-125"
] |
oniguruma
|
d3e402928b6eb3327f8f7d59a9edfa622fec557b
| 147,109,676,103,382,150,000,000,000,000,000,000,000 | 56 |
fix heap-buffer-overflow
|
static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
{
unsigned long flags;
struct sk_buff *skb;
int count = 0;
spin_lock_irqsave (&q->lock, flags);
while (!skb_queue_empty(q)) {
struct skb_data *entry;
struct urb *urb;
int retval;
skb_queue_walk(q, skb) {
entry = (struct skb_data *) skb->cb;
if (entry->state != unlink_start)
goto found;
}
break;
found:
entry->state = unlink_start;
urb = entry->urb;
/*
 * Take a reference on the URB to avoid it being
 * freed during usb_unlink_urb, which could trigger
 * a use-after-free inside usb_unlink_urb, since
 * usb_unlink_urb is always racing with the .complete
 * handler (including defer_bh).
*/
usb_get_urb(urb);
spin_unlock_irqrestore(&q->lock, flags);
// during some PM-driven resume scenarios,
// these (async) unlinks complete immediately
retval = usb_unlink_urb (urb);
if (retval != -EINPROGRESS && retval != 0)
netdev_dbg(dev->net, "unlink urb err, %d\n", retval);
else
count++;
usb_put_urb(urb);
spin_lock_irqsave(&q->lock, flags);
}
spin_unlock_irqrestore (&q->lock, flags);
return count;
}
| 0 |
[
"CWE-703"
] |
linux
|
1666984c8625b3db19a9abc298931d35ab7bc64b
| 110,769,967,628,184,280,000,000,000,000,000,000,000 | 44 |
usbnet: cleanup after bind() in probe()
If bind() works, but a later error forces bailing out
of probe(), work and a timer may already be scheduled.
They must be killed. This fixes an error case related to
the double free reported in
http://www.spinics.net/lists/netdev/msg367669.html
and needs to go on top of Linus' fix to cdc-ncm.
Signed-off-by: Oliver Neukum <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
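A sketch of the probe() error path that message describes (the goto labels are assumptions): once bind() has succeeded, a later failure must undo bind() and kill anything it scheduled, including the timer behind the reported double free:
/* sketch: error path taken after a successful bind() */
out3:
        if (info->unbind)
                info->unbind(dev, udev);  /* undo bind()'s setup */
out1:
        /* work and timers scheduled by bind() must not outlive probe() */
        cancel_work_sync(&dev->kevent);
        del_timer_sync(&dev->delay);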
static int io_register_enable_rings(struct io_ring_ctx *ctx)
{
if (!(ctx->flags & IORING_SETUP_R_DISABLED))
return -EBADFD;
if (ctx->restrictions.registered)
ctx->restricted = 1;
io_sq_offload_start(ctx);
return 0;
}
| 0 |
[
"CWE-667"
] |
linux
|
3ebba796fa251d042be42b929a2d916ee5c34a49
| 257,579,878,806,346,370,000,000,000,000,000,000,000 | 11 |
io_uring: ensure that SQPOLL thread is started for exit
If we create it in a disabled state because IORING_SETUP_R_DISABLED is
set on ring creation, we need to ensure that we've kicked the thread if
we're exiting before it's been explicitly disabled. Otherwise we can run
into a deadlock where exit is waiting go park the SQPOLL thread, but the
SQPOLL thread itself is waiting to get a signal to start.
That results in the below trace of both tasks hung, waiting on each other:
INFO: task syz-executor458:8401 blocked for more than 143 seconds.
Not tainted 5.11.0-next-20210226-syzkaller #0
"echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
task:syz-executor458 state:D stack:27536 pid: 8401 ppid: 8400 flags:0x00004004
Call Trace:
context_switch kernel/sched/core.c:4324 [inline]
__schedule+0x90c/0x21a0 kernel/sched/core.c:5075
schedule+0xcf/0x270 kernel/sched/core.c:5154
schedule_timeout+0x1db/0x250 kernel/time/timer.c:1868
do_wait_for_common kernel/sched/completion.c:85 [inline]
__wait_for_common kernel/sched/completion.c:106 [inline]
wait_for_common kernel/sched/completion.c:117 [inline]
wait_for_completion+0x168/0x270 kernel/sched/completion.c:138
io_sq_thread_park fs/io_uring.c:7115 [inline]
io_sq_thread_park+0xd5/0x130 fs/io_uring.c:7103
io_uring_cancel_task_requests+0x24c/0xd90 fs/io_uring.c:8745
__io_uring_files_cancel+0x110/0x230 fs/io_uring.c:8840
io_uring_files_cancel include/linux/io_uring.h:47 [inline]
do_exit+0x299/0x2a60 kernel/exit.c:780
do_group_exit+0x125/0x310 kernel/exit.c:922
__do_sys_exit_group kernel/exit.c:933 [inline]
__se_sys_exit_group kernel/exit.c:931 [inline]
__x64_sys_exit_group+0x3a/0x50 kernel/exit.c:931
do_syscall_64+0x2d/0x70 arch/x86/entry/common.c:46
entry_SYSCALL_64_after_hwframe+0x44/0xae
RIP: 0033:0x43e899
RSP: 002b:00007ffe89376d48 EFLAGS: 00000246 ORIG_RAX: 00000000000000e7
RAX: ffffffffffffffda RBX: 00000000004af2f0 RCX: 000000000043e899
RDX: 000000000000003c RSI: 00000000000000e7 RDI: 0000000000000000
RBP: 0000000000000000 R08: ffffffffffffffc0 R09: 0000000010000000
R10: 0000000000008011 R11: 0000000000000246 R12: 00000000004af2f0
R13: 0000000000000001 R14: 0000000000000000 R15: 0000000000000001
INFO: task iou-sqp-8401:8402 can't die for more than 143 seconds.
task:iou-sqp-8401 state:D stack:30272 pid: 8402 ppid: 8400 flags:0x00004004
Call Trace:
context_switch kernel/sched/core.c:4324 [inline]
__schedule+0x90c/0x21a0 kernel/sched/core.c:5075
schedule+0xcf/0x270 kernel/sched/core.c:5154
schedule_timeout+0x1db/0x250 kernel/time/timer.c:1868
do_wait_for_common kernel/sched/completion.c:85 [inline]
__wait_for_common kernel/sched/completion.c:106 [inline]
wait_for_common kernel/sched/completion.c:117 [inline]
wait_for_completion+0x168/0x270 kernel/sched/completion.c:138
io_sq_thread+0x27d/0x1ae0 fs/io_uring.c:6717
ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:294
INFO: task iou-sqp-8401:8402 blocked for more than 143 seconds.
Reported-by: [email protected]
Signed-off-by: Jens Axboe <[email protected]>
|
PERL_STATIC_INLINE regnode_offset
S_handle_named_backref(pTHX_ RExC_state_t *pRExC_state,
I32 *flagp,
char * parse_start,
char ch
)
{
regnode_offset ret;
char* name_start = RExC_parse;
U32 num = 0;
SV *sv_dat = reg_scan_name(pRExC_state, REG_RSN_RETURN_DATA);
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_HANDLE_NAMED_BACKREF;
if (RExC_parse == name_start || *RExC_parse != ch) {
/* diag_listed_as: Sequence \%s... not terminated in regex; marked by <-- HERE in m/%s/ */
vFAIL2("Sequence %.3s... not terminated", parse_start);
}
if (sv_dat) {
num = add_data( pRExC_state, STR_WITH_LEN("S"));
RExC_rxi->data->data[num]=(void*)sv_dat;
SvREFCNT_inc_simple_void_NN(sv_dat);
}
RExC_sawback = 1;
ret = reganode(pRExC_state,
((! FOLD)
? NREF
: (ASCII_FOLD_RESTRICTED)
? NREFFA
: (AT_LEAST_UNI_SEMANTICS)
? NREFFU
: (LOC)
? NREFFL
: NREFF),
num);
*flagp |= HASWIDTH;
Set_Node_Offset(REGNODE_p(ret), parse_start+1);
Set_Node_Cur_Length(REGNODE_p(ret), parse_start);
nextchar(pRExC_state);
return ret;
}
| 0 |
[
"CWE-190",
"CWE-787"
] |
perl5
|
897d1f7fd515b828e4b198d8b8bef76c6faf03ed
| 323,505,587,434,031,700,000,000,000,000,000,000,000 | 44 |
regcomp.c: Prevent integer overflow from nested regex quantifiers.
(CVE-2020-10543) On 32bit systems the size calculations for nested regular
expression quantifiers could overflow causing heap memory corruption.
Fixes: Perl/perl5-security#125
(cherry picked from commit bfd31397db5dc1a5c5d3e0a1f753a4f89a736e71)
|
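The CVE-2020-10543 overflow is not in handle_named_backref() above; a hedged sketch of the guard the message implies for the quantifier-combining code (variable names hypothetical), refusing products that wrap on 32-bit builds:
/* sketch: refuse nested quantifier bounds whose product would wrap,
 * instead of letting the multiplication overflow silently */
if (outer_max && inner_max > U32_MAX / outer_max)
    vFAIL("Regexp quantifier nesting overflows");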
int cgroup_migrate(struct task_struct *leader, bool threadgroup,
struct cgroup_mgctx *mgctx)
{
struct task_struct *task;
/*
* Prevent freeing of tasks while we take a snapshot. Tasks that are
* already PF_EXITING could be freed from underneath us unless we
* take an rcu_read_lock.
*/
spin_lock_irq(&css_set_lock);
rcu_read_lock();
task = leader;
do {
cgroup_migrate_add_task(task, mgctx);
if (!threadgroup)
break;
} while_each_thread(leader, task);
rcu_read_unlock();
spin_unlock_irq(&css_set_lock);
return cgroup_migrate_execute(mgctx);
}
| 0 |
[
"CWE-416"
] |
linux
|
a06247c6804f1a7c86a2e5398a4c1f1db1471848
| 173,599,067,864,295,370,000,000,000,000,000,000,000 | 23 |
psi: Fix uaf issue when psi trigger is destroyed while being polled
With a write operation on psi files replacing an old trigger with a new one,
the lifetime of its waitqueue is totally arbitrary. Overwriting an
existing trigger causes its waitqueue to be freed and pending poll()
will stumble on trigger->event_wait which was destroyed.
Fix this by disallowing to redefine an existing psi trigger. If a write
operation is used on a file descriptor with an already existing psi
trigger, the operation will fail with EBUSY error.
Also bypass a check for psi_disabled in the psi_trigger_destroy as the
flag can be flipped after the trigger is created, leading to a memory
leak.
Fixes: 0e94682b73bf ("psi: introduce psi monitor")
Reported-by: [email protected]
Suggested-by: Linus Torvalds <[email protected]>
Analyzed-by: Eric Biggers <[email protected]>
Signed-off-by: Suren Baghdasaryan <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Eric Biggers <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Cc: [email protected]
Link: https://lore.kernel.org/r/[email protected]
|
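A sketch of the EBUSY rule that message describes (the surrounding psi write handler and its locking are assumed): once a trigger exists on a psi file descriptor, a second write must not replace it while poll() may still hold the old waitqueue:
/* sketch: allow only one trigger per file descriptor */
if (seq->private) {
    mutex_unlock(&seq->lock);
    return -EBUSY;  /* redefining would free a waitqueue poll() uses */
}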
static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
unsigned long addr,
const void *old,
const void *new,
unsigned int bytes,
struct x86_exception *exception)
{
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
u64 page_line_mask;
unsigned long hva;
gpa_t gpa;
int r;
/* guests cmpxchg8b have to be emulated atomically */
if (bytes > 8 || (bytes & (bytes - 1)))
goto emul_write;
gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);
if (gpa == UNMAPPED_GVA ||
(gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
goto emul_write;
/*
* Emulate the atomic as a straight write to avoid #AC if SLD is
* enabled in the host and the access splits a cache line.
*/
if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
page_line_mask = ~(cache_line_size() - 1);
else
page_line_mask = PAGE_MASK;
if (((gpa + bytes - 1) & page_line_mask) != (gpa & page_line_mask))
goto emul_write;
hva = kvm_vcpu_gfn_to_hva(vcpu, gpa_to_gfn(gpa));
if (kvm_is_error_hva(hva))
goto emul_write;
hva += offset_in_page(gpa);
switch (bytes) {
case 1:
r = emulator_try_cmpxchg_user(u8, hva, old, new);
break;
case 2:
r = emulator_try_cmpxchg_user(u16, hva, old, new);
break;
case 4:
r = emulator_try_cmpxchg_user(u32, hva, old, new);
break;
case 8:
r = emulator_try_cmpxchg_user(u64, hva, old, new);
break;
default:
BUG();
}
if (r < 0)
return X86EMUL_UNHANDLEABLE;
if (r)
return X86EMUL_CMPXCHG_FAILED;
kvm_page_track_write(vcpu, gpa, new, bytes);
return X86EMUL_CONTINUE;
emul_write:
printk_once(KERN_WARNING "kvm: emulating exchange as write\n");
return emulator_write_emulated(ctxt, addr, new, bytes, exception);
}
| 0 |
[
"CWE-476"
] |
linux
|
fee060cd52d69c114b62d1a2948ea9648b5131f9
| 185,122,673,807,582,670,000,000,000,000,000,000,000 | 72 |
KVM: x86: avoid calling x86 emulator without a decoded instruction
Whenever x86_decode_emulated_instruction() detects a breakpoint, it
returns the value that kvm_vcpu_check_breakpoint() writes into its
pass-by-reference second argument. Unfortunately this is completely
bogus because the expected outcome of x86_decode_emulated_instruction
is an EMULATION_* value.
Then, if kvm_vcpu_check_breakpoint() does "*r = 0" (corresponding to
a KVM_EXIT_DEBUG userspace exit), it is misunderstood as EMULATION_OK
and x86_emulate_instruction() is called without having decoded the
instruction. This causes various havoc from running with a stale
emulation context.
The fix is to move the call to kvm_vcpu_check_breakpoint() where it was
before commit 4aa2691dcbd3 ("KVM: x86: Factor out x86 instruction
emulation with decoding") introduced x86_decode_emulated_instruction().
The other caller of the function does not need breakpoint checks,
because it is invoked as part of a vmexit and the processor has already
checked those before executing the instruction that #GP'd.
This fixes CVE-2022-1852.
Reported-by: Qiuhao Li <[email protected]>
Reported-by: Gaoning Pan <[email protected]>
Reported-by: Yongkang Jia <[email protected]>
Fixes: 4aa2691dcbd3 ("KVM: x86: Factor out x86 instruction emulation with decoding")
Cc: [email protected]
Signed-off-by: Sean Christopherson <[email protected]>
Message-Id: <[email protected]>
[Rewrote commit message according to Qiuhao's report, since a patch
already existed to fix the bug. - Paolo]
Signed-off-by: Paolo Bonzini <[email protected]>
|
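A small C illustration of the bug class in this message: a helper reports a userspace-exit decision through a value from one status domain, and the caller misreads it as a value from another. The enums and helpers below are invented for the sketch.

#include <stdio.h>

enum decode_status { DECODE_FAILED = 0, DECODE_OK = 1 };
enum exit_status   { EXIT_TO_USER = 0, STAY_IN_KERNEL = 1 };

static int breakpoint_hit(unsigned long rip) { return rip == 0x1000; }

/* Keep the two domains separate: the return value is always a
 * decode_status, and the userspace-exit decision travels through its
 * own out-parameter, so a 0 can never be read in the wrong domain. */
static enum decode_status decode_insn(unsigned long rip,
                                      enum exit_status *exit_code)
{
    if (breakpoint_hit(rip)) {
        *exit_code = EXIT_TO_USER;
        return DECODE_FAILED;   /* the emulator must not run */
    }
    *exit_code = STAY_IN_KERNEL;
    return DECODE_OK;
}

int main(void)
{
    enum exit_status e;
    if (decode_insn(0x1000, &e) != DECODE_OK)
        printf("breakpoint: exit=%d, emulator not invoked\n", (int)e);
    return 0;
}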
static int scsi_setup_flush_cmnd(struct scsi_device *sdp, struct request *rq)
{
rq->timeout = SD_FLUSH_TIMEOUT;
rq->retries = SD_MAX_RETRIES;
rq->cmd[0] = SYNCHRONIZE_CACHE;
rq->cmd_len = 10;
return scsi_setup_blk_pc_cmnd(sdp, rq);
}
| 0 |
[
"CWE-284",
"CWE-264"
] |
linux
|
0bfc96cb77224736dfa35c3c555d37b3646ef35e
| 283,227,210,278,221,650,000,000,000,000,000,000,000 | 9 |
block: fail SCSI passthrough ioctls on partition devices
Linux allows executing the SG_IO ioctl on a partition or LVM volume, and
will pass the command to the underlying block device. This is
well-known, but it is also a large security problem when (via Unix
permissions, ACLs, SELinux or a combination thereof) a program or user
needs to be granted access only to part of the disk.
This patch lets partitions forward a small set of harmless ioctls;
others are logged with printk so that we can see which ioctls are
actually sent. In my tests only CDROM_GET_CAPABILITY actually occurred.
Of course it was being sent to a (partition on a) hard disk, so it would
have failed with ENOTTY and the patch isn't changing anything in
practice. Still, I'm treating it specially to avoid spamming the logs.
In principle, this restriction should include programs running with
CAP_SYS_RAWIO. If for example I let a program access /dev/sda2 and
/dev/sdb, it still should not be able to read/write outside the
boundaries of /dev/sda2 independent of the capabilities. However, for
now programs with CAP_SYS_RAWIO will still be allowed to send the
ioctls. Their actions will still be logged.
This patch does not affect the non-libata IDE driver. That driver
however already tests for bd != bd->bd_contains before issuing some
ioctl; it could be restricted further to forbid these ioctls even for
programs running with CAP_SYS_ADMIN/CAP_SYS_RAWIO.
Cc: [email protected]
Cc: Jens Axboe <[email protected]>
Cc: James Bottomley <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
[ Make it also print the command name when warning - Linus ]
Signed-off-by: Linus Torvalds <[email protected]>
|
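A hedged sketch of the whitelist approach the message describes: a partition forwards only a short list of harmless ioctls and logs everything else. The helper is illustrative; CDROM_GET_CAPABILITY's value is the one from linux/cdrom.h.

#include <stdio.h>

#define CDROM_GET_CAPABILITY 0x5331   /* from linux/cdrom.h */

static int partition_ioctl_allowed(unsigned int cmd, int is_whole_disk)
{
    if (is_whole_disk)
        return 1;                /* whole-disk node: unrestricted */
    switch (cmd) {
    case CDROM_GET_CAPABILITY:
        return 1;                /* harmless query, fine on a partition */
    default:
        fprintf(stderr, "blocked ioctl 0x%x on partition\n", cmd);
        return 0;                /* SG_IO etc. must not reach the whole disk */
    }
}

The point of the design is that granting a user access to /dev/sda2 must not implicitly grant raw command access to all of /dev/sda.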
int STDCALL mysql_reset_connection(MYSQL *mysql)
{
int rc;
/* check if connection handler is active */
if (IS_CONNHDLR_ACTIVE(mysql))
{
if (mysql->extension->conn_hdlr->plugin && mysql->extension->conn_hdlr->plugin->reset)
return(mysql->extension->conn_hdlr->plugin->reset(mysql));
}
/* skip result sets */
if (mysql->status == MYSQL_STATUS_USE_RESULT ||
mysql->status == MYSQL_STATUS_GET_RESULT ||
mysql->status & SERVER_MORE_RESULTS_EXIST)
{
mthd_my_skip_result(mysql);
mysql->status= MYSQL_STATUS_READY;
}
rc= ma_simple_command(mysql, COM_RESET_CONNECTION, 0, 0, 0, 0);
if (rc && mysql->options.reconnect)
{
    /* The reset itself failed; retry once so the reconnect logic can kick in */
rc= ma_simple_command(mysql, COM_RESET_CONNECTION,0,0,0,0);
}
if (rc)
return 1;
/* reset the connection in all active statements */
ma_invalidate_stmts(mysql, "mysql_reset_connection()");
free_old_query(mysql);
mysql->status= MYSQL_STATUS_READY;
mysql->affected_rows= ~(my_ulonglong)0;
mysql->insert_id= 0;
return 0;
}
| 0 |
[] |
mariadb-connector-c
|
27b2f3d1f1550dfaee0f63a331a406ab31c1b37e
| 275,860,702,094,216,640,000,000,000,000,000,000,000 | 37 |
various checks for corrupted packets in the protocol
also: check the return value of unpack_fields()
|
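A minimal sketch of the two checks the message asks for: validate a wire-supplied length before trusting it, and propagate the helper's failure instead of discarding it. The signature of unpack_fields() here is invented; only the name comes from the message.

#include <stdint.h>
#include <string.h>

static int unpack_fields(const uint8_t *pkt, size_t pkt_len,
                         uint8_t *out, size_t out_cap)
{
    if (pkt_len < 1)
        return -1;                       /* truncated packet */
    size_t field_len = pkt[0];
    if (field_len > pkt_len - 1 || field_len > out_cap)
        return -1;                       /* length field lies about payload */
    memcpy(out, pkt + 1, field_len);
    return (int)field_len;
}

static int handle_packet(const uint8_t *pkt, size_t len)
{
    uint8_t buf[255];
    int n = unpack_fields(pkt, len, buf, sizeof(buf));
    if (n < 0)
        return -1;                       /* the fix: check, don't ignore */
    return 0;
}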
static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
sqe->rw_flags || sqe->buf_index)
return -EINVAL;
if (req->flags & REQ_F_FIXED_FILE)
return -EBADF;
req->close.fd = READ_ONCE(sqe->fd);
req->close.file_slot = READ_ONCE(sqe->file_index);
if (req->close.file_slot && req->close.fd)
return -EINVAL;
return 0;
}
| 0 |
[
"CWE-416"
] |
linux
|
e677edbcabee849bfdd43f1602bccbecf736a646
| 28,313,373,766,557,196,000,000,000,000,000,000,000 | 17 |
io_uring: fix race between timeout flush and removal
io_flush_timeouts() assumes the timeout isn't in the process of triggering
or being removed/canceled, so it unconditionally removes it from the
timeout list and attempts to cancel it.
Leave it on the list and let the normal timeout cancelation take care
of it.
Cc: [email protected] # 5.5+
Signed-off-by: Jens Axboe <[email protected]>
|
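A rough sketch, with invented types, of the fix strategy in the message: instead of unconditionally unlinking a timeout that may already be firing, leave it on the list and let the normal cancelation path, which sees its state, dispose of it.

#include <stdbool.h>
#include <stddef.h>

struct timeout {
    struct timeout *next;
    bool firing;             /* set (under the list lock) when triggering */
};

static void flush_timeouts(struct timeout **head)
{
    struct timeout **pp = head;
    while (*pp) {
        struct timeout *t = *pp;
        if (t->firing) {
            /* In flight: removing and cancelling it here is the race.
             * Its own completion path will unlink and free it. */
            pp = &t->next;
            continue;
        }
        *pp = t->next;       /* quiescent: safe to remove and cancel */
    }
}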
static int req_add_output_filter(lua_State *L)
{
request_rec *r = ap_lua_check_request_rec(L, 1);
const char *name = luaL_checkstring(L, 2);
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01485) "adding output filter %s",
name);
ap_add_output_filter(name, L, r, r->connection);
return 0;
}
| 0 |
[
"CWE-20"
] |
httpd
|
78eb3b9235515652ed141353d98c239237030410
| 108,633,717,268,169,530,000,000,000,000,000,000,000 | 9 |
*) SECURITY: CVE-2015-0228 (cve.mitre.org)
mod_lua: A maliciously crafted websockets PING after a script
calls r:wsupgrade() can cause a child process crash.
[Edward Lu <Chaosed0 gmail.com>]
Discovered by Guido Vranken <guidovranken gmail.com>
Submitted by: Edward Lu
Committed by: covener
git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1657261 13f79535-47bb-0310-9956-ffa450edef68
|
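A hedged C sketch of the input validation such a crash calls for: bound a client-supplied websocket payload length before allocating or reading. The names are illustrative; the 125-byte limit on control frames comes from RFC 6455.

#include <stdint.h>
#include <stdlib.h>

#define WS_MAX_CONTROL_PAYLOAD 125    /* RFC 6455, section 5.5 */

static int read_ping_payload(uint64_t claimed_len, char **out)
{
    if (claimed_len > WS_MAX_CONTROL_PAYLOAD)
        return -1;                    /* malformed PING: reject, don't crash */
    *out = malloc((size_t)claimed_len + 1);
    return *out ? 0 : -1;
}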
void TRI_InitV8ServerUtils(v8::Isolate* isolate) {
TRI_AddGlobalFunctionVocbase(
isolate, TRI_V8_ASCII_STRING(isolate, "SYS_CLUSTER_API_JWT_POLICY"),
JS_ClusterApiJwtPolicy, true);
TRI_AddGlobalFunctionVocbase(
isolate, TRI_V8_ASCII_STRING(isolate, "SYS_IS_FOXX_API_DISABLED"),
JS_IsFoxxApiDisabled, true);
TRI_AddGlobalFunctionVocbase(
isolate, TRI_V8_ASCII_STRING(isolate, "SYS_IS_FOXX_STORE_DISABLED"),
JS_IsFoxxStoreDisabled, true);
TRI_AddGlobalFunctionVocbase(
isolate, TRI_V8_ASCII_STRING(isolate, "SYS_RUN_IN_RESTRICTED_CONTEXT"),
JS_RunInRestrictedContext, true);
TRI_AddGlobalFunctionVocbase(
isolate, TRI_V8_ASCII_STRING(isolate, "SYS_CREATE_HOTBACKUP"),
JS_CreateHotbackup);
// debugging functions
TRI_AddGlobalFunctionVocbase(
isolate, TRI_V8_ASCII_STRING(isolate, "SYS_DEBUG_CLEAR_FAILAT"),
JS_DebugClearFailAt);
#ifdef ARANGODB_ENABLE_FAILURE_TESTS
TRI_AddGlobalFunctionVocbase(
isolate, TRI_V8_ASCII_STRING(isolate, "SYS_DEBUG_TERMINATE"),
JS_DebugTerminate);
TRI_AddGlobalFunctionVocbase(
isolate, TRI_V8_ASCII_STRING(isolate, "SYS_DEBUG_SET_FAILAT"),
JS_DebugSetFailAt);
TRI_AddGlobalFunctionVocbase(
isolate, TRI_V8_ASCII_STRING(isolate, "SYS_DEBUG_REMOVE_FAILAT"),
JS_DebugRemoveFailAt);
TRI_AddGlobalFunctionVocbase(
isolate, TRI_V8_ASCII_STRING(isolate, "SYS_DEBUG_SHOULD_FAILAT"),
JS_DebugShouldFailAt);
#endif
// poll interval for Foxx queues
TRI_GET_GLOBALS();
FoxxFeature& foxxFeature = v8g->_server.getFeature<FoxxFeature>();
isolate->GetCurrentContext()
->Global()
->DefineOwnProperty(
TRI_IGETC, TRI_V8_ASCII_STRING(isolate, "FOXX_QUEUES_POLL_INTERVAL"),
v8::Number::New(isolate, foxxFeature.pollInterval()), v8::ReadOnly)
.FromMaybe(false); // ignore result
isolate->GetCurrentContext()
->Global()
->DefineOwnProperty(
TRI_IGETC,
TRI_V8_ASCII_STRING(isolate, "FOXX_STARTUP_WAIT_FOR_SELF_HEAL"),
v8::Boolean::New(isolate, foxxFeature.startupWaitForSelfHeal()),
v8::ReadOnly)
.FromMaybe(false); // ignore result
}
| 1 |
[
"CWE-918"
] |
arangodb
|
d7b35a6884c6b2802d34d79fb2a79fb2c9ec2175
| 72,503,858,120,705,610,000,000,000,000,000,000,000 | 58 |
[APM-78] Disable installation from remote URL (#15292) (#15343)
* [APM-78] Disable installation from remote URL (#15292)
* Update CHANGELOG
* Fix clang-format
Co-authored-by: Vadim <[email protected]>
|
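A small sketch of the mitigation named in the message: detect and refuse remote installation sources before anything is fetched. The check is illustrative, not ArangoDB's code.

#include <stdio.h>
#include <string.h>

static int is_remote_source(const char *src)
{
    return strncmp(src, "http://", 7) == 0 ||
           strncmp(src, "https://", 8) == 0;
}

int main(void)
{
    const char *src = "http://attacker.example/app.zip";
    if (is_remote_source(src))
        fprintf(stderr, "installation from remote URL is disabled\n");
    return 0;
}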
SProcXkbDispatch(ClientPtr client)
{
REQUEST(xReq);
switch (stuff->data) {
case X_kbUseExtension:
return SProcXkbUseExtension(client);
case X_kbSelectEvents:
return SProcXkbSelectEvents(client);
case X_kbBell:
return SProcXkbBell(client);
case X_kbGetState:
return SProcXkbGetState(client);
case X_kbLatchLockState:
return SProcXkbLatchLockState(client);
case X_kbGetControls:
return SProcXkbGetControls(client);
case X_kbSetControls:
return SProcXkbSetControls(client);
case X_kbGetMap:
return SProcXkbGetMap(client);
case X_kbSetMap:
return SProcXkbSetMap(client);
case X_kbGetCompatMap:
return SProcXkbGetCompatMap(client);
case X_kbSetCompatMap:
return SProcXkbSetCompatMap(client);
case X_kbGetIndicatorState:
return SProcXkbGetIndicatorState(client);
case X_kbGetIndicatorMap:
return SProcXkbGetIndicatorMap(client);
case X_kbSetIndicatorMap:
return SProcXkbSetIndicatorMap(client);
case X_kbGetNamedIndicator:
return SProcXkbGetNamedIndicator(client);
case X_kbSetNamedIndicator:
return SProcXkbSetNamedIndicator(client);
case X_kbGetNames:
return SProcXkbGetNames(client);
case X_kbSetNames:
return SProcXkbSetNames(client);
case X_kbGetGeometry:
return SProcXkbGetGeometry(client);
case X_kbSetGeometry:
return SProcXkbSetGeometry(client);
case X_kbPerClientFlags:
return SProcXkbPerClientFlags(client);
case X_kbListComponents:
return SProcXkbListComponents(client);
case X_kbGetKbdByName:
return SProcXkbGetKbdByName(client);
case X_kbGetDeviceInfo:
return SProcXkbGetDeviceInfo(client);
case X_kbSetDeviceInfo:
return SProcXkbSetDeviceInfo(client);
case X_kbSetDebuggingFlags:
return SProcXkbSetDebuggingFlags(client);
default:
return BadRequest;
}
}
| 0 |
[
"CWE-191"
] |
xserver
|
144849ea27230962227e62a943b399e2ab304787
| 9,585,537,264,703,475,000,000,000,000,000,000,000 | 60 |
Fix XkbSelectEvents() integer underflow
CVE-2020-14361 ZDI-CAN 11573
This vulnerability was discovered by:
Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
Signed-off-by: Matthieu Herrb <[email protected]>
|
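A compact illustration of the underflow class behind CVE-2020-14361: subtracting a client-controlled count from a request length without first checking that it fits. The field names are invented.

#include <stdint.h>

static int parse_details(uint32_t req_len, uint32_t ndetails)
{
    /* Buggy form: if ndetails * 4 > req_len, the unsigned subtraction
     * wraps to a huge value and a later loop walks past the buffer:
     *     uint32_t remaining = req_len - ndetails * 4;
     */

    /* Fixed form: validate before subtracting. */
    if (ndetails > req_len / 4)
        return -1;               /* BadLength */
    uint32_t remaining = req_len - ndetails * 4;
    (void)remaining;             /* consumed by the real parser */
    return 0;
}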
QInt16() : value(0) {}
| 0 |
[
"CWE-908",
"CWE-787"
] |
tensorflow
|
ace0c15a22f7f054abcc1f53eabbcb0a1239a9e2
| 96,234,882,407,118,850,000,000,000,000,000,000,000 | 1 |
Default initialize fixed point Eigen types.
In certain cases, tensors are filled with default values of the type. But, for these fixed point types, these values were uninitialized. Thus, we would have uninitialized memory access bugs, some of which were caught by MSAN.
PiperOrigin-RevId: 344101137
Change-Id: I14555fda74dca3b5f1582da9008901937e3f14e2
|
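A C rendering of the idea behind the one-line constructor above: give every element a defined initial value at creation time, so buffers of "default" elements never expose uninitialized memory. The struct is a stand-in, not TensorFlow's type.

#include <stdint.h>
#include <stdlib.h>

struct qint16 { int16_t value; };

static struct qint16 *make_default_buffer(size_t n)
{
    /* calloc plays the role of ": value(0)": every element starts as a
     * defined zero instead of whatever the allocator left behind. */
    return calloc(n, sizeof(struct qint16));
}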
overwrite_dialog_response_cb (GtkDialog *dialog,
int response_id,
gpointer user_data)
{
OverwriteData *odata = user_data;
gboolean do_not_extract = FALSE;
switch (response_id) {
case _FR_RESPONSE_OVERWRITE_YES_ALL:
odata->edata->overwrite = FR_OVERWRITE_YES;
break;
case _FR_RESPONSE_OVERWRITE_YES:
odata->current_file = odata->current_file->next;
break;
case _FR_RESPONSE_OVERWRITE_NO:
{
/* remove the file from the list to extract */
GList *next = odata->current_file->next;
odata->edata->file_list = g_list_remove_link (odata->edata->file_list, odata->current_file);
_g_string_list_free (odata->current_file);
odata->current_file = next;
odata->extract_all = FALSE;
}
break;
case GTK_RESPONSE_DELETE_EVENT:
case GTK_RESPONSE_CANCEL:
do_not_extract = TRUE;
break;
default:
break;
}
gtk_widget_destroy (GTK_WIDGET (dialog));
if (do_not_extract) {
fr_window_stop_batch (odata->window);
g_free (odata);
return;
}
_fr_window_ask_overwrite_dialog (odata);
}
| 0 |
[
"CWE-22"
] |
file-roller
|
b147281293a8307808475e102a14857055f81631
| 318,699,230,762,735,850,000,000,000,000,000,000,000 | 47 |
libarchive: sanitize filenames before extracting
|
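A hedged sketch of the sanitization the fix adds: reject archive entry names that are absolute or contain a ".." component before using them as extraction targets. This is not file-roller's actual code.

#include <string.h>

static int entry_name_is_safe(const char *name)
{
    if (name[0] == '/')
        return 0;                        /* absolute path */
    for (const char *p = name; *p; ) {
        const char *seg_end = strchr(p, '/');
        size_t seg_len = seg_end ? (size_t)(seg_end - p) : strlen(p);
        if (seg_len == 2 && p[0] == '.' && p[1] == '.')
            return 0;                    /* ".." escapes the target dir */
        p += seg_len;
        if (*p == '/')
            p++;
    }
    return 1;
}

With this check, an entry such as "../../etc/passwd" is refused before extraction instead of silently writing outside the destination directory.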
void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n)
{
return get_wqe(qp, qp->sq.offset + (n << MLX5_IB_SQ_STRIDE));
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
linux
|
0625b4ba1a5d4703c7fb01c497bd6c156908af00
| 185,891,827,052,147,800,000,000,000,000,000,000,000 | 4 |
IB/mlx5: Fix leaking stack memory to userspace
mlx5_ib_create_qp_resp was never initialized and only the first 4 bytes
were written.
Fixes: 41d902cb7c32 ("RDMA/mlx5: Fix definition of mlx5_ib_create_qp_resp")
Cc: <[email protected]>
Acked-by: Leon Romanovsky <[email protected]>
Signed-off-by: Jason Gunthorpe <[email protected]>
|
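A minimal sketch of the leak pattern the message names: a response struct is only partially written, then copied out in full, so the untouched tail carries stale stack bytes. The struct layout is invented; zeroing first is the generic fix.

#include <stdint.h>
#include <string.h>

struct create_qp_resp {
    uint32_t qpn;        /* the only field the old code wrote */
    uint32_t reserved;   /* previously left as stale stack memory */
    uint64_t flags;      /* previously left as stale stack memory */
};

static void fill_resp(struct create_qp_resp *resp, uint32_t qpn)
{
    memset(resp, 0, sizeof(*resp));   /* defined contents end to end */
    resp->qpn = qpn;
}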
static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
{
struct desc_struct desc;
struct desc_ptr dt;
u64 val, cr0, cr4;
u32 base3;
u16 selector;
int i, r;
for (i = 0; i < 16; i++)
*reg_write(ctxt, i) = GET_SMSTATE(u64, smbase, 0x7ff8 - i * 8);
ctxt->_eip = GET_SMSTATE(u64, smbase, 0x7f78);
ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7f70) | X86_EFLAGS_FIXED;
val = GET_SMSTATE(u32, smbase, 0x7f68);
ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
val = GET_SMSTATE(u32, smbase, 0x7f60);
ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
cr0 = GET_SMSTATE(u64, smbase, 0x7f58);
ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u64, smbase, 0x7f50));
cr4 = GET_SMSTATE(u64, smbase, 0x7f48);
ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7f00));
val = GET_SMSTATE(u64, smbase, 0x7ed0);
ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA);
selector = GET_SMSTATE(u32, smbase, 0x7e90);
rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7e92) << 8);
set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7e94));
set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7e98));
base3 = GET_SMSTATE(u32, smbase, 0x7e9c);
ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);
dt.size = GET_SMSTATE(u32, smbase, 0x7e84);
dt.address = GET_SMSTATE(u64, smbase, 0x7e88);
ctxt->ops->set_idt(ctxt, &dt);
selector = GET_SMSTATE(u32, smbase, 0x7e70);
rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7e72) << 8);
set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7e74));
set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7e78));
base3 = GET_SMSTATE(u32, smbase, 0x7e7c);
ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);
dt.size = GET_SMSTATE(u32, smbase, 0x7e64);
dt.address = GET_SMSTATE(u64, smbase, 0x7e68);
ctxt->ops->set_gdt(ctxt, &dt);
r = rsm_enter_protected_mode(ctxt, cr0, cr4);
if (r != X86EMUL_CONTINUE)
return r;
for (i = 0; i < 6; i++) {
r = rsm_load_seg_64(ctxt, smbase, i);
if (r != X86EMUL_CONTINUE)
return r;
}
return X86EMUL_CONTINUE;
}
| 0 |
[
"CWE-284"
] |
linux
|
33ab91103b3415e12457e3104f0e4517ce12d0f3
| 90,548,750,891,371,400,000,000,000,000,000,000,000 | 61 |
KVM: x86: fix emulation of "MOV SS, null selector"
This is CVE-2017-2583. On Intel this causes a failed vmentry because
SS's type is neither 3 nor 7 (even though the manual says this check is
only done for usable SS, and the dmesg splat says that SS is unusable!).
On AMD it's worse: svm.c is confused and sets CPL to 0 in the vmcb.
The fix fabricates a data segment descriptor when SS is set to a null
selector, so that CPL and SS.DPL are set correctly in the VMCS/vmcb.
Furthermore, only allow setting SS to a NULL selector if SS.RPL < 3;
this in turn ensures CPL < 3 because RPL must be equal to CPL.
Thanks to Andy Lutomirski and Willy Tarreau for help in analyzing
the bug and deciphering the manuals.
Reported-by: Xiaohan Zhang <[email protected]>
Fixes: 79d5b4c3cd809c770d4bf9812635647016c56011
Cc: [email protected]
Signed-off-by: Paolo Bonzini <[email protected]>
|
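A sketch, with invented helpers, of the rule this fix enforces: a null SS selector is accepted only when RPL < 3, and a plain writable data segment is fabricated so CPL and SS.DPL stay consistent.

#include <stdint.h>

struct seg_desc { uint8_t type; uint8_t dpl; uint8_t present; };

static int load_ss(uint16_t selector, struct seg_desc *ss)
{
    uint8_t rpl = selector & 3;
    if ((selector & ~3u) == 0) {      /* null selector */
        if (rpl == 3)
            return -1;                /* #GP: CPL 3 may not run with null SS */
        ss->type    = 3;              /* read/write data, accessed */
        ss->dpl     = rpl;            /* keep SS.DPL == CPL == RPL */
        ss->present = 1;
        return 0;
    }
    /* non-null selectors go through the normal descriptor-table lookup */
    return 0;
}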