func
stringlengths 0
484k
| target
int64 0
1
| cwe
sequencelengths 0
4
| project
stringclasses 799
values | commit_id
stringlengths 40
40
| hash
float64 1,215,700,430,453,689,100,000,000B
340,281,914,521,452,260,000,000,000,000B
| size
int64 1
24k
| message
stringlengths 0
13.3k
|
---|---|---|---|---|---|---|---|
get_max_rate_vht_80_ss3(int mcs)
{
switch (mcs) {
case 0:
return 87800000;
case 1:
return 175500000;
case 2:
return 263300000;
case 3:
return 351000000;
case 4:
return 526500000;
case 5:
return 702000000;
case 6:
return 0;
case 7:
return 877500000;
case 8:
return 105300000;
case 9:
return 117000000;
}
return 0;
} | 0 | [
"CWE-20"
] | NetworkManager | 420784e342da4883f6debdfe10cde68507b10d27 | 45,134,720,854,532,580,000,000,000,000,000,000,000 | 26 | core: fix crash in nm_wildcard_match_check()
It's not entirely clear how to treat %NULL.
Clearly "match.interface-name=eth0" should not
match with an interface %NULL. But what about
"match.interface-name=!eth0"? It's now implemented
that negative matches still succeed against %NULL.
What about "match.interface-name=*"? That probably
should also match with %NULL. So we treat %NULL really
like "".
Against commit 11cd443448bc ('iwd: Don't call IWD methods when device
unmanaged'), we got this backtrace:
#0 0x00007f1c164069f1 in __strnlen_avx2 () at ../sysdeps/x86_64/multiarch/strlen-avx2.S:62
#1 0x00007f1c1637ac9e in __fnmatch (pattern=<optimized out>, string=<optimized out>, string@entry=0x0, flags=flags@entry=0) at fnmatch.c:379
p = 0x0
res = <optimized out>
orig_pattern = <optimized out>
n = <optimized out>
wpattern = 0x7fff8d860730 L"pci-0000:03:00.0"
ps = {__count = 0, __value = {__wch = 0, __wchb = "\000\000\000"}}
wpattern_malloc = 0x0
wstring_malloc = 0x0
wstring = <optimized out>
alloca_used = 80
__PRETTY_FUNCTION__ = "__fnmatch"
#2 0x0000564484a978bf in nm_wildcard_match_check (str=0x0, patterns=<optimized out>, num_patterns=<optimized out>) at src/core/nm-core-utils.c:1959
is_inverted = 0
is_mandatory = 0
match = <optimized out>
p = 0x564486c43fa0 "pci-0000:03:00.0"
has_optional = 0
has_any_optional = 0
i = <optimized out>
#3 0x0000564484bf4797 in check_connection_compatible (self=<optimized out>, connection=<optimized out>, error=0x0) at src/core/devices/nm-device.c:7499
patterns = <optimized out>
device_driver = 0x564486c76bd0 "veth"
num_patterns = 1
priv = 0x564486cbe0b0
__func__ = "check_connection_compatible"
device_iface = <optimized out>
local = 0x564486c99a60
conn_iface = 0x0
klass = <optimized out>
s_match = 0x564486c63df0 [NMSettingMatch]
#4 0x0000564484c38491 in check_connection_compatible (device=0x564486cbe590 [NMDeviceVeth], connection=0x564486c6b160, error=0x0) at src/core/devices/nm-device-ethernet.c:348
self = 0x564486cbe590 [NMDeviceVeth]
s_wired = <optimized out>
Fixes: 3ced486f4162 ('libnm/match: extend syntax for match patterns with '|', '&', '!' and '\\'')
https://bugzilla.redhat.com/show_bug.cgi?id=1942741 |
static void sigusr2_handler(UNUSED(int val))
{
if (!am_server)
output_summary();
close_all();
if (got_xfer_error)
_exit(RERR_PARTIAL);
_exit(0);
} | 0 | [] | rsync | b7231c7d02cfb65d291af74ff66e7d8c507ee871 | 254,144,023,036,244,540,000,000,000,000,000,000,000 | 9 | Some extra file-list safety checks. |
static int sanitize_val_alu(struct bpf_verifier_env *env,
struct bpf_insn *insn)
{
struct bpf_insn_aux_data *aux = cur_aux(env);
if (can_skip_alu_sanitation(env, insn))
return 0;
return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0);
} | 0 | [
"CWE-703",
"CWE-189"
] | linux | d3bd7413e0ca40b60cf60d4003246d067cafdeda | 87,592,046,110,053,950,000,000,000,000,000,000,000 | 10 | bpf: fix sanitation of alu op with pointer / scalar type from different paths
While 979d63d50c0c ("bpf: prevent out of bounds speculation on pointer
arithmetic") took care of rejecting alu op on pointer when e.g. pointer
came from two different map values with different map properties such as
value size, Jann reported that a case was not covered yet when a given
alu op is used in both "ptr_reg += reg" and "numeric_reg += reg" from
different branches where we would incorrectly try to sanitize based
on the pointer's limit. Catch this corner case and reject the program
instead.
Fixes: 979d63d50c0c ("bpf: prevent out of bounds speculation on pointer arithmetic")
Reported-by: Jann Horn <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
Acked-by: Alexei Starovoitov <[email protected]>
Signed-off-by: Alexei Starovoitov <[email protected]> |
static int set_permissions(struct ctl_table_header *head,
struct ctl_table *table)
{
struct user_namespace *user_ns =
container_of(head->set, struct user_namespace, set);
int mode;
/* Allow users with CAP_SYS_RESOURCE unrestrained access */
if (ns_capable(user_ns, CAP_SYS_RESOURCE))
mode = (table->mode & S_IRWXU) >> 6;
else
/* Allow all others at most read-only access */
mode = table->mode & S_IROTH;
return (mode << 6) | (mode << 3) | mode;
} | 0 | [
"CWE-416",
"CWE-362"
] | linux | 040757f738e13caaa9c5078bca79aa97e11dde88 | 335,032,300,023,724,300,000,000,000,000,000,000,000 | 15 | ucount: Remove the atomicity from ucount->count
Always increment/decrement ucount->count under the ucounts_lock. The
increments are there already and moving the decrements there means the
locking logic of the code is simpler. This simplification in the
locking logic fixes a race between put_ucounts and get_ucounts that
could result in a use-after-free because the count could go zero then
be found by get_ucounts and then be freed by put_ucounts.
A bug presumably this one was found by a combination of syzkaller and
KASAN. JongWhan Kim reported the syzkaller failure and Dmitry Vyukov
spotted the race in the code.
Cc: [email protected]
Fixes: f6b2db1a3e8d ("userns: Make the count of user namespaces per user")
Reported-by: JongHwan Kim <[email protected]>
Reported-by: Dmitry Vyukov <[email protected]>
Reviewed-by: Andrei Vagin <[email protected]>
Signed-off-by: "Eric W. Biederman" <[email protected]> |
acl_prefetch_http(struct proxy *px, struct session *s, void *l7, unsigned int opt,
const struct arg *args, struct sample *smp, int req_vol)
{
struct http_txn *txn = l7;
struct http_msg *msg = &txn->req;
/* Note: hdr_idx.v cannot be NULL in this ACL because the ACL is tagged
* as a layer7 ACL, which involves automatic allocation of hdr_idx.
*/
if (unlikely(!s || !txn))
return 0;
/* Check for a dependency on a request */
smp->type = SMP_T_BOOL;
if ((opt & SMP_OPT_DIR) == SMP_OPT_DIR_REQ) {
if (unlikely(!s->req))
return 0;
/* If the buffer does not leave enough free space at the end,
* we must first realign it.
*/
if (s->req->buf->p > s->req->buf->data &&
s->req->buf->i + s->req->buf->p > s->req->buf->data + s->req->buf->size - global.tune.maxrewrite)
buffer_slow_realign(s->req->buf);
if (unlikely(txn->req.msg_state < HTTP_MSG_BODY)) {
if ((msg->msg_state == HTTP_MSG_ERROR) ||
buffer_full(s->req->buf, global.tune.maxrewrite)) {
smp->data.uint = 0;
return -1;
}
/* Try to decode HTTP request */
if (likely(msg->next < s->req->buf->i))
http_msg_analyzer(msg, &txn->hdr_idx);
/* Still no valid request ? */
if (unlikely(msg->msg_state < HTTP_MSG_BODY)) {
if ((msg->msg_state == HTTP_MSG_ERROR) ||
buffer_full(s->req->buf, global.tune.maxrewrite)) {
smp->data.uint = 0;
return -1;
}
/* wait for final state */
smp->flags |= SMP_F_MAY_CHANGE;
return 0;
}
/* OK we just got a valid HTTP request. We have some minor
* preparation to perform so that further checks can rely
* on HTTP tests.
*/
/* If the request was parsed but was too large, we must absolutely
* return an error so that it is not processed. At the moment this
* cannot happen, but if the parsers are to change in the future,
* we want this check to be maintained.
*/
if (unlikely(s->req->buf->i + s->req->buf->p >
s->req->buf->data + s->req->buf->size - global.tune.maxrewrite)) {
msg->msg_state = HTTP_MSG_ERROR;
return 1;
}
txn->meth = find_http_meth(msg->chn->buf->p, msg->sl.rq.m_l);
if (txn->meth == HTTP_METH_GET || txn->meth == HTTP_METH_HEAD)
s->flags |= SN_REDIRECTABLE;
if (unlikely(msg->sl.rq.v_l == 0) && !http_upgrade_v09_to_v10(txn)) {
smp->data.uint = 0;
return -1;
}
}
if (req_vol && txn->rsp.msg_state != HTTP_MSG_RPBEFORE)
return 0; /* data might have moved and indexes changed */
/* otherwise everything's ready for the request */
}
else {
/* Check for a dependency on a response */
if (txn->rsp.msg_state < HTTP_MSG_BODY)
return 0;
}
/* everything's OK */
return 1;
} | 0 | [] | haproxy | aae75e3279c6c9bd136413a72dafdcd4986bb89a | 125,636,785,442,996,400,000,000,000,000,000,000,000 | 90 | BUG/CRITICAL: using HTTP information in tcp-request content may crash the process
During normal HTTP request processing, request buffers are realigned if
there are less than global.maxrewrite bytes available after them, in
order to leave enough room for rewriting headers after the request. This
is done in http_wait_for_request().
However, if some HTTP inspection happens during a "tcp-request content"
rule, this realignment is not performed. In theory this is not a problem
because empty buffers are always aligned and TCP inspection happens at
the beginning of a connection. But with HTTP keep-alive, it also happens
at the beginning of each subsequent request. So if a second request was
pipelined by the client before the first one had a chance to be forwarded,
the second request will not be realigned. Then, http_wait_for_request()
will not perform such a realignment either because the request was
already parsed and marked as such. The consequence of this, is that the
rewrite of a sufficient number of such pipelined, unaligned requests may
leave less room past the request been processed than the configured
reserve, which can lead to a buffer overflow if request processing appends
some data past the end of the buffer.
A number of conditions are required for the bug to be triggered :
- HTTP keep-alive must be enabled ;
- HTTP inspection in TCP rules must be used ;
- some request appending rules are needed (reqadd, x-forwarded-for)
- since empty buffers are always realigned, the client must pipeline
enough requests so that the buffer always contains something till
the point where there is no more room for rewriting.
While such a configuration is quite unlikely to be met (which is
confirmed by the bug's lifetime), a few people do use these features
together for very specific usages. And more importantly, writing such
a configuration and the request to attack it is trivial.
A quick workaround consists in forcing keep-alive off by adding
"option httpclose" or "option forceclose" in the frontend. Alternatively,
disabling HTTP-based TCP inspection rules enough if the application
supports it.
At first glance, this bug does not look like it could lead to remote code
execution, as the overflowing part is controlled by the configuration and
not by the user. But some deeper analysis should be performed to confirm
this. And anyway, corrupting the process' memory and crashing it is quite
trivial.
Special thanks go to Yves Lafon from the W3C who reported this bug and
deployed significant efforts to collect the relevant data needed to
understand it in less than one week.
CVE-2013-1912 was assigned to this issue.
Note that 1.4 is also affected so the fix must be backported. |
void unknown_exception(struct pt_regs *regs)
{
enum ctx_state prev_state = exception_enter();
printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
regs->nip, regs->msr, regs->trap);
_exception(SIGTRAP, regs, 0, 0);
exception_exit(prev_state);
} | 0 | [] | linux | 5d176f751ee3c6eededd984ad409bff201f436a7 | 138,489,114,311,473,200,000,000,000,000,000,000,000 | 11 | powerpc: tm: Enable transactional memory (TM) lazily for userspace
Currently the MSR TM bit is always set if the hardware is TM capable.
This adds extra overhead as it means the TM SPRS (TFHAR, TEXASR and
TFAIR) must be swapped for each process regardless of if they use TM.
For processes that don't use TM the TM MSR bit can be turned off
allowing the kernel to avoid the expensive swap of the TM registers.
A TM unavailable exception will occur if a thread does use TM and the
kernel will enable MSR_TM and leave it so for some time afterwards.
Signed-off-by: Cyril Bur <[email protected]>
Signed-off-by: Michael Ellerman <[email protected]> |
static void release_reg_references(struct bpf_verifier_env *env,
struct bpf_func_state *state,
int ref_obj_id)
{
struct bpf_reg_state *regs = state->regs, *reg;
int i;
for (i = 0; i < MAX_BPF_REG; i++)
if (regs[i].ref_obj_id == ref_obj_id)
mark_reg_unknown(env, regs, i);
bpf_for_each_spilled_reg(i, state, reg) {
if (!reg)
continue;
if (reg->ref_obj_id == ref_obj_id)
__mark_reg_unknown(env, reg);
}
} | 0 | [
"CWE-119",
"CWE-681",
"CWE-787"
] | linux | 5b9fbeb75b6a98955f628e205ac26689bcb1383e | 42,255,117,395,121,410,000,000,000,000,000,000,000 | 18 | bpf: Fix scalar32_min_max_or bounds tracking
Simon reported an issue with the current scalar32_min_max_or() implementation.
That is, compared to the other 32 bit subreg tracking functions, the code in
scalar32_min_max_or() stands out that it's using the 64 bit registers instead
of 32 bit ones. This leads to bounds tracking issues, for example:
[...]
8: R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R10=fp0 fp-8=mmmmmmmm
8: (79) r1 = *(u64 *)(r0 +0)
R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R10=fp0 fp-8=mmmmmmmm
9: R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R1_w=inv(id=0) R10=fp0 fp-8=mmmmmmmm
9: (b7) r0 = 1
10: R0_w=inv1 R1_w=inv(id=0) R10=fp0 fp-8=mmmmmmmm
10: (18) r2 = 0x600000002
12: R0_w=inv1 R1_w=inv(id=0) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
12: (ad) if r1 < r2 goto pc+1
R0_w=inv1 R1_w=inv(id=0,umin_value=25769803778) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
13: R0_w=inv1 R1_w=inv(id=0,umin_value=25769803778) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
13: (95) exit
14: R0_w=inv1 R1_w=inv(id=0,umax_value=25769803777,var_off=(0x0; 0x7ffffffff)) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
14: (25) if r1 > 0x0 goto pc+1
R0_w=inv1 R1_w=inv(id=0,umax_value=0,var_off=(0x0; 0x7fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
15: R0_w=inv1 R1_w=inv(id=0,umax_value=0,var_off=(0x0; 0x7fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
15: (95) exit
16: R0_w=inv1 R1_w=inv(id=0,umin_value=1,umax_value=25769803777,var_off=(0x0; 0x77fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
16: (47) r1 |= 0
17: R0_w=inv1 R1_w=inv(id=0,umin_value=1,umax_value=32212254719,var_off=(0x1; 0x700000000),s32_max_value=1,u32_max_value=1) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
[...]
The bound tests on the map value force the upper unsigned bound to be 25769803777
in 64 bit (0b11000000000000000000000000000000001) and then lower one to be 1. By
using OR they are truncated and thus result in the range [1,1] for the 32 bit reg
tracker. This is incorrect given the only thing we know is that the value must be
positive and thus 2147483647 (0b1111111111111111111111111111111) at max for the
subregs. Fix it by using the {u,s}32_{min,max}_value vars instead. This also makes
sense, for example, for the case where we update dst_reg->s32_{min,max}_value in
the else branch we need to use the newly computed dst_reg->u32_{min,max}_value as
we know that these are positive. Previously, in the else branch the 64 bit values
of umin_value=1 and umax_value=32212254719 were used and latter got truncated to
be 1 as upper bound there. After the fix the subreg range is now correct:
[...]
8: R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R10=fp0 fp-8=mmmmmmmm
8: (79) r1 = *(u64 *)(r0 +0)
R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R10=fp0 fp-8=mmmmmmmm
9: R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R1_w=inv(id=0) R10=fp0 fp-8=mmmmmmmm
9: (b7) r0 = 1
10: R0_w=inv1 R1_w=inv(id=0) R10=fp0 fp-8=mmmmmmmm
10: (18) r2 = 0x600000002
12: R0_w=inv1 R1_w=inv(id=0) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
12: (ad) if r1 < r2 goto pc+1
R0_w=inv1 R1_w=inv(id=0,umin_value=25769803778) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
13: R0_w=inv1 R1_w=inv(id=0,umin_value=25769803778) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
13: (95) exit
14: R0_w=inv1 R1_w=inv(id=0,umax_value=25769803777,var_off=(0x0; 0x7ffffffff)) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
14: (25) if r1 > 0x0 goto pc+1
R0_w=inv1 R1_w=inv(id=0,umax_value=0,var_off=(0x0; 0x7fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
15: R0_w=inv1 R1_w=inv(id=0,umax_value=0,var_off=(0x0; 0x7fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
15: (95) exit
16: R0_w=inv1 R1_w=inv(id=0,umin_value=1,umax_value=25769803777,var_off=(0x0; 0x77fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
16: (47) r1 |= 0
17: R0_w=inv1 R1_w=inv(id=0,umin_value=1,umax_value=32212254719,var_off=(0x0; 0x77fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
[...]
Fixes: 3f50f132d840 ("bpf: Verifier, do explicit ALU32 bounds tracking")
Reported-by: Simon Scannell <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
Reviewed-by: John Fastabend <[email protected]>
Acked-by: Alexei Starovoitov <[email protected]> |
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
flush_all_to_thread(src);
/*
* Flush TM state out so we can copy it. __switch_to_tm() does this
* flush but it removes the checkpointed state from the current CPU and
* transitions the CPU out of TM mode. Hence we need to call
* tm_recheckpoint_new_task() (on the same task) to restore the
* checkpointed state back and the TM mode.
*
* Can't pass dst because it isn't ready. Doesn't matter, passing
* dst is only important for __switch_to()
*/
__switch_to_tm(src, src);
*dst = *src;
clear_task_ebb(dst);
return 0;
} | 0 | [] | linux | 5d176f751ee3c6eededd984ad409bff201f436a7 | 148,783,938,438,006,460,000,000,000,000,000,000,000 | 21 | powerpc: tm: Enable transactional memory (TM) lazily for userspace
Currently the MSR TM bit is always set if the hardware is TM capable.
This adds extra overhead as it means the TM SPRS (TFHAR, TEXASR and
TFAIR) must be swapped for each process regardless of if they use TM.
For processes that don't use TM the TM MSR bit can be turned off
allowing the kernel to avoid the expensive swap of the TM registers.
A TM unavailable exception will occur if a thread does use TM and the
kernel will enable MSR_TM and leave it so for some time afterwards.
Signed-off-by: Cyril Bur <[email protected]>
Signed-off-by: Michael Ellerman <[email protected]> |
static int minix_get_block(struct inode *inode, sector_t block,
struct buffer_head *bh_result, int create)
{
if (INODE_VERSION(inode) == MINIX_V1)
return V1_minix_get_block(inode, block, bh_result, create);
else
return V2_minix_get_block(inode, block, bh_result, create);
} | 0 | [
"CWE-189"
] | linux-2.6 | f5fb09fa3392ad43fbcfc2f4580752f383ab5996 | 272,617,583,222,885,470,000,000,000,000,000,000,000 | 8 | [PATCH] Fix for minix crash
Mounting a (corrupt) minix filesystem with zero s_zmap_blocks
gives a spectacular crash on my 2.6.17.8 system, no doubt
because minix/inode.c does an unconditional
minix_set_bit(0,sbi->s_zmap[0]->b_data);
[[email protected]: make labels conistent while we're there]
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
static int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
size_t len, int noblock, int flags, int *addr_len)
{
struct inet_sock *inet = inet_sk(sk);
struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
struct sk_buff *skb;
int copied, err;
/*
* Check any passed addresses
*/
if (addr_len)
*addr_len=sizeof(*sin);
if (flags & MSG_ERRQUEUE)
return ip_recv_error(sk, msg, len);
try_again:
skb = skb_recv_datagram(sk, flags, noblock, &err);
if (!skb)
goto out;
copied = skb->len - sizeof(struct udphdr);
if (copied > len) {
copied = len;
msg->msg_flags |= MSG_TRUNC;
}
if (skb->ip_summed==CHECKSUM_UNNECESSARY) {
err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov,
copied);
} else if (msg->msg_flags&MSG_TRUNC) {
if (__udp_checksum_complete(skb))
goto csum_copy_err;
err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov,
copied);
} else {
err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);
if (err == -EINVAL)
goto csum_copy_err;
}
if (err)
goto out_free;
sock_recv_timestamp(msg, sk, skb);
/* Copy the address. */
if (sin)
{
sin->sin_family = AF_INET;
sin->sin_port = skb->h.uh->source;
sin->sin_addr.s_addr = skb->nh.iph->saddr;
memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
}
if (inet->cmsg_flags)
ip_cmsg_recv(msg, skb);
err = copied;
if (flags & MSG_TRUNC)
err = skb->len - sizeof(struct udphdr);
out_free:
skb_free_datagram(sk, skb);
out:
return err;
csum_copy_err:
UDP_INC_STATS_BH(UDP_MIB_INERRORS);
skb_kill_datagram(sk, skb, flags);
if (noblock)
return -EAGAIN;
goto try_again;
} | 0 | [
"CWE-476"
] | linux-2.6 | 1e0c14f49d6b393179f423abbac47f85618d3d46 | 102,257,619,668,173,900,000,000,000,000,000,000,000 | 77 | [UDP]: Fix MSG_PROBE crash
UDP tracks corking status through the pending variable. The
IP layer also tracks it through the socket write queue. It
is possible for the two to get out of sync when MSG_PROBE is
used.
This patch changes UDP to check the write queue to ensure
that the two stay in sync.
Signed-off-by: Herbert Xu <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
dissect_tcpopt_user_to(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void* data _U_)
{
proto_item *tf;
proto_tree *field_tree;
proto_item *length_item;
guint16 to;
int offset = 0;
tf = proto_tree_add_item(tree, proto_tcp_option_user_to, tvb, offset, -1, ENC_NA);
field_tree = proto_item_add_subtree(tf, ett_tcp_option_user_to);
proto_tree_add_item(field_tree, hf_tcp_option_kind, tvb,
offset, 1, ENC_BIG_ENDIAN);
length_item = proto_tree_add_item(field_tree, hf_tcp_option_len, tvb,
offset + 1, 1, ENC_BIG_ENDIAN);
if (!tcp_option_len_check(length_item, pinfo, tvb_reported_length(tvb), TCPOLEN_USER_TO))
return tvb_captured_length(tvb);
proto_tree_add_item(field_tree, hf_tcp_option_user_to_granularity, tvb, offset + 2, 2, ENC_BIG_ENDIAN);
to = tvb_get_ntohs(tvb, offset + 2) & 0x7FFF;
proto_tree_add_item(field_tree, hf_tcp_option_user_to_val, tvb, offset + 2, 2, ENC_BIG_ENDIAN);
tcp_info_append_uint(pinfo, "USER_TO", to);
return tvb_captured_length(tvb);
} | 0 | [
"CWE-354"
] | wireshark | 7f3fe6164a68b76d9988c4253b24d43f498f1753 | 57,629,197,246,312,950,000,000,000,000,000,000,000 | 26 | TCP: do not use an unknown status when the checksum is 0xffff
Otherwise it triggers an assert when adding the column as the field is
defined as BASE_NONE and not BASE_DEC or BASE_HEX. Thus an unknown value
(not in proto_checksum_vals[)array) cannot be represented.
Mark the checksum as bad even if we process the packet.
Closes #16816
Conflicts:
epan/dissectors/packet-tcp.c |
char *device_node_gen_full_name(const struct device_node *np, char *buf, char *end)
{
int depth;
const struct device_node *parent = np->parent;
static const struct printf_spec strspec = {
.field_width = -1,
.precision = -1,
};
/* special case for root node */
if (!parent)
return string(buf, end, "/", strspec);
for (depth = 0; parent->parent; depth++)
parent = parent->parent;
for ( ; depth >= 0; depth--) {
buf = string(buf, end, "/", strspec);
buf = string(buf, end, device_node_name_for_depth(np, depth),
strspec);
}
return buf;
} | 0 | [
"CWE-200"
] | linux | ad67b74d2469d9b82aaa572d76474c95bc484d57 | 192,353,288,930,372,260,000,000,000,000,000,000,000 | 23 | printk: hash addresses printed with %p
Currently there exist approximately 14 000 places in the kernel where
addresses are being printed using an unadorned %p. This potentially
leaks sensitive information regarding the Kernel layout in memory. Many
of these calls are stale, instead of fixing every call lets hash the
address by default before printing. This will of course break some
users, forcing code printing needed addresses to be updated.
Code that _really_ needs the address will soon be able to use the new
printk specifier %px to print the address.
For what it's worth, usage of unadorned %p can be broken down as
follows (thanks to Joe Perches).
$ git grep -E '%p[^A-Za-z0-9]' | cut -f1 -d"/" | sort | uniq -c
1084 arch
20 block
10 crypto
32 Documentation
8121 drivers
1221 fs
143 include
101 kernel
69 lib
100 mm
1510 net
40 samples
7 scripts
11 security
166 sound
152 tools
2 virt
Add function ptr_to_id() to map an address to a 32 bit unique
identifier. Hash any unadorned usage of specifier %p and any malformed
specifiers.
Signed-off-by: Tobin C. Harding <[email protected]> |
clean_uid_from_key (kbnode_t keyblock, kbnode_t uidnode, int noisy)
{
kbnode_t node;
PKT_user_id *uid = uidnode->pkt->pkt.user_id;
int deleted = 0;
assert (keyblock->pkt->pkttype==PKT_PUBLIC_KEY);
assert (uidnode->pkt->pkttype==PKT_USER_ID);
/* Skip valid user IDs, compacted user IDs, and non-self-signed user
IDs if --allow-non-selfsigned-uid is set. */
if (uid->created
|| uid->flags.compacted
|| (!uid->is_expired && !uid->is_revoked && opt.allow_non_selfsigned_uid))
return 0;
for (node=uidnode->next;
node && node->pkt->pkttype == PKT_SIGNATURE;
node=node->next)
{
if (!node->pkt->pkt.signature->flags.chosen_selfsig)
{
delete_kbnode (node);
deleted = 1;
uidnode->pkt->pkt.user_id->flags.compacted = 1;
}
}
if (noisy)
{
const char *reason;
char *user = utf8_to_native (uid->name, uid->len, 0);
if (uid->is_revoked)
reason = _("revoked");
else if (uid->is_expired)
reason = _("expired");
else
reason = _("invalid");
log_info ("compacting user ID \"%s\" on key %s: %s\n",
user, keystr_from_pk (keyblock->pkt->pkt.public_key),
reason);
xfree (user);
}
return deleted;
} | 0 | [
"CWE-20"
] | gnupg | 2183683bd633818dd031b090b5530951de76f392 | 133,440,665,509,897,560,000,000,000,000,000,000,000 | 49 | Use inline functions to convert buffer data to scalars.
* common/host2net.h (buf16_to_ulong, buf16_to_uint): New.
(buf16_to_ushort, buf16_to_u16): New.
(buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New.
--
Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to
avoid all sign extension on shift problems. Hanno Böck found a case
with an invalid read due to this problem. To fix that once and for
all almost all uses of "<< 24" and "<< 8" are changed by this patch to
use an inline function from host2net.h.
Signed-off-by: Werner Koch <[email protected]> |
static inline void io_consume_sqe(struct io_ring_ctx *ctx)
{
ctx->cached_sq_head++;
} | 0 | [] | linux | 0f2122045b946241a9e549c2a76cea54fa58a7ff | 104,504,399,707,582,020,000,000,000,000,000,000,000 | 4 | io_uring: don't rely on weak ->files references
Grab actual references to the files_struct. To avoid circular references
issues due to this, we add a per-task note that keeps track of what
io_uring contexts a task has used. When the tasks execs or exits its
assigned files, we cancel requests based on this tracking.
With that, we can grab proper references to the files table, and no
longer need to rely on stashing away ring_fd and ring_file to check
if the ring_fd may have been closed.
Cc: [email protected] # v5.5+
Reviewed-by: Pavel Begunkov <[email protected]>
Signed-off-by: Jens Axboe <[email protected]> |
GF_Err video_sample_entry_AddBox(GF_Box *s, GF_Box *a)
{
GF_MPEGVisualSampleEntryBox *ptr = (GF_MPEGVisualSampleEntryBox *)s;
switch (a->type) {
case GF_ISOM_BOX_TYPE_ESDS:
if (ptr->esd) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->esd = (GF_ESDBox *)a;
break;
case GF_ISOM_BOX_TYPE_SINF:
gf_list_add(ptr->protections, a);
break;
case GF_ISOM_BOX_TYPE_RINF:
if (ptr->rinf) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->rinf = (GF_RestrictedSchemeInfoBox *) a;
break;
case GF_ISOM_BOX_TYPE_AVCC:
if (ptr->avc_config) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->avc_config = (GF_AVCConfigurationBox *)a;
break;
case GF_ISOM_BOX_TYPE_HVCC:
if (ptr->hevc_config) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->hevc_config = (GF_HEVCConfigurationBox *)a;
break;
case GF_ISOM_BOX_TYPE_SVCC:
if (ptr->svc_config) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->svc_config = (GF_AVCConfigurationBox *)a;
break;
case GF_ISOM_BOX_TYPE_MVCC:
if (ptr->mvc_config) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->mvc_config = (GF_AVCConfigurationBox *)a;
break;
case GF_ISOM_BOX_TYPE_LHVC:
if (ptr->lhvc_config) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->lhvc_config = (GF_HEVCConfigurationBox *)a;
break;
case GF_ISOM_BOX_TYPE_AV1C:
if (ptr->av1_config) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->av1_config = (GF_AV1ConfigurationBox *)a;
break;
case GF_ISOM_BOX_TYPE_VPCC:
if (ptr->vp_config) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->vp_config = (GF_VPConfigurationBox *)a;
break;
case GF_ISOM_BOX_TYPE_M4DS:
if (ptr->descr) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->descr = (GF_MPEG4ExtensionDescriptorsBox *)a;
break;
case GF_ISOM_BOX_TYPE_UUID:
if (! memcmp(((GF_UnknownUUIDBox*)a)->uuid, GF_ISOM_IPOD_EXT, 16)) {
if (ptr->ipod_ext) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->ipod_ext = (GF_UnknownUUIDBox *)a;
} else {
return gf_isom_box_add_default(s, a);
}
break;
case GF_ISOM_BOX_TYPE_D263:
ptr->cfg_3gpp = (GF_3GPPConfigBox *)a;
/*for 3GP config, remember sample entry type in config*/
ptr->cfg_3gpp->cfg.type = ptr->type;
break;
case GF_ISOM_BOX_TYPE_PASP:
if (ptr->pasp) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->pasp = (GF_PixelAspectRatioBox *)a;
break;
case GF_ISOM_BOX_TYPE_CLAP:
if (ptr->clap) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->clap = (GF_CleanApertureBox *)a;
break;
case GF_ISOM_BOX_TYPE_COLR:
if (ptr->colr) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->colr = (GF_ColourInformationBox*)a;
break;
case GF_ISOM_BOX_TYPE_MDCV:
if (ptr->mdcv) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->mdcv = (GF_MasteringDisplayColourVolumeBox*)a;
break;
case GF_ISOM_BOX_TYPE_CLLI:
if (ptr->clli) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->clli = (GF_ContentLightLevelBox*)a;
break;
case GF_ISOM_BOX_TYPE_CCST:
if (ptr->ccst) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->ccst = (GF_CodingConstraintsBox *)a;
break;
case GF_ISOM_BOX_TYPE_AUXI:
if (ptr->auxi) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->auxi = (GF_AuxiliaryTypeInfoBox *)a;
break;
case GF_ISOM_BOX_TYPE_RVCC:
if (ptr->rvcc) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->rvcc = (GF_RVCConfigurationBox *)a;
break;
default:
return gf_isom_box_add_default(s, a);
}
return GF_OK;
} | 0 | [
"CWE-416"
] | gpac | 6063b1a011c3f80cee25daade18154e15e4c058c | 228,539,801,718,434,600,000,000,000,000,000,000,000 | 98 | fix UAF in audio_sample_entry_Read (#1440) |
PHP_FUNCTION(imagefill)
{
zval *IM;
long x, y, col;
gdImagePtr im;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rlll", &IM, &x, &y, &col) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
gdImageFill(im, x, y, col);
RETURN_TRUE;
} | 0 | [
"CWE-703",
"CWE-189"
] | php-src | 2938329ce19cb8c4197dec146c3ec887c6f61d01 | 193,314,807,842,481,160,000,000,000,000,000,000,000 | 14 | Fixed bug #66356 (Heap Overflow Vulnerability in imagecrop())
And also fixed the bug: arguments are altered after some calls |
static PHP_FUNCTION(xmlwriter_write_attribute_ns)
{
zval *pind;
xmlwriter_object *intern;
xmlTextWriterPtr ptr;
char *name, *prefix, *uri, *content;
int name_len, prefix_len, uri_len, content_len, retval;
#ifdef ZEND_ENGINE_2
zval *this = getThis();
if (this) {
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "sss!s",
&prefix, &prefix_len, &name, &name_len, &uri, &uri_len, &content, &content_len) == FAILURE) {
return;
}
XMLWRITER_FROM_OBJECT(intern, this);
} else
#endif
{
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rsss!s", &pind,
&prefix, &prefix_len, &name, &name_len, &uri, &uri_len, &content, &content_len) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(intern,xmlwriter_object *, &pind, -1, "XMLWriter", le_xmlwriter);
}
XMLW_NAME_CHK("Invalid Attribute Name");
ptr = intern->ptr;
if (ptr) {
retval = xmlTextWriterWriteAttributeNS(ptr, (xmlChar *)prefix, (xmlChar *)name, (xmlChar *)uri, (xmlChar *)content);
if (retval != -1) {
RETURN_TRUE;
}
}
RETURN_FALSE;
} | 0 | [
"CWE-20"
] | php-src | 52b93f0cfd3cba7ff98cc5198df6ca4f23865f80 | 332,511,932,751,003,880,000,000,000,000,000,000,000 | 40 | Fixed bug #69353 (Missing null byte checks for paths in various PHP extensions) |
/* Check that an MTU is acceptable for an IPv4 interface.
 * 68 bytes is the minimum datagram size every IPv4 host must accept
 * (RFC 791), so anything smaller is rejected. */
static bool inetdev_valid_mtu(unsigned int mtu)
{
	const unsigned int min_ipv4_mtu = 68;

	return mtu >= min_ipv4_mtu;
}
"CWE-399"
] | net-next | fbd40ea0180a2d328c5adc61414dc8bab9335ce2 | 121,293,243,088,610,100,000,000,000,000,000,000,000 | 4 | ipv4: Don't do expensive useless work during inetdev destroy.
When an inetdev is destroyed, every address assigned to the interface
is removed. And in this scenerio we do two pointless things which can
be very expensive if the number of assigned interfaces is large:
1) Address promotion. We are deleting all addresses, so there is no
point in doing this.
2) A full nf conntrack table purge for every address. We only need to
do this once, as is already caught by the existing
masq_dev_notifier so masq_inet_event() can skip this.
Reported-by: Solar Designer <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
Tested-by: Cyrill Gorcunov <[email protected]> |
/* Compute the serialized size of an 'mfra' (movie fragment random access)
 * box: the optional 'mfro' child plus every 'tfra' entry in tfra_list. */
GF_Err mfra_Size(GF_Box *s)
{
	GF_Err e;
	GF_MovieFragmentRandomAccessBox *ptr = (GF_MovieFragmentRandomAccessBox *)s;

	/* 'mfro' is optional; only account for it when present. */
	if (ptr->mfro) {
		e = gf_isom_box_size((GF_Box *)ptr->mfro);
		if (e) return e;
		ptr->size += ptr->mfro->size;
	}
	/* Accumulate the sizes of all 'tfra' children. */
	return gf_isom_box_array_size(s, ptr->tfra_list);
}
"CWE-400",
"CWE-401"
] | gpac | d2371b4b204f0a3c0af51ad4e9b491144dd1225c | 74,729,870,412,002,080,000,000,000,000,000,000,000 | 12 | prevent dref memleak on invalid input (#1183) |
/* Convert a (Julian day, day fraction) pair from local time to UTC.
 * jd: Julian day number; df: day fraction in seconds; of: UTC offset in
 * seconds.  Subtracting the offset may push the fraction outside
 * [0, DAY_IN_SECONDS), in which case the day number is bumped accordingly.
 * NOTE(review): the return type is declared on the preceding line, outside
 * this excerpt. */
jd_local_to_utc(int jd, int df, int of)
{
	df -= of;
	if (df < 0)
		jd -= 1;
	else if (df >= DAY_IN_SECONDS)
		jd += 1;
	return jd;
}
`Date.parse` now raises an ArgumentError when a given date string is
longer than 128. You can configure the limit by giving `limit` keyword
arguments like `Date.parse(str, limit: 1000)`. If you pass `limit: nil`,
the limit is disabled.
Not only `Date.parse` but also the following methods are changed.
* Date._parse
* Date.parse
* DateTime.parse
* Date._iso8601
* Date.iso8601
* DateTime.iso8601
* Date._rfc3339
* Date.rfc3339
* DateTime.rfc3339
* Date._xmlschema
* Date.xmlschema
* DateTime.xmlschema
* Date._rfc2822
* Date.rfc2822
* DateTime.rfc2822
* Date._rfc822
* Date.rfc822
* DateTime.rfc822
* Date._jisx0301
* Date.jisx0301
* DateTime.jisx0301 |
/* Formatter for the "ct_clear" action: the action carries no arguments, so
 * the ofpact body is unused and only the (optionally colorized) keyword is
 * appended to the output string.
 * NOTE(review): the return type is declared on the preceding line, outside
 * this excerpt. */
format_CT_CLEAR(const struct ofpact_null *a OVS_UNUSED,
                const struct ofpact_format_params *fp)
{
    ds_put_format(fp->s, "%sct_clear%s", colors.value, colors.end);
}
"CWE-416"
] | ovs | 77cccc74deede443e8b9102299efc869a52b65b2 | 111,891,201,338,122,900,000,000,000,000,000,000,000 | 5 | ofp-actions: Fix use-after-free while decoding RAW_ENCAP.
While decoding RAW_ENCAP action, decode_ed_prop() might re-allocate
ofpbuf if there is no enough space left. However, function
'decode_NXAST_RAW_ENCAP' continues to use old pointer to 'encap'
structure leading to write-after-free and incorrect decoding.
==3549105==ERROR: AddressSanitizer: heap-use-after-free on address
0x60600000011a at pc 0x0000005f6cc6 bp 0x7ffc3a2d4410 sp 0x7ffc3a2d4408
WRITE of size 2 at 0x60600000011a thread T0
#0 0x5f6cc5 in decode_NXAST_RAW_ENCAP lib/ofp-actions.c:4461:20
#1 0x5f0551 in ofpact_decode ./lib/ofp-actions.inc2:4777:16
#2 0x5ed17c in ofpacts_decode lib/ofp-actions.c:7752:21
#3 0x5eba9a in ofpacts_pull_openflow_actions__ lib/ofp-actions.c:7791:13
#4 0x5eb9fc in ofpacts_pull_openflow_actions lib/ofp-actions.c:7835:12
#5 0x64bb8b in ofputil_decode_packet_out lib/ofp-packet.c:1113:17
#6 0x65b6f4 in ofp_print_packet_out lib/ofp-print.c:148:13
#7 0x659e3f in ofp_to_string__ lib/ofp-print.c:1029:16
#8 0x659b24 in ofp_to_string lib/ofp-print.c:1244:21
#9 0x65a28c in ofp_print lib/ofp-print.c:1288:28
#10 0x540d11 in ofctl_ofp_parse utilities/ovs-ofctl.c:2814:9
#11 0x564228 in ovs_cmdl_run_command__ lib/command-line.c:247:17
#12 0x56408a in ovs_cmdl_run_command lib/command-line.c:278:5
#13 0x5391ae in main utilities/ovs-ofctl.c:179:9
#14 0x7f6911ce9081 in __libc_start_main (/lib64/libc.so.6+0x27081)
#15 0x461fed in _start (utilities/ovs-ofctl+0x461fed)
Fix that by getting a new pointer before using.
Credit to OSS-Fuzz.
Fuzzer regression test will fail only with AddressSanitizer enabled.
Reported-at: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=27851
Fixes: f839892a206a ("OF support and translation of generic encap and decap")
Acked-by: William Tu <[email protected]>
Signed-off-by: Ilya Maximets <[email protected]> |
/* Decide which OpenCL device(s) ImageMagick will use.
 *
 * Honors the MAGICK_OCL_DEVICE environment variable ("GPU", "CPU" or
 * "OFF"), loads cached benchmark scores, benchmarks whenever the CPU or
 * any device lacks a score, and finally disables every device whose score
 * differs from the best (minimum) score, the CPU score being the baseline. */
static void AutoSelectOpenCLDevices(MagickCLEnv clEnv)
{
  const char
    *option;

  double
    best_score;

  MagickBooleanType
    benchmark;

  size_t
    i;

  /* Explicit user override via environment variable. */
  option=getenv("MAGICK_OCL_DEVICE");
  if (option != (const char *) NULL)
    {
      if (strcmp(option,"GPU") == 0)
        SelectOpenCLDevice(clEnv,CL_DEVICE_TYPE_GPU);
      else if (strcmp(option,"CPU") == 0)
        SelectOpenCLDevice(clEnv,CL_DEVICE_TYPE_CPU);
      else if (strcmp(option,"OFF") == 0)
        {
          /* "OFF" disables OpenCL acceleration entirely. */
          for (i = 0; i < clEnv->number_devices; i++)
            clEnv->devices[i]->enabled=MagickFalse;
          clEnv->enabled=MagickFalse;
        }
    }

  /* Without the benchmark cache there is nothing to select against. */
  if (LoadOpenCLBenchmarks(clEnv) == MagickFalse)
    return;

  /* Re-benchmark if the CPU or any device has no recorded score yet. */
  benchmark=MagickFalse;
  if (clEnv->cpu_score == MAGICKCORE_OPENCL_UNDEFINED_SCORE)
    benchmark=MagickTrue;
  else
    {
      for (i = 0; i < clEnv->number_devices; i++)
      {
        if (clEnv->devices[i]->score == MAGICKCORE_OPENCL_UNDEFINED_SCORE)
        {
          benchmark=MagickTrue;
          break;
        }
      }
    }
  if (benchmark != MagickFalse)
    BenchmarkOpenCLDevices(clEnv);

  /* Lower score wins (MagickMin): keep only devices at the best score. */
  best_score=clEnv->cpu_score;
  for (i = 0; i < clEnv->number_devices; i++)
    best_score=MagickMin(clEnv->devices[i]->score,best_score);
  for (i = 0; i < clEnv->number_devices; i++)
  {
    if (clEnv->devices[i]->score != best_score)
      clEnv->devices[i]->enabled=MagickFalse;
  }
}
"CWE-476"
] | ImageMagick | cca91aa1861818342e3d072bb0fad7dc4ffac24a | 23,174,788,710,268,930,000,000,000,000,000,000,000 | 60 | https://github.com/ImageMagick/ImageMagick/issues/790 |
/* Create a query object bound to a host-visible buffer resource.
 *
 * ctx:         owning renderer context
 * handle:      guest object handle to register the query under
 * query_type:  PIPE_QUERY_* type requested by the guest
 * query_index: query slot index
 * res_handle:  buffer resource the query result is written back to
 * offset:      unused
 *
 * Returns 0 on success or a positive errno value.
 *
 * Fix: the original returned EINVAL straight out of the switch and out of
 * the object-insert failure path, leaking the CALLOC'd query and the
 * resource reference taken on q->res (and, for the insert failure, the GL
 * query id).  All such paths now funnel through err_free. */
int vrend_create_query(struct vrend_context *ctx, uint32_t handle,
                       uint32_t query_type, uint32_t query_index,
                       uint32_t res_handle, UNUSED uint32_t offset)
{
   struct vrend_query *q;
   struct vrend_resource *res;
   uint32_t ret_handle;
   bool fake_samples_passed = false;
   int ret;

   res = vrend_renderer_ctx_res_lookup(ctx, res_handle);
   /* The result buffer must exist and be host-system memory. */
   if (!res || !has_bit(res->storage_bits, VREND_STORAGE_HOST_SYSTEM_MEMORY)) {
      vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, res_handle);
      return EINVAL;
   }

   /* If we don't have ARB_occlusion_query, at least try to fake GL_SAMPLES_PASSED
    * by using GL_ANY_SAMPLES_PASSED (i.e. EXT_occlusion_query_boolean) */
   if (!has_feature(feat_occlusion_query) && query_type == PIPE_QUERY_OCCLUSION_COUNTER) {
      VREND_DEBUG(dbg_query, ctx, "GL_SAMPLES_PASSED not supported will try GL_ANY_SAMPLES_PASSED\n");
      query_type = PIPE_QUERY_OCCLUSION_PREDICATE;
      fake_samples_passed = true;
   }

   if (query_type == PIPE_QUERY_OCCLUSION_PREDICATE &&
       !has_feature(feat_occlusion_query_boolean)) {
      vrend_report_context_error(ctx, VIRGL_ERROR_GL_ANY_SAMPLES_PASSED, res_handle);
      return EINVAL;
   }

   q = CALLOC_STRUCT(vrend_query);
   if (!q)
      return ENOMEM;

   list_inithead(&q->waiting_queries);
   q->type = query_type;
   q->index = query_index;
   q->ctx = ctx;
   q->fake_samples_passed = fake_samples_passed;
   vrend_resource_reference(&q->res, res);

   /* Map the gallium query type onto a GL query target, rejecting types
    * the host GL cannot support. */
   switch (q->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
      q->gltype = GL_SAMPLES_PASSED_ARB;
      break;
   case PIPE_QUERY_OCCLUSION_PREDICATE:
      /* Guaranteed by the check above, but keep the guard for safety. */
      if (has_feature(feat_occlusion_query_boolean)) {
         q->gltype = GL_ANY_SAMPLES_PASSED;
         break;
      }
      ret = EINVAL;
      goto err_free;
   case PIPE_QUERY_TIMESTAMP:
      if (!has_feature(feat_timer_query)) {
         ret = EINVAL;
         goto err_free;
      }
      q->gltype = GL_TIMESTAMP;
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      if (!has_feature(feat_timer_query)) {
         ret = EINVAL;
         goto err_free;
      }
      q->gltype = GL_TIME_ELAPSED;
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
      q->gltype = GL_PRIMITIVES_GENERATED;
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      q->gltype = GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN;
      break;
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
      q->gltype = GL_ANY_SAMPLES_PASSED_CONSERVATIVE;
      break;
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
      if (!has_feature(feat_transform_feedback_overflow_query)) {
         ret = EINVAL;
         goto err_free;
      }
      q->gltype = GL_TRANSFORM_FEEDBACK_STREAM_OVERFLOW_ARB;
      break;
   case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
      if (!has_feature(feat_transform_feedback_overflow_query)) {
         ret = EINVAL;
         goto err_free;
      }
      q->gltype = GL_TRANSFORM_FEEDBACK_OVERFLOW_ARB;
      break;
   default:
      /* Unknown types are tolerated (matching previous behavior); the
       * query keeps gltype 0. */
      vrend_printf("unknown query object received %d\n", q->type);
      break;
   }

   glGenQueries(1, &q->id);

   ret_handle = vrend_renderer_object_insert(ctx, q, handle,
                                             VIRGL_OBJECT_QUERY);
   if (!ret_handle) {
      glDeleteQueries(1, &q->id);
      ret = ENOMEM;
      goto err_free;
   }
   return 0;

err_free:
   /* Drop the resource reference taken above and free the query. */
   vrend_resource_reference(&q->res, NULL);
   FREE(q);
   return ret;
}
"CWE-787"
] | virglrenderer | 95e581fd181b213c2ed7cdc63f2abc03eaaa77ec | 299,603,856,944,631,700,000,000,000,000,000,000,000 | 94 | vrend: Add test to resource OOB write and fix it
v2: Also check that no depth != 1 has been send when none is due
Closes: #250
Signed-off-by: Gert Wollny <[email protected]>
Reviewed-by: Chia-I Wu <[email protected]> |
/* veth transmit: loop the skb over to the peer device's receive path.
 *
 * dev_forward_skb() consumes the skb on both success and failure, so the
 * rx_drop path only bumps the peer's drop counter and must NOT free the
 * skb again; only the local tx_drop path (peer not up) frees it here. */
static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_device *rcv = NULL;
	struct veth_priv *priv, *rcv_priv;
	struct veth_net_stats *stats, *rcv_stats;
	int length;

	priv = netdev_priv(dev);
	rcv = priv->peer;
	rcv_priv = netdev_priv(rcv);

	/* Per-cpu stats for both ends of the pair. */
	stats = this_cpu_ptr(priv->stats);
	rcv_stats = this_cpu_ptr(rcv_priv->stats);

	/* The peer must be up to receive anything. */
	if (!(rcv->flags & IFF_UP))
		goto tx_drop;

	/* presumably propagates the peer's checksum expectation when we skip
	 * checksumming ourselves — confirm against veth ip_summed handling. */
	if (dev->features & NETIF_F_NO_CSUM)
		skb->ip_summed = rcv_priv->ip_summed;

	/* Account the length before hand-off, since the skb may be freed. */
	length = skb->len + ETH_HLEN;
	if (dev_forward_skb(rcv, skb) != NET_RX_SUCCESS)
		goto rx_drop;

	stats->tx_bytes += length;
	stats->tx_packets++;

	rcv_stats->rx_bytes += length;
	rcv_stats->rx_packets++;

	return NETDEV_TX_OK;

tx_drop:
	kfree_skb(skb);
	stats->tx_dropped++;
	return NETDEV_TX_OK;

rx_drop:
	/* skb already consumed by dev_forward_skb(); just count the drop. */
	rcv_stats->rx_dropped++;
	return NETDEV_TX_OK;
}
"CWE-399"
] | linux | 6ec82562ffc6f297d0de36d65776cff8e5704867 | 117,481,773,382,688,380,000,000,000,000,000,000,000 | 41 | veth: Dont kfree_skb() after dev_forward_skb()
In case of congestion, netif_rx() frees the skb, so we must assume
dev_forward_skb() also consume skb.
Bug introduced by commit 445409602c092
(veth: move loopback logic to common location)
We must change dev_forward_skb() to always consume skb, and veth to not
double free it.
Bug report : http://marc.info/?l=linux-netdev&m=127310770900442&w=3
Reported-by: Martín Ferrari <[email protected]>
Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
/* Invoke the user's checkout notification callback for one item.
 *
 * why:    the notification reason being reported
 * delta:  the baseline->target diff entry (may be NULL)
 * wditem: the working-directory index entry (may be NULL)
 *
 * Returns 0 when no callback is registered or the reason is filtered out
 * by opts.notify_flags; otherwise the callback's normalized result. */
static int checkout_notify(
	checkout_data *data,
	git_checkout_notify_t why,
	const git_diff_delta *delta,
	const git_index_entry *wditem)
{
	git_diff_file wdfile;
	const git_diff_file *baseline = NULL, *target = NULL, *workdir = NULL;
	const char *path = NULL;

	/* Fast exit: no callback or the caller did not ask for this class. */
	if (!data->opts.notify_cb ||
		(why & data->opts.notify_flags) == 0)
		return 0;

	/* Synthesize a git_diff_file view of the workdir index entry. */
	if (wditem) {
		memset(&wdfile, 0, sizeof(wdfile));

		git_oid_cpy(&wdfile.id, &wditem->id);
		wdfile.path = wditem->path;
		wdfile.size = wditem->file_size;
		wdfile.flags = GIT_DIFF_FLAG_VALID_ID;
		wdfile.mode = wditem->mode;

		workdir = &wdfile;

		path = wditem->path;
	}

	/* Derive which sides (baseline/target) exist from the delta status;
	 * when a delta is present its old path overrides the wditem path. */
	if (delta) {
		switch (delta->status) {
		case GIT_DELTA_UNMODIFIED:
		case GIT_DELTA_MODIFIED:
		case GIT_DELTA_TYPECHANGE:
		default:
			baseline = &delta->old_file;
			target = &delta->new_file;
			break;
		case GIT_DELTA_ADDED:
		case GIT_DELTA_IGNORED:
		case GIT_DELTA_UNTRACKED:
		case GIT_DELTA_UNREADABLE:
			target = &delta->new_file;
			break;
		case GIT_DELTA_DELETED:
			baseline = &delta->old_file;
			break;
		}

		path = delta->old_file.path;
	}

	{
		int error = data->opts.notify_cb(
			why, path, baseline, target, workdir, data->opts.notify_payload);

		/* Normalize arbitrary callback return codes into libgit2 errors. */
		return git_error_set_after_callback_function(
			error, "git_checkout notification");
	}
}
"CWE-20",
"CWE-706"
] | libgit2 | 64c612cc3e25eff5fb02c59ef5a66ba7a14751e4 | 62,078,765,005,616,130,000,000,000,000,000,000,000 | 59 | Protect against 8.3 "short name" attacks also on Linux/macOS
The Windows Subsystem for Linux (WSL) is getting increasingly popular,
in particular because it makes it _so_ easy to run Linux software on
Windows' files, via the auto-mounted Windows drives (`C:\` is mapped to
`/mnt/c/`, no need to set that up manually).
Unfortunately, files/directories on the Windows drives can be accessed
via their _short names_, if that feature is enabled (which it is on the
`C:` drive by default).
Which means that we have to safeguard even our Linux users against the
short name attacks.
Further, while the default options of CIFS/SMB-mounts seem to disallow
accessing files on network shares via their short names on Linux/macOS,
it _is_ possible to do so with the right options.
So let's just safe-guard against short name attacks _everywhere_.
Signed-off-by: Johannes Schindelin <[email protected]> |
/* Build a CIL AST node for a (selinuxuser name user range) statement.
 *
 * The fourth element is either a named levelrange (plain string) or an
 * anonymous levelrange expression (list).  On success ast_node's data and
 * flavor are set; on any failure the partially built selinuxuser is
 * destroyed and an error is logged against the parse node. */
int cil_gen_selinuxuser(struct cil_db *db, struct cil_tree_node *parse_current, struct cil_tree_node *ast_node)
{
	/* Expected syntax: (selinuxuser <name> <user> <range | range-expr>) */
	enum cil_syntax syntax[] = {
		CIL_SYN_STRING,
		CIL_SYN_STRING,
		CIL_SYN_STRING,
		CIL_SYN_STRING | CIL_SYN_LIST,
		CIL_SYN_END
	};
	int syntax_len = sizeof(syntax)/sizeof(*syntax);
	struct cil_selinuxuser *selinuxuser = NULL;
	int rc = SEPOL_ERR;

	if (db == NULL || parse_current == NULL || ast_node == NULL) {
		goto exit;
	}

	rc = __cil_verify_syntax(parse_current, syntax, syntax_len);
	if (rc != SEPOL_OK) {
		goto exit;
	}

	cil_selinuxuser_init(&selinuxuser);

	selinuxuser->name_str = parse_current->next->data;
	selinuxuser->user_str = parse_current->next->next->data;

	/* String => named range reference; list => inline anonymous range. */
	if (parse_current->next->next->next->cl_head == NULL) {
		selinuxuser->range_str = parse_current->next->next->next->data;
	} else {
		cil_levelrange_init(&selinuxuser->range);

		rc = cil_fill_levelrange(parse_current->next->next->next->cl_head, selinuxuser->range);
		if (rc != SEPOL_OK) {
			goto exit;
		}
	}

	ast_node->data = selinuxuser;
	ast_node->flavor = CIL_SELINUXUSER;

	return SEPOL_OK;

exit:
	cil_tree_log(parse_current, CIL_ERR, "Bad selinuxuser declaration");
	cil_destroy_selinuxuser(selinuxuser);
	return rc;
}
"CWE-125"
] | selinux | 340f0eb7f3673e8aacaf0a96cbfcd4d12a405521 | 89,877,909,991,733,080,000,000,000,000,000,000,000 | 47 | libsepol/cil: Check for statements not allowed in optional blocks
While there are some checks for invalid statements in an optional
block when resolving the AST, there are no checks when building the
AST.
OSS-Fuzz found the following policy which caused a null dereference
in cil_tree_get_next_path().
(blockinherit b3)
(sid SID)
(sidorder(SID))
(optional o
(ibpkeycon :(1 0)s)
(block b3
(filecon""block())
(filecon""block())))
The problem is that the blockinherit copies block b3 before
the optional block is disabled. When the optional is disabled,
block b3 is deleted along with everything else in the optional.
Later, when filecon statements with the same path are found an
error message is produced and in trying to find out where the block
was copied from, the reference to the deleted block is used. The
error handling code assumes (rightly) that if something was copied
from a block then that block should still exist.
It is clear that in-statements, blocks, and macros cannot be in an
optional, because that allows nodes to be copied from the optional
block to somewhere outside even though the optional could be disabled
later. When optionals are disabled the AST is reset and the
resolution is restarted at the point of resolving macro calls, so
anything resolved before macro calls will never be re-resolved.
This includes tunableifs, in-statements, blockinherits,
blockabstracts, and macro definitions. Tunable declarations also
cannot be in an optional block because they are needed to resolve
tunableifs. It should be fine to allow blockinherit statements in
an optional, because that is copying nodes from outside the optional
to the optional and if the optional is later disabled, everything
will be deleted anyway.
Check and quit with an error if a tunable declaration, in-statement,
block, blockabstract, or macro definition is found within an
optional when either building or resolving the AST.
Signed-off-by: James Carter <[email protected]> |
/* VMstate pre-save hook for the QXL device: convert the last_release
 * pointer into an offset relative to VRAM start so it survives migration,
 * using 0 when there is no pending release. */
static int qxl_pre_save(void *opaque)
{
    PCIQXLDevice* d = opaque;
    uint8_t *ram_start = d->vga.vram_ptr;

    trace_qxl_pre_save(d->id);
    if (d->last_release == NULL) {
        d->last_release_offset = 0;
    } else {
        d->last_release_offset = (uint8_t *)d->last_release - ram_start;
    }
    /* The release info must lie inside the VRAM region. */
    assert(d->last_release_offset < d->vga.vram_size);
    return 0;
}
"CWE-476"
] | qemu | d52680fc932efb8a2f334cc6993e705ed1e31e99 | 85,946,564,239,216,840,000,000,000,000,000,000,000 | 15 | qxl: check release info object
When releasing spice resources in release_resource() routine,
if release info object 'ext.info' is null, it leads to null
pointer dereference. Add check to avoid it.
Reported-by: Bugs SysSec <[email protected]>
Signed-off-by: Prasad J Pandit <[email protected]>
Message-id: [email protected]
Signed-off-by: Gerd Hoffmann <[email protected]> |
/* Force a detection/logging flush on both directions of a TCP session by
 * injecting two pseudo packets (one per direction).
 * The TRIGGER_RAW flags request that queued raw stream data be handed to
 * detection.  `ts` starts as the packet's to-server direction and is
 * flipped in inline mode (boolean XOR), which orders the two pseudo
 * packets accordingly. */
void StreamTcpDetectLogFlush(ThreadVars *tv, StreamTcpThread *stt, Flow *f, Packet *p, PacketQueue *pq)
{
    TcpSession *ssn = f->protoctx;
    ssn->client.flags |= STREAMTCP_STREAM_FLAG_TRIGGER_RAW;
    ssn->server.flags |= STREAMTCP_STREAM_FLAG_TRIGGER_RAW;
    bool ts = PKT_IS_TOSERVER(p) ? true : false;
    ts ^= StreamTcpInlineMode();
    /* ts^0 keeps the direction, ts^1 flips it: one packet each way. */
    StreamTcpPseudoPacketCreateDetectLogFlush(tv, stt, p, ssn, pq, ts^0);
    StreamTcpPseudoPacketCreateDetectLogFlush(tv, stt, p, ssn, pq, ts^1);
}
In case of a valid RST on a SYN, the state is switched to 'TCP_CLOSED'.
However, the target of the RST may not have received it, or may not
have accepted it. Also, the RST may have been injected, so the supposed
sender may not actually be aware of the RST that was sent in it's name.
In this case the previous behavior was to switch the state to CLOSED and
accept no further TCP updates or stream reassembly.
This patch changes this. It still switches the state to CLOSED, as this
is by far the most likely to be correct. However, it will reconsider
the state if the receiver continues to talk.
To do this on each state change the previous state will be recorded in
TcpSession::pstate. If a non-RST packet is received after a RST, this
TcpSession::pstate is used to try to continue the conversation.
If the (supposed) sender of the RST is also continueing the conversation
as normal, it's highly likely it didn't send the RST. In this case
a stream event is generated.
Ticket: #2501
Reported-By: Kirill Shipulin |
/* JS binding for Sys.get_opt(section, key): look up a key in the GPAC
 * configuration and return its value as a JS string, or null when unset.
 * Raises a JS exception on wrong argument count or string conversion
 * failure; borrowed C strings are always released before returning.
 * NOTE(review): the closing brace of this function lies outside this
 * excerpt. */
static JSValue js_sys_get_opt(JSContext *ctx, JSValueConst this_val, int argc, JSValueConst *argv)
{
	const char *sec, *key, *val;
	JSValue res;
	if (argc!=2) return GF_JS_EXCEPTION(ctx);
	sec = JS_ToCString(ctx, argv[0]);
	if (!sec) return GF_JS_EXCEPTION(ctx);
	key = JS_ToCString(ctx, argv[1]);
	if (!key) {
		/* Release the first string before bailing on the second. */
		JS_FreeCString(ctx, sec);
		return GF_JS_EXCEPTION(ctx);
	}
	val = gf_opts_get_key(sec, key);
	res = val ? JS_NewString(ctx, val) : JS_NULL;
	JS_FreeCString(ctx, sec);
	JS_FreeCString(ctx, key);
	return res;
"CWE-787"
] | gpac | ea1eca00fd92fa17f0e25ac25652622924a9a6a0 | 229,227,825,164,392,360,000,000,000,000,000,000,000 | 18 | fixed #2138 |
/* Replace the regex stored at index `at` of the set; when `reg` is NULL the
 * slot is removed instead (its region is freed and later entries shifted
 * down).  A non-NULL replacement must not use the FIND_LONGEST option and,
 * for multi-entry sets, must share the set's encoding.  The set's cached
 * search parameters are then recomputed from every remaining regex.
 * Returns 0 on success or ONIGERR_INVALID_ARGUMENT.
 * NOTE(review): the return type is declared on the preceding line, outside
 * this excerpt. */
onig_regset_replace(OnigRegSet* set, int at, regex_t* reg)
{
  int i;

  if (at < 0 || at >= set->n)
    return ONIGERR_INVALID_ARGUMENT;

  /* NULL reg means: remove slot `at` and compact the arrays. */
  if (IS_NULL(reg)) {
    onig_region_free(set->rs[at].region, 1);
    for (i = at; i < set->n - 1; i++) {
      set->rs[i].reg    = set->rs[i+1].reg;
      set->rs[i].region = set->rs[i+1].region;
    }
    set->n--;
  }
  else {
    if (IS_FIND_LONGEST(reg->options))
      return ONIGERR_INVALID_ARGUMENT;

    if (set->n > 1 && reg->enc != set->enc)
      return ONIGERR_INVALID_ARGUMENT;

    set->rs[at].reg = reg;
  }

  /* Recompute aggregated search parameters over the whole set. */
  for (i = 0; i < set->n; i++)
    update_regset_by_reg(set, set->rs[i].reg);

  return 0;
}
"CWE-125"
] | oniguruma | 0463e21432515631a9bc925ce5eb95b097c73719 | 305,130,008,727,873,020,000,000,000,000,000,000,000 | 30 | fix #164: Integer overflow related to reg->dmax in search_in_range() |
/* Decode a camera-button interrupt packet and forward it as a KEY_CAMERA
 * input event.  Button events are single-byte packets: 0x80/0x10 report a
 * press, 0x88/0x11 a release.  Returns 0 when an event was reported,
 * -EINVAL for anything else. */
static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
			u8 *data,	/* interrupt packet data */
			int len)	/* interrupt packet length */
{
	bool pressed;

	if (len != 1)
		return -EINVAL;

	switch (data[0]) {
	case 0x80:
	case 0x10:
		pressed = true;
		break;
	case 0x88:
	case 0x11:
		pressed = false;
		break;
	default:
		return -EINVAL;
	}

	input_report_key(gspca_dev->input_dev, KEY_CAMERA, pressed);
	input_sync(gspca_dev->input_dev);
	return 0;
}
"CWE-476"
] | linux | 485b06aadb933190f4bc44e006076bc27a23f205 | 53,670,261,053,565,340,000,000,000,000,000,000,000 | 20 | media: stv06xx: add missing descriptor sanity checks
Make sure to check that we have two alternate settings and at least one
endpoint before accessing the second altsetting structure and
dereferencing the endpoint arrays.
This specifically avoids dereferencing NULL-pointers or corrupting
memory when a device does not have the expected descriptors.
Note that the sanity checks in stv06xx_start() and pb0100_start() are
not redundant as the driver is mixing looking up altsettings by index
and by number, which may not coincide.
Fixes: 8668d504d72c ("V4L/DVB (12082): gspca_stv06xx: Add support for st6422 bridge and sensor")
Fixes: c0b33bdc5b8d ("[media] gspca-stv06xx: support bandwidth changing")
Cc: stable <[email protected]> # 2.6.31
Cc: Hans de Goede <[email protected]>
Signed-off-by: Johan Hovold <[email protected]>
Signed-off-by: Hans Verkuil <[email protected]>
Signed-off-by: Mauro Carvalho Chehab <[email protected]> |
/* NFT_MSG_DELRULE handler: delete rule(s) from an nftables table.
 *
 * Deletion scope depends on the attributes supplied:
 *   - chain + handle: delete that single rule
 *   - chain only:     delete every rule in that chain
 *   - table only:     delete every rule in every chain of the table
 *
 * Returns 0 on success or a negative errno. */
static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb,
			     const struct nlmsghdr *nlh,
			     const struct nlattr * const nla[])
{
	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	struct nft_af_info *afi;
	struct net *net = sock_net(skb->sk);
	struct nft_table *table;
	struct nft_chain *chain = NULL;
	struct nft_rule *rule;
	int family = nfmsg->nfgen_family, err = 0;
	struct nft_ctx ctx;

	/* Resolve family and table; a half-created (inactive) table is
	 * treated as nonexistent. */
	afi = nf_tables_afinfo_lookup(net, family, false);
	if (IS_ERR(afi))
		return PTR_ERR(afi);

	table = nf_tables_table_lookup(afi, nla[NFTA_RULE_TABLE]);
	if (IS_ERR(table))
		return PTR_ERR(table);
	if (table->flags & NFT_TABLE_INACTIVE)
		return -ENOENT;

	if (nla[NFTA_RULE_CHAIN]) {
		chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN]);
		if (IS_ERR(chain))
			return PTR_ERR(chain);
	}

	nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla);

	if (chain) {
		if (nla[NFTA_RULE_HANDLE]) {
			/* Single rule identified by its handle. */
			rule = nf_tables_rule_lookup(chain,
						     nla[NFTA_RULE_HANDLE]);
			if (IS_ERR(rule))
				return PTR_ERR(rule);

			err = nft_delrule(&ctx, rule);
		} else {
			/* Flush the whole chain. */
			err = nft_delrule_by_chain(&ctx);
		}
	} else {
		/* No chain given: flush rules from every chain in the table,
		 * stopping at the first failure. */
		list_for_each_entry(chain, &table->chains, list) {
			ctx.chain = chain;
			err = nft_delrule_by_chain(&ctx);
			if (err < 0)
				break;
		}
	}

	return err;
}
"CWE-19"
] | nf | a2f18db0c68fec96631c10cad9384c196e9008ac | 249,369,988,868,607,080,000,000,000,000,000,000,000 | 53 | netfilter: nf_tables: fix flush ruleset chain dependencies
Jumping between chains doesn't mix well with flush ruleset. Rules
from a different chain and set elements may still refer to us.
[ 353.373791] ------------[ cut here ]------------
[ 353.373845] kernel BUG at net/netfilter/nf_tables_api.c:1159!
[ 353.373896] invalid opcode: 0000 [#1] SMP
[ 353.373942] Modules linked in: intel_powerclamp uas iwldvm iwlwifi
[ 353.374017] CPU: 0 PID: 6445 Comm: 31c3.nft Not tainted 3.18.0 #98
[ 353.374069] Hardware name: LENOVO 5129CTO/5129CTO, BIOS 6QET47WW (1.17 ) 07/14/2010
[...]
[ 353.375018] Call Trace:
[ 353.375046] [<ffffffff81964c31>] ? nf_tables_commit+0x381/0x540
[ 353.375101] [<ffffffff81949118>] nfnetlink_rcv+0x3d8/0x4b0
[ 353.375150] [<ffffffff81943fc5>] netlink_unicast+0x105/0x1a0
[ 353.375200] [<ffffffff8194438e>] netlink_sendmsg+0x32e/0x790
[ 353.375253] [<ffffffff818f398e>] sock_sendmsg+0x8e/0xc0
[ 353.375300] [<ffffffff818f36b9>] ? move_addr_to_kernel.part.20+0x19/0x70
[ 353.375357] [<ffffffff818f44f9>] ? move_addr_to_kernel+0x19/0x30
[ 353.375410] [<ffffffff819016d2>] ? verify_iovec+0x42/0xd0
[ 353.375459] [<ffffffff818f3e10>] ___sys_sendmsg+0x3f0/0x400
[ 353.375510] [<ffffffff810615fa>] ? native_sched_clock+0x2a/0x90
[ 353.375563] [<ffffffff81176697>] ? acct_account_cputime+0x17/0x20
[ 353.375616] [<ffffffff8110dc78>] ? account_user_time+0x88/0xa0
[ 353.375667] [<ffffffff818f4bbd>] __sys_sendmsg+0x3d/0x80
[ 353.375719] [<ffffffff81b184f4>] ? int_check_syscall_exit_work+0x34/0x3d
[ 353.375776] [<ffffffff818f4c0d>] SyS_sendmsg+0xd/0x20
[ 353.375823] [<ffffffff81b1826d>] system_call_fastpath+0x16/0x1b
Release objects in this order: rules -> sets -> chains -> tables, to
make sure no references to chains are held anymore.
Reported-by: Asbjoern Sloth Toennesen <[email protected]>
Signed-off-by: Pablo Neira Ayuso <[email protected]> |
/**
 * ring_buffer_iter_empty - check if the iterator has consumed all events
 * @iter: the iterator to check
 *
 * Returns nonzero when the iterator's read position (head_page/head) has
 * caught up with the commit page and commit index of its per-cpu buffer.
 */
int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	cpu_buffer = iter->cpu_buffer;

	return iter->head_page == cpu_buffer->commit_page &&
		iter->head == rb_commit_index(cpu_buffer);
}
"CWE-190"
] | linux-stable | 59643d1535eb220668692a5359de22545af579f6 | 331,894,688,029,972,360,000,000,000,000,000,000,000 | 9 | ring-buffer: Prevent overflow of size in ring_buffer_resize()
If the size passed to ring_buffer_resize() is greater than MAX_LONG - BUF_PAGE_SIZE
then the DIV_ROUND_UP() will return zero.
Here's the details:
# echo 18014398509481980 > /sys/kernel/debug/tracing/buffer_size_kb
tracing_entries_write() processes this and converts kb to bytes.
18014398509481980 << 10 = 18446744073709547520
and this is passed to ring_buffer_resize() as unsigned long size.
size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
Where DIV_ROUND_UP(a, b) is (a + b - 1)/b
BUF_PAGE_SIZE is 4080 and here
18446744073709547520 + 4080 - 1 = 18446744073709551599
where 18446744073709551599 is still smaller than 2^64
2^64 - 18446744073709551599 = 17
But now 18446744073709551599 / 4080 = 4521260802379792
and size = size * 4080 = 18446744073709551360
This is checked to make sure its still greater than 2 * 4080,
which it is.
Then we convert to the number of buffer pages needed.
nr_page = DIV_ROUND_UP(size, BUF_PAGE_SIZE)
but this time size is 18446744073709551360 and
2^64 - (18446744073709551360 + 4080 - 1) = -3823
Thus it overflows and the resulting number is less than 4080, which makes
3823 / 4080 = 0
an nr_pages is set to this. As we already checked against the minimum that
nr_pages may be, this causes the logic to fail as well, and we crash the
kernel.
There's no reason to have the two DIV_ROUND_UP() (that's just result of
historical code changes), clean up the code and fix this bug.
Cc: [email protected] # 3.5+
Fixes: 83f40318dab00 ("ring-buffer: Make removal of ring buffer pages atomic")
Signed-off-by: Steven Rostedt <[email protected]> |
/* Return the tfrf data stored on the given track, or NULL when the track
 * does not exist or fragment support is compiled out.  The returned
 * pointer is owned by the track; callers must not free it.
 * NOTE(review): presumably the smooth-streaming 'tfrf' box parsed from
 * incoming fragments — confirm against the producers of trak->tfrf.
 * NOTE(review): the closing brace of this function lies outside this
 * excerpt. */
const void *gf_isom_get_tfrf(GF_ISOFile *movie, u32 trackNumber)
{
#ifdef GPAC_DISABLE_ISOM_FRAGMENTS
	return NULL;
#else
	GF_TrackBox *trak = gf_isom_get_track_from_file(movie, trackNumber);
	if (!trak) return NULL;

	return trak->tfrf;
#endif
"CWE-476"
] | gpac | ebfa346eff05049718f7b80041093b4c5581c24e | 151,103,930,777,488,770,000,000,000,000,000,000,000 | 11 | fixed #1706 |
/* XML dumper for the 'clap' (clean aperture) box: writes each
 * numerator/denominator pair as attributes of a CleanApertureBox element. */
GF_Err clap_box_dump(GF_Box *a, FILE * trace)
{
	GF_CleanApertureBox *ptr = (GF_CleanApertureBox*)a;
	gf_isom_box_dump_start(a, "CleanApertureBox", trace);
	gf_fprintf(trace, "cleanApertureWidthN=\"%d\" cleanApertureWidthD=\"%d\" ", ptr->cleanApertureWidthN, ptr->cleanApertureWidthD);
	gf_fprintf(trace, "cleanApertureHeightN=\"%d\" cleanApertureHeightD=\"%d\" ", ptr->cleanApertureHeightN, ptr->cleanApertureHeightD);
	gf_fprintf(trace, "horizOffN=\"%d\" horizOffD=\"%d\" ", ptr->horizOffN, ptr->horizOffD);
	gf_fprintf(trace, "vertOffN=\"%d\" vertOffD=\"%d\"", ptr->vertOffN, ptr->vertOffD);
	/* Close the opening tag; dump_done emits the closing element. */
	gf_fprintf(trace, ">\n");
	gf_isom_box_dump_done("CleanApertureBox", a, trace);
	return GF_OK;
}
"CWE-787"
] | gpac | ea1eca00fd92fa17f0e25ac25652622924a9a6a0 | 337,128,890,931,455,430,000,000,000,000,000,000,000 | 12 | fixed #2138 |
/* Heuristically detect ASS subtitles produced by legacy ffmpeg/libav SRT
 * conversion (2014.10.11 - 2020.04.17 window described below), which
 * relied on ScaledBorderAndShadow defaulting to "yes".  Returns true when
 * the track matches the exact header/style fingerprint of those files. */
static bool detect_legacy_conv_subs(ASS_Track *track)
{
    /*
     * FFmpeg and libav convert srt subtitles to ass.
     * In legacy versions, they did not set the 'ScaledBorderAndShadow' header,
     * but expected it to default to yes (which libass did).
     * To avoid breaking them, we try to detect these
     * converted subs by common properties of ffmpeg/libav's converted subs.
     * Since files with custom format lines (-2014.10.11) default to SBAS=1
     * regardless of being ffmpeg generated or not, we are only concerned with
     * post-signature and pre-SBAS ffmpeg-files (2014.10.11-2020.04.17).
     * We want to avoid matching modified ffmpeg files though.
     *
     * Relevant ffmpeg commits are:
     *  2c77c90684e24ef16f7e7c4462e011434cee6a98  2010.12.29
     *    Initial conversion format.
     *    Style "Format:" line is mix of SSA and ASS
     *    Event "Format:" line
     *     "Format: Layer, Start, End, Text\r\n"
     *    Only Header in ScriptInfo is "ScriptType: v4.00+"
     *  0e7782c08ec77739edb0b98ba5d896b45e98235f  2012.06.15
     *    Adds 'Style' to Event "Format:" line
     *  5039aadf68deb9ad6dd0737ea11259fe53d3727b  2014.06.18
     *    Adds PlayerRes(X|Y) (384x288)
     *    (moved below ScriptType: a few minutes later)
     *  40b9f28641b696c6bb73ce49dc97c2ce2700cbdb  2014.10.11 14:31:23 +0200
     *    Regular full ASS Event and Style "Format:" lines
     *  52b0a0ecaa02e17f7e01bead8c3f215f1cfd48dc  2014.10.11 18:37:43 +0200 <==
     *    Signature comment
     *  56bc0a6736cdc7edab837ff8f304661fd16de0e4  2015.02.08
     *    Allow custom PlayRes(X|Y)
     *  a8ba2a2c1294a330a0e79ae7f0d3a203a7599166  2020.04.17
     *    Set 'ScaledBorderAndShadow: yes'
     *
     * libav outputs initial ffmpeg format. (no longer maintained)
     */

    // GENBY_FFMPEG and exact ffmpeg headers required
    // Note: If there's SINFO_SCRIPTTYPE in the future this needs to be updated
    // (XOR: exactly these three flags must be set, no more, no fewer)
    if (track->parser_priv->header_flags
            ^ (SINFO_PLAYRESX | SINFO_PLAYRESY | GENBY_FFMPEG))
        return false;

    // Legacy ffmpeg only ever has one style
    // Check 2 not 1 because libass also adds a def style
    if (track->n_styles != 2
            || strncmp(track->styles[1].Name, "Default", 7))
        return false;

    return true;
}
"CWE-369",
"CWE-787"
] | libass | 017137471d0043e0321e377ed8da48e45a3ec632 | 321,245,480,650,862,100,000,000,000,000,000,000,000 | 51 | decode_font: fix subtraction broken by change to unsigned type
This caused a one-byte buffer overwrite and an assertion failure.
Regression in commit 910211f1c0078e37546f73e95306724358b89be2.
Discovered by OSS-Fuzz.
Fixes https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=26674.
Fixes https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=26678. |
/* Tell whether a color space signature can serve as a Profile Connection
 * Space (PCS).  Only CIE XYZ and CIE Lab qualify.  Returns 1 or 0. */
int IsPCS(cmsColorSpaceSignature ColorSpace)
{
    switch (ColorSpace) {

    case cmsSigXYZData:
    case cmsSigLabData:
        return 1;

    default:
        return 0;
    }
}
When creating a new pipeline, lcms would often try to allocate a stage
and pass it to cmsPipelineInsertStage without checking whether the
allocation succeeded. cmsPipelineInsertStage would then assert (or crash)
if it had not.
The fix here is to change cmsPipelineInsertStage to check and return
an error value. All calling code is then checked to test this return
value and cope. |
// Record that the visited grammar contains a token-boundary operator.
void visit(TokenBoundary & /*ope*/) override { has_token_boundary_ = true; }
"CWE-125"
] | cpp-peglib | b3b29ce8f3acf3a32733d930105a17d7b0ba347e | 279,930,879,786,509,720,000,000,000,000,000,000,000 | 1 | Fix #122 |
int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
{
struct super_block *sb = inode->i_sb;
handle_t *handle;
struct ext4_ext_path *path;
struct ext4_extent *extent;
ext4_lblk_t offset_lblk, len_lblk, ee_start_lblk = 0;
unsigned int credits, ee_len;
int ret = 0, depth, split_flag = 0;
loff_t ioffset;
/*
* We need to test this early because xfstests assumes that an
* insert range of (0, 1) will return EOPNOTSUPP if the file
* system does not support insert range.
*/
if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
return -EOPNOTSUPP;
/* Insert range works only on fs block size aligned offsets. */
if (offset & (EXT4_CLUSTER_SIZE(sb) - 1) ||
len & (EXT4_CLUSTER_SIZE(sb) - 1))
return -EINVAL;
if (!S_ISREG(inode->i_mode))
return -EOPNOTSUPP;
trace_ext4_insert_range(inode, offset, len);
offset_lblk = offset >> EXT4_BLOCK_SIZE_BITS(sb);
len_lblk = len >> EXT4_BLOCK_SIZE_BITS(sb);
/* Call ext4_force_commit to flush all data in case of data=journal */
if (ext4_should_journal_data(inode)) {
ret = ext4_force_commit(inode->i_sb);
if (ret)
return ret;
}
/*
* Need to round down to align start offset to page size boundary
* for page size > block size.
*/
ioffset = round_down(offset, PAGE_SIZE);
/* Write out all dirty pages */
ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
LLONG_MAX);
if (ret)
return ret;
/* Take mutex lock */
mutex_lock(&inode->i_mutex);
/* Currently just for extent based files */
if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
ret = -EOPNOTSUPP;
goto out_mutex;
}
/* Check for wrap through zero */
if (inode->i_size + len > inode->i_sb->s_maxbytes) {
ret = -EFBIG;
goto out_mutex;
}
/* Offset should be less than i_size */
if (offset >= i_size_read(inode)) {
ret = -EINVAL;
goto out_mutex;
}
/* Wait for existing dio to complete */
ext4_inode_block_unlocked_dio(inode);
inode_dio_wait(inode);
/*
* Prevent page faults from reinstantiating pages we have released from
* page cache.
*/
down_write(&EXT4_I(inode)->i_mmap_sem);
truncate_pagecache(inode, ioffset);
credits = ext4_writepage_trans_blocks(inode);
handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
goto out_mmap;
}
/* Expand file to avoid data loss if there is error while shifting */
inode->i_size += len;
EXT4_I(inode)->i_disksize += len;
inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
ret = ext4_mark_inode_dirty(handle, inode);
if (ret)
goto out_stop;
down_write(&EXT4_I(inode)->i_data_sem);
ext4_discard_preallocations(inode);
path = ext4_find_extent(inode, offset_lblk, NULL, 0);
if (IS_ERR(path)) {
up_write(&EXT4_I(inode)->i_data_sem);
goto out_stop;
}
depth = ext_depth(inode);
extent = path[depth].p_ext;
if (extent) {
ee_start_lblk = le32_to_cpu(extent->ee_block);
ee_len = ext4_ext_get_actual_len(extent);
/*
* If offset_lblk is not the starting block of extent, split
* the extent @offset_lblk
*/
if ((offset_lblk > ee_start_lblk) &&
(offset_lblk < (ee_start_lblk + ee_len))) {
if (ext4_ext_is_unwritten(extent))
split_flag = EXT4_EXT_MARK_UNWRIT1 |
EXT4_EXT_MARK_UNWRIT2;
ret = ext4_split_extent_at(handle, inode, &path,
offset_lblk, split_flag,
EXT4_EX_NOCACHE |
EXT4_GET_BLOCKS_PRE_IO |
EXT4_GET_BLOCKS_METADATA_NOFAIL);
}
ext4_ext_drop_refs(path);
kfree(path);
if (ret < 0) {
up_write(&EXT4_I(inode)->i_data_sem);
goto out_stop;
}
}
ret = ext4_es_remove_extent(inode, offset_lblk,
EXT_MAX_BLOCKS - offset_lblk);
if (ret) {
up_write(&EXT4_I(inode)->i_data_sem);
goto out_stop;
}
/*
* if offset_lblk lies in a hole which is at start of file, use
* ee_start_lblk to shift extents
*/
ret = ext4_ext_shift_extents(inode, handle,
ee_start_lblk > offset_lblk ? ee_start_lblk : offset_lblk,
len_lblk, SHIFT_RIGHT);
up_write(&EXT4_I(inode)->i_data_sem);
if (IS_SYNC(inode))
ext4_handle_sync(handle);
out_stop:
ext4_journal_stop(handle);
out_mmap:
up_write(&EXT4_I(inode)->i_mmap_sem);
ext4_inode_resume_unlocked_dio(inode);
out_mutex:
mutex_unlock(&inode->i_mutex);
return ret;
} | 1 | [
"CWE-362"
] | linux | 32ebffd3bbb4162da5ff88f9a35dd32d0a28ea70 | 64,994,538,480,491,920,000,000,000,000,000,000,000 | 165 | ext4: fix races between buffered IO and collapse / insert range
Current code implementing FALLOC_FL_COLLAPSE_RANGE and
FALLOC_FL_INSERT_RANGE is prone to races with buffered writes and page
faults. If buffered write or write via mmap manages to squeeze between
filemap_write_and_wait_range() and truncate_pagecache() in the fallocate
implementations, the written data is simply discarded by
truncate_pagecache() although it should have been shifted.
Fix the problem by moving filemap_write_and_wait_range() call inside
i_mutex and i_mmap_sem. That way we are protected against races with
both buffered writes and page faults.
Signed-off-by: Jan Kara <[email protected]>
Signed-off-by: Theodore Ts'o <[email protected]> |
static int hidp_setup_hid(struct hidp_session *session,
struct hidp_connadd_req *req)
{
struct hid_device *hid;
int err;
session->rd_data = memdup_user(req->rd_data, req->rd_size);
if (IS_ERR(session->rd_data))
return PTR_ERR(session->rd_data);
session->rd_size = req->rd_size;
hid = hid_allocate_device();
if (IS_ERR(hid)) {
err = PTR_ERR(hid);
goto fault;
}
session->hid = hid;
hid->driver_data = session;
hid->bus = BUS_BLUETOOTH;
hid->vendor = req->vendor;
hid->product = req->product;
hid->version = req->version;
hid->country = req->country;
strncpy(hid->name, req->name, sizeof(hid->name));
snprintf(hid->phys, sizeof(hid->phys), "%pMR",
&l2cap_pi(session->ctrl_sock->sk)->chan->src);
/* NOTE: Some device modules depend on the dst address being stored in
* uniq. Please be aware of this before making changes to this behavior.
*/
snprintf(hid->uniq, sizeof(hid->uniq), "%pMR",
&l2cap_pi(session->ctrl_sock->sk)->chan->dst);
hid->dev.parent = &session->conn->hcon->dev;
hid->ll_driver = &hidp_hid_driver;
/* True if device is blacklisted in drivers/hid/hid-quirks.c */
if (hid_ignore(hid)) {
hid_destroy_device(session->hid);
session->hid = NULL;
return -ENODEV;
}
return 0;
fault:
kfree(session->rd_data);
session->rd_data = NULL;
return err;
} | 0 | [
"CWE-787"
] | linux | 7992c18810e568b95c869b227137a2215702a805 | 228,857,198,672,617,000,000,000,000,000,000,000,000 | 57 | Bluetooth: hidp: buffer overflow in hidp_process_report
CVE-2018-9363
The buffer length is unsigned at all layers, but gets cast to int and
checked in hidp_process_report and can lead to a buffer overflow.
Switch len parameter to unsigned int to resolve issue.
This affects 3.18 and newer kernels.
Signed-off-by: Mark Salyzyn <[email protected]>
Fixes: a4b1b5877b514b276f0f31efe02388a9c2836728 ("HID: Bluetooth: hidp: make sure input buffers are big enough")
Cc: Marcel Holtmann <[email protected]>
Cc: Johan Hedberg <[email protected]>
Cc: "David S. Miller" <[email protected]>
Cc: Kees Cook <[email protected]>
Cc: Benjamin Tissoires <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Acked-by: Kees Cook <[email protected]>
Signed-off-by: Marcel Holtmann <[email protected]> |
void test_moveto(bezctx *bc, double x, double y, int is_open) {
printf("test_moveto(%g,%g)_%d\n",x,y,is_open);
} | 0 | [
"CWE-787"
] | libspiro | 35233450c922787dad42321e359e5229ff470a1e | 132,609,146,111,036,130,000,000,000,000,000,000,000 | 3 | CVE-2019-19847, Stack-based buffer overflow in the spiro_to_bpath0()
Frederic Cambus (@fcambus) discovered a bug in call-test.c using:
./configure CFLAGS="-fsanitize=address"
make
./tests/call-test[14,15,16,17,18,19]
Fredrick Brennan (@ctrlcctrlv) provided bugfix. See issue #21 |
static int em_das(struct x86_emulate_ctxt *ctxt)
{
u8 al, old_al;
bool af, cf, old_cf;
cf = ctxt->eflags & X86_EFLAGS_CF;
al = ctxt->dst.val;
old_al = al;
old_cf = cf;
cf = false;
af = ctxt->eflags & X86_EFLAGS_AF;
if ((al & 0x0f) > 9 || af) {
al -= 6;
cf = old_cf | (al >= 250);
af = true;
} else {
af = false;
}
if (old_al > 0x99 || old_cf) {
al -= 0x60;
cf = true;
}
ctxt->dst.val = al;
/* Set PF, ZF, SF */
ctxt->src.type = OP_IMM;
ctxt->src.val = 0;
ctxt->src.bytes = 1;
fastop(ctxt, em_or);
ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
if (cf)
ctxt->eflags |= X86_EFLAGS_CF;
if (af)
ctxt->eflags |= X86_EFLAGS_AF;
return X86EMUL_CONTINUE;
} | 0 | [] | kvm | d1442d85cc30ea75f7d399474ca738e0bc96f715 | 227,372,805,831,688,120,000,000,000,000,000,000,000 | 37 | KVM: x86: Handle errors when RIP is set during far jumps
Far jmp/call/ret may fault while loading a new RIP. Currently KVM does not
handle this case, and may result in failed vm-entry once the assignment is
done. The tricky part of doing so is that loading the new CS affects the
VMCS/VMCB state, so if we fail during loading the new RIP, we are left in
unconsistent state. Therefore, this patch saves on 64-bit the old CS
descriptor and restores it if loading RIP failed.
This fixes CVE-2014-3647.
Cc: [email protected]
Signed-off-by: Nadav Amit <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
static int read_off64(off_t *var, unsigned char *mem,
struct mspack_system *sys, struct mspack_file *fh)
{
#if LARGEFILE_SUPPORT
*var = EndGetI64(mem);
#else
*var = EndGetI32(mem);
if ((*var & 0x80000000) || EndGetI32(mem+4)) {
sys->message(fh, (char *)largefile_msg);
return 1;
}
#endif
return 0;
} | 0 | [
"CWE-119",
"CWE-787"
] | libmspack | 2f084136cfe0d05e5bf5703f3e83c6d955234b4d | 34,940,486,150,690,427,000,000,000,000,000,000,000 | 14 | length checks when looking for control files |
static bool parse_too_many_nested_mime_parts(struct message_parser_ctx *ctx)
{
return ctx->nested_parts_count+1 >= ctx->max_nested_mime_parts;
} | 0 | [
"CWE-20"
] | core | fb97a1cddbda4019e327fa736972a1c7433fedaa | 108,221,675,385,489,310,000,000,000,000,000,000,000 | 4 | lib-mail: message-parser - Fix assert-crash when enforcing MIME part limit
The limit could have been exceeded with message/rfc822 parts. |
ews_response_cb (SoupSession *session,
SoupMessage *msg,
gpointer data)
{
EwsNode *enode = (EwsNode *) data;
ESoapResponse *response;
ESoapParameter *param;
const gchar *persistent_auth;
gint log_level;
gint wait_ms = 0;
persistent_auth = soup_message_headers_get_one (msg->response_headers, "Persistent-Auth");
if (persistent_auth && g_ascii_strcasecmp (persistent_auth, "false") == 0) {
SoupSessionFeature *feature;
feature = soup_session_get_feature (session, SOUP_TYPE_AUTH_MANAGER);
if (feature) {
soup_auth_manager_clear_cached_credentials (SOUP_AUTH_MANAGER (feature));
}
}
if (g_cancellable_is_cancelled (enode->cancellable))
goto exit;
ews_connection_check_ssl_error (enode->cnc, msg);
if (ews_connection_credentials_failed (enode->cnc, msg, enode->simple)) {
goto exit;
} else if (msg->status_code == SOUP_STATUS_SSL_FAILED) {
g_simple_async_result_set_error (
enode->simple, SOUP_HTTP_ERROR, SOUP_STATUS_SSL_FAILED,
"%s", msg->reason_phrase);
goto exit;
} else if (msg->status_code == SOUP_STATUS_UNAUTHORIZED) {
if (msg->response_headers) {
const gchar *diagnostics;
diagnostics = soup_message_headers_get_list (msg->response_headers, "X-MS-DIAGNOSTICS");
if (diagnostics && strstr (diagnostics, "invalid_grant")) {
g_simple_async_result_set_error (
enode->simple,
EWS_CONNECTION_ERROR,
EWS_CONNECTION_ERROR_ACCESSDENIED,
"%s", diagnostics);
goto exit;
} else if (diagnostics && *diagnostics) {
g_simple_async_result_set_error (
enode->simple,
EWS_CONNECTION_ERROR,
EWS_CONNECTION_ERROR_AUTHENTICATION_FAILED,
"%s", diagnostics);
goto exit;
}
}
g_simple_async_result_set_error (
enode->simple,
EWS_CONNECTION_ERROR,
EWS_CONNECTION_ERROR_AUTHENTICATION_FAILED,
_("Authentication failed"));
goto exit;
} else if (msg->status_code == SOUP_STATUS_CANT_RESOLVE ||
msg->status_code == SOUP_STATUS_CANT_RESOLVE_PROXY ||
msg->status_code == SOUP_STATUS_CANT_CONNECT ||
msg->status_code == SOUP_STATUS_CANT_CONNECT_PROXY ||
msg->status_code == SOUP_STATUS_IO_ERROR) {
g_simple_async_result_set_error (
enode->simple,
EWS_CONNECTION_ERROR,
EWS_CONNECTION_ERROR_UNAVAILABLE,
"%s", msg->reason_phrase);
goto exit;
}
response = e_soap_message_parse_response ((ESoapMessage *) msg);
if (response == NULL) {
g_simple_async_result_set_error (
enode->simple,
EWS_CONNECTION_ERROR,
EWS_CONNECTION_ERROR_NORESPONSE,
_("No response: %s"), msg->reason_phrase);
goto exit;
}
/* TODO: The stdout can be replaced with Evolution's
* Logging framework also */
log_level = e_ews_debug_get_log_level ();
if (log_level >= 1 && log_level < 3) {
/* This will dump only the headers, since we stole the body.
* And only if EWS_DEBUG=1, since higher levels will have dumped
* it directly from libsoup anyway. */
e_ews_debug_dump_raw_soup_response (msg);
/* And this will dump the body... */
e_soap_response_dump_response (response, stdout);
}
param = e_soap_response_get_first_parameter_by_name (response, "detail", NULL);
if (param)
param = e_soap_parameter_get_first_child_by_name (param, "ResponseCode");
if (param) {
gchar *value;
value = e_soap_parameter_get_string_value (param);
if (value && ews_get_error_code (value) == EWS_CONNECTION_ERROR_SERVERBUSY) {
param = e_soap_response_get_first_parameter_by_name (response, "detail", NULL);
if (param)
param = e_soap_parameter_get_first_child_by_name (param, "MessageXml");
if (param) {
param = e_soap_parameter_get_first_child_by_name (param, "Value");
if (param) {
g_free (value);
value = e_soap_parameter_get_property (param, "Name");
if (g_strcmp0 (value, "BackOffMilliseconds") == 0) {
wait_ms = e_soap_parameter_get_int_value (param);
}
}
}
}
g_free (value);
}
if (wait_ms > 0 && e_ews_connection_get_backoff_enabled (enode->cnc)) {
GCancellable *cancellable = enode->cancellable;
EFlag *flag;
if (cancellable)
g_object_ref (cancellable);
g_object_ref (msg);
flag = e_flag_new ();
while (wait_ms > 0 && !g_cancellable_is_cancelled (cancellable) && msg->status_code != SOUP_STATUS_CANCELLED) {
gint64 now = g_get_monotonic_time ();
gint left_minutes, left_seconds;
left_minutes = wait_ms / 60000;
left_seconds = (wait_ms / 1000) % 60;
if (left_minutes > 0) {
camel_operation_push_message (cancellable,
g_dngettext (GETTEXT_PACKAGE,
"Exchange server is busy, waiting to retry (%d:%02d minute)",
"Exchange server is busy, waiting to retry (%d:%02d minutes)", left_minutes),
left_minutes, left_seconds);
} else {
camel_operation_push_message (cancellable,
g_dngettext (GETTEXT_PACKAGE,
"Exchange server is busy, waiting to retry (%d second)",
"Exchange server is busy, waiting to retry (%d seconds)", left_seconds),
left_seconds);
}
e_flag_wait_until (flag, now + (G_TIME_SPAN_MILLISECOND * (wait_ms > 1000 ? 1000 : wait_ms)));
now = g_get_monotonic_time () - now;
now = now / G_TIME_SPAN_MILLISECOND;
if (now >= wait_ms)
wait_ms = 0;
wait_ms -= now;
camel_operation_pop_message (cancellable);
}
e_flag_free (flag);
g_object_unref (response);
if (g_cancellable_is_cancelled (cancellable) ||
msg->status_code == SOUP_STATUS_CANCELLED) {
g_clear_object (&cancellable);
g_object_unref (msg);
} else {
EwsNode *new_node;
new_node = ews_node_new ();
new_node->msg = E_SOAP_MESSAGE (msg); /* takes ownership */
new_node->pri = enode->pri;
new_node->cb = enode->cb;
new_node->cnc = enode->cnc;
new_node->simple = enode->simple;
enode->simple = NULL;
QUEUE_LOCK (enode->cnc);
enode->cnc->priv->jobs = g_slist_prepend (enode->cnc->priv->jobs, new_node);
QUEUE_UNLOCK (enode->cnc);
if (cancellable) {
new_node->cancellable = g_object_ref (cancellable);
new_node->cancel_handler_id = g_cancellable_connect (
cancellable, G_CALLBACK (ews_cancel_request), new_node, NULL);
}
g_clear_object (&cancellable);
}
goto exit;
}
if (enode->cb != NULL)
enode->cb (response, enode->simple);
g_object_unref (response);
exit:
if (enode->simple)
g_simple_async_result_complete_in_idle (enode->simple);
ews_active_job_done (enode->cnc, enode);
} | 0 | [
"CWE-295"
] | evolution-ews | 915226eca9454b8b3e5adb6f2fff9698451778de | 277,661,027,831,251,040,000,000,000,000,000,000,000 | 214 | I#27 - SSL Certificates are not validated
This depends on https://gitlab.gnome.org/GNOME/evolution-data-server/commit/6672b8236139bd6ef41ecb915f4c72e2a052dba5 too.
Closes https://gitlab.gnome.org/GNOME/evolution-ews/issues/27 |
static CURLcode setstropt(char **charp, const char *s)
{
/* Release the previous storage at `charp' and replace by a dynamic storage
copy of `s'. Return CURLE_OK or CURLE_OUT_OF_MEMORY. */
Curl_safefree(*charp);
if(s) {
char *str = strdup(s);
if(!str)
return CURLE_OUT_OF_MEMORY;
*charp = str;
}
return CURLE_OK;
} | 0 | [
"CWE-287"
] | curl | d41dcba4e9b69d6b761e3460cc6ae7e8fd8f621f | 103,688,458,934,008,430,000,000,000,000,000,000,000 | 18 | NTLM: Fix ConnectionExists to compare Proxy credentials
Proxy NTLM authentication should compare credentials when
re-using a connection similar to host authentication, as it
authenticate the connection.
Example:
curl -v -x http://proxy:port http://host/ -U good_user:good_pwd
--proxy-ntlm --next -x http://proxy:port http://host/
[-U fake_user:fake_pwd --proxy-ntlm]
CVE-2016-0755
Bug: http://curl.haxx.se/docs/adv_20160127A.html |
g_NPN_PostURLNotify(NPP instance, const char *url, const char *target, uint32_t len, const char *buf, NPBool file, void *notifyData)
{
if (!thread_check()) {
npw_printf("WARNING: NPN_PostURLNotify not called from the main thread\n");
return NPERR_INVALID_INSTANCE_ERROR;
}
if (instance == NULL)
return NPERR_INVALID_INSTANCE_ERROR;
PluginInstance *plugin = PLUGIN_INSTANCE(instance);
if (plugin == NULL)
return NPERR_INVALID_INSTANCE_ERROR;
D(bugiI("NPN_PostURLNotify instance=%p\n", instance));
npw_plugin_instance_ref(plugin);
NPError ret = invoke_NPN_PostURLNotify(plugin, url, target, len, buf, file, notifyData);
npw_plugin_instance_unref(plugin);
D(bugiD("NPN_PostURLNotify return: %d [%s]\n", ret, string_of_NPError(ret)));
return ret;
} | 0 | [
"CWE-264"
] | nspluginwrapper | 7e4ab8e1189846041f955e6c83f72bc1624e7a98 | 230,369,436,199,996,630,000,000,000,000,000,000,000 | 21 | Support all the new variables added |
static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
{
unsigned long cl;
struct Qdisc *leaf;
const struct Qdisc_class_ops *cops = p->ops->cl_ops;
if (cops == NULL)
return NULL;
cl = cops->get(p, classid);
if (cl == 0)
return NULL;
leaf = cops->leaf(p, cl);
cops->put(p, cl);
return leaf;
} | 0 | [
"CWE-909"
] | linux-2.6 | 16ebb5e0b36ceadc8186f71d68b0c4fa4b6e781b | 222,638,836,268,317,400,000,000,000,000,000,000,000 | 16 | tc: Fix unitialized kernel memory leak
Three bytes of uninitialized kernel memory are currently leaked to user
Signed-off-by: Eric Dumazet <[email protected]>
Reviewed-by: Jiri Pirko <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
proto_get_id(const protocol_t *protocol)
{
return protocol->proto_id;
} | 0 | [
"CWE-401"
] | wireshark | a9fc769d7bb4b491efb61c699d57c9f35269d871 | 228,314,836,214,534,600,000,000,000,000,000,000,000 | 4 | epan: Fix a memory leak.
Make sure _proto_tree_add_bits_ret_val allocates a bits array using the
packet scope, otherwise we leak memory. Fixes #17032. |
int pkey_gost_decrypt(EVP_PKEY_CTX *pctx, unsigned char *key,
size_t *key_len, const unsigned char *in, size_t in_len)
{
struct gost_pmeth_data *gctx = EVP_PKEY_CTX_get_data(pctx);
switch (gctx->cipher_nid)
{
case NID_id_Gost28147_89:
case NID_undef: /* FIXME */
return pkey_GOST_ECcp_decrypt(pctx, key, key_len, in, in_len);
case NID_kuznyechik_ctr:
case NID_magma_ctr:
return pkey_gost2018_decrypt(pctx, key, key_len, in, in_len);
default:
GOSTerr(GOST_F_PKEY_GOST_DECRYPT, ERR_R_INTERNAL_ERROR);
return -1;
}
} | 1 | [
"CWE-120",
"CWE-787"
] | engine | b2b4d629f100eaee9f5942a106b1ccefe85b8808 | 60,457,162,388,305,120,000,000,000,000,000,000,000 | 17 | On unpacking key blob output buffer size should be fixed
Related: CVE-2022-29242 |
struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
struct flowi6 *fl6)
{
int flags = 0;
fl6->flowi6_iif = LOOPBACK_IFINDEX;
if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr))
flags |= RT6_LOOKUP_F_IFACE;
if (!ipv6_addr_any(&fl6->saddr))
flags |= RT6_LOOKUP_F_HAS_SADDR;
else if (sk)
flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);
return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_output);
} | 0 | [
"CWE-17"
] | linux-stable | 9d289715eb5c252ae15bd547cb252ca547a3c4f2 | 283,430,476,882,192,400,000,000,000,000,000,000,000 | 17 | ipv6: stop sending PTB packets for MTU < 1280
Reduce the attack vector and stop generating IPv6 Fragment Header for
paths with an MTU smaller than the minimum required IPv6 MTU
size (1280 byte) - called atomic fragments.
See IETF I-D "Deprecating the Generation of IPv6 Atomic Fragments" [1]
for more information and how this "feature" can be misused.
[1] https://tools.ietf.org/html/draft-ietf-6man-deprecate-atomfrag-generation-00
Signed-off-by: Fernando Gont <[email protected]>
Signed-off-by: Hagen Paul Pfeifer <[email protected]>
Acked-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
int git_pkt_buffer_flush(git_buf *buf)
{
return git_buf_put(buf, pkt_flush_str, strlen(pkt_flush_str));
} | 0 | [
"CWE-119",
"CWE-787"
] | libgit2 | 66e3774d279672ee51c3b54545a79d20d1ada834 | 329,996,508,031,881,600,000,000,000,000,000,000,000 | 4 | smart_pkt: verify packet length exceeds PKT_LEN_SIZE
Each packet line in the Git protocol is prefixed by a four-byte
length of how much data will follow, which we parse in
`git_pkt_parse_line`. The transmitted length can either be equal
to zero in case of a flush packet or has to be at least of length
four, as it also includes the encoded length itself. Not
checking this may result in a buffer overflow as we directly pass
the length to functions which accept a `size_t` length as
parameter.
Fix the issue by verifying that non-flush packets have at least a
length of `PKT_LEN_SIZE`. |
lldpd_af_to_lldp_proto(int af)
{
switch (af) {
case LLDPD_AF_IPV4:
return LLDP_MGMT_ADDR_IP4;
case LLDPD_AF_IPV6:
return LLDP_MGMT_ADDR_IP6;
default:
return LLDP_MGMT_ADDR_NONE;
}
} | 0 | [
"CWE-617",
"CWE-703"
] | lldpd | 793526f8884455f43daecd0a2c46772388417a00 | 303,450,806,063,328,300,000,000,000,000,000,000,000 | 11 | protocols: don't use assert on paths that can be reached
Malformed packets should not make lldpd crash. Ensure we can handle them
by not using assert() in this part. |
static int sctp_send_asconf_del_ip(struct sock *sk,
struct sockaddr *addrs,
int addrcnt)
{
struct net *net = sock_net(sk);
struct sctp_sock *sp;
struct sctp_endpoint *ep;
struct sctp_association *asoc;
struct sctp_transport *transport;
struct sctp_bind_addr *bp;
struct sctp_chunk *chunk;
union sctp_addr *laddr;
void *addr_buf;
struct sctp_af *af;
struct sctp_sockaddr_entry *saddr;
int i;
int retval = 0;
int stored = 0;
chunk = NULL;
if (!net->sctp.addip_enable)
return retval;
sp = sctp_sk(sk);
ep = sp->ep;
SCTP_DEBUG_PRINTK("%s: (sk: %p, addrs: %p, addrcnt: %d)\n",
__func__, sk, addrs, addrcnt);
list_for_each_entry(asoc, &ep->asocs, asocs) {
if (!asoc->peer.asconf_capable)
continue;
if (asoc->peer.addip_disabled_mask & SCTP_PARAM_DEL_IP)
continue;
if (!sctp_state(asoc, ESTABLISHED))
continue;
/* Check if any address in the packed array of addresses is
* not present in the bind address list of the association.
* If so, do not send the asconf chunk to its peer, but
* continue with other associations.
*/
addr_buf = addrs;
for (i = 0; i < addrcnt; i++) {
laddr = addr_buf;
af = sctp_get_af_specific(laddr->v4.sin_family);
if (!af) {
retval = -EINVAL;
goto out;
}
if (!sctp_assoc_lookup_laddr(asoc, laddr))
break;
addr_buf += af->sockaddr_len;
}
if (i < addrcnt)
continue;
/* Find one address in the association's bind address list
* that is not in the packed array of addresses. This is to
* make sure that we do not delete all the addresses in the
* association.
*/
bp = &asoc->base.bind_addr;
laddr = sctp_find_unmatch_addr(bp, (union sctp_addr *)addrs,
addrcnt, sp);
if ((laddr == NULL) && (addrcnt == 1)) {
if (asoc->asconf_addr_del_pending)
continue;
asoc->asconf_addr_del_pending =
kzalloc(sizeof(union sctp_addr), GFP_ATOMIC);
if (asoc->asconf_addr_del_pending == NULL) {
retval = -ENOMEM;
goto out;
}
asoc->asconf_addr_del_pending->sa.sa_family =
addrs->sa_family;
asoc->asconf_addr_del_pending->v4.sin_port =
htons(bp->port);
if (addrs->sa_family == AF_INET) {
struct sockaddr_in *sin;
sin = (struct sockaddr_in *)addrs;
asoc->asconf_addr_del_pending->v4.sin_addr.s_addr = sin->sin_addr.s_addr;
} else if (addrs->sa_family == AF_INET6) {
struct sockaddr_in6 *sin6;
sin6 = (struct sockaddr_in6 *)addrs;
asoc->asconf_addr_del_pending->v6.sin6_addr = sin6->sin6_addr;
}
SCTP_DEBUG_PRINTK_IPADDR("send_asconf_del_ip: keep the last address asoc: %p ",
" at %p\n", asoc, asoc->asconf_addr_del_pending,
asoc->asconf_addr_del_pending);
asoc->src_out_of_asoc_ok = 1;
stored = 1;
goto skip_mkasconf;
}
/* We do not need RCU protection throughout this loop
* because this is done under a socket lock from the
* setsockopt call.
*/
chunk = sctp_make_asconf_update_ip(asoc, laddr, addrs, addrcnt,
SCTP_PARAM_DEL_IP);
if (!chunk) {
retval = -ENOMEM;
goto out;
}
skip_mkasconf:
/* Reset use_as_src flag for the addresses in the bind address
* list that are to be deleted.
*/
addr_buf = addrs;
for (i = 0; i < addrcnt; i++) {
laddr = addr_buf;
af = sctp_get_af_specific(laddr->v4.sin_family);
list_for_each_entry(saddr, &bp->address_list, list) {
if (sctp_cmp_addr_exact(&saddr->a, laddr))
saddr->state = SCTP_ADDR_DEL;
}
addr_buf += af->sockaddr_len;
}
/* Update the route and saddr entries for all the transports
* as some of the addresses in the bind address list are
* about to be deleted and cannot be used as source addresses.
*/
list_for_each_entry(transport, &asoc->peer.transport_addr_list,
transports) {
dst_release(transport->dst);
sctp_transport_route(transport, NULL,
sctp_sk(asoc->base.sk));
}
if (stored)
/* We don't need to transmit ASCONF */
continue;
retval = sctp_send_asconf(asoc, chunk);
}
out:
return retval;
} | 0 | [
"CWE-20"
] | linux | 726bc6b092da4c093eb74d13c07184b18c1af0f1 | 207,993,421,536,479,770,000,000,000,000,000,000,000 | 147 | net/sctp: Validate parameter size for SCTP_GET_ASSOC_STATS
Building sctp may fail with:
In function ‘copy_from_user’,
inlined from ‘sctp_getsockopt_assoc_stats’ at
net/sctp/socket.c:5656:20:
arch/x86/include/asm/uaccess_32.h:211:26: error: call to
‘copy_from_user_overflow’ declared with attribute error: copy_from_user()
buffer size is not provably correct
if built with W=1 due to a missing parameter size validation
before the call to copy_from_user.
Signed-off-by: Guenter Roeck <[email protected]>
Acked-by: Vlad Yasevich <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
char *lxc_string_join(const char *sep, const char **parts, bool use_as_prefix)
{
char *result;
char **p;
size_t sep_len = strlen(sep);
size_t result_len = use_as_prefix * sep_len;
/* calculate new string length */
for (p = (char **)parts; *p; p++)
result_len += (p > (char **)parts) * sep_len + strlen(*p);
result = calloc(result_len + 1, 1);
if (!result)
return NULL;
if (use_as_prefix)
strcpy(result, sep);
for (p = (char **)parts; *p; p++) {
if (p > (char **)parts)
strcat(result, sep);
strcat(result, *p);
}
return result;
} | 0 | [
"CWE-59",
"CWE-61"
] | lxc | 592fd47a6245508b79fe6ac819fe6d3b2c1289be | 193,718,193,074,927,100,000,000,000,000,000,000,000 | 25 | CVE-2015-1335: Protect container mounts against symlinks
When a container starts up, lxc sets up the container's inital fstree
by doing a bunch of mounting, guided by the container configuration
file. The container config is owned by the admin or user on the host,
so we do not try to guard against bad entries. However, since the
mount target is in the container, it's possible that the container admin
could divert the mount with symbolic links. This could bypass proper
container startup (i.e. confinement of a root-owned container by the
restrictive apparmor policy, by diverting the required write to
/proc/self/attr/current), or bypass the (path-based) apparmor policy
by diverting, say, /proc to /mnt in the container.
To prevent this,
1. do not allow mounts to paths containing symbolic links
2. do not allow bind mounts from relative paths containing symbolic
links.
Details:
Define safe_mount which ensures that the container has not inserted any
symbolic links into any mount targets for mounts to be done during
container setup.
The host's mount path may contain symbolic links. As it is under the
control of the administrator, that's ok. So safe_mount begins the check
for symbolic links after the rootfs->mount, by opening that directory.
It opens each directory along the path using openat() relative to the
parent directory using O_NOFOLLOW. When the target is reached, it
mounts onto /proc/self/fd/<targetfd>.
Use safe_mount() in mount_entry(), when mounting container proc,
and when needed. In particular, safe_mount() need not be used in
any case where:
1. the mount is done in the container's namespace
2. the mount is for the container's rootfs
3. the mount is relative to a tmpfs or proc/sysfs which we have
just safe_mount()ed ourselves
Since we were using proc/net as a temporary placeholder for /proc/sys/net
during container startup, and proc/net is a symbolic link, use proc/tty
instead.
Update the lxc.container.conf manpage with details about the new
restrictions.
Finally, add a testcase to test some symbolic link possibilities.
Reported-by: Roman Fiedler
Signed-off-by: Serge Hallyn <[email protected]>
Acked-by: Stéphane Graber <[email protected]> |
void textview_set_font(TextView *textview, const gchar *codeset)
{
GtkTextTag *tag;
GtkTextBuffer *buffer = gtk_text_view_get_buffer(GTK_TEXT_VIEW(textview->text));
GtkTextTagTable *tags = gtk_text_buffer_get_tag_table(buffer);
PangoFontDescription *font_desc, *bold_font_desc;
font_desc = pango_font_description_from_string
(NORMAL_FONT);
if (font_desc) {
gtk_widget_modify_font(textview->text, font_desc);
CHANGE_TAG_FONT("header", font_desc);
CHANGE_TAG_FONT("hlink", font_desc);
pango_font_description_free(font_desc);
}
if (prefs_common.derive_from_normal_font || !BOLD_FONT) {
bold_font_desc = pango_font_description_from_string
(NORMAL_FONT);
if (bold_font_desc)
pango_font_description_set_weight
(bold_font_desc, PANGO_WEIGHT_BOLD);
} else {
bold_font_desc = pango_font_description_from_string
(BOLD_FONT);
}
if (bold_font_desc) {
CHANGE_TAG_FONT("header_title", bold_font_desc);
pango_font_description_free(bold_font_desc);
}
if (prefs_common.textfont) {
PangoFontDescription *font_desc;
font_desc = pango_font_description_from_string
(prefs_common.textfont);
if (font_desc) {
gtk_widget_modify_font(textview->text, font_desc);
pango_font_description_free(font_desc);
}
}
gtk_text_view_set_pixels_above_lines(GTK_TEXT_VIEW(textview->text),
prefs_common.line_space / 2);
gtk_text_view_set_pixels_below_lines(GTK_TEXT_VIEW(textview->text),
prefs_common.line_space / 2);
} | 0 | [
"CWE-601"
] | claws | ac286a71ed78429e16c612161251b9ea90ccd431 | 306,081,582,828,919,900,000,000,000,000,000,000,000 | 45 | harden link checker before accepting click |
static void parse_ls(const char *p, struct branch *b)
{
struct tree_entry *root = NULL;
struct tree_entry leaf = {NULL};
/* ls SP (<tree-ish> SP)? <path> */
if (*p == '"') {
if (!b)
die("Not in a commit: %s", command_buf.buf);
root = &b->branch_tree;
} else {
struct object_entry *e = parse_treeish_dataref(&p);
root = new_tree_entry();
oidcpy(&root->versions[1].oid, &e->idx.oid);
if (!is_null_oid(&root->versions[1].oid))
root->versions[1].mode = S_IFDIR;
load_tree(root);
}
if (*p == '"') {
static struct strbuf uq = STRBUF_INIT;
const char *endp;
strbuf_reset(&uq);
if (unquote_c_style(&uq, p, &endp))
die("Invalid path: %s", command_buf.buf);
if (*endp)
die("Garbage after path in: %s", command_buf.buf);
p = uq.buf;
}
tree_content_get(root, p, &leaf, 1);
/*
* A directory in preparation would have a sha1 of zero
* until it is saved. Save, for simplicity.
*/
if (S_ISDIR(leaf.versions[1].mode))
store_tree(&leaf);
print_ls(leaf.versions[1].mode, leaf.versions[1].oid.hash, p);
if (leaf.tree)
release_tree_content_recursive(leaf.tree);
if (!b || root != &b->branch_tree)
release_tree_entry(root);
} | 0 | [] | git | 68061e3470210703cb15594194718d35094afdc0 | 325,168,897,981,341,100,000,000,000,000,000,000,000 | 42 | fast-import: disallow "feature export-marks" by default
The fast-import stream command "feature export-marks=<path>" lets the
stream write marks to an arbitrary path. This may be surprising if you
are running fast-import against an untrusted input (which otherwise
cannot do anything except update Git objects and refs).
Let's disallow the use of this feature by default, and provide a
command-line option to re-enable it (you can always just use the
command-line --export-marks as well, but the in-stream version provides
an easy way for exporters to control the process).
This is a backwards-incompatible change, since the default is flipping
to the new, safer behavior. However, since the main users of the
in-stream versions would be import/export-based remote helpers, and
since we trust remote helpers already (which are already running
arbitrary code), we'll pass the new option by default when reading a
remote helper's stream. This should minimize the impact.
Note that the implementation isn't totally simple, as we have to work
around the fact that fast-import doesn't parse its command-line options
until after it has read any "feature" lines from the stream. This is how
it lets command-line options override in-stream. But in our case, it's
important to parse the new --allow-unsafe-features first.
There are three options for resolving this:
1. Do a separate "early" pass over the options. This is easy for us to
do because there are no command-line options that allow the
"unstuck" form (so there's no chance of us mistaking an argument
for an option), though it does introduce a risk of incorrect
parsing later (e.g., if we convert to parse-options).
2. Move the option parsing phase back to the start of the program, but
teach the stream-reading code never to override an existing value.
This is tricky, because stream "feature" lines override each other
(meaning we'd have to start tracking the source for every option).
3. Accept that we might parse a "feature export-marks" line that is
forbidden, as long as we don't _act_ on it until after we've parsed
the command line options.
This would, in fact, work with the current code, but only because
the previous patch fixed the export-marks parser to avoid touching
the filesystem.
So while it works, it does carry risk of somebody getting it wrong
in the future in a rather subtle and unsafe way.
I've gone with option (1) here as simple, safe, and unlikely to cause
regressions.
This fixes CVE-2019-1348.
Signed-off-by: Jeff King <[email protected]> |
is_dir (GFile *file)
{
GFileInfo *info;
gboolean res;
res = FALSE;
info = g_file_query_info (file,
G_FILE_ATTRIBUTE_STANDARD_TYPE,
G_FILE_QUERY_INFO_NOFOLLOW_SYMLINKS,
NULL, NULL);
if (info) {
res = g_file_info_get_file_type (info) == G_FILE_TYPE_DIRECTORY;
g_object_unref (info);
}
return res;
} | 0 | [] | nautilus | ca2fd475297946f163c32dcea897f25da892b89d | 129,745,016,257,803,220,000,000,000,000,000,000,000 | 17 | Add nautilus_file_mark_desktop_file_trusted(), this now adds a #! line if
2009-02-24 Alexander Larsson <[email protected]>
* libnautilus-private/nautilus-file-operations.c:
* libnautilus-private/nautilus-file-operations.h:
Add nautilus_file_mark_desktop_file_trusted(), this now
adds a #! line if there is none as well as makes the file
executable.
* libnautilus-private/nautilus-mime-actions.c:
Use nautilus_file_mark_desktop_file_trusted() instead of
just setting the permissions.
svn path=/trunk/; revision=15006 |
static void ttwu_queue(struct task_struct *p, int cpu)
{
struct rq *rq = cpu_rq(cpu);
#if defined(CONFIG_SMP)
if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
sched_clock_cpu(cpu); /* sync clocks x-cpu */
ttwu_queue_remote(p, cpu);
return;
}
#endif
raw_spin_lock(&rq->lock);
ttwu_do_activate(rq, p, 0);
raw_spin_unlock(&rq->lock);
} | 0 | [
"CWE-200"
] | linux | 4efbc454ba68def5ef285b26ebfcfdb605b52755 | 235,567,703,952,132,850,000,000,000,000,000,000,000 | 16 | sched: Fix information leak in sys_sched_getattr()
We're copying the on-stack structure to userspace, but forgot to give
the right number of bytes to copy. This allows the calling process to
obtain up to PAGE_SIZE bytes from the stack (and possibly adjacent
kernel memory).
This fix copies only as much as we actually have on the stack
(attr->size defaults to the size of the struct) and leaves the rest of
the userspace-provided buffer untouched.
Found using kmemcheck + trinity.
Fixes: d50dde5a10f30 ("sched: Add new scheduler syscalls to support an extended scheduling parameters ABI")
Cc: Dario Faggioli <[email protected]>
Cc: Juri Lelli <[email protected]>
Cc: Ingo Molnar <[email protected]>
Signed-off-by: Vegard Nossum <[email protected]>
Signed-off-by: Peter Zijlstra <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Thomas Gleixner <[email protected]> |
int ipt_register_table(struct xt_table *table, const struct ipt_replace *repl)
{
int ret;
struct xt_table_info *newinfo;
static struct xt_table_info bootstrap
= { 0, 0, 0, { 0 }, { 0 }, { } };
void *loc_cpu_entry;
newinfo = xt_alloc_table_info(repl->size);
if (!newinfo)
return -ENOMEM;
/* choose the copy on our node/cpu
* but dont care of preemption
*/
loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
memcpy(loc_cpu_entry, repl->entries, repl->size);
ret = translate_table(table->name, table->valid_hooks,
newinfo, loc_cpu_entry, repl->size,
repl->num_entries,
repl->hook_entry,
repl->underflow);
if (ret != 0) {
xt_free_table_info(newinfo);
return ret;
}
ret = xt_register_table(table, &bootstrap, newinfo);
if (ret != 0) {
xt_free_table_info(newinfo);
return ret;
}
return 0;
} | 0 | [
"CWE-787"
] | linux | 9fa492cdc160cd27ce1046cb36f47d3b2b1efa21 | 228,976,336,196,142,400,000,000,000,000,000,000,000 | 36 | [NETFILTER]: x_tables: simplify compat API
Split the xt_compat_match/xt_compat_target into smaller type-safe functions
performing just one operation. Handle all alignment and size-related
conversions centrally in these function instead of requiring each module to
implement a full-blown conversion function. Replace ->compat callback by
->compat_from_user and ->compat_to_user callbacks, responsible for
converting just a single private structure.
Signed-off-by: Patrick McHardy <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
struct net_device *dev)
{
int ret;
struct scatterlist *sg;
struct sk_buff *trailer;
unsigned char *iv;
struct ethhdr *eth;
struct macsec_eth_header *hh;
size_t unprotected_len;
struct aead_request *req;
struct macsec_secy *secy;
struct macsec_tx_sc *tx_sc;
struct macsec_tx_sa *tx_sa;
struct macsec_dev *macsec = macsec_priv(dev);
bool sci_present;
u32 pn;
secy = &macsec->secy;
tx_sc = &secy->tx_sc;
/* 10.5.1 TX SA assignment */
tx_sa = macsec_txsa_get(tx_sc->sa[tx_sc->encoding_sa]);
if (!tx_sa) {
secy->operational = false;
kfree_skb(skb);
return ERR_PTR(-EINVAL);
}
if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) {
struct sk_buff *nskb = skb_copy_expand(skb,
MACSEC_NEEDED_HEADROOM,
MACSEC_NEEDED_TAILROOM,
GFP_ATOMIC);
if (likely(nskb)) {
consume_skb(skb);
skb = nskb;
} else {
macsec_txsa_put(tx_sa);
kfree_skb(skb);
return ERR_PTR(-ENOMEM);
}
} else {
skb = skb_unshare(skb, GFP_ATOMIC);
if (!skb) {
macsec_txsa_put(tx_sa);
return ERR_PTR(-ENOMEM);
}
}
unprotected_len = skb->len;
eth = eth_hdr(skb);
sci_present = send_sci(secy);
hh = (struct macsec_eth_header *)skb_push(skb, macsec_extra_len(sci_present));
memmove(hh, eth, 2 * ETH_ALEN);
pn = tx_sa_update_pn(tx_sa, secy);
if (pn == 0) {
macsec_txsa_put(tx_sa);
kfree_skb(skb);
return ERR_PTR(-ENOLINK);
}
macsec_fill_sectag(hh, secy, pn, sci_present);
macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);
skb_put(skb, secy->icv_len);
if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.OutPktsTooLong++;
u64_stats_update_end(&secy_stats->syncp);
macsec_txsa_put(tx_sa);
kfree_skb(skb);
return ERR_PTR(-EINVAL);
}
ret = skb_cow_data(skb, 0, &trailer);
if (unlikely(ret < 0)) {
macsec_txsa_put(tx_sa);
kfree_skb(skb);
return ERR_PTR(ret);
}
req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg, ret);
if (!req) {
macsec_txsa_put(tx_sa);
kfree_skb(skb);
return ERR_PTR(-ENOMEM);
}
macsec_fill_iv(iv, secy->sci, pn);
sg_init_table(sg, ret);
skb_to_sgvec(skb, sg, 0, skb->len);
if (tx_sc->encrypt) {
int len = skb->len - macsec_hdr_len(sci_present) -
secy->icv_len;
aead_request_set_crypt(req, sg, sg, len, iv);
aead_request_set_ad(req, macsec_hdr_len(sci_present));
} else {
aead_request_set_crypt(req, sg, sg, 0, iv);
aead_request_set_ad(req, skb->len - secy->icv_len);
}
macsec_skb_cb(skb)->req = req;
macsec_skb_cb(skb)->tx_sa = tx_sa;
aead_request_set_callback(req, 0, macsec_encrypt_done, skb);
dev_hold(skb->dev);
ret = crypto_aead_encrypt(req);
if (ret == -EINPROGRESS) {
return ERR_PTR(ret);
} else if (ret != 0) {
dev_put(skb->dev);
kfree_skb(skb);
aead_request_free(req);
macsec_txsa_put(tx_sa);
return ERR_PTR(-EINVAL);
}
dev_put(skb->dev);
aead_request_free(req);
macsec_txsa_put(tx_sa);
return skb;
} | 0 | [
"CWE-119"
] | net | 5294b83086cc1c35b4efeca03644cf9d12282e5b | 174,953,247,299,083,950,000,000,000,000,000,000,000 | 131 | macsec: dynamically allocate space for sglist
We call skb_cow_data, which is good anyway to ensure we can actually
modify the skb as such (another error from prior). Now that we have the
number of fragments required, we can safely allocate exactly that amount
of memory.
Fixes: c09440f7dcb3 ("macsec: introduce IEEE 802.1AE driver")
Signed-off-by: Jason A. Donenfeld <[email protected]>
Acked-by: Sabrina Dubroca <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
clientAclChecklistFill(ACLFilledChecklist &checklist, ClientHttpRequest *http)
{
checklist.setRequest(http->request);
checklist.al = http->al;
checklist.syncAle(http->request, http->log_uri);
// TODO: If http->getConn is always http->request->clientConnectionManager,
// then call setIdent() inside checklist.setRequest(). Otherwise, restore
// USE_IDENT lost in commit 94439e4.
ConnStateData * conn = http->getConn();
const char *ident = (cbdataReferenceValid(conn) &&
conn && conn->clientConnection) ?
conn->clientConnection->rfc931 : dash_str;
checklist.setIdent(ident);
} | 0 | [
"CWE-116"
] | squid | 7024fb734a59409889e53df2257b3fc817809fb4 | 164,450,449,916,613,790,000,000,000,000,000,000,000 | 15 | Handle more Range requests (#790)
Also removed some effectively unused code. |
has_colors(void)
{
return NCURSES_SP_NAME(has_colors) (CURRENT_SCREEN);
} | 0 | [] | ncurses | 790a85dbd4a81d5f5d8dd02a44d84f01512ef443 | 183,746,520,353,691,600,000,000,000,000,000,000,000 | 4 | ncurses 6.2 - patch 20200531
+ correct configure version-check/warning for g++ to allow for 10.x
+ re-enable "bel" in konsole-base (report by Nia Huang)
+ add linux-s entry (patch by Alexandre Montaron).
+ drop long-obsolete convert_configure.pl
+ add test/test_parm.c, for checking tparm changes.
+ improve parameter-checking for tparm, adding function _nc_tiparm() to
handle the most-used case, which accepts only numeric parameters
(report/testcase by "puppet-meteor").
+ use a more conservative estimate of the buffer-size in lib_tparm.c's
save_text() and save_number(), in case the sprintf() function
passes-through unexpected characters from a format specifier
(report/testcase by "puppet-meteor").
+ add a check for end-of-string in cvtchar to handle a malformed
string in infotocap (report/testcase by "puppet-meteor"). |
ruby_scan_oct(const char *start, size_t len, size_t *retlen)
{
register const char *s = start;
register unsigned long retval = 0;
while (len-- && *s >= '0' && *s <= '7') {
retval <<= 3;
retval |= *s++ - '0';
}
*retlen = (int)(s - start); /* less than len */
return retval;
} | 0 | [
"CWE-119"
] | ruby | 5cb83d9dab13e14e6146f455ffd9fed4254d238f | 28,810,915,932,266,530,000,000,000,000,000,000,000 | 12 | util.c: ignore too long fraction part
* util.c (ruby_strtod): ignore too long fraction part, which does not
affect the result.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@43775 b2dd03c8-39d4-4d8f-98ff-823fe69b080e |
DLLEXPORT tjhandle DLLCALL tjInitCompress(void)
{
tjinstance *this=NULL;
if((this=(tjinstance *)malloc(sizeof(tjinstance)))==NULL)
{
snprintf(errStr, JMSG_LENGTH_MAX,
"tjInitCompress(): Memory allocation failure");
return NULL;
}
MEMZERO(this, sizeof(tjinstance));
return _tjInitCompress(this);
} | 0 | [] | libjpeg-turbo | dab6be4cfb2f9307b5378d2d1dc74d9080383dc2 | 237,638,967,173,456,400,000,000,000,000,000,000,000 | 12 | tjDecompressToYUV*(): Fix OOB write/double free
... when attempting to decompress grayscale JPEG images with sampling
factors != 1.
Fixes #387 |
ciEnv::ciEnv(Arena* arena) : _ciEnv_arena(mtCompiler) {
ASSERT_IN_VM;
// Set up ciEnv::current immediately, for the sake of ciObjectFactory, etc.
CompilerThread* current_thread = CompilerThread::current();
assert(current_thread->env() == NULL, "must be");
current_thread->set_env(this);
assert(ciEnv::current() == this, "sanity");
_oop_recorder = NULL;
_debug_info = NULL;
_dependencies = NULL;
_failure_reason = NULL;
_inc_decompile_count_on_failure = true;
_compilable = MethodCompilable_never;
_break_at_compile = false;
_compiler_data = NULL;
#ifndef PRODUCT
assert(firstEnv, "must be first");
firstEnv = false;
#endif /* !PRODUCT */
_num_inlined_bytecodes = 0;
_task = NULL;
_log = NULL;
// Temporary buffer for creating symbols and such.
_name_buffer = NULL;
_name_buffer_len = 0;
_arena = arena;
_factory = new (_arena) ciObjectFactory(_arena, 128);
// Preload commonly referenced system ciObjects.
// During VM initialization, these instances have not yet been created.
// Assertions ensure that these instances are not accessed before
// their initialization.
assert(Universe::is_fully_initialized(), "must be");
_NullPointerException_instance = NULL;
_ArithmeticException_instance = NULL;
_ArrayIndexOutOfBoundsException_instance = NULL;
_ArrayStoreException_instance = NULL;
_ClassCastException_instance = NULL;
_the_null_string = NULL;
_the_min_jint_string = NULL;
_jvmti_can_hotswap_or_post_breakpoint = false;
_jvmti_can_access_local_variables = false;
_jvmti_can_post_on_exceptions = false;
_jvmti_can_pop_frame = false;
} | 0 | [] | jdk11u | 6c0ba0785a2f0900be301f72764cf4dcfa720991 | 169,636,337,806,843,900,000,000,000,000,000,000,000 | 54 | 8281859: Improve class compilation
Reviewed-by: mbaesken
Backport-of: 3ac62a66efd05d0842076dd4cfbea0e53b12630f |
static int ZEND_FASTCALL ZEND_USER_OPCODE_SPEC_HANDLER(ZEND_OPCODE_HANDLER_ARGS)
{
int ret = zend_user_opcode_handlers[EX(opline)->opcode](ZEND_OPCODE_HANDLER_ARGS_PASSTHRU_INTERNAL);
switch (ret) {
case ZEND_USER_OPCODE_CONTINUE:
ZEND_VM_CONTINUE();
case ZEND_USER_OPCODE_RETURN:
return zend_leave_helper_SPEC(ZEND_OPCODE_HANDLER_ARGS_PASSTHRU);
case ZEND_USER_OPCODE_ENTER:
ZEND_VM_ENTER();
case ZEND_USER_OPCODE_LEAVE:
ZEND_VM_LEAVE();
case ZEND_USER_OPCODE_DISPATCH:
ZEND_VM_DISPATCH(EX(opline)->opcode, EX(opline));
default:
ZEND_VM_DISPATCH((zend_uchar)(ret & 0xff), EX(opline));
}
} | 0 | [] | php-src | ce96fd6b0761d98353761bf78d5bfb55291179fd | 111,557,277,700,534,350,000,000,000,000,000,000,000 | 19 | - fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191, trunk will have the patch later (adding a macro and/or changing (some) APIs. Patch by Rasmus |
static int pushDownWhereTerms(
Parse *pParse, /* Parse context (for malloc() and error reporting) */
Select *pSubq, /* The subquery whose WHERE clause is to be augmented */
Expr *pWhere, /* The WHERE clause of the outer query */
int iCursor, /* Cursor number of the subquery */
int isLeftJoin /* True if pSubq is the right term of a LEFT JOIN */
){
Expr *pNew;
int nChng = 0;
if( pWhere==0 ) return 0;
if( pSubq->selFlags & SF_Recursive ) return 0; /* restriction (2) */
#ifndef SQLITE_OMIT_WINDOWFUNC
if( pSubq->pWin ) return 0; /* restriction (6) */
#endif
#ifdef SQLITE_DEBUG
/* Only the first term of a compound can have a WITH clause. But make
** sure no other terms are marked SF_Recursive in case something changes
** in the future.
*/
{
Select *pX;
for(pX=pSubq; pX; pX=pX->pPrior){
assert( (pX->selFlags & (SF_Recursive))==0 );
}
}
#endif
if( pSubq->pLimit!=0 ){
return 0; /* restriction (3) */
}
while( pWhere->op==TK_AND ){
nChng += pushDownWhereTerms(pParse, pSubq, pWhere->pRight,
iCursor, isLeftJoin);
pWhere = pWhere->pLeft;
}
if( isLeftJoin
&& (ExprHasProperty(pWhere,EP_FromJoin)==0
|| pWhere->iRightJoinTable!=iCursor)
){
return 0; /* restriction (4) */
}
if( ExprHasProperty(pWhere,EP_FromJoin) && pWhere->iRightJoinTable!=iCursor ){
return 0; /* restriction (5) */
}
if( sqlite3ExprIsTableConstant(pWhere, iCursor) ){
nChng++;
while( pSubq ){
SubstContext x;
pNew = sqlite3ExprDup(pParse->db, pWhere, 0);
unsetJoinExpr(pNew, -1);
x.pParse = pParse;
x.iTable = iCursor;
x.iNewTable = iCursor;
x.isLeftJoin = 0;
x.pEList = pSubq->pEList;
pNew = substExpr(&x, pNew);
if( pSubq->selFlags & SF_Aggregate ){
pSubq->pHaving = sqlite3ExprAnd(pParse, pSubq->pHaving, pNew);
}else{
pSubq->pWhere = sqlite3ExprAnd(pParse, pSubq->pWhere, pNew);
}
pSubq = pSubq->pPrior;
}
}
return nChng;
} | 0 | [
"CWE-20"
] | sqlite | e59c562b3f6894f84c715772c4b116d7b5c01348 | 12,304,438,323,537,223,000,000,000,000,000,000,000 | 68 | Fix a crash that could occur if a sub-select that uses both DISTINCT and window functions also used an ORDER BY that is the same as its select list.
FossilOrigin-Name: bcdd66c1691955c697f3d756c2b035acfe98f6aad72e90b0021bab6e9023b3ba |
static void *swiotlb_mem_remap(struct io_tlb_mem *mem, unsigned long bytes)
{
void *vaddr = NULL;
if (swiotlb_unencrypted_base) {
phys_addr_t paddr = mem->start + swiotlb_unencrypted_base;
vaddr = memremap(paddr, bytes, MEMREMAP_WB);
if (!vaddr)
pr_err("Failed to map the unencrypted memory %pa size %lx.\n",
&paddr, bytes);
}
return vaddr;
} | 0 | [
"CWE-200",
"CWE-401"
] | linux | aa6f8dcbab473f3a3c7454b74caa46d36cdc5d13 | 11,146,444,142,836,318,000,000,000,000,000,000,000 | 15 | swiotlb: rework "fix info leak with DMA_FROM_DEVICE"
Unfortunately, we ended up merging an old version of the patch "fix info
leak with DMA_FROM_DEVICE" instead of merging the latest one. Christoph
(the swiotlb maintainer), he asked me to create an incremental fix
(after I have pointed this out the mix up, and asked him for guidance).
So here we go.
The main differences between what we got and what was agreed are:
* swiotlb_sync_single_for_device is also required to do an extra bounce
* We decided not to introduce DMA_ATTR_OVERWRITE until we have exploiters
* The implementation of DMA_ATTR_OVERWRITE is flawed: DMA_ATTR_OVERWRITE
must take precedence over DMA_ATTR_SKIP_CPU_SYNC
Thus this patch removes DMA_ATTR_OVERWRITE, and makes
swiotlb_sync_single_for_device() bounce unconditionally (that is, also
when dir == DMA_TO_DEVICE) in order to avoid synchronising back stale
data from the swiotlb buffer.
Let me note, that if the size used with dma_sync_* API is less than the
size used with dma_[un]map_*, under certain circumstances we may still
end up with swiotlb not being transparent. In that sense, this is no
perfect fix either.
To get this bullet proof, we would have to bounce the entire
mapping/bounce buffer. For that we would have to figure out the starting
address, and the size of the mapping in
swiotlb_sync_single_for_device(). While this does seem possible, there
seems to be no firm consensus on how things are supposed to work.
Signed-off-by: Halil Pasic <[email protected]>
Fixes: ddbd89deb7d3 ("swiotlb: fix info leak with DMA_FROM_DEVICE")
Cc: [email protected]
Reviewed-by: Christoph Hellwig <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
void mct_log(const char *format, ...)
{
va_list args;
va_start(args, format);
vprintf(format, args);
va_end(args);
if (mct_log_file)
{
va_list args;
va_start(args, format);
vfprintf(mct_log_file, format, args);
va_end(args);
}
} | 0 | [
"CWE-284",
"CWE-295"
] | mysql-server | 3bd5589e1a5a93f9c224badf983cd65c45215390 | 60,330,355,966,561,830,000,000,000,000,000,000,000 | 15 | WL#6791 : Redefine client --ssl option to imply enforced encryption
# Changed the meaning of the --ssl=1 option of all client binaries
to mean force ssl, not try ssl and fail over to unencrypted
# Added a new MYSQL_OPT_SSL_ENFORCE mysql_options()
option to specify that an ssl connection is required.
# Added a new macro SSL_SET_OPTIONS() to the client
SSL handling headers that sets all the relevant SSL options at
once.
# Revamped all of the current native clients to use the new macro
# Removed some Windows line endings.
# Added proper handling of the new option into the ssl helper
headers.
# If SSL is mandatory assume that the media is secure enough
for the sha256 plugin to do unencrypted password exchange even
before establishing a connection.
# Set the default ssl cipher to DHE-RSA-AES256-SHA if none is
specified.
# updated test cases that require a non-default cipher to spawn
a mysql command line tool binary since mysqltest has no support
for specifying ciphers.
# updated the replication slave connection code to always enforce
SSL if any of the SSL config options is present.
# test cases added and updated.
# added a mysql_get_option() API to return mysql_options()
values. Used the new API inside the sha256 plugin.
# Fixed compilation warnings because of unused variables.
# Fixed test failures (mysql_ssl and bug13115401)
# Fixed whitespace issues.
# Fully implemented the mysql_get_option() function.
# Added a test case for mysql_get_option()
# fixed some trailing whitespace issues
# fixed some uint/int warnings in mysql_client_test.c
# removed shared memory option from non-windows get_options
tests
# moved MYSQL_OPT_LOCAL_INFILE to the uint options |
int unregister_netdevice_notifier(struct notifier_block *nb)
{
int err;
rtnl_lock();
err = raw_notifier_chain_unregister(&netdev_chain, nb);
rtnl_unlock();
return err;
} | 0 | [
"CWE-399"
] | linux | 6ec82562ffc6f297d0de36d65776cff8e5704867 | 255,937,740,001,637,680,000,000,000,000,000,000,000 | 9 | veth: Dont kfree_skb() after dev_forward_skb()
In case of congestion, netif_rx() frees the skb, so we must assume
dev_forward_skb() also consumes the skb.
Bug introduced by commit 445409602c092
(veth: move loopback logic to common location)
We must change dev_forward_skb() to always consume skb, and veth to not
double free it.
Bug report : http://marc.info/?l=linux-netdev&m=127310770900442&w=3
Reported-by: Martín Ferrari <[email protected]>
Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
static int nc_email(ASN1_IA5STRING *eml, ASN1_IA5STRING *base)
{
const char *baseptr = (char *)base->data;
const char *emlptr = (char *)eml->data;
const char *baseat = ia5memrchr(base, '@');
const char *emlat = ia5memrchr(eml, '@');
size_t basehostlen, emlhostlen;
if (!emlat)
return X509_V_ERR_UNSUPPORTED_NAME_SYNTAX;
/* Special case: initial '.' is RHS match */
if (!baseat && base->length > 0 && (*baseptr == '.')) {
if (eml->length > base->length) {
emlptr += eml->length - base->length;
if (ia5ncasecmp(baseptr, emlptr, base->length) == 0)
return X509_V_OK;
}
return X509_V_ERR_PERMITTED_VIOLATION;
}
/* If we have anything before '@' match local part */
if (baseat) {
if (baseat != baseptr) {
if ((baseat - baseptr) != (emlat - emlptr))
return X509_V_ERR_PERMITTED_VIOLATION;
/* Case sensitive match of local part */
if (strncmp(baseptr, emlptr, emlat - emlptr))
return X509_V_ERR_PERMITTED_VIOLATION;
}
/* Position base after '@' */
baseptr = baseat + 1;
}
emlptr = emlat + 1;
basehostlen = IA5_OFFSET_LEN(base, baseptr);
emlhostlen = IA5_OFFSET_LEN(eml, emlptr);
/* Just have hostname left to match: case insensitive */
if (basehostlen != emlhostlen || ia5ncasecmp(baseptr, emlptr, emlhostlen))
return X509_V_ERR_PERMITTED_VIOLATION;
return X509_V_OK;
} | 0 | [
"CWE-125"
] | openssl | 8393de42498f8be75cf0353f5c9f906a43a748d2 | 158,896,649,841,590,640,000,000,000,000,000,000,000 | 43 | Fix the name constraints code to not assume NUL terminated strings
ASN.1 strings may not be NUL terminated. Don't assume they are.
CVE-2021-3712
Reviewed-by: Viktor Dukhovni <[email protected]>
Reviewed-by: Paul Dale <[email protected]> |
static double mp_image_sort(_cimg_math_parser& mp) {
const bool is_increasing = (bool)_mp_arg(3);
const unsigned int
ind = (unsigned int)cimg::mod((int)_mp_arg(2),mp.listout.width()),
axis = (unsigned int)_mp_arg(4);
cimg::mutex(6);
CImg<T> &img = mp.listout[ind];
img.sort(is_increasing,
axis==0 || axis=='x'?'x':
axis==1 || axis=='y'?'y':
axis==2 || axis=='z'?'z':
axis==3 || axis=='c'?'c':0);
cimg::mutex(6,0);
return cimg::type<double>::nan(); | 0 | [
"CWE-125"
] | CImg | 10af1e8c1ad2a58a0a3342a856bae63e8f257abb | 4,721,675,273,115,603,300,000,000,000,000,000,000 | 15 | Fix other issues in 'CImg<T>::load_bmp()'. |
static float fMAX(float a, float b) { return MAX(a, b); } | 0 | [
"CWE-476",
"CWE-119"
] | LibRaw | d7c3d2cb460be10a3ea7b32e9443a83c243b2251 | 208,819,072,376,089,940,000,000,000,000,000,000,000 | 1 | Secunia SA75000 advisory: several buffer overruns |
void xclaimCommand(client *c) {
streamCG *group = NULL;
robj *o = lookupKeyRead(c->db,c->argv[1]);
long long minidle; /* Minimum idle time argument. */
long long retrycount = -1; /* -1 means RETRYCOUNT option not given. */
mstime_t deliverytime = -1; /* -1 means IDLE/TIME options not given. */
int force = 0;
int justid = 0;
if (o) {
if (checkType(c,o,OBJ_STREAM)) return; /* Type error. */
group = streamLookupCG(o->ptr,c->argv[2]->ptr);
}
/* No key or group? Send an error given that the group creation
* is mandatory. */
if (o == NULL || group == NULL) {
addReplyErrorFormat(c,"-NOGROUP No such key '%s' or "
"consumer group '%s'", (char*)c->argv[1]->ptr,
(char*)c->argv[2]->ptr);
return;
}
if (getLongLongFromObjectOrReply(c,c->argv[4],&minidle,
"Invalid min-idle-time argument for XCLAIM")
!= C_OK) return;
if (minidle < 0) minidle = 0;
/* Start parsing the IDs, so that we abort ASAP if there is a syntax
* error: the return value of this command cannot be an error in case
* the client successfully claimed some message, so it should be
* executed in a "all or nothing" fashion. */
int j;
for (j = 4; j < c->argc; j++) {
streamID id;
if (streamParseIDOrReply(NULL,c->argv[j],&id,0) != C_OK) break;
}
int last_id_arg = j-1; /* Next time we iterate the IDs we now the range. */
/* If we stopped because some IDs cannot be parsed, perhaps they
* are trailing options. */
time_t now = mstime();
for (; j < c->argc; j++) {
int moreargs = (c->argc-1) - j; /* Number of additional arguments. */
char *opt = c->argv[j]->ptr;
if (!strcasecmp(opt,"FORCE")) {
force = 1;
} else if (!strcasecmp(opt,"JUSTID")) {
justid = 1;
} else if (!strcasecmp(opt,"IDLE") && moreargs) {
j++;
if (getLongLongFromObjectOrReply(c,c->argv[j],&deliverytime,
"Invalid IDLE option argument for XCLAIM")
!= C_OK) return;
deliverytime = now - deliverytime;
} else if (!strcasecmp(opt,"TIME") && moreargs) {
j++;
if (getLongLongFromObjectOrReply(c,c->argv[j],&deliverytime,
"Invalid IDLE option argument for XCLAIM")
!= C_OK) return;
} else if (!strcasecmp(opt,"RETRYCOUNT") && moreargs) {
j++;
if (getLongLongFromObjectOrReply(c,c->argv[j],&retrycount,
"Invalid IDLE option argument for XCLAIM")
!= C_OK) return;
} else {
addReplyErrorFormat(c,"Unrecognized XCLAIM option '%s'",opt);
return;
}
}
if (deliverytime != -1) {
/* If a delivery time was passed, either with IDLE or TIME, we
* do some sanity check on it, and set the deliverytime to now
* (which is a sane choice usually) if the value is bogus.
* To raise an error here is not wise because clients may compute
* the idle time doing some math startin from their local time,
* and this is not a good excuse to fail in case, for instance,
* the computed time is a bit in the future from our POV. */
if (deliverytime < 0 || deliverytime > now) deliverytime = now;
} else {
/* If no IDLE/TIME option was passed, we want the last delivery
* time to be now, so that the idle time of the message will be
* zero. */
deliverytime = now;
}
/* Do the actual claiming. */
streamConsumer *consumer = streamLookupConsumer(group,c->argv[3]->ptr,1);
void *arraylenptr = addDeferredMultiBulkLength(c);
size_t arraylen = 0;
for (int j = 5; j <= last_id_arg; j++) {
streamID id;
unsigned char buf[sizeof(streamID)];
if (streamParseIDOrReply(c,c->argv[j],&id,0) != C_OK) return;
streamEncodeID(buf,&id);
/* Lookup the ID in the group PEL. */
streamNACK *nack = raxFind(group->pel,buf,sizeof(buf));
/* If FORCE is passed, let's check if at least the entry
* exists in the Stream. In such case, we'll crate a new
* entry in the PEL from scratch, so that XCLAIM can also
* be used to create entries in the PEL. Useful for AOF
* and replication of consumer groups. */
if (force && nack == raxNotFound) {
streamIterator myiterator;
streamIteratorStart(&myiterator,o->ptr,&id,&id,0);
int64_t numfields;
int found = 0;
streamID item_id;
if (streamIteratorGetID(&myiterator,&item_id,&numfields)) found = 1;
streamIteratorStop(&myiterator);
/* Item must exist for us to create a NACK for it. */
if (!found) continue;
/* Create the NACK. */
nack = streamCreateNACK(NULL);
raxInsert(group->pel,buf,sizeof(buf),nack,NULL);
}
if (nack != raxNotFound) {
/* We need to check if the minimum idle time requested
* by the caller is satisfied by this entry. */
if (minidle) {
mstime_t this_idle = now - nack->delivery_time;
if (this_idle < minidle) continue;
}
/* Remove the entry from the old consumer.
* Note that nack->consumer is NULL if we created the
* NACK above because of the FORCE option. */
if (nack->consumer)
raxRemove(nack->consumer->pel,buf,sizeof(buf),NULL);
/* Update the consumer and idle time. */
nack->consumer = consumer;
nack->delivery_time = deliverytime;
/* Set the delivery attempts counter if given. */
if (retrycount >= 0) nack->delivery_count = retrycount;
/* Add the entry in the new consumer local PEL. */
raxInsert(consumer->pel,buf,sizeof(buf),nack,NULL);
/* Send the reply for this entry. */
if (justid) {
addReplyStreamID(c,&id);
} else {
streamReplyWithRange(c,o->ptr,&id,NULL,1,0,NULL,NULL,
STREAM_RWR_RAWENTRIES,NULL);
}
arraylen++;
/* Propagate this change. */
streamPropagateXCLAIM(c,c->argv[1],c->argv[3],c->argv[j],nack);
server.dirty++;
}
}
setDeferredMultiBulkLength(c,arraylenptr,arraylen);
preventCommandPropagation(c);
} | 0 | [
"CWE-125",
"CWE-704"
] | redis | c04082cf138f1f51cedf05ee9ad36fb6763cafc6 | 163,139,353,578,830,960,000,000,000,000,000,000,000 | 158 | Abort in XGROUP if the key is not a stream |
qtdemux_parse_trak (GstQTDemux * qtdemux, GNode * trak)
{
GstByteReader tkhd;
int offset;
GNode *mdia;
GNode *mdhd;
GNode *hdlr;
GNode *minf;
GNode *stbl;
GNode *stsd;
GNode *mp4a;
GNode *mp4v;
GNode *wave;
GNode *esds;
GNode *pasp;
GNode *colr;
GNode *tref;
GNode *udta;
GNode *svmi;
GNode *fiel;
QtDemuxStream *stream = NULL;
gboolean new_stream = FALSE;
gchar *codec = NULL;
const guint8 *stsd_data;
guint16 lang_code; /* quicktime lang code or packed iso code */
guint32 version;
guint32 tkhd_flags = 0;
guint8 tkhd_version = 0;
guint32 fourcc;
guint value_size, stsd_len, len;
guint32 track_id;
guint32 dummy;
GST_DEBUG_OBJECT (qtdemux, "parse_trak");
if (!qtdemux_tree_get_child_by_type_full (trak, FOURCC_tkhd, &tkhd)
|| !gst_byte_reader_get_uint8 (&tkhd, &tkhd_version)
|| !gst_byte_reader_get_uint24_be (&tkhd, &tkhd_flags))
goto corrupt_file;
/* pick between 64 or 32 bits */
value_size = tkhd_version == 1 ? 8 : 4;
if (!gst_byte_reader_skip (&tkhd, value_size * 2) ||
!gst_byte_reader_get_uint32_be (&tkhd, &track_id))
goto corrupt_file;
if (!qtdemux->got_moov) {
if (qtdemux_find_stream (qtdemux, track_id))
goto existing_stream;
stream = _create_stream ();
stream->track_id = track_id;
new_stream = TRUE;
} else {
stream = qtdemux_find_stream (qtdemux, track_id);
if (!stream) {
GST_WARNING_OBJECT (qtdemux, "Stream not found, going to ignore it");
goto skip_track;
}
/* flush samples data from this track from previous moov */
gst_qtdemux_stream_flush_segments_data (qtdemux, stream);
gst_qtdemux_stream_flush_samples_data (qtdemux, stream);
}
/* need defaults for fragments */
qtdemux_parse_trex (qtdemux, stream, &dummy, &dummy, &dummy);
if (stream->pending_tags == NULL)
stream->pending_tags = gst_tag_list_new_empty ();
if ((tkhd_flags & 1) == 0)
stream->disabled = TRUE;
GST_LOG_OBJECT (qtdemux, "track[tkhd] version/flags/id: 0x%02x/%06x/%u",
tkhd_version, tkhd_flags, stream->track_id);
if (!(mdia = qtdemux_tree_get_child_by_type (trak, FOURCC_mdia)))
goto corrupt_file;
if (!(mdhd = qtdemux_tree_get_child_by_type (mdia, FOURCC_mdhd))) {
/* be nice for some crooked mjp2 files that use mhdr for mdhd */
if (qtdemux->major_brand != FOURCC_mjp2 ||
!(mdhd = qtdemux_tree_get_child_by_type (mdia, FOURCC_mhdr)))
goto corrupt_file;
}
len = QT_UINT32 ((guint8 *) mdhd->data);
version = QT_UINT32 ((guint8 *) mdhd->data + 8);
GST_LOG_OBJECT (qtdemux, "track version/flags: %08x", version);
if (version == 0x01000000) {
if (len < 38)
goto corrupt_file;
stream->timescale = QT_UINT32 ((guint8 *) mdhd->data + 28);
stream->duration = QT_UINT64 ((guint8 *) mdhd->data + 32);
lang_code = QT_UINT16 ((guint8 *) mdhd->data + 36);
} else {
if (len < 30)
goto corrupt_file;
stream->timescale = QT_UINT32 ((guint8 *) mdhd->data + 20);
stream->duration = QT_UINT32 ((guint8 *) mdhd->data + 24);
lang_code = QT_UINT16 ((guint8 *) mdhd->data + 28);
}
if (lang_code < 0x400) {
qtdemux_lang_map_qt_code_to_iso (stream->lang_id, lang_code);
} else if (lang_code == 0x7fff) {
stream->lang_id[0] = 0; /* unspecified */
} else {
stream->lang_id[0] = 0x60 + ((lang_code >> 10) & 0x1F);
stream->lang_id[1] = 0x60 + ((lang_code >> 5) & 0x1F);
stream->lang_id[2] = 0x60 + (lang_code & 0x1F);
stream->lang_id[3] = 0;
}
GST_LOG_OBJECT (qtdemux, "track timescale: %" G_GUINT32_FORMAT,
stream->timescale);
GST_LOG_OBJECT (qtdemux, "track duration: %" G_GUINT64_FORMAT,
stream->duration);
GST_LOG_OBJECT (qtdemux, "track language code/id: 0x%04x/%s",
lang_code, stream->lang_id);
if (G_UNLIKELY (stream->timescale == 0 || qtdemux->timescale == 0))
goto corrupt_file;
if ((tref = qtdemux_tree_get_child_by_type (trak, FOURCC_tref))) {
/* chapters track reference */
GNode *chap = qtdemux_tree_get_child_by_type (tref, FOURCC_chap);
if (chap) {
gsize length = GST_READ_UINT32_BE (chap->data);
if (qtdemux->chapters_track_id)
GST_FIXME_OBJECT (qtdemux, "Multiple CHAP tracks");
if (length >= 12) {
qtdemux->chapters_track_id =
GST_READ_UINT32_BE ((gint8 *) chap->data + 8);
}
}
}
/* fragmented files may have bogus duration in moov */
if (!qtdemux->fragmented &&
qtdemux->duration != G_MAXINT64 && stream->duration != G_MAXINT32) {
guint64 tdur1, tdur2;
/* don't overflow */
tdur1 = stream->timescale * (guint64) qtdemux->duration;
tdur2 = qtdemux->timescale * (guint64) stream->duration;
/* HACK:
* some of those trailers, nowadays, have prologue images that are
* themselves video tracks as well. I haven't really found a way to
* identify those yet, except for just looking at their duration. */
if (tdur1 != 0 && (tdur2 * 10 / tdur1) < 2) {
GST_WARNING_OBJECT (qtdemux,
"Track shorter than 20%% (%" G_GUINT64_FORMAT "/%" G_GUINT32_FORMAT
" vs. %" G_GUINT64_FORMAT "/%" G_GUINT32_FORMAT ") of the stream "
"found, assuming preview image or something; skipping track",
stream->duration, stream->timescale, qtdemux->duration,
qtdemux->timescale);
if (new_stream)
gst_qtdemux_stream_free (qtdemux, stream);
return TRUE;
}
}
if (!(hdlr = qtdemux_tree_get_child_by_type (mdia, FOURCC_hdlr)))
goto corrupt_file;
GST_LOG_OBJECT (qtdemux, "track type: %" GST_FOURCC_FORMAT,
GST_FOURCC_ARGS (QT_FOURCC ((guint8 *) hdlr->data + 12)));
len = QT_UINT32 ((guint8 *) hdlr->data);
if (len >= 20)
stream->subtype = QT_FOURCC ((guint8 *) hdlr->data + 16);
GST_LOG_OBJECT (qtdemux, "track subtype: %" GST_FOURCC_FORMAT,
GST_FOURCC_ARGS (stream->subtype));
if (!(minf = qtdemux_tree_get_child_by_type (mdia, FOURCC_minf)))
goto corrupt_file;
if (!(stbl = qtdemux_tree_get_child_by_type (minf, FOURCC_stbl)))
goto corrupt_file;
/*parse svmi header if existing */
svmi = qtdemux_tree_get_child_by_type (stbl, FOURCC_svmi);
if (svmi) {
len = QT_UINT32 ((guint8 *) svmi->data);
version = QT_UINT32 ((guint8 *) svmi->data + 8);
if (!version) {
GstVideoMultiviewMode mode = GST_VIDEO_MULTIVIEW_MODE_NONE;
GstVideoMultiviewFlags flags = GST_VIDEO_MULTIVIEW_FLAGS_NONE;
guint8 frame_type, frame_layout;
/* MPEG-A stereo video */
if (qtdemux->major_brand == FOURCC_ss02)
flags |= GST_VIDEO_MULTIVIEW_FLAGS_MIXED_MONO;
frame_type = QT_UINT8 ((guint8 *) svmi->data + 12);
frame_layout = QT_UINT8 ((guint8 *) svmi->data + 13) & 0x01;
switch (frame_type) {
case 0:
mode = GST_VIDEO_MULTIVIEW_MODE_SIDE_BY_SIDE;
break;
case 1:
mode = GST_VIDEO_MULTIVIEW_MODE_ROW_INTERLEAVED;
break;
case 2:
mode = GST_VIDEO_MULTIVIEW_MODE_FRAME_BY_FRAME;
break;
case 3:
/* mode 3 is primary/secondary view sequence, ie
* left/right views in separate tracks. See section 7.2
* of ISO/IEC 23000-11:2009 */
GST_FIXME_OBJECT (qtdemux,
"Implement stereo video in separate streams");
}
if ((frame_layout & 0x1) == 0)
flags |= GST_VIDEO_MULTIVIEW_FLAGS_RIGHT_VIEW_FIRST;
GST_LOG_OBJECT (qtdemux,
"StereoVideo: composition type: %u, is_left_first: %u",
frame_type, frame_layout);
stream->multiview_mode = mode;
stream->multiview_flags = flags;
}
}
/* parse stsd */
if (!(stsd = qtdemux_tree_get_child_by_type (stbl, FOURCC_stsd)))
goto corrupt_file;
stsd_data = (const guint8 *) stsd->data;
/* stsd should at least have one entry */
stsd_len = QT_UINT32 (stsd_data);
if (stsd_len < 24) {
/* .. but skip stream with empty stsd produced by some Vivotek cameras */
if (stream->subtype == FOURCC_vivo) {
if (new_stream)
gst_qtdemux_stream_free (qtdemux, stream);
return TRUE;
} else {
goto corrupt_file;
}
}
GST_LOG_OBJECT (qtdemux, "stsd len: %d", stsd_len);
/* and that entry should fit within stsd */
len = QT_UINT32 (stsd_data + 16);
if (len > stsd_len + 16)
goto corrupt_file;
stream->fourcc = fourcc = QT_FOURCC (stsd_data + 16 + 4);
GST_LOG_OBJECT (qtdemux, "stsd type: %" GST_FOURCC_FORMAT,
GST_FOURCC_ARGS (stream->fourcc));
GST_LOG_OBJECT (qtdemux, "stsd type len: %d", len);
if ((fourcc == FOURCC_drms) || (fourcc == FOURCC_drmi))
goto error_encrypted;
if (fourcc == FOURCC_encv || fourcc == FOURCC_enca) {
GNode *enc = qtdemux_tree_get_child_by_type (stsd, fourcc);
stream->protected = TRUE;
if (!qtdemux_parse_protection_scheme_info (qtdemux, stream, enc, &fourcc))
GST_ERROR_OBJECT (qtdemux, "Failed to parse protection scheme info");
}
if (stream->subtype == FOURCC_vide) {
guint32 w = 0, h = 0;
gboolean gray;
gint depth, palette_size, palette_count;
guint32 matrix[9];
guint32 *palette_data = NULL;
stream->sampled = TRUE;
/* version 1 uses some 64-bit ints */
if (!gst_byte_reader_skip (&tkhd, 20 + value_size))
goto corrupt_file;
if (!qtdemux_parse_transformation_matrix (qtdemux, &tkhd, matrix, "tkhd"))
goto corrupt_file;
if (!gst_byte_reader_get_uint32_be (&tkhd, &w)
|| !gst_byte_reader_get_uint32_be (&tkhd, &h))
goto corrupt_file;
stream->display_width = w >> 16;
stream->display_height = h >> 16;
qtdemux_inspect_transformation_matrix (qtdemux, stream, matrix,
&stream->pending_tags);
offset = 16;
if (len < 86)
goto corrupt_file;
stream->width = QT_UINT16 (stsd_data + offset + 32);
stream->height = QT_UINT16 (stsd_data + offset + 34);
stream->fps_n = 0; /* this is filled in later */
stream->fps_d = 0; /* this is filled in later */
stream->bits_per_sample = QT_UINT16 (stsd_data + offset + 82);
stream->color_table_id = QT_UINT16 (stsd_data + offset + 84);
/* if color_table_id is 0, ctab atom must follow; however some files
* produced by TMPEGEnc have color_table_id = 0 and no ctab atom, so
* if color table is not present we'll correct the value */
if (stream->color_table_id == 0 &&
(len < 90 || QT_FOURCC (stsd_data + offset + 86) != FOURCC_ctab)) {
stream->color_table_id = -1;
}
GST_LOG_OBJECT (qtdemux, "width %d, height %d, bps %d, color table id %d",
stream->width, stream->height, stream->bits_per_sample,
stream->color_table_id);
depth = stream->bits_per_sample;
/* more than 32 bits means grayscale */
gray = (depth > 32);
/* low 32 bits specify the depth */
depth &= 0x1F;
/* different number of palette entries is determined by depth. */
palette_count = 0;
if ((depth == 1) || (depth == 2) || (depth == 4) || (depth == 8))
palette_count = (1 << depth);
palette_size = palette_count * 4;
if (stream->color_table_id) {
switch (palette_count) {
case 0:
break;
case 2:
palette_data = g_memdup (ff_qt_default_palette_2, palette_size);
break;
case 4:
palette_data = g_memdup (ff_qt_default_palette_4, palette_size);
break;
case 16:
if (gray)
palette_data = g_memdup (ff_qt_grayscale_palette_16, palette_size);
else
palette_data = g_memdup (ff_qt_default_palette_16, palette_size);
break;
case 256:
if (gray)
palette_data = g_memdup (ff_qt_grayscale_palette_256, palette_size);
else
palette_data = g_memdup (ff_qt_default_palette_256, palette_size);
break;
default:
GST_ELEMENT_WARNING (qtdemux, STREAM, DEMUX,
(_("The video in this file might not play correctly.")),
("unsupported palette depth %d", depth));
break;
}
} else {
gint i, j, start, end;
if (len < 94)
goto corrupt_file;
/* read table */
start = QT_UINT32 (stsd_data + offset + 86);
palette_count = QT_UINT16 (stsd_data + offset + 90);
end = QT_UINT16 (stsd_data + offset + 92);
GST_LOG_OBJECT (qtdemux, "start %d, end %d, palette_count %d",
start, end, palette_count);
if (end > 255)
end = 255;
if (start > end)
start = end;
if (len < 94 + (end - start) * 8)
goto corrupt_file;
/* palette is always the same size */
palette_data = g_malloc0 (256 * 4);
palette_size = 256 * 4;
for (j = 0, i = start; i <= end; j++, i++) {
guint32 a, r, g, b;
a = QT_UINT16 (stsd_data + offset + 94 + (j * 8));
r = QT_UINT16 (stsd_data + offset + 96 + (j * 8));
g = QT_UINT16 (stsd_data + offset + 98 + (j * 8));
b = QT_UINT16 (stsd_data + offset + 100 + (j * 8));
palette_data[i] = ((a & 0xff00) << 16) | ((r & 0xff00) << 8) |
(g & 0xff00) | (b >> 8);
}
}
if (stream->caps)
gst_caps_unref (stream->caps);
stream->caps =
qtdemux_video_caps (qtdemux, stream, fourcc, stsd_data, &codec);
if (G_UNLIKELY (!stream->caps)) {
g_free (palette_data);
goto unknown_stream;
}
if (codec) {
gst_tag_list_add (stream->pending_tags, GST_TAG_MERGE_REPLACE,
GST_TAG_VIDEO_CODEC, codec, NULL);
g_free (codec);
codec = NULL;
}
if (palette_data) {
GstStructure *s;
if (stream->rgb8_palette)
gst_memory_unref (stream->rgb8_palette);
stream->rgb8_palette = gst_memory_new_wrapped (GST_MEMORY_FLAG_READONLY,
palette_data, palette_size, 0, palette_size, palette_data, g_free);
s = gst_caps_get_structure (stream->caps, 0);
/* non-raw video has a palette_data property. raw video has the palette as
* an extra plane that we append to the output buffers before we push
* them*/
if (!gst_structure_has_name (s, "video/x-raw")) {
GstBuffer *palette;
palette = gst_buffer_new ();
gst_buffer_append_memory (palette, stream->rgb8_palette);
stream->rgb8_palette = NULL;
gst_caps_set_simple (stream->caps, "palette_data",
GST_TYPE_BUFFER, palette, NULL);
gst_buffer_unref (palette);
}
} else if (palette_count != 0) {
GST_ELEMENT_WARNING (qtdemux, STREAM, NOT_IMPLEMENTED,
(NULL), ("Unsupported palette depth %d", depth));
}
GST_LOG_OBJECT (qtdemux, "frame count: %u",
QT_UINT16 (stsd_data + offset + 48));
esds = NULL;
pasp = NULL;
colr = NULL;
fiel = NULL;
/* pick 'the' stsd child */
if (!stream->protected)
mp4v = qtdemux_tree_get_child_by_type (stsd, fourcc);
else
mp4v = qtdemux_tree_get_child_by_type (stsd, FOURCC_encv);
if (mp4v) {
esds = qtdemux_tree_get_child_by_type (mp4v, FOURCC_esds);
pasp = qtdemux_tree_get_child_by_type (mp4v, FOURCC_pasp);
colr = qtdemux_tree_get_child_by_type (mp4v, FOURCC_colr);
fiel = qtdemux_tree_get_child_by_type (mp4v, FOURCC_fiel);
}
if (pasp) {
const guint8 *pasp_data = (const guint8 *) pasp->data;
stream->par_w = QT_UINT32 (pasp_data + 8);
stream->par_h = QT_UINT32 (pasp_data + 12);
} else {
stream->par_w = 0;
stream->par_h = 0;
}
if (fiel) {
const guint8 *fiel_data = (const guint8 *) fiel->data;
gint len = QT_UINT32 (fiel_data);
if (len == 10) {
stream->interlace_mode = GST_READ_UINT8 (fiel_data + 8);
stream->field_order = GST_READ_UINT8 (fiel_data + 9);
}
}
if (colr) {
const guint8 *colr_data = (const guint8 *) colr->data;
gint len = QT_UINT32 (colr_data);
if (len == 19 || len == 18) {
guint32 color_type = GST_READ_UINT32_LE (colr_data + 8);
if (color_type == FOURCC_nclx || color_type == FOURCC_nclc) {
guint16 primaries = GST_READ_UINT16_BE (colr_data + 12);
guint16 transfer_function = GST_READ_UINT16_BE (colr_data + 14);
guint16 matrix = GST_READ_UINT16_BE (colr_data + 16);
gboolean full_range = len == 19 ? colr_data[17] >> 7 : FALSE;
switch (primaries) {
case 1:
stream->colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_BT709;
break;
case 5:
stream->colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_BT470BG;
break;
case 6:
stream->colorimetry.primaries =
GST_VIDEO_COLOR_PRIMARIES_SMPTE170M;
break;
case 9:
stream->colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_BT2020;
break;
default:
break;
}
switch (transfer_function) {
case 1:
stream->colorimetry.transfer = GST_VIDEO_TRANSFER_BT709;
break;
case 7:
stream->colorimetry.transfer = GST_VIDEO_TRANSFER_SMPTE240M;
break;
default:
break;
}
switch (matrix) {
case 1:
stream->colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_BT709;
break;
case 6:
stream->colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_BT601;
break;
case 7:
stream->colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_SMPTE240M;
break;
case 9:
stream->colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_BT2020;
break;
default:
break;
}
stream->colorimetry.range =
full_range ? GST_VIDEO_COLOR_RANGE_0_255 :
GST_VIDEO_COLOR_RANGE_16_235;
} else {
GST_DEBUG_OBJECT (qtdemux, "Unsupported color type");
}
} else {
GST_WARNING_OBJECT (qtdemux, "Invalid colr atom size");
}
}
if (esds) {
gst_qtdemux_handle_esds (qtdemux, stream, esds, stream->pending_tags);
} else {
switch (fourcc) {
case FOURCC_H264:
case FOURCC_avc1:
case FOURCC_avc3:
{
gint len = QT_UINT32 (stsd_data) - 0x66;
const guint8 *avc_data = stsd_data + 0x66;
/* find avcC */
while (len >= 0x8) {
gint size;
if (QT_UINT32 (avc_data) <= len)
size = QT_UINT32 (avc_data) - 0x8;
else
size = len - 0x8;
if (size < 1)
/* No real data, so break out */
break;
switch (QT_FOURCC (avc_data + 0x4)) {
case FOURCC_avcC:
{
/* parse, if found */
GstBuffer *buf;
GST_DEBUG_OBJECT (qtdemux, "found avcC codec_data in stsd");
/* First 4 bytes are the length of the atom, the next 4 bytes
* are the fourcc, the next 1 byte is the version, and the
* subsequent bytes are profile_tier_level structure like data. */
gst_codec_utils_h264_caps_set_level_and_profile (stream->caps,
avc_data + 8 + 1, size - 1);
buf = gst_buffer_new_and_alloc (size);
gst_buffer_fill (buf, 0, avc_data + 0x8, size);
gst_caps_set_simple (stream->caps,
"codec_data", GST_TYPE_BUFFER, buf, NULL);
gst_buffer_unref (buf);
break;
}
case FOURCC_strf:
{
GstBuffer *buf;
GST_DEBUG_OBJECT (qtdemux, "found strf codec_data in stsd");
/* First 4 bytes are the length of the atom, the next 4 bytes
* are the fourcc, next 40 bytes are BITMAPINFOHEADER,
* next 1 byte is the version, and the
* subsequent bytes are sequence parameter set like data. */
size -= 40; /* we'll be skipping BITMAPINFOHEADER */
if (size > 1) {
gst_codec_utils_h264_caps_set_level_and_profile (stream->caps,
avc_data + 8 + 40 + 1, size - 1);
buf = gst_buffer_new_and_alloc (size);
gst_buffer_fill (buf, 0, avc_data + 8 + 40, size);
gst_caps_set_simple (stream->caps,
"codec_data", GST_TYPE_BUFFER, buf, NULL);
gst_buffer_unref (buf);
}
break;
}
case FOURCC_btrt:
{
guint avg_bitrate, max_bitrate;
/* bufferSizeDB, maxBitrate and avgBitrate - 4 bytes each */
if (size < 12)
break;
max_bitrate = QT_UINT32 (avc_data + 0xc);
avg_bitrate = QT_UINT32 (avc_data + 0x10);
if (!max_bitrate && !avg_bitrate)
break;
/* Some muxers seem to swap the average and maximum bitrates
* (I'm looking at you, YouTube), so we swap for sanity. */
if (max_bitrate > 0 && max_bitrate < avg_bitrate) {
guint temp = avg_bitrate;
avg_bitrate = max_bitrate;
max_bitrate = temp;
}
if (max_bitrate > 0 && max_bitrate < G_MAXUINT32) {
gst_tag_list_add (stream->pending_tags, GST_TAG_MERGE_REPLACE,
GST_TAG_MAXIMUM_BITRATE, max_bitrate, NULL);
}
if (avg_bitrate > 0 && avg_bitrate < G_MAXUINT32) {
gst_tag_list_add (stream->pending_tags, GST_TAG_MERGE_REPLACE,
GST_TAG_BITRATE, avg_bitrate, NULL);
}
break;
}
default:
break;
}
len -= size + 8;
avc_data += size + 8;
}
break;
}
case FOURCC_H265:
case FOURCC_hvc1:
case FOURCC_hev1:
{
gint len = QT_UINT32 (stsd_data) - 0x66;
const guint8 *hevc_data = stsd_data + 0x66;
/* find hevc */
while (len >= 0x8) {
gint size;
if (QT_UINT32 (hevc_data) <= len)
size = QT_UINT32 (hevc_data) - 0x8;
else
size = len - 0x8;
if (size < 1)
/* No real data, so break out */
break;
switch (QT_FOURCC (hevc_data + 0x4)) {
case FOURCC_hvcC:
{
/* parse, if found */
GstBuffer *buf;
GST_DEBUG_OBJECT (qtdemux, "found avcC codec_data in stsd");
/* First 4 bytes are the length of the atom, the next 4 bytes
* are the fourcc, the next 1 byte is the version, and the
* subsequent bytes are sequence parameter set like data. */
gst_codec_utils_h265_caps_set_level_tier_and_profile
(stream->caps, hevc_data + 8 + 1, size - 1);
buf = gst_buffer_new_and_alloc (size);
gst_buffer_fill (buf, 0, hevc_data + 0x8, size);
gst_caps_set_simple (stream->caps,
"codec_data", GST_TYPE_BUFFER, buf, NULL);
gst_buffer_unref (buf);
break;
}
default:
break;
}
len -= size + 8;
hevc_data += size + 8;
}
break;
}
case FOURCC_mp4v:
case FOURCC_MP4V:
case FOURCC_fmp4:
case FOURCC_FMP4:
{
GNode *glbl;
GST_DEBUG_OBJECT (qtdemux, "found %" GST_FOURCC_FORMAT,
GST_FOURCC_ARGS (fourcc));
/* codec data might be in glbl extension atom */
glbl = mp4v ?
qtdemux_tree_get_child_by_type (mp4v, FOURCC_glbl) : NULL;
if (glbl) {
guint8 *data;
GstBuffer *buf;
gint len;
GST_DEBUG_OBJECT (qtdemux, "found glbl data in stsd");
data = glbl->data;
len = QT_UINT32 (data);
if (len > 0x8) {
len -= 0x8;
buf = gst_buffer_new_and_alloc (len);
gst_buffer_fill (buf, 0, data + 8, len);
gst_caps_set_simple (stream->caps,
"codec_data", GST_TYPE_BUFFER, buf, NULL);
gst_buffer_unref (buf);
}
}
break;
}
case FOURCC_mjp2:
{
/* see annex I of the jpeg2000 spec */
GNode *jp2h, *ihdr, *colr, *mjp2, *field, *prefix, *cmap, *cdef;
const guint8 *data;
const gchar *colorspace = NULL;
gint ncomp = 0;
guint32 ncomp_map = 0;
gint32 *comp_map = NULL;
guint32 nchan_def = 0;
gint32 *chan_def = NULL;
GST_DEBUG_OBJECT (qtdemux, "found mjp2");
/* some required atoms */
mjp2 = qtdemux_tree_get_child_by_type (stsd, FOURCC_mjp2);
if (!mjp2)
break;
jp2h = qtdemux_tree_get_child_by_type (mjp2, FOURCC_jp2h);
if (!jp2h)
break;
/* number of components; redundant with info in codestream, but useful
to a muxer */
ihdr = qtdemux_tree_get_child_by_type (jp2h, FOURCC_ihdr);
if (!ihdr || QT_UINT32 (ihdr->data) != 22)
break;
ncomp = QT_UINT16 (((guint8 *) ihdr->data) + 16);
colr = qtdemux_tree_get_child_by_type (jp2h, FOURCC_colr);
if (!colr)
break;
GST_DEBUG_OBJECT (qtdemux, "found colr");
/* extract colour space info */
if (QT_UINT8 ((guint8 *) colr->data + 8) == 1) {
switch (QT_UINT32 ((guint8 *) colr->data + 11)) {
case 16:
colorspace = "sRGB";
break;
case 17:
colorspace = "GRAY";
break;
case 18:
colorspace = "sYUV";
break;
default:
colorspace = NULL;
break;
}
}
if (!colorspace)
/* colr is required, and only values 16, 17, and 18 are specified,
so error if we have no colorspace */
break;
/* extract component mapping */
cmap = qtdemux_tree_get_child_by_type (jp2h, FOURCC_cmap);
if (cmap) {
guint32 cmap_len = 0;
int i;
cmap_len = QT_UINT32 (cmap->data);
if (cmap_len >= 8) {
/* normal box, subtract off header */
cmap_len -= 8;
/* cmap: { u16 cmp; u8 mtyp; u8 pcol; }* */
if (cmap_len % 4 == 0) {
ncomp_map = (cmap_len / 4);
comp_map = g_new0 (gint32, ncomp_map);
for (i = 0; i < ncomp_map; i++) {
guint16 cmp;
guint8 mtyp, pcol;
cmp = QT_UINT16 (((guint8 *) cmap->data) + 8 + i * 4);
mtyp = QT_UINT8 (((guint8 *) cmap->data) + 8 + i * 4 + 2);
pcol = QT_UINT8 (((guint8 *) cmap->data) + 8 + i * 4 + 3);
comp_map[i] = (mtyp << 24) | (pcol << 16) | cmp;
}
}
}
}
/* extract channel definitions */
cdef = qtdemux_tree_get_child_by_type (jp2h, FOURCC_cdef);
if (cdef) {
guint32 cdef_len = 0;
int i;
cdef_len = QT_UINT32 (cdef->data);
if (cdef_len >= 10) {
/* normal box, subtract off header and len */
cdef_len -= 10;
/* cdef: u16 n; { u16 cn; u16 typ; u16 asoc; }* */
if (cdef_len % 6 == 0) {
nchan_def = (cdef_len / 6);
chan_def = g_new0 (gint32, nchan_def);
for (i = 0; i < nchan_def; i++)
chan_def[i] = -1;
for (i = 0; i < nchan_def; i++) {
guint16 cn, typ, asoc;
cn = QT_UINT16 (((guint8 *) cdef->data) + 10 + i * 6);
typ = QT_UINT16 (((guint8 *) cdef->data) + 10 + i * 6 + 2);
asoc = QT_UINT16 (((guint8 *) cdef->data) + 10 + i * 6 + 4);
if (cn < nchan_def) {
switch (typ) {
case 0:
chan_def[cn] = asoc;
break;
case 1:
chan_def[cn] = 0; /* alpha */
break;
default:
chan_def[cn] = -typ;
}
}
}
}
}
}
gst_caps_set_simple (stream->caps,
"num-components", G_TYPE_INT, ncomp, NULL);
gst_caps_set_simple (stream->caps,
"colorspace", G_TYPE_STRING, colorspace, NULL);
if (comp_map) {
GValue arr = { 0, };
GValue elt = { 0, };
int i;
g_value_init (&arr, GST_TYPE_ARRAY);
g_value_init (&elt, G_TYPE_INT);
for (i = 0; i < ncomp_map; i++) {
g_value_set_int (&elt, comp_map[i]);
gst_value_array_append_value (&arr, &elt);
}
gst_structure_set_value (gst_caps_get_structure (stream->caps, 0),
"component-map", &arr);
g_value_unset (&elt);
g_value_unset (&arr);
g_free (comp_map);
}
if (chan_def) {
GValue arr = { 0, };
GValue elt = { 0, };
int i;
g_value_init (&arr, GST_TYPE_ARRAY);
g_value_init (&elt, G_TYPE_INT);
for (i = 0; i < nchan_def; i++) {
g_value_set_int (&elt, chan_def[i]);
gst_value_array_append_value (&arr, &elt);
}
gst_structure_set_value (gst_caps_get_structure (stream->caps, 0),
"channel-definitions", &arr);
g_value_unset (&elt);
g_value_unset (&arr);
g_free (chan_def);
}
/* some optional atoms */
field = qtdemux_tree_get_child_by_type (mjp2, FOURCC_fiel);
prefix = qtdemux_tree_get_child_by_type (mjp2, FOURCC_jp2x);
/* indicate possible fields in caps */
if (field) {
data = (guint8 *) field->data + 8;
if (*data != 1)
gst_caps_set_simple (stream->caps, "fields", G_TYPE_INT,
(gint) * data, NULL);
}
/* add codec_data if provided */
if (prefix) {
GstBuffer *buf;
gint len;
GST_DEBUG_OBJECT (qtdemux, "found prefix data in stsd");
data = prefix->data;
len = QT_UINT32 (data);
if (len > 0x8) {
len -= 0x8;
buf = gst_buffer_new_and_alloc (len);
gst_buffer_fill (buf, 0, data + 8, len);
gst_caps_set_simple (stream->caps,
"codec_data", GST_TYPE_BUFFER, buf, NULL);
gst_buffer_unref (buf);
}
}
break;
}
case FOURCC_jpeg:
{
/* https://developer.apple.com/standards/qtff-2001.pdf,
* page 92, "Video Sample Description", under table 3.1 */
GstByteReader br;
const gint compressor_offset = 16 + 4 + 4 * 3 + 2 * 2 + 2 * 4 + 4 + 2;
const gint min_size = compressor_offset + 32 + 2 + 2;
GNode *jpeg;
guint32 len;
guint16 color_table_id = 0;
gboolean ok;
GST_DEBUG_OBJECT (qtdemux, "found jpeg");
/* recover information on interlaced/progressive */
jpeg = qtdemux_tree_get_child_by_type (stsd, FOURCC_jpeg);
if (!jpeg)
break;
len = QT_UINT32 (jpeg->data);
GST_DEBUG_OBJECT (qtdemux, "Found jpeg: len %u, need %d", len,
min_size);
if (len >= min_size) {
gst_byte_reader_init (&br, jpeg->data, len);
gst_byte_reader_skip (&br, compressor_offset + 32 + 2);
gst_byte_reader_get_uint16_le (&br, &color_table_id);
if (color_table_id != 0) {
/* the spec says there can be concatenated chunks in the data, and we want
* to find one called field. Walk through them. */
gint offset = min_size;
while (offset + 8 < len) {
guint32 size = 0, tag;
ok = gst_byte_reader_get_uint32_le (&br, &size);
ok &= gst_byte_reader_get_uint32_le (&br, &tag);
if (!ok || size < 8) {
GST_WARNING_OBJECT (qtdemux,
"Failed to walk optional chunk list");
break;
}
GST_DEBUG_OBJECT (qtdemux,
"Found optional %4.4s chunk, size %u", (const char *) &tag,
size);
if (tag == FOURCC_fiel) {
guint8 n_fields, ordering;
gst_byte_reader_get_uint8 (&br, &n_fields);
gst_byte_reader_get_uint8 (&br, &ordering);
if (n_fields == 1 || n_fields == 2) {
GST_DEBUG_OBJECT (qtdemux,
"Found fiel tag with %u fields, ordering %u", n_fields,
ordering);
if (n_fields == 2)
gst_caps_set_simple (stream->caps, "interlace-mode",
G_TYPE_STRING, "interleaved", NULL);
} else {
GST_WARNING_OBJECT (qtdemux,
"Found fiel tag with invalid fields (%u)", n_fields);
}
}
offset += size;
}
} else {
GST_DEBUG_OBJECT (qtdemux,
"Color table ID is 0, not trying to get interlacedness");
}
} else {
GST_WARNING_OBJECT (qtdemux,
"Length of jpeg chunk is too small, not trying to get interlacedness");
}
break;
}
case FOURCC_SVQ3:
case FOURCC_VP31:
{
GstBuffer *buf;
GstBuffer *seqh = NULL;
guint8 *gamma_data = NULL;
gint len = QT_UINT32 (stsd_data);
qtdemux_parse_svq3_stsd_data (qtdemux, stsd, &gamma_data, &seqh);
if (gamma_data) {
gst_caps_set_simple (stream->caps, "applied-gamma", G_TYPE_DOUBLE,
QT_FP32 (gamma_data), NULL);
}
if (seqh) {
/* sorry for the bad name, but we don't know what this is, other
* than its own fourcc */
gst_caps_set_simple (stream->caps, "seqh", GST_TYPE_BUFFER, seqh,
NULL);
}
GST_DEBUG_OBJECT (qtdemux, "found codec_data in stsd");
buf = gst_buffer_new_and_alloc (len);
gst_buffer_fill (buf, 0, stsd_data, len);
gst_caps_set_simple (stream->caps,
"codec_data", GST_TYPE_BUFFER, buf, NULL);
gst_buffer_unref (buf);
break;
}
case FOURCC_rle_:
case FOURCC_WRLE:
{
gst_caps_set_simple (stream->caps,
"depth", G_TYPE_INT, QT_UINT16 (stsd_data + offset + 82), NULL);
break;
}
case FOURCC_XiTh:
{
GNode *xith, *xdxt;
GST_DEBUG_OBJECT (qtdemux, "found XiTh");
xith = qtdemux_tree_get_child_by_type (stsd, FOURCC_XiTh);
if (!xith)
break;
xdxt = qtdemux_tree_get_child_by_type (xith, FOURCC_XdxT);
if (!xdxt)
break;
GST_DEBUG_OBJECT (qtdemux, "found XdxT node");
/* collect the headers and store them in a stream list so that we can
* send them out first */
qtdemux_parse_theora_extension (qtdemux, stream, xdxt);
break;
}
case FOURCC_ovc1:
{
GNode *ovc1;
guint8 *ovc1_data;
guint ovc1_len;
GstBuffer *buf;
GST_DEBUG_OBJECT (qtdemux, "parse ovc1 header");
ovc1 = qtdemux_tree_get_child_by_type (stsd, FOURCC_ovc1);
if (!ovc1)
break;
ovc1_data = ovc1->data;
ovc1_len = QT_UINT32 (ovc1_data);
if (ovc1_len <= 198) {
GST_WARNING_OBJECT (qtdemux, "Too small ovc1 header, skipping");
break;
}
buf = gst_buffer_new_and_alloc (ovc1_len - 198);
gst_buffer_fill (buf, 0, ovc1_data + 198, ovc1_len - 198);
gst_caps_set_simple (stream->caps,
"codec_data", GST_TYPE_BUFFER, buf, NULL);
gst_buffer_unref (buf);
break;
}
case FOURCC_vc_1:
{
gint len = QT_UINT32 (stsd_data) - 0x66;
const guint8 *vc1_data = stsd_data + 0x66;
/* find dvc1 */
while (len >= 8) {
gint size;
if (QT_UINT32 (vc1_data) <= len)
size = QT_UINT32 (vc1_data) - 8;
else
size = len - 8;
if (size < 1)
/* No real data, so break out */
break;
switch (QT_FOURCC (vc1_data + 0x4)) {
case GST_MAKE_FOURCC ('d', 'v', 'c', '1'):
{
GstBuffer *buf;
GST_DEBUG_OBJECT (qtdemux, "found dvc1 codec_data in stsd");
buf = gst_buffer_new_and_alloc (size);
gst_buffer_fill (buf, 0, vc1_data + 8, size);
gst_caps_set_simple (stream->caps,
"codec_data", GST_TYPE_BUFFER, buf, NULL);
gst_buffer_unref (buf);
break;
}
default:
break;
}
len -= size + 8;
vc1_data += size + 8;
}
break;
}
default:
break;
}
}
GST_INFO_OBJECT (qtdemux,
"type %" GST_FOURCC_FORMAT " caps %" GST_PTR_FORMAT,
GST_FOURCC_ARGS (fourcc), stream->caps);
} else if (stream->subtype == FOURCC_soun) {
int version, samplesize;
guint16 compression_id;
gboolean amrwb = FALSE;
offset = 32;
/* sample description entry (16) + sound sample description v0 (20) */
if (len < 36)
goto corrupt_file;
version = QT_UINT32 (stsd_data + offset);
stream->n_channels = QT_UINT16 (stsd_data + offset + 8);
samplesize = QT_UINT16 (stsd_data + offset + 10);
compression_id = QT_UINT16 (stsd_data + offset + 12);
stream->rate = QT_FP32 (stsd_data + offset + 16);
GST_LOG_OBJECT (qtdemux, "version/rev: %08x", version);
GST_LOG_OBJECT (qtdemux, "vendor: %08x",
QT_UINT32 (stsd_data + offset + 4));
GST_LOG_OBJECT (qtdemux, "n_channels: %d", stream->n_channels);
GST_LOG_OBJECT (qtdemux, "sample_size: %d", samplesize);
GST_LOG_OBJECT (qtdemux, "compression_id: %d", compression_id);
GST_LOG_OBJECT (qtdemux, "packet size: %d",
QT_UINT16 (stsd_data + offset + 14));
GST_LOG_OBJECT (qtdemux, "sample rate: %g", stream->rate);
if (compression_id == 0xfffe)
stream->sampled = TRUE;
/* first assume uncompressed audio */
stream->bytes_per_sample = samplesize / 8;
stream->samples_per_frame = stream->n_channels;
stream->bytes_per_frame = stream->n_channels * stream->bytes_per_sample;
stream->samples_per_packet = stream->samples_per_frame;
stream->bytes_per_packet = stream->bytes_per_sample;
offset = 52;
switch (fourcc) {
/* Yes, these have to be hard-coded */
case FOURCC_MAC6:
{
stream->samples_per_packet = 6;
stream->bytes_per_packet = 1;
stream->bytes_per_frame = 1 * stream->n_channels;
stream->bytes_per_sample = 1;
stream->samples_per_frame = 6 * stream->n_channels;
break;
}
case FOURCC_MAC3:
{
stream->samples_per_packet = 3;
stream->bytes_per_packet = 1;
stream->bytes_per_frame = 1 * stream->n_channels;
stream->bytes_per_sample = 1;
stream->samples_per_frame = 3 * stream->n_channels;
break;
}
case FOURCC_ima4:
{
stream->samples_per_packet = 64;
stream->bytes_per_packet = 34;
stream->bytes_per_frame = 34 * stream->n_channels;
stream->bytes_per_sample = 2;
stream->samples_per_frame = 64 * stream->n_channels;
break;
}
case FOURCC_ulaw:
case FOURCC_alaw:
{
stream->samples_per_packet = 1;
stream->bytes_per_packet = 1;
stream->bytes_per_frame = 1 * stream->n_channels;
stream->bytes_per_sample = 1;
stream->samples_per_frame = 1 * stream->n_channels;
break;
}
case FOURCC_agsm:
{
stream->samples_per_packet = 160;
stream->bytes_per_packet = 33;
stream->bytes_per_frame = 33 * stream->n_channels;
stream->bytes_per_sample = 2;
stream->samples_per_frame = 160 * stream->n_channels;
break;
}
default:
break;
}
if (version == 0x00010000) {
/* sample description entry (16) + sound sample description v1 (20+16) */
if (len < 52)
goto corrupt_file;
switch (fourcc) {
case FOURCC_twos:
case FOURCC_sowt:
case FOURCC_raw_:
break;
default:
{
/* only parse extra decoding config for non-pcm audio */
stream->samples_per_packet = QT_UINT32 (stsd_data + offset);
stream->bytes_per_packet = QT_UINT32 (stsd_data + offset + 4);
stream->bytes_per_frame = QT_UINT32 (stsd_data + offset + 8);
stream->bytes_per_sample = QT_UINT32 (stsd_data + offset + 12);
GST_LOG_OBJECT (qtdemux, "samples/packet: %d",
stream->samples_per_packet);
GST_LOG_OBJECT (qtdemux, "bytes/packet: %d",
stream->bytes_per_packet);
GST_LOG_OBJECT (qtdemux, "bytes/frame: %d",
stream->bytes_per_frame);
GST_LOG_OBJECT (qtdemux, "bytes/sample: %d",
stream->bytes_per_sample);
if (!stream->sampled && stream->bytes_per_packet) {
stream->samples_per_frame = (stream->bytes_per_frame /
stream->bytes_per_packet) * stream->samples_per_packet;
GST_LOG_OBJECT (qtdemux, "samples/frame: %d",
stream->samples_per_frame);
}
break;
}
}
} else if (version == 0x00020000) {
union
{
gdouble fp;
guint64 val;
} qtfp;
/* sample description entry (16) + sound sample description v2 (56) */
if (len < 72)
goto corrupt_file;
qtfp.val = QT_UINT64 (stsd_data + offset + 4);
stream->rate = qtfp.fp;
stream->n_channels = QT_UINT32 (stsd_data + offset + 12);
GST_LOG_OBJECT (qtdemux, "Sound sample description Version 2");
GST_LOG_OBJECT (qtdemux, "sample rate: %g", stream->rate);
GST_LOG_OBJECT (qtdemux, "n_channels: %d", stream->n_channels);
GST_LOG_OBJECT (qtdemux, "bits/channel: %d",
QT_UINT32 (stsd_data + offset + 20));
GST_LOG_OBJECT (qtdemux, "format flags: %X",
QT_UINT32 (stsd_data + offset + 24));
GST_LOG_OBJECT (qtdemux, "bytes/packet: %d",
QT_UINT32 (stsd_data + offset + 28));
GST_LOG_OBJECT (qtdemux, "LPCM frames/packet: %d",
QT_UINT32 (stsd_data + offset + 32));
} else if (version != 0x00000) {
GST_WARNING_OBJECT (qtdemux, "unknown audio STSD version %08x", version);
}
if (stream->caps)
gst_caps_unref (stream->caps);
stream->caps = qtdemux_audio_caps (qtdemux, stream, fourcc,
stsd_data + 32, len - 16, &codec);
switch (fourcc) {
case FOURCC_in24:
{
GNode *enda;
GNode *in24;
in24 = qtdemux_tree_get_child_by_type (stsd, FOURCC_in24);
enda = qtdemux_tree_get_child_by_type (in24, FOURCC_enda);
if (!enda) {
wave = qtdemux_tree_get_child_by_type (in24, FOURCC_wave);
if (wave)
enda = qtdemux_tree_get_child_by_type (wave, FOURCC_enda);
}
if (enda) {
int enda_value = QT_UINT16 ((guint8 *) enda->data + 8);
gst_caps_set_simple (stream->caps,
"format", G_TYPE_STRING, (enda_value) ? "S24LE" : "S24BE", NULL);
}
break;
}
case FOURCC_owma:
{
GNode *owma;
const guint8 *owma_data;
const gchar *codec_name = NULL;
guint owma_len;
GstBuffer *buf;
gint version = 1;
/* from http://msdn.microsoft.com/en-us/library/dd757720(VS.85).aspx */
/* FIXME this should also be gst_riff_strf_auds,
* but the latter one is actually missing bits-per-sample :( */
typedef struct
{
gint16 wFormatTag;
gint16 nChannels;
gint32 nSamplesPerSec;
gint32 nAvgBytesPerSec;
gint16 nBlockAlign;
gint16 wBitsPerSample;
gint16 cbSize;
} WAVEFORMATEX;
WAVEFORMATEX *wfex;
GST_DEBUG_OBJECT (qtdemux, "parse owma");
owma = qtdemux_tree_get_child_by_type (stsd, FOURCC_owma);
if (!owma)
break;
owma_data = owma->data;
owma_len = QT_UINT32 (owma_data);
if (owma_len <= 54) {
GST_WARNING_OBJECT (qtdemux, "Too small owma header, skipping");
break;
}
wfex = (WAVEFORMATEX *) (owma_data + 36);
buf = gst_buffer_new_and_alloc (owma_len - 54);
gst_buffer_fill (buf, 0, owma_data + 54, owma_len - 54);
if (wfex->wFormatTag == 0x0161) {
codec_name = "Windows Media Audio";
version = 2;
} else if (wfex->wFormatTag == 0x0162) {
codec_name = "Windows Media Audio 9 Pro";
version = 3;
} else if (wfex->wFormatTag == 0x0163) {
codec_name = "Windows Media Audio 9 Lossless";
/* is that correct? gstffmpegcodecmap.c is missing it, but
* fluendo codec seems to support it */
version = 4;
}
gst_caps_set_simple (stream->caps,
"codec_data", GST_TYPE_BUFFER, buf,
"wmaversion", G_TYPE_INT, version,
"block_align", G_TYPE_INT, GST_READ_UINT16_LE (&wfex->nBlockAlign),
"bitrate", G_TYPE_INT, GST_READ_UINT32_LE (&wfex->nAvgBytesPerSec),
"width", G_TYPE_INT, GST_READ_UINT16_LE (&wfex->wBitsPerSample),
"depth", G_TYPE_INT, GST_READ_UINT16_LE (&wfex->wBitsPerSample),
NULL);
gst_buffer_unref (buf);
if (codec_name) {
g_free (codec);
codec = g_strdup (codec_name);
}
break;
}
case FOURCC_wma_:
{
gint len = QT_UINT32 (stsd_data) - offset;
const guint8 *wfex_data = stsd_data + offset;
const gchar *codec_name = NULL;
gint version = 1;
/* from http://msdn.microsoft.com/en-us/library/dd757720(VS.85).aspx */
/* FIXME this should also be gst_riff_strf_auds,
* but the latter one is actually missing bits-per-sample :( */
typedef struct
{
gint16 wFormatTag;
gint16 nChannels;
gint32 nSamplesPerSec;
gint32 nAvgBytesPerSec;
gint16 nBlockAlign;
gint16 wBitsPerSample;
gint16 cbSize;
} WAVEFORMATEX;
WAVEFORMATEX wfex;
/* FIXME: unify with similar wavformatex parsing code above */
GST_DEBUG_OBJECT (qtdemux, "parse wma, looking for wfex");
/* find wfex */
while (len >= 8) {
gint size;
if (QT_UINT32 (wfex_data) <= len)
size = QT_UINT32 (wfex_data) - 8;
else
size = len - 8;
if (size < 1)
/* No real data, so break out */
break;
switch (QT_FOURCC (wfex_data + 4)) {
case GST_MAKE_FOURCC ('w', 'f', 'e', 'x'):
{
GST_DEBUG_OBJECT (qtdemux, "found wfex in stsd");
if (size < 8 + 18)
break;
wfex.wFormatTag = GST_READ_UINT16_LE (wfex_data + 8 + 0);
wfex.nChannels = GST_READ_UINT16_LE (wfex_data + 8 + 2);
wfex.nSamplesPerSec = GST_READ_UINT32_LE (wfex_data + 8 + 4);
wfex.nAvgBytesPerSec = GST_READ_UINT32_LE (wfex_data + 8 + 8);
wfex.nBlockAlign = GST_READ_UINT16_LE (wfex_data + 8 + 12);
wfex.wBitsPerSample = GST_READ_UINT16_LE (wfex_data + 8 + 14);
wfex.cbSize = GST_READ_UINT16_LE (wfex_data + 8 + 16);
GST_LOG_OBJECT (qtdemux, "Found wfex box in stsd:");
GST_LOG_OBJECT (qtdemux, "FormatTag = 0x%04x, Channels = %u, "
"SamplesPerSec = %u, AvgBytesPerSec = %u, BlockAlign = %u, "
"BitsPerSample = %u, Size = %u", wfex.wFormatTag,
wfex.nChannels, wfex.nSamplesPerSec, wfex.nAvgBytesPerSec,
wfex.nBlockAlign, wfex.wBitsPerSample, wfex.cbSize);
if (wfex.wFormatTag == 0x0161) {
codec_name = "Windows Media Audio";
version = 2;
} else if (wfex.wFormatTag == 0x0162) {
codec_name = "Windows Media Audio 9 Pro";
version = 3;
} else if (wfex.wFormatTag == 0x0163) {
codec_name = "Windows Media Audio 9 Lossless";
/* is that correct? gstffmpegcodecmap.c is missing it, but
* fluendo codec seems to support it */
version = 4;
}
gst_caps_set_simple (stream->caps,
"wmaversion", G_TYPE_INT, version,
"block_align", G_TYPE_INT, wfex.nBlockAlign,
"bitrate", G_TYPE_INT, wfex.nAvgBytesPerSec,
"width", G_TYPE_INT, wfex.wBitsPerSample,
"depth", G_TYPE_INT, wfex.wBitsPerSample, NULL);
if (size > wfex.cbSize) {
GstBuffer *buf;
buf = gst_buffer_new_and_alloc (size - wfex.cbSize);
gst_buffer_fill (buf, 0, wfex_data + 8 + wfex.cbSize,
size - wfex.cbSize);
gst_caps_set_simple (stream->caps,
"codec_data", GST_TYPE_BUFFER, buf, NULL);
gst_buffer_unref (buf);
} else {
GST_WARNING_OBJECT (qtdemux, "no codec data");
}
if (codec_name) {
g_free (codec);
codec = g_strdup (codec_name);
}
break;
}
default:
break;
}
len -= size + 8;
wfex_data += size + 8;
}
break;
}
case FOURCC_opus:
{
GNode *opus;
const guint8 *opus_data;
guint8 *channel_mapping = NULL;
guint32 rate;
guint8 channels;
guint8 channel_mapping_family;
guint8 stream_count;
guint8 coupled_count;
guint8 i;
opus = qtdemux_tree_get_child_by_type (stsd, FOURCC_opus);
opus_data = opus->data;
channels = GST_READ_UINT8 (opus_data + 45);
rate = GST_READ_UINT32_LE (opus_data + 48);
channel_mapping_family = GST_READ_UINT8 (opus_data + 54);
stream_count = GST_READ_UINT8 (opus_data + 55);
coupled_count = GST_READ_UINT8 (opus_data + 56);
if (channels > 0) {
channel_mapping = g_malloc (channels * sizeof (guint8));
for (i = 0; i < channels; i++)
channel_mapping[i] = GST_READ_UINT8 (opus_data + i + 57);
}
stream->caps = gst_codec_utils_opus_create_caps (rate, channels,
channel_mapping_family, stream_count, coupled_count,
channel_mapping);
break;
}
default:
break;
}
if (codec) {
GstStructure *s;
gint bitrate = 0;
gst_tag_list_add (stream->pending_tags, GST_TAG_MERGE_REPLACE,
GST_TAG_AUDIO_CODEC, codec, NULL);
g_free (codec);
codec = NULL;
/* some bitrate info may have ended up in caps */
s = gst_caps_get_structure (stream->caps, 0);
gst_structure_get_int (s, "bitrate", &bitrate);
if (bitrate > 0)
gst_tag_list_add (stream->pending_tags, GST_TAG_MERGE_REPLACE,
GST_TAG_BITRATE, bitrate, NULL);
}
if (stream->protected && fourcc == FOURCC_mp4a)
mp4a = qtdemux_tree_get_child_by_type (stsd, FOURCC_enca);
else
mp4a = qtdemux_tree_get_child_by_type (stsd, FOURCC_mp4a);
wave = NULL;
esds = NULL;
if (mp4a) {
wave = qtdemux_tree_get_child_by_type (mp4a, FOURCC_wave);
if (wave)
esds = qtdemux_tree_get_child_by_type (wave, FOURCC_esds);
if (!esds)
esds = qtdemux_tree_get_child_by_type (mp4a, FOURCC_esds);
}
/* If the fourcc's bottom 16 bits gives 'sm', then the top
16 bits is a byte-swapped wave-style codec identifier,
and we can find a WAVE header internally to a 'wave' atom here.
This can more clearly be thought of as 'ms' as the top 16 bits, and a
codec id as the bottom 16 bits - but byte-swapped to store in QT (which
is big-endian).
*/
if ((fourcc & 0xffff) == (('s' << 8) | 'm')) {
if (len < offset + 20) {
GST_WARNING_OBJECT (qtdemux, "No wave atom in MS-style audio");
} else {
guint32 datalen = QT_UINT32 (stsd_data + offset + 16);
const guint8 *data = stsd_data + offset + 16;
GNode *wavenode;
GNode *waveheadernode;
wavenode = g_node_new ((guint8 *) data);
if (qtdemux_parse_node (qtdemux, wavenode, data, datalen)) {
const guint8 *waveheader;
guint32 headerlen;
waveheadernode = qtdemux_tree_get_child_by_type (wavenode, fourcc);
if (waveheadernode) {
waveheader = (const guint8 *) waveheadernode->data;
headerlen = QT_UINT32 (waveheader);
if (headerlen > 8) {
gst_riff_strf_auds *header = NULL;
GstBuffer *headerbuf;
GstBuffer *extra;
waveheader += 8;
headerlen -= 8;
headerbuf = gst_buffer_new_and_alloc (headerlen);
gst_buffer_fill (headerbuf, 0, waveheader, headerlen);
if (gst_riff_parse_strf_auds (GST_ELEMENT_CAST (qtdemux),
headerbuf, &header, &extra)) {
gst_caps_unref (stream->caps);
/* FIXME: Need to do something with the channel reorder map */
stream->caps = gst_riff_create_audio_caps (header->format, NULL,
header, extra, NULL, NULL, NULL);
if (extra)
gst_buffer_unref (extra);
g_free (header);
}
}
} else
GST_DEBUG ("Didn't find waveheadernode for this codec");
}
g_node_destroy (wavenode);
}
} else if (esds) {
gst_qtdemux_handle_esds (qtdemux, stream, esds, stream->pending_tags);
} else {
switch (fourcc) {
#if 0
/* FIXME: what is in the chunk? */
case FOURCC_QDMC:
{
gint len = QT_UINT32 (stsd_data);
/* seems to be always = 116 = 0x74 */
break;
}
#endif
case FOURCC_QDM2:
{
gint len = QT_UINT32 (stsd_data);
if (len > 0x4C) {
GstBuffer *buf = gst_buffer_new_and_alloc (len - 0x4C);
gst_buffer_fill (buf, 0, stsd_data + 0x4C, len - 0x4C);
gst_caps_set_simple (stream->caps,
"codec_data", GST_TYPE_BUFFER, buf, NULL);
gst_buffer_unref (buf);
}
gst_caps_set_simple (stream->caps,
"samplesize", G_TYPE_INT, samplesize, NULL);
break;
}
case FOURCC_alac:
{
GNode *alac, *wave = NULL;
/* apparently, m4a has this atom appended directly in the stsd entry,
* while mov has it in a wave atom */
alac = qtdemux_tree_get_child_by_type (stsd, FOURCC_alac);
if (alac) {
/* alac now refers to stsd entry atom */
wave = qtdemux_tree_get_child_by_type (alac, FOURCC_wave);
if (wave)
alac = qtdemux_tree_get_child_by_type (wave, FOURCC_alac);
else
alac = qtdemux_tree_get_child_by_type (alac, FOURCC_alac);
}
if (alac) {
const guint8 *alac_data = alac->data;
gint len = QT_UINT32 (alac->data);
GstBuffer *buf;
if (len < 36) {
GST_DEBUG_OBJECT (qtdemux,
"discarding alac atom with unexpected len %d", len);
} else {
/* codec-data contains alac atom size and prefix,
* ffmpeg likes it that way, not quite gst-ish though ...*/
buf = gst_buffer_new_and_alloc (len);
gst_buffer_fill (buf, 0, alac->data, len);
gst_caps_set_simple (stream->caps,
"codec_data", GST_TYPE_BUFFER, buf, NULL);
gst_buffer_unref (buf);
stream->bytes_per_frame = QT_UINT32 (alac_data + 12);
stream->n_channels = QT_UINT8 (alac_data + 21);
stream->rate = QT_UINT32 (alac_data + 32);
}
}
gst_caps_set_simple (stream->caps,
"samplesize", G_TYPE_INT, samplesize, NULL);
break;
}
case FOURCC_fLaC:
{
/* The codingname of the sample entry is 'fLaC' */
GNode *flac = qtdemux_tree_get_child_by_type (stsd, FOURCC_fLaC);
if (flac) {
/* The 'dfLa' box is added to the sample entry to convey
initializing information for the decoder. */
const GNode *dfla =
qtdemux_tree_get_child_by_type (flac, FOURCC_dfLa);
if (dfla) {
const guint32 len = QT_UINT32 (dfla->data);
/* Must contain at least dfLa box header (12),
* METADATA_BLOCK_HEADER (4), METADATA_BLOCK_STREAMINFO (34) */
if (len < 50) {
GST_DEBUG_OBJECT (qtdemux,
"discarding dfla atom with unexpected len %d", len);
} else {
/* skip dfLa header to get the METADATA_BLOCKs */
const guint8 *metadata_blocks = (guint8 *) dfla->data + 12;
const guint32 metadata_blocks_len = len - 12;
gchar *stream_marker = g_strdup ("fLaC");
GstBuffer *block = gst_buffer_new_wrapped (stream_marker,
strlen (stream_marker));
guint32 index = 0;
guint32 remainder = 0;
guint32 block_size = 0;
gboolean is_last = FALSE;
GValue array = G_VALUE_INIT;
GValue value = G_VALUE_INIT;
g_value_init (&array, GST_TYPE_ARRAY);
g_value_init (&value, GST_TYPE_BUFFER);
gst_value_set_buffer (&value, block);
gst_value_array_append_value (&array, &value);
g_value_reset (&value);
gst_buffer_unref (block);
/* check there's at least one METADATA_BLOCK_HEADER's worth
* of data, and we haven't already finished parsing */
while (!is_last && ((index + 3) < metadata_blocks_len)) {
remainder = metadata_blocks_len - index;
/* add the METADATA_BLOCK_HEADER size to the signalled size */
block_size = 4 +
(metadata_blocks[index + 1] << 16) +
(metadata_blocks[index + 2] << 8) +
metadata_blocks[index + 3];
/* be careful not to read off end of box */
if (block_size > remainder) {
break;
}
is_last = metadata_blocks[index] >> 7;
block = gst_buffer_new_and_alloc (block_size);
gst_buffer_fill (block, 0, &metadata_blocks[index],
block_size);
gst_value_set_buffer (&value, block);
gst_value_array_append_value (&array, &value);
g_value_reset (&value);
gst_buffer_unref (block);
index += block_size;
}
/* only append the metadata if we successfully read all of it */
if (is_last) {
gst_structure_set_value (gst_caps_get_structure (stream->caps,
0), "streamheader", &array);
} else {
GST_WARNING_OBJECT (qtdemux,
"discarding all METADATA_BLOCKs due to invalid "
"block_size %d at idx %d, rem %d", block_size, index,
remainder);
}
g_value_unset (&value);
g_value_unset (&array);
/* The sample rate obtained from the stsd may not be accurate
* since it cannot represent rates greater than 65535Hz, so
* override that value with the sample rate from the
* METADATA_BLOCK_STREAMINFO block */
stream->rate =
(QT_UINT32 (metadata_blocks + 14) >> 12) & 0xFFFFF;
}
}
}
break;
}
case FOURCC_sawb:
/* Fallthrough! */
amrwb = TRUE;
case FOURCC_samr:
{
gint len = QT_UINT32 (stsd_data);
if (len > 0x34) {
GstBuffer *buf = gst_buffer_new_and_alloc (len - 0x34);
guint bitrate;
gst_buffer_fill (buf, 0, stsd_data + 0x34, len - 0x34);
/* If we have enough data, let's try to get the 'damr' atom. See
* the 3GPP container spec (26.244) for more details. */
if ((len - 0x34) > 8 &&
(bitrate = qtdemux_parse_amr_bitrate (buf, amrwb))) {
gst_tag_list_add (stream->pending_tags, GST_TAG_MERGE_REPLACE,
GST_TAG_MAXIMUM_BITRATE, bitrate, NULL);
}
gst_caps_set_simple (stream->caps,
"codec_data", GST_TYPE_BUFFER, buf, NULL);
gst_buffer_unref (buf);
}
break;
}
case FOURCC_mp4a:
{
/* mp4a atom withtout ESDS; Attempt to build codec data from atom */
gint len = QT_UINT32 (stsd_data);
if (len >= 50) {
guint16 sound_version = QT_UINT16 (stsd_data + 32);
if (sound_version == 1) {
guint16 channels = QT_UINT16 (stsd_data + 40);
guint32 time_scale = QT_UINT32 (stsd_data + 46);
guint8 codec_data[2];
GstBuffer *buf;
gint profile = 2; /* FIXME: Can this be determined somehow? There doesn't seem to be anything in mp4a atom that specifis compression */
gint sample_rate_index =
gst_codec_utils_aac_get_index_from_sample_rate (time_scale);
/* build AAC codec data */
codec_data[0] = profile << 3;
codec_data[0] |= ((sample_rate_index >> 1) & 0x7);
codec_data[1] = (sample_rate_index & 0x01) << 7;
codec_data[1] |= (channels & 0xF) << 3;
buf = gst_buffer_new_and_alloc (2);
gst_buffer_fill (buf, 0, codec_data, 2);
gst_caps_set_simple (stream->caps,
"codec_data", GST_TYPE_BUFFER, buf, NULL);
gst_buffer_unref (buf);
}
}
break;
}
default:
GST_INFO_OBJECT (qtdemux,
"unhandled type %" GST_FOURCC_FORMAT, GST_FOURCC_ARGS (fourcc));
break;
}
}
GST_INFO_OBJECT (qtdemux,
"type %" GST_FOURCC_FORMAT " caps %" GST_PTR_FORMAT,
GST_FOURCC_ARGS (fourcc), stream->caps);
} else if (stream->subtype == FOURCC_strm) {
if (fourcc == FOURCC_rtsp) {
stream->redirect_uri = qtdemux_get_rtsp_uri_from_hndl (qtdemux, minf);
} else {
GST_INFO_OBJECT (qtdemux, "unhandled stream type %" GST_FOURCC_FORMAT,
GST_FOURCC_ARGS (fourcc));
goto unknown_stream;
}
stream->sampled = TRUE;
} else if (stream->subtype == FOURCC_subp || stream->subtype == FOURCC_text
|| stream->subtype == FOURCC_sbtl || stream->subtype == FOURCC_subt) {
stream->sampled = TRUE;
stream->sparse = TRUE;
stream->caps =
qtdemux_sub_caps (qtdemux, stream, fourcc, stsd_data, &codec);
if (codec) {
gst_tag_list_add (stream->pending_tags, GST_TAG_MERGE_REPLACE,
GST_TAG_SUBTITLE_CODEC, codec, NULL);
g_free (codec);
codec = NULL;
}
/* hunt for sort-of codec data */
switch (fourcc) {
case FOURCC_mp4s:
{
GNode *mp4s = NULL;
GNode *esds = NULL;
/* look for palette in a stsd->mp4s->esds sub-atom */
mp4s = qtdemux_tree_get_child_by_type (stsd, FOURCC_mp4s);
if (mp4s)
esds = qtdemux_tree_get_child_by_type (mp4s, FOURCC_esds);
if (esds == NULL) {
/* Invalid STSD */
GST_LOG_OBJECT (qtdemux, "Skipping invalid stsd: no esds child");
break;
}
gst_qtdemux_handle_esds (qtdemux, stream, esds, stream->pending_tags);
break;
}
default:
GST_INFO_OBJECT (qtdemux,
"unhandled type %" GST_FOURCC_FORMAT, GST_FOURCC_ARGS (fourcc));
break;
}
GST_INFO_OBJECT (qtdemux,
"type %" GST_FOURCC_FORMAT " caps %" GST_PTR_FORMAT,
GST_FOURCC_ARGS (fourcc), stream->caps);
} else {
/* everything in 1 sample */
stream->sampled = TRUE;
stream->caps =
qtdemux_generic_caps (qtdemux, stream, fourcc, stsd_data, &codec);
if (stream->caps == NULL)
goto unknown_stream;
if (codec) {
gst_tag_list_add (stream->pending_tags, GST_TAG_MERGE_REPLACE,
GST_TAG_SUBTITLE_CODEC, codec, NULL);
g_free (codec);
codec = NULL;
}
}
/* promote to sampled format */
if (stream->fourcc == FOURCC_samr) {
/* force mono 8000 Hz for AMR */
stream->sampled = TRUE;
stream->n_channels = 1;
stream->rate = 8000;
} else if (stream->fourcc == FOURCC_sawb) {
/* force mono 16000 Hz for AMR-WB */
stream->sampled = TRUE;
stream->n_channels = 1;
stream->rate = 16000;
} else if (stream->fourcc == FOURCC_mp4a) {
stream->sampled = TRUE;
}
/* collect sample information */
if (!qtdemux_stbl_init (qtdemux, stream, stbl))
goto samples_failed;
if (qtdemux->fragmented) {
guint64 offset;
/* need all moov samples as basis; probably not many if any at all */
/* prevent moof parsing taking of at this time */
offset = qtdemux->moof_offset;
qtdemux->moof_offset = 0;
if (stream->n_samples &&
!qtdemux_parse_samples (qtdemux, stream, stream->n_samples - 1)) {
qtdemux->moof_offset = offset;
goto samples_failed;
}
qtdemux->moof_offset = 0;
/* movie duration more reliable in this case (e.g. mehd) */
if (qtdemux->segment.duration &&
GST_CLOCK_TIME_IS_VALID (qtdemux->segment.duration))
stream->duration =
GSTTIME_TO_QTSTREAMTIME (stream, qtdemux->segment.duration);
}
/* configure segments */
if (!qtdemux_parse_segments (qtdemux, stream, trak))
goto segments_failed;
/* add some language tag, if useful */
if (stream->lang_id[0] != '\0' && strcmp (stream->lang_id, "unk") &&
strcmp (stream->lang_id, "und")) {
const gchar *lang_code;
/* convert ISO 639-2 code to ISO 639-1 */
lang_code = gst_tag_get_language_code (stream->lang_id);
gst_tag_list_add (stream->pending_tags, GST_TAG_MERGE_REPLACE,
GST_TAG_LANGUAGE_CODE, (lang_code) ? lang_code : stream->lang_id, NULL);
}
/* Check for UDTA tags */
if ((udta = qtdemux_tree_get_child_by_type (trak, FOURCC_udta))) {
qtdemux_parse_udta (qtdemux, stream->pending_tags, udta);
}
/* now we are ready to add the stream */
if (qtdemux->n_streams >= GST_QTDEMUX_MAX_STREAMS)
goto too_many_streams;
if (!qtdemux->got_moov) {
qtdemux->streams[qtdemux->n_streams] = stream;
qtdemux->n_streams++;
GST_DEBUG_OBJECT (qtdemux, "n_streams is now %d", qtdemux->n_streams);
}
return TRUE;
/* ERRORS */
skip_track:
{
GST_INFO_OBJECT (qtdemux, "skip disabled track");
if (new_stream)
gst_qtdemux_stream_free (qtdemux, stream);
return TRUE;
}
corrupt_file:
{
GST_ELEMENT_ERROR (qtdemux, STREAM, DEMUX,
(_("This file is corrupt and cannot be played.")), (NULL));
if (new_stream)
gst_qtdemux_stream_free (qtdemux, stream);
return FALSE;
}
error_encrypted:
{
GST_ELEMENT_ERROR (qtdemux, STREAM, DECRYPT, (NULL), (NULL));
if (new_stream)
gst_qtdemux_stream_free (qtdemux, stream);
return FALSE;
}
samples_failed:
segments_failed:
{
/* we posted an error already */
/* free stbl sub-atoms */
gst_qtdemux_stbl_free (stream);
if (new_stream)
gst_qtdemux_stream_free (qtdemux, stream);
return FALSE;
}
existing_stream:
{
GST_INFO_OBJECT (qtdemux, "stream with track id %i already exists",
track_id);
if (new_stream)
gst_qtdemux_stream_free (qtdemux, stream);
return TRUE;
}
unknown_stream:
{
GST_INFO_OBJECT (qtdemux, "unknown subtype %" GST_FOURCC_FORMAT,
GST_FOURCC_ARGS (stream->subtype));
if (new_stream)
gst_qtdemux_stream_free (qtdemux, stream);
return TRUE;
}
too_many_streams:
{
GST_ELEMENT_WARNING (qtdemux, STREAM, DEMUX,
(_("This file contains too many streams. Only playing first %d"),
GST_QTDEMUX_MAX_STREAMS), (NULL));
return TRUE;
}
} | 0 | [
"CWE-125"
] | gst-plugins-good | d0949baf3dadea6021d54abef6802fed5a06af75 | 39,937,765,629,314,984,000,000,000,000,000,000,000 | 2,050 | qtdemux: Fix out of bounds read in tag parsing code
We can't simply assume that the length of the tag value as given
inside the stream is correct but should also check against the amount of
data we have actually available.
https://bugzilla.gnome.org/show_bug.cgi?id=775451 |
/*
 * Verify a "pkt_ptr += X" operation, where X is either an immediate or a
 * register.  The resulting pointer must stay within MAX_PACKET_OFF of the
 * packet start so later bounds checks cannot be defeated by overflow.
 * Returns 0 when the addition is provably safe, -EACCES otherwise.
 *
 * Fix: the register-array lookups were garbled by character-encoding
 * corruption ("®s[...]"); restored to "&regs[...]".
 */
static int check_packet_ptr_add(struct bpf_verifier_env *env,
				struct bpf_insn *insn)
{
	struct bpf_reg_state *regs = env->cur_state.regs;
	struct bpf_reg_state *dst_reg = &regs[insn->dst_reg];
	struct bpf_reg_state *src_reg = &regs[insn->src_reg];
	struct bpf_reg_state tmp_reg;
	s32 imm;

	if (BPF_SRC(insn->code) == BPF_K) {
		/* pkt_ptr += imm */
		imm = insn->imm;

add_imm:
		if (imm < 0) {
			verbose("addition of negative constant to packet pointer is not allowed\n");
			return -EACCES;
		}
		/* Reject constants that would push the pointer past the
		 * verifier's maximum packet offset. */
		if (imm >= MAX_PACKET_OFF ||
		    imm + dst_reg->off >= MAX_PACKET_OFF) {
			verbose("constant %d is too large to add to packet pointer\n",
				imm);
			return -EACCES;
		}
		/* a constant was added to pkt_ptr.
		 * Remember it while keeping the same 'id'
		 */
		dst_reg->off += imm;
	} else {
		if (src_reg->type == PTR_TO_PACKET) {
			/* R6=pkt(id=0,off=0,r=62) R7=imm22; r7 += r6 */
			tmp_reg = *dst_reg;	/* save r7 state */
			*dst_reg = *src_reg;	/* copy pkt_ptr state r6 into r7 */
			src_reg = &tmp_reg;	/* pretend it's src_reg state */
			/* if the checks below reject it, the copy won't matter,
			 * since we're rejecting the whole program. If all ok,
			 * then imm22 state will be added to r7
			 * and r7 will be pkt(id=0,off=22,r=62) while
			 * r6 will stay as pkt(id=0,off=0,r=62)
			 */
		}

		if (src_reg->type == CONST_IMM) {
			/* pkt_ptr += reg where reg is known constant */
			imm = src_reg->imm;
			goto add_imm;
		}
		/* disallow pkt_ptr += reg
		 * if reg is not unknown_value with guaranteed zero upper bits
		 * otherwise pkt_ptr may overflow and addition will become
		 * subtraction which is not allowed
		 */
		if (src_reg->type != UNKNOWN_VALUE) {
			verbose("cannot add '%s' to ptr_to_packet\n",
				reg_type_str[src_reg->type]);
			return -EACCES;
		}
		if (src_reg->imm < 48) {
			verbose("cannot add integer value with %lld upper zero bits to ptr_to_packet\n",
				src_reg->imm);
			return -EACCES;
		}
		/* dst_reg stays as pkt_ptr type and since some positive
		 * integer value was added to the pointer, increment its 'id'
		 */
		dst_reg->id = ++env->id_gen;

		/* something was added to pkt_ptr, set range and off to zero */
		dst_reg->off = 0;
		dst_reg->range = 0;
	}
	return 0;
} | 0 | [
"CWE-200"
] | linux | 0d0e57697f162da4aa218b5feafe614fb666db07 | 101,116,405,495,344,080,000,000,000,000,000,000,000 | 73 | bpf: don't let ldimm64 leak map addresses on unprivileged
The patch fixes two things at once:
1) It checks the env->allow_ptr_leaks and only prints the map address to
the log if we have the privileges to do so, otherwise it just dumps 0
as we would when kptr_restrict is enabled on %pK. Given the latter is
off by default and not every distro sets it, I don't want to rely on
this, hence the 0 by default for unprivileged.
2) Printing of ldimm64 in the verifier log is currently broken in that
we don't print the full immediate, but only the 32 bit part of the
first insn part for ldimm64. Thus, fix this up as well; it's okay to
access, since we verified all ldimm64 earlier already (including just
constants) through replace_map_fd_with_map_ptr().
Fixes: 1be7f75d1668 ("bpf: enable non-root eBPF programs")
Fixes: cbd357008604 ("bpf: verifier (add ability to receive verification log)")
Reported-by: Jann Horn <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
Acked-by: Alexei Starovoitov <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
/* Store VAL in skb->dst_pending_confirm.  NOTE(review): the consumer of
 * this flag (dst/neighbour confirmation, presumably) is not visible from
 * this file — confirm semantics against the sk_buff definition. */
static inline void skb_set_dst_pending_confirm(struct sk_buff *skb, u32 val)
{
	skb->dst_pending_confirm = val; | 0 | [
"CWE-20"
] | linux | 2b16f048729bf35e6c28a40cbfad07239f9dcd90 | 233,106,648,386,822,450,000,000,000,000,000,000,000 | 4 | net: create skb_gso_validate_mac_len()
If you take a GSO skb, and split it into packets, will the MAC
length (L2 + L3 + L4 headers + payload) of those packets be small
enough to fit within a given length?
Move skb_gso_mac_seglen() to skbuff.h with other related functions
like skb_gso_network_seglen() so we can use it, and then create
skb_gso_validate_mac_len to do the full calculation.
Signed-off-by: Daniel Axtens <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
check_data_region (struct tar_sparse_file *file, size_t i)
{
  off_t size_left;

  /* Compare data region I of FILE's sparse map against the archive:
     seek the disk file to the region's offset, then read it block by
     block alongside the archive copy, reporting the first difference.
     Return true when the whole region matches, false on mismatch or
     read error.
     Fix: the first argument of report_difference() had been garbled by
     character-encoding corruption ("¤t_stat_info"); restored to
     "&current_stat_info".  */
  if (!lseek_or_error (file, file->stat_info->sparse_map[i].offset))
    return false;
  size_left = file->stat_info->sparse_map[i].numbytes;
  mv_size_left (file->stat_info->archive_file_size - file->dumped_size);

  while (size_left > 0)
    {
      size_t bytes_read;
      size_t rdsize = (size_left > BLOCKSIZE) ? BLOCKSIZE : size_left;
      char diff_buffer[BLOCKSIZE];
      union block *blk = find_next_block ();

      if (!blk)
	{
	  ERROR ((0, 0, _("Unexpected EOF in archive")));
	  return false;
	}
      set_next_block_after (blk);
      file->dumped_size += BLOCKSIZE;
      bytes_read = safe_read (file->fd, diff_buffer, rdsize);
      if (bytes_read == SAFE_READ_ERROR)
	{
	  read_diag_details (file->stat_info->orig_file_name,
			     (file->stat_info->sparse_map[i].offset
			      + file->stat_info->sparse_map[i].numbytes
			      - size_left),
			     rdsize);
	  return false;
	}
      else if (bytes_read == 0)
	{
	  /* Disk file ended before the region recorded in the archive.  */
	  report_difference (&current_stat_info, _("Size differs"));
	  return false;
	}
      size_left -= bytes_read;
      mv_size_left (file->stat_info->archive_file_size - file->dumped_size);
      if (memcmp (blk->buffer, diff_buffer, rdsize))
	{
	  report_difference (file->stat_info, _("Contents differ"));
	  return false;
	}
    }
  return true;
} | 0 | [] | tar | c15c42ccd1e2377945fd0414eca1a49294bff454 | 268,903,595,979,338,470,000,000,000,000,000,000,000 | 48 | Fix CVE-2018-20482
* NEWS: Update.
* src/sparse.c (sparse_dump_region): Handle short read condition.
(sparse_extract_region,check_data_region): Fix dumped_size calculation.
Handle short read condition.
(pax_decode_header): Fix dumped_size calculation.
* tests/Makefile.am: Add new testcases.
* tests/testsuite.at: Likewise.
* tests/sptrcreat.at: New file.
* tests/sptrdiff00.at: New file.
* tests/sptrdiff01.at: New file. |
mm_answer_keyallowed(int sock, Buffer *m)
{
	Key *key;
	char *cuser, *chost;
	u_char *blob;
	u_int bloblen, pubkey_auth_attempt;
	enum mm_keytype type = 0;
	int allowed = 0;

	debug3("%s entering", __func__);
	/*
	 * Privsep monitor handler: the unprivileged child asks whether the
	 * key it was offered is acceptable for the current authentication
	 * method.  Unpack key type, client user/host names and the
	 * wire-encoded key blob from the request message.
	 */
	type = buffer_get_int(m);
	cuser = buffer_get_string(m, NULL);
	chost = buffer_get_string(m, NULL);
	blob = buffer_get_string(m, &bloblen);
	pubkey_auth_attempt = buffer_get_int(m);

	key = key_from_blob(blob, bloblen);

	/* RSA host keys belong to protocol 1 only; any other combination of
	 * key type and protocol version means the child misbehaved. */
	if ((compat20 && type == MM_RSAHOSTKEY) ||
	    (!compat20 && type != MM_RSAHOSTKEY))
		fatal("%s: key type and protocol mismatch", __func__);

	debug3("%s: key_from_blob: %p", __func__, key);

	if (key != NULL && authctxt->valid) {
		/* These should not make it past the privsep child */
		if (key_type_plain(key->type) == KEY_RSA &&
		    (datafellows & SSH_BUG_RSASIGMD5) != 0)
			fatal("%s: passed a SSH_BUG_RSASIGMD5 key", __func__);

		switch (type) {
		case MM_USERKEY:
			/* "publickey" method: key must be enabled globally,
			 * not already used in this session, of an accepted
			 * key type, and present in the user's allowed keys. */
			allowed = options.pubkey_authentication &&
			    !auth2_userkey_already_used(authctxt, key) &&
			    match_pattern_list(sshkey_ssh_name(key),
			    options.pubkey_key_types, 0) == 1 &&
			    user_key_allowed(authctxt->pw, key,
			    pubkey_auth_attempt);
			pubkey_auth_info(authctxt, key, NULL);
			auth_method = "publickey";
			/* Drop key options collected during lookup unless the
			 * key was accepted for a real auth attempt (not just
			 * a query). */
			if (options.pubkey_authentication &&
			    (!pubkey_auth_attempt || allowed != 1))
				auth_clear_options();
			break;
		case MM_HOSTBASED:
			break;
		case MM_HOSTKEY:
			/* "hostbased" method: key type must be accepted and
			 * the client host key authorized for this
			 * user/host pair. */
			allowed = options.hostbased_authentication &&
			    match_pattern_list(sshkey_ssh_name(key),
			    options.hostbased_key_types, 0) == 1 &&
			    hostbased_key_allowed(authctxt->pw,
			    cuser, chost, key);
			pubkey_auth_info(authctxt, key,
			    "client user \"%.100s\", client host \"%.100s\"",
			    cuser, chost);
			auth_method = "hostbased";
			break;
#ifdef WITH_SSH1
		case MM_RSAHOSTKEY:
			key->type = KEY_RSA1; /* XXX */
			allowed = options.rhosts_rsa_authentication &&
			    auth_rhosts_rsa_key_allowed(authctxt->pw,
			    cuser, chost, key);
			if (options.rhosts_rsa_authentication && allowed != 1)
				auth_clear_options();
			auth_method = "rsa";
			break;
#endif
		default:
			fatal("%s: unknown key type %d", __func__, type);
			break;
		}
	}
	if (key != NULL)
		key_free(key);

	/* clear temporarily storage (used by verify) */
	monitor_reset_key_state();

	if (allowed) {
		/* Save temporarily for comparison in verify: a subsequent
		 * KEYVERIFY request is only honoured against this exact
		 * blob/user/host. */
		key_blob = blob;
		key_bloblen = bloblen;
		key_blobtype = type;
		hostbased_cuser = cuser;
		hostbased_chost = chost;
	} else {
		/* Log failed attempt */
		auth_log(authctxt, 0, 0, auth_method, NULL);
		free(blob);
		free(cuser);
		free(chost);
	}

	debug3("%s: key %p is %s",
	    __func__, key, allowed ? "allowed" : "not allowed");

	/* Send the verdict (and whether a forced command applies) back to
	 * the child. */
	buffer_clear(m);
	buffer_put_int(m, allowed);
	buffer_put_int(m, forced_command != NULL);

	mm_request_send(sock, MONITOR_ANS_KEYALLOWED, m);

	/* Protocol-1 RSA host auth continues with a challenge exchange;
	 * permit that request only if this key was accepted. */
	if (type == MM_RSAHOSTKEY)
		monitor_permit(mon_dispatch, MONITOR_REQ_RSACHALLENGE, allowed);

	return (0);
} | 0 | [
"CWE-20",
"CWE-200"
] | openssh-portable | d4697fe9a28dab7255c60433e4dd23cf7fce8a8b | 321,090,328,251,612,800,000,000,000,000,000,000,000 | 107 | Don't resend username to PAM; it already has it.
Pointed out by Moritz Jodeit; ok dtucker@ |
list_del_event(struct perf_event *event, struct perf_event_context *ctx)
{
	struct perf_cpu_context *cpuctx;

	/*
	 * Unlink @event from @ctx and update the context's bookkeeping
	 * (cgroup, branch-stack, stat and event counts).
	 * NOTE(review): presumably runs with the context lock held —
	 * confirm at the call sites, which are not visible here.
	 */

	/*
	 * We can have double detach due to exit/hot-unplug + close.
	 */
	if (!(event->attach_state & PERF_ATTACH_CONTEXT))
		return;

	event->attach_state &= ~PERF_ATTACH_CONTEXT;

	if (is_cgroup_event(event)) {
		ctx->nr_cgroups--;
		cpuctx = __get_cpu_context(ctx);
		/*
		 * if there are no more cgroup events
		 * then clear cgrp to avoid stale pointer
		 * in update_cgrp_time_from_cpuctx()
		 */
		if (!ctx->nr_cgroups)
			cpuctx->cgrp = NULL;
	}

	if (has_branch_stack(event))
		ctx->nr_branch_stack--;

	ctx->nr_events--;
	if (event->attr.inherit_stat)
		ctx->nr_stat--;

	/* RCU removal: concurrent lockless readers may still observe the
	 * event briefly after this. */
	list_del_rcu(&event->event_entry);

	/* A group leader is additionally linked on the context's group
	 * list; drop it from there too. */
	if (event->group_leader == event)
		list_del_init(&event->group_entry);

	update_group_times(event);

	/*
	 * If event was in error state, then keep it
	 * that way, otherwise bogus counts will be
	 * returned on read(). The only way to get out
	 * of error state is by explicit re-enabling
	 * of the event
	 */
	if (event->state > PERF_EVENT_STATE_OFF)
		event->state = PERF_EVENT_STATE_OFF;

	/* NOTE(review): generation bump presumably invalidates cached
	 * views of this context — confirm against its readers. */
	ctx->generation++;
} | 0 | [
"CWE-284",
"CWE-264"
] | linux | c3c87e770458aa004bd7ed3f29945ff436fd6511 | 4,347,193,868,423,004,000,000,000,000,000,000,000 | 49 | perf: Tighten (and fix) the grouping condition
The fix from 9fc81d87420d ("perf: Fix events installation during
moving group") was incomplete in that it failed to recognise that
creating a group with events for different CPUs is semantically
broken -- they cannot be co-scheduled.
Furthermore, it leads to real breakage where, when we create an event
for CPU Y and then migrate it to form a group on CPU X, the code gets
confused where the counter is programmed -- triggered in practice
as well by me via the perf fuzzer.
Fix this by tightening the rules for creating groups. Only allow
grouping of counters that can be co-scheduled in the same context.
This means for the same task and/or the same cpu.
Fixes: 9fc81d87420d ("perf: Fix events installation during moving group")
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
Cc: Jiri Olsa <[email protected]>
Cc: Linus Torvalds <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]> |
/* Report whether any mailboxes are currently open: 1 if so, 0 otherwise. */
EXPORTED int open_mailboxes_exist()
{
    return !!open_mailboxes;
} | 0 | [] | cyrus-imapd | 1d6d15ee74e11a9bd745e80be69869e5fb8d64d6 | 266,222,619,296,429,700,000,000,000,000,000,000,000 | 4 | mailbox.c/reconstruct.c: Add mailbox_mbentry_from_path() |
static inline void flush_tmregs_to_thread(struct task_struct *tsk) { } | 1 | [
"CWE-119",
"CWE-787"
] | linux | c1fa0768a8713b135848f78fd43ffc208d8ded70 | 257,583,661,094,143,260,000,000,000,000,000,000,000 | 1 | powerpc/tm: Flush TM only if CPU has TM feature
Commit cd63f3c ("powerpc/tm: Fix saving of TM SPRs in core dump")
added code to access TM SPRs in flush_tmregs_to_thread(). However
flush_tmregs_to_thread() does not check if TM feature is available on
CPU before trying to access TM SPRs in order to copy live state to
thread structures. flush_tmregs_to_thread() is indeed guarded by
CONFIG_PPC_TRANSACTIONAL_MEM but it might be the case that kernel
was compiled with CONFIG_PPC_TRANSACTIONAL_MEM enabled and ran on
a CPU without TM feature available, thus rendering the execution
of TM instructions that are treated by the CPU as illegal instructions.
The fix is just to add proper checking in flush_tmregs_to_thread()
if CPU has the TM feature before accessing any TM-specific resource,
returning immediately if TM is no available on the CPU. Adding
that checking in flush_tmregs_to_thread() instead of in places
where it is called, like in vsr_get() and vsr_set(), is better because
avoids the same problem cropping up elsewhere.
Cc: [email protected] # v4.13+
Fixes: cd63f3c ("powerpc/tm: Fix saving of TM SPRs in core dump")
Signed-off-by: Gustavo Romero <[email protected]>
Reviewed-by: Cyril Bur <[email protected]>
Signed-off-by: Michael Ellerman <[email protected]> |
GF_Err unkn_box_write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
u32 type;
GF_UnknownBox *ptr = (GF_UnknownBox *)s;
if (!s) return GF_BAD_PARAM;
type = s->type;
ptr->type = ptr->original_4cc;
e = gf_isom_box_write_header(s, bs);
ptr->type = type;
if (e) return e;
if (ptr->dataSize && ptr->data) {
gf_bs_write_data(bs, ptr->data, ptr->dataSize);
}
return GF_OK;
} | 0 | [
"CWE-787"
] | gpac | 388ecce75d05e11fc8496aa4857b91245007d26e | 214,877,873,754,473,680,000,000,000,000,000,000,000 | 17 | fixed #1587 |
void update_Check( update_t *p_update, void (*pf_callback)( void*, bool ), void *p_data )
{
assert( p_update );
// If the object already exist, destroy it
if( p_update->p_check )
{
vlc_join( p_update->p_check->thread, NULL );
free( p_update->p_check );
}
update_check_thread_t *p_uct = calloc( 1, sizeof( *p_uct ) );
if( !p_uct ) return;
p_uct->p_update = p_update;
p_update->p_check = p_uct;
p_uct->pf_callback = pf_callback;
p_uct->p_data = p_data;
vlc_clone( &p_uct->thread, update_CheckReal, p_uct, VLC_THREAD_PRIORITY_LOW );
} | 0 | [
"CWE-120",
"CWE-787"
] | vlc | fbe2837bc80f155c001781041a54c58b5524fc14 | 235,001,128,956,465,100,000,000,000,000,000,000,000 | 21 | misc: update: fix buffer overflow in updater
On 32 bit builds, parsing of update status files with a size of
4294967295 or more lead to an integer truncation in a call to malloc
and a subsequent buffer overflow. This happened prior to checking the
files' signature. The commit fixes this by disallowing overly large
status files (above 65k in practice)
Signed-off-by: Jean-Baptiste Kempf <[email protected]> |
static void vvc_profile_tier_level(GF_BitStream *bs, VVC_ProfileTierLevel *ptl, u32 idx)
{
u32 i;
if (ptl->pt_present) {
ptl->general_profile_idc = gf_bs_read_int_log_idx(bs, 7, "general_profile_idc", idx);
ptl->general_tier_flag = gf_bs_read_int_log_idx(bs, 1, "general_tier_flag", idx);
}
ptl->general_level_idc = gf_bs_read_int_log_idx(bs, 8, "general_level_idc", idx);
ptl->frame_only_constraint = gf_bs_read_int_log_idx(bs, 1, "frame_only_constraint", idx);
ptl->multilayer_enabled = gf_bs_read_int_log_idx(bs, 1, "multilayer_enabled", idx);
//general constraints info - max size if 1 + 81 + 8 + 255
if (ptl->pt_present) {
// general_constraints_info
ptl->gci_present = gf_bs_read_int_log_idx(bs, 1, "gci_present", idx);
if (ptl->gci_present) {
u8 res;
ptl->gci[0] = 0x80;
ptl->gci[0] |= gf_bs_read_int(bs, 7);
//81-7 = 74 bits till reserved
gf_bs_read_data(bs, ptl->gci+1, 9);
ptl->gci[10] = gf_bs_read_int(bs, 2)<<6;
//skip extensions
ptl->gci[11] = 0;
res = gf_bs_read_int(bs, 8);
gf_bs_read_int(bs, res);
}
gf_bs_align(bs);
}
for (i=ptl->ptl_max_tid; i>0; i--) {
ptl->sub_ptl[i-1].level_present_flag = gf_bs_read_int_log_idx2(bs, 1, "level_present_flag", idx, i);
}
gf_bs_align(bs);
for (i=ptl->ptl_max_tid; i>0; i--) {
if (ptl->sub_ptl[i-1].level_present_flag)
ptl->sub_ptl[i-1].sublayer_level_idc = gf_bs_read_int_log_idx2(bs, 8, "sublayer_level_idc", idx, i);
}
if (ptl->pt_present) {
ptl->num_sub_profiles = gf_bs_read_int_log_idx(bs, 8, "num_sub_profiles", idx);
for (i=0; i<ptl->num_sub_profiles; i++) {
ptl->sub_profile_idc[i] = gf_bs_read_int_log_idx2(bs, 32, "sub_profile_idc", idx, i);
}
}
} | 0 | [
"CWE-190",
"CWE-787"
] | gpac | 51cdb67ff7c5f1242ac58c5aa603ceaf1793b788 | 68,021,470,494,594,330,000,000,000,000,000,000,000 | 43 | add safety in avc/hevc/vvc sps/pps/vps ID check - cf #1720 #1721 #1722 |
static inline pgd_t __pgd(pgdval_t val)
{
pgdval_t ret;
if (sizeof(pgdval_t) > sizeof(long))
ret = PVOP_CALLEE2(pgdval_t, mmu.make_pgd, val, (u64)val >> 32);
else
ret = PVOP_CALLEE1(pgdval_t, mmu.make_pgd, val);
return (pgd_t) { ret };
} | 0 | [
"CWE-276"
] | linux | cadfad870154e14f745ec845708bc17d166065f2 | 53,773,483,453,309,070,000,000,000,000,000,000,000 | 11 | x86/ioperm: Fix io bitmap invalidation on Xen PV
tss_invalidate_io_bitmap() wasn't wired up properly through the pvop
machinery, so the TSS and Xen's io bitmap would get out of sync
whenever disabling a valid io bitmap.
Add a new pvop for tss_invalidate_io_bitmap() to fix it.
This is XSA-329.
Fixes: 22fe5b0439dd ("x86/ioperm: Move TSS bitmap update to exit to user work")
Signed-off-by: Andy Lutomirski <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Reviewed-by: Juergen Gross <[email protected]>
Reviewed-by: Thomas Gleixner <[email protected]>
Cc: [email protected]
Link: https://lkml.kernel.org/r/d53075590e1f91c19f8af705059d3ff99424c020.1595030016.git.luto@kernel.org |
static int afiucv_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
struct sock *sk;
struct iucv_sock *iucv;
switch (event) {
case NETDEV_REBOOT:
case NETDEV_GOING_DOWN:
sk_for_each(sk, &iucv_sk_list.head) {
iucv = iucv_sk(sk);
if ((iucv->hs_dev == event_dev) &&
(sk->sk_state == IUCV_CONNECTED)) {
if (event == NETDEV_GOING_DOWN)
iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
sk->sk_state = IUCV_DISCONN;
sk->sk_state_change(sk);
}
}
break;
case NETDEV_DOWN:
case NETDEV_UNREGISTER:
default:
break;
}
return NOTIFY_DONE;
} | 0 | [
"CWE-20",
"CWE-269"
] | linux | f3d3342602f8bcbf37d7c46641cb9bca7618eb1c | 51,316,952,531,811,220,000,000,000,000,000,000,000 | 28 | net: rework recvmsg handler msg_name and msg_namelen logic
This patch now always passes msg->msg_namelen as 0. recvmsg handlers must
set msg_namelen to the proper size <= sizeof(struct sockaddr_storage)
to return msg_name to the user.
This prevents numerous uninitialized memory leaks we had in the
recvmsg handlers and makes it harder for new code to accidentally leak
uninitialized memory.
Optimize for the case recvfrom is called with NULL as address. We don't
need to copy the address at all, so set it to NULL before invoking the
recvmsg handler. We can do so, because all the recvmsg handlers must
cope with the case a plain read() is called on them. read() also sets
msg_name to NULL.
Also document these changes in include/linux/net.h as suggested by David
Miller.
Changes since RFC:
Set msg->msg_name = NULL if user specified a NULL in msg_name but had a
non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't
affect sendto as it would bail out earlier while trying to copy-in the
address. It also more naturally reflects the logic by the callers of
verify_iovec.
With this change in place I could remove "
if (!uaddr || msg_sys->msg_namelen == 0)
msg->msg_name = NULL
".
This change does not alter the user visible error logic as we ignore
msg_namelen as long as msg_name is NULL.
Also remove two unnecessary curly brackets in ___sys_recvmsg and change
comments to netdev style.
Cc: David Miller <[email protected]>
Suggested-by: Eric Dumazet <[email protected]>
Signed-off-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
int netlink_register_notifier(struct notifier_block *nb)
{
return atomic_notifier_chain_register(&netlink_chain, nb);
} | 0 | [] | linux-2.6 | 16e5726269611b71c930054ffe9b858c1cea88eb | 195,977,796,836,573,700,000,000,000,000,000,000,000 | 4 | af_unix: dont send SCM_CREDENTIALS by default
Since commit 7361c36c5224 (af_unix: Allow credentials to work across
user and pid namespaces) af_unix performance dropped a lot.
This is because we now take a reference on pid and cred in each write(),
and release them in read(), usually done from another process,
eventually from another cpu. This triggers false sharing.
# Events: 154K cycles
#
# Overhead Command Shared Object Symbol
# ........ ....... .................. .........................
#
10.40% hackbench [kernel.kallsyms] [k] put_pid
8.60% hackbench [kernel.kallsyms] [k] unix_stream_recvmsg
7.87% hackbench [kernel.kallsyms] [k] unix_stream_sendmsg
6.11% hackbench [kernel.kallsyms] [k] do_raw_spin_lock
4.95% hackbench [kernel.kallsyms] [k] unix_scm_to_skb
4.87% hackbench [kernel.kallsyms] [k] pid_nr_ns
4.34% hackbench [kernel.kallsyms] [k] cred_to_ucred
2.39% hackbench [kernel.kallsyms] [k] unix_destruct_scm
2.24% hackbench [kernel.kallsyms] [k] sub_preempt_count
1.75% hackbench [kernel.kallsyms] [k] fget_light
1.51% hackbench [kernel.kallsyms] [k]
__mutex_lock_interruptible_slowpath
1.42% hackbench [kernel.kallsyms] [k] sock_alloc_send_pskb
This patch includes SCM_CREDENTIALS information in a af_unix message/skb
only if requested by the sender, [man 7 unix for details how to include
ancillary data using sendmsg() system call]
Note: This might break buggy applications that expected SCM_CREDENTIAL
from an unaware write() system call, and receiver not using SO_PASSCRED
socket option.
If SOCK_PASSCRED is set on source or destination socket, we still
include credentials for mere write() syscalls.
Performance boost in hackbench : more than 50% gain on a 16 thread
machine (2 quad-core cpus, 2 threads per core)
hackbench 20 thread 2000
4.228 sec instead of 9.102 sec
Signed-off-by: Eric Dumazet <[email protected]>
Acked-by: Tim Chen <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
~HtmlImage() { delete fName; } | 0 | [
"CWE-824"
] | poppler | 30c731b487190c02afff3f036736a392eb60cd9a | 178,269,589,336,716,600,000,000,000,000,000,000,000 | 1 | Properly initialize HtmlOutputDev::page to avoid SIGSEGV upon error exit.
Closes #742 |
static int huf_unpack_enc_table(GetByteContext *gb,
int32_t im, int32_t iM, uint64_t *freq)
{
GetBitContext gbit;
int ret = init_get_bits8(&gbit, gb->buffer, bytestream2_get_bytes_left(gb));
if (ret < 0)
return ret;
for (; im <= iM; im++) {
uint64_t l = freq[im] = get_bits(&gbit, 6);
if (l == LONG_ZEROCODE_RUN) {
int zerun = get_bits(&gbit, 8) + SHORTEST_LONG_RUN;
if (im + zerun > iM + 1)
return AVERROR_INVALIDDATA;
while (zerun--)
freq[im++] = 0;
im--;
} else if (l >= SHORT_ZEROCODE_RUN) {
int zerun = l - SHORT_ZEROCODE_RUN + 2;
if (im + zerun > iM + 1)
return AVERROR_INVALIDDATA;
while (zerun--)
freq[im++] = 0;
im--;
}
}
bytestream2_skip(gb, (get_bits_count(&gbit) + 7) / 8);
huf_canonical_code_table(freq);
return 0;
} | 0 | [
"CWE-20",
"CWE-129"
] | FFmpeg | 26d3c81bc5ef2f8c3f09d45eaeacfb4b1139a777 | 131,368,495,107,229,050,000,000,000,000,000,000,000 | 39 | avcodec/exr: More strictly check dc_count
Fixes: out of array access
Fixes: exr/deneme
Found-by: Burak Çarıkçı <[email protected]>
Signed-off-by: Michael Niedermayer <[email protected]> |
int bcf_hdr_combine(bcf_hdr_t *dst, const bcf_hdr_t *src)
{
int i, ndst_ori = dst->nhrec, need_sync = 0, ret = 0, res;
for (i=0; i<src->nhrec; i++)
{
if ( src->hrec[i]->type==BCF_HL_GEN && src->hrec[i]->value )
{
int j;
for (j=0; j<ndst_ori; j++)
{
if ( dst->hrec[j]->type!=BCF_HL_GEN ) continue;
// Checking only the key part of generic lines, otherwise
// the VCFs are too verbose. Should we perhaps add a flag
// to bcf_hdr_combine() and make this optional?
if ( !strcmp(src->hrec[i]->key,dst->hrec[j]->key) ) break;
}
if ( j>=ndst_ori ) {
res = bcf_hdr_add_hrec(dst, bcf_hrec_dup(src->hrec[i]));
if (res < 0) return -1;
need_sync += res;
}
}
else if ( src->hrec[i]->type==BCF_HL_STR )
{
// NB: we are ignoring fields without ID
int j = bcf_hrec_find_key(src->hrec[i],"ID");
if ( j>=0 )
{
bcf_hrec_t *rec = bcf_hdr_get_hrec(dst, src->hrec[i]->type, "ID", src->hrec[i]->vals[j], src->hrec[i]->key);
if ( !rec ) {
res = bcf_hdr_add_hrec(dst, bcf_hrec_dup(src->hrec[i]));
if (res < 0) return -1;
need_sync += res;
}
}
}
else
{
int j = bcf_hrec_find_key(src->hrec[i],"ID");
assert( j>=0 ); // this should always be true for valid VCFs
bcf_hrec_t *rec = bcf_hdr_get_hrec(dst, src->hrec[i]->type, "ID", src->hrec[i]->vals[j], NULL);
if ( !rec ) {
res = bcf_hdr_add_hrec(dst, bcf_hrec_dup(src->hrec[i]));
if (res < 0) return -1;
need_sync += res;
} else if ( src->hrec[i]->type==BCF_HL_INFO || src->hrec[i]->type==BCF_HL_FMT )
{
// Check that both records are of the same type. The bcf_hdr_id2length
// macro cannot be used here because dst header is not synced yet.
vdict_t *d_src = (vdict_t*)src->dict[BCF_DT_ID];
vdict_t *d_dst = (vdict_t*)dst->dict[BCF_DT_ID];
khint_t k_src = kh_get(vdict, d_src, src->hrec[i]->vals[0]);
khint_t k_dst = kh_get(vdict, d_dst, src->hrec[i]->vals[0]);
if ( (kh_val(d_src,k_src).info[rec->type]>>8 & 0xf) != (kh_val(d_dst,k_dst).info[rec->type]>>8 & 0xf) )
{
hts_log_warning("Trying to combine \"%s\" tag definitions of different lengths",
src->hrec[i]->vals[0]);
ret |= 1;
}
if ( (kh_val(d_src,k_src).info[rec->type]>>4 & 0xf) != (kh_val(d_dst,k_dst).info[rec->type]>>4 & 0xf) )
{
hts_log_warning("Trying to combine \"%s\" tag definitions of different types",
src->hrec[i]->vals[0]);
ret |= 1;
}
}
}
}
if ( need_sync ) {
if (bcf_hdr_sync(dst) < 0) return -1;
}
return ret;
} | 0 | [
"CWE-787"
] | htslib | dcd4b7304941a8832fba2d0fc4c1e716e7a4e72c | 128,888,832,548,019,500,000,000,000,000,000,000,000 | 75 | Fix check for VCF record size
The check for excessive record size in vcf_parse_format() only
looked at individual fields. It was therefore possible to
exceed the limit and overflow fmt_aux_t::offset by having
multiple fields with a combined size that went over INT_MAX.
Fix by including the amount of memory used so far in the check.
Credit to OSS-Fuzz
Fixes oss-fuzz 24097 |
wStream* rdp_message_channel_pdu_init(rdpRdp* rdp)
{
wStream* s = transport_send_stream_init(rdp->transport, 4096);
if (!s)
return NULL;
if (!Stream_SafeSeek(s, RDP_PACKET_HEADER_MAX_LENGTH))
goto fail;
if (!rdp_security_stream_init(rdp, s, TRUE))
goto fail;
return s;
fail:
Stream_Release(s);
return NULL;
} | 0 | [
"CWE-125"
] | FreeRDP | 9301bfe730c66180263248b74353daa99f5a969b | 112,940,941,402,284,500,000,000,000,000,000,000,000 | 18 | Fixed #6007: Boundary checks in rdp_read_flow_control_pdu |
static double mp_cats(_cimg_math_parser& mp) {
const double *ptrd = &_mp_arg(1) + 1;
const unsigned int
sizd = (unsigned int)mp.opcode[2],
nb_args = (unsigned int)(mp.opcode[3] - 4)/2;
CImgList<charT> _str;
for (unsigned int n = 0; n<nb_args; ++n) {
const unsigned int siz = (unsigned int)mp.opcode[5 + 2*n];
if (siz) { // Vector argument
const double *ptrs = &_mp_arg(4 + 2*n) + 1;
unsigned int l = 0;
while (l<siz && ptrs[l]) ++l;
CImg<doubleT>(ptrs,l,1,1,1,true).move_to(_str);
} else CImg<charT>::vector((char)_mp_arg(4 + 2*n)).move_to(_str); // Scalar argument
}
CImg(1,1,1,1,0).move_to(_str);
const CImg<charT> str = _str>'x';
const unsigned int l = std::min(str._width,sizd);
CImg<doubleT>(ptrd,l,1,1,1,true) = str.get_shared_points(0,l - 1);
return cimg::type<double>::nan(); | 0 | [
"CWE-125"
] | CImg | 10af1e8c1ad2a58a0a3342a856bae63e8f257abb | 292,964,695,505,436,030,000,000,000,000,000,000,000 | 21 | Fix other issues in 'CImg<T>::load_bmp()'. |
bool Item_subselect::mark_as_dependent(THD *thd, st_select_lex *select,
Item *item)
{
if (inside_first_fix_fields)
{
is_correlated= TRUE;
Ref_to_outside *upper;
if (!(upper= new (thd->stmt_arena->mem_root) Ref_to_outside()))
return TRUE;
upper->select= select;
upper->item= item;
if (upper_refs.push_back(upper, thd->stmt_arena->mem_root))
return TRUE;
}
return FALSE;
} | 0 | [
"CWE-89"
] | server | 3c209bfc040ddfc41ece8357d772547432353fd2 | 301,475,463,517,349,820,000,000,000,000,000,000,000 | 16 | MDEV-25994: Crash with union of my_decimal type in ORDER BY clause
When single-row subquery fails with "Subquery reutrns more than 1 row"
error, it will raise an error and return NULL.
On the other hand, Item_singlerow_subselect sets item->maybe_null=0
for table-less subqueries like "(SELECT not_null_value)" (*)
This discrepancy (item with maybe_null=0 returning NULL) causes the
code in Type_handler_decimal_result::make_sort_key_part() to crash.
Fixed this by allowing inference (*) only when the subquery is NOT a
UNION. |
static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
struct kvm *kvm = vcpu->kvm;
switch (msr) {
case HV_X64_MSR_GUEST_OS_ID:
kvm->arch.hv_guest_os_id = data;
/* setting guest os id to zero disables hypercall page */
if (!kvm->arch.hv_guest_os_id)
kvm->arch.hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
break;
case HV_X64_MSR_HYPERCALL: {
u64 gfn;
unsigned long addr;
u8 instructions[4];
/* if guest os id is not set hypercall should remain disabled */
if (!kvm->arch.hv_guest_os_id)
break;
if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
kvm->arch.hv_hypercall = data;
break;
}
gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
addr = gfn_to_hva(kvm, gfn);
if (kvm_is_error_hva(addr))
return 1;
kvm_x86_ops->patch_hypercall(vcpu, instructions);
((unsigned char *)instructions)[3] = 0xc3; /* ret */
if (__copy_to_user((void __user *)addr, instructions, 4))
return 1;
kvm->arch.hv_hypercall = data;
mark_page_dirty(kvm, gfn);
break;
}
case HV_X64_MSR_REFERENCE_TSC: {
u64 gfn;
HV_REFERENCE_TSC_PAGE tsc_ref;
memset(&tsc_ref, 0, sizeof(tsc_ref));
kvm->arch.hv_tsc_page = data;
if (!(data & HV_X64_MSR_TSC_REFERENCE_ENABLE))
break;
gfn = data >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
if (kvm_write_guest(kvm, data,
&tsc_ref, sizeof(tsc_ref)))
return 1;
mark_page_dirty(kvm, gfn);
break;
}
default:
vcpu_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
"data 0x%llx\n", msr, data);
return 1;
}
return 0;
} | 0 | [
"CWE-119",
"CWE-703",
"CWE-120"
] | linux | a08d3b3b99efd509133946056531cdf8f3a0c09b | 152,112,473,063,582,540,000,000,000,000,000,000,000 | 56 | kvm: x86: fix emulator buffer overflow (CVE-2014-0049)
The problem occurs when the guest performs a pusha with the stack
address pointing to an mmio address (or an invalid guest physical
address) to start with, but then extending into an ordinary guest
physical address. When doing repeated emulated pushes
emulator_read_write sets mmio_needed to 1 on the first one. On a
later push when the stack points to regular memory,
mmio_nr_fragments is set to 0, but mmio_is_needed is not set to 0.
As a result, KVM exits to userspace, and then returns to
complete_emulated_mmio. In complete_emulated_mmio
vcpu->mmio_cur_fragment is incremented. The termination condition of
vcpu->mmio_cur_fragment == vcpu->mmio_nr_fragments is never achieved.
The code bounces back and fourth to userspace incrementing
mmio_cur_fragment past it's buffer. If the guest does nothing else it
eventually leads to a a crash on a memcpy from invalid memory address.
However if a guest code can cause the vm to be destroyed in another
vcpu with excellent timing, then kvm_clear_async_pf_completion_queue
can be used by the guest to control the data that's pointed to by the
call to cancel_work_item, which can be used to gain execution.
Fixes: f78146b0f9230765c6315b2e14f56112513389ad
Signed-off-by: Andrew Honig <[email protected]>
Cc: [email protected] (3.5+)
Signed-off-by: Paolo Bonzini <[email protected]> |
Item_cache_int(THD *thd): Item_cache(thd, &type_handler_longlong),
value(0) {} | 0 | [
"CWE-617"
] | server | 807945f2eb5fa22e6f233cc17b85a2e141efe2c8 | 237,973,621,458,877,730,000,000,000,000,000,000,000 | 2 | MDEV-26402: A SEGV in Item_field::used_tables/update_depend_map_for_order...
When doing condition pushdown from HAVING into WHERE,
Item_equal::create_pushable_equalities() calls
item->set_extraction_flag(IMMUTABLE_FL) for constant items.
Then, Item::cleanup_excluding_immutables_processor() checks for this flag
to see if it should call item->cleanup() or leave the item as-is.
The failure happens when a constant item has a non-constant one inside it,
like:
(tbl.col=0 AND impossible_cond)
item->walk(cleanup_excluding_immutables_processor) works in a bottom-up
way so it
1. will call Item_func_eq(tbl.col=0)->cleanup()
2. will not call Item_cond_and->cleanup (as the AND is constant)
This creates an item tree where a fixed Item has an un-fixed Item inside
it which eventually causes an assertion failure.
Fixed by introducing this rule: instead of just calling
item->set_extraction_flag(IMMUTABLE_FL);
we call Item::walk() to set the flag for all sub-items of the item. |
request_set_user_header (struct request *req, const char *header)
{
char *name;
const char *p = strchr (header, ':');
if (!p)
return;
BOUNDED_TO_ALLOCA (header, p, name);
++p;
while (c_isspace (*p))
++p;
request_set_header (req, xstrdup (name), (char *) p, rel_name);
} | 0 | [
"CWE-20"
] | wget | 3e25a9817f47fbb8660cc6a3b2f3eea239526c6c | 134,261,861,880,073,320,000,000,000,000,000,000,000 | 12 | Introduce --trust-server-names. Close CVE-2010-2252. |
Status TrySimplify(NodeDef* node, string* simplified_node_name) override {
TF_RETURN_IF_ERROR(EnsureNodeIsSupported(node));
NodeDef* tail = node;
tail = GetTailOfIdempotentChain(*tail, *ctx().node_map,
*ctx().nodes_to_preserve);
NodeDef* first_transpose;
TF_RETURN_IF_ERROR(GetInputNode(tail->input(0), &first_transpose));
NodeDef* node_perm;
TF_RETURN_IF_ERROR(GetInputNode(node->input(1), &node_perm));
if (!IsConstant(*node_perm)) {
return Status::OK();
}
std::vector<int64> node_perm_values;
TF_RETURN_IF_ERROR(GetPermutation(*node_perm, &node_perm_values));
if (first_transpose->op() == node->op()) {
// Remove pairs of transposes that cancel each other.
NodeDef* first_transpose_perm;
TF_RETURN_IF_ERROR(
GetInputNode(first_transpose->input(1), &first_transpose_perm));
if (!IsConstant(*first_transpose_perm)) {
return Status::OK();
}
std::vector<int64> first_transpose_perm_values;
TF_RETURN_IF_ERROR(
GetPermutation(*first_transpose_perm, &first_transpose_perm_values));
if (AreInversePermutations(node_perm_values,
first_transpose_perm_values)) {
if (tail == node) {
// Bypass adjacent pair.
*simplified_node_name = first_transpose->input(0);
} else {
// Bypass pair connected through chain.
tail->set_input(0, first_transpose->input(0));
ctx().node_map->UpdateInput(tail->name(), first_transpose->name(),
first_transpose->input(0));
ForwardControlDependencies(tail, {first_transpose});
*simplified_node_name = node->input(0);
}
}
} else {
// Remove simple identity transposes.
if (IsIdentityPermutation(node_perm_values)) {
if (IsConjugateTranspose(*node)) {
const NodeScopeAndName transpose =
ParseNodeScopeAndName(node->name());
const string optimized_node_name = OptimizedNodeName(transpose);
NodeDef* new_op = AddCopyNode(optimized_node_name, node);
new_op->set_op("Conj");
new_op->mutable_input()->RemoveLast();
new_op->mutable_attr()->erase("Tperm");
ForwardControlDependencies(new_op, {node});
*simplified_node_name = new_op->name();
} else {
*simplified_node_name = node->input(0);
}
}
}
return Status::OK();
} | 0 | [
"CWE-476"
] | tensorflow | e6340f0665d53716ef3197ada88936c2a5f7a2d3 | 48,573,509,968,188,530,000,000,000,000,000,000,000 | 60 | Handle a special grappler case resulting in crash.
It might happen that a malformed input could be used to trick Grappler into trying to optimize a node with no inputs. This, in turn, would produce a null pointer dereference and a segfault.
PiperOrigin-RevId: 369242852
Change-Id: I2e5cbe7aec243d34a6d60220ac8ac9b16f136f6b |
static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp)
{
/* Mark all delegations for reclaim */
nfs_delegation_mark_reclaim(clp);
nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_reboot);
} | 0 | [
"CWE-703"
] | linux | dc0b027dfadfcb8a5504f7d8052754bf8d501ab9 | 223,496,691,741,589,100,000,000,000,000,000,000,000 | 6 | NFSv4: Convert the open and close ops to use fmode
Signed-off-by: Trond Myklebust <[email protected]> |
Subsets and Splits