| func (string, 0-484k chars) | target (int64: 0 or 1) | cwe (list of CWE ids, 0-4 entries) | project (string, 799 distinct values) | commit_id (string, 40 chars) | hash (float64) | size (int64: 1-24k) | message (string, 0-13.3k chars) |
---|---|---|---|---|---|---|---|
sc_pkcs15emu_esteid_init (sc_pkcs15_card_t * p15card)
{
sc_card_t *card = p15card->card;
unsigned char buff[128];
int r, i;
size_t field_length = 0, modulus_length = 0;
sc_path_t tmppath;
set_string (&p15card->tokeninfo->label, "ID-kaart");
set_string (&p15card->tokeninfo->manufacturer_id, "AS Sertifitseerimiskeskus");
/* Select application directory */
sc_format_path ("3f00eeee5044", &tmppath);
r = sc_select_file (card, &tmppath, NULL);
SC_TEST_RET(card->ctx, SC_LOG_DEBUG_NORMAL, r, "select esteid PD failed");
/* read the serial (document number) */
r = sc_read_record (card, SC_ESTEID_PD_DOCUMENT_NR, buff, sizeof(buff), SC_RECORD_BY_REC_NR);
SC_TEST_RET(card->ctx, SC_LOG_DEBUG_NORMAL, r, "read document number failed");
buff[r] = '\0';
set_string (&p15card->tokeninfo->serial_number, (const char *) buff);
p15card->tokeninfo->flags = SC_PKCS15_TOKEN_PRN_GENERATION
| SC_PKCS15_TOKEN_EID_COMPLIANT
| SC_PKCS15_TOKEN_READONLY;
/* add certificates */
for (i = 0; i < 2; i++) {
static const char *esteid_cert_names[2] = {
"Isikutuvastus",
"Allkirjastamine"};
static char const *esteid_cert_paths[2] = {
"3f00eeeeaace",
"3f00eeeeddce"};
static int esteid_cert_ids[2] = {1, 2};
struct sc_pkcs15_cert_info cert_info;
struct sc_pkcs15_object cert_obj;
memset(&cert_info, 0, sizeof(cert_info));
memset(&cert_obj, 0, sizeof(cert_obj));
cert_info.id.value[0] = esteid_cert_ids[i];
cert_info.id.len = 1;
sc_format_path(esteid_cert_paths[i], &cert_info.path);
strlcpy(cert_obj.label, esteid_cert_names[i], sizeof(cert_obj.label));
r = sc_pkcs15emu_add_x509_cert(p15card, &cert_obj, &cert_info);
if (r < 0)
return SC_ERROR_INTERNAL;
if (i == 0) {
sc_pkcs15_cert_t *cert = NULL;
r = sc_pkcs15_read_certificate(p15card, &cert_info, &cert);
if (r < 0)
return SC_ERROR_INTERNAL;
if (cert->key->algorithm == SC_ALGORITHM_EC)
field_length = cert->key->u.ec.params.field_length;
else
modulus_length = cert->key->u.rsa.modulus.len * 8;
if (r == SC_SUCCESS) {
static const struct sc_object_id cn_oid = {{ 2, 5, 4, 3, -1 }};
u8 *cn_name = NULL;
size_t cn_len = 0;
sc_pkcs15_get_name_from_dn(card->ctx, cert->subject,
cert->subject_len, &cn_oid, &cn_name, &cn_len);
if (cn_len > 0) {
char *token_name = malloc(cn_len+1);
if (token_name) {
memcpy(token_name, cn_name, cn_len);
token_name[cn_len] = '\0';
set_string(&p15card->tokeninfo->label, (const char*)token_name);
free(token_name);
}
}
free(cn_name);
sc_pkcs15_free_certificate(cert);
}
}
}
/* the file with key pin info (tries left) */
sc_format_path ("3f000016", &tmppath);
r = sc_select_file (card, &tmppath, NULL);
if (r < 0)
return SC_ERROR_INTERNAL;
/* add pins */
for (i = 0; i < 3; i++) {
unsigned char tries_left;
static const char *esteid_pin_names[3] = {
"PIN1",
"PIN2",
"PUK" };
static const int esteid_pin_min[3] = {4, 5, 8};
static const int esteid_pin_ref[3] = {1, 2, 0};
static const int esteid_pin_authid[3] = {1, 2, 3};
static const int esteid_pin_flags[3] = {0, 0, SC_PKCS15_PIN_FLAG_UNBLOCKING_PIN};
struct sc_pkcs15_auth_info pin_info;
struct sc_pkcs15_object pin_obj;
memset(&pin_info, 0, sizeof(pin_info));
memset(&pin_obj, 0, sizeof(pin_obj));
/* read the number of tries left for the PIN */
r = sc_read_record (card, i + 1, buff, sizeof(buff), SC_RECORD_BY_REC_NR);
if (r < 0)
return SC_ERROR_INTERNAL;
tries_left = buff[5];
pin_info.auth_id.len = 1;
pin_info.auth_id.value[0] = esteid_pin_authid[i];
pin_info.auth_type = SC_PKCS15_PIN_AUTH_TYPE_PIN;
pin_info.attrs.pin.reference = esteid_pin_ref[i];
pin_info.attrs.pin.flags = esteid_pin_flags[i];
pin_info.attrs.pin.type = SC_PKCS15_PIN_TYPE_ASCII_NUMERIC;
pin_info.attrs.pin.min_length = esteid_pin_min[i];
pin_info.attrs.pin.stored_length = 12;
pin_info.attrs.pin.max_length = 12;
pin_info.attrs.pin.pad_char = '\0';
pin_info.tries_left = (int)tries_left;
pin_info.max_tries = 3;
strlcpy(pin_obj.label, esteid_pin_names[i], sizeof(pin_obj.label));
pin_obj.flags = esteid_pin_flags[i];
/* Link normal PINs with PUK */
if (i < 2) {
pin_obj.auth_id.len = 1;
pin_obj.auth_id.value[0] = 3;
}
r = sc_pkcs15emu_add_pin_obj(p15card, &pin_obj, &pin_info);
if (r < 0)
return SC_ERROR_INTERNAL;
}
/* add private keys */
for (i = 0; i < 2; i++) {
static int prkey_pin[2] = {1, 2};
static const char *prkey_name[2] = {
"Isikutuvastus",
"Allkirjastamine"};
struct sc_pkcs15_prkey_info prkey_info;
struct sc_pkcs15_object prkey_obj;
memset(&prkey_info, 0, sizeof(prkey_info));
memset(&prkey_obj, 0, sizeof(prkey_obj));
prkey_info.id.len = 1;
prkey_info.id.value[0] = prkey_pin[i];
prkey_info.native = 1;
prkey_info.key_reference = i + 1;
prkey_info.field_length = field_length;
prkey_info.modulus_length = modulus_length;
if (i == 1)
prkey_info.usage = SC_PKCS15_PRKEY_USAGE_NONREPUDIATION;
else if(field_length > 0) // ECC has sign and derive usage
prkey_info.usage = SC_PKCS15_PRKEY_USAGE_SIGN | SC_PKCS15_PRKEY_USAGE_DERIVE;
else
prkey_info.usage = SC_PKCS15_PRKEY_USAGE_SIGN | SC_PKCS15_PRKEY_USAGE_ENCRYPT | SC_PKCS15_PRKEY_USAGE_DECRYPT;
strlcpy(prkey_obj.label, prkey_name[i], sizeof(prkey_obj.label));
prkey_obj.auth_id.len = 1;
prkey_obj.auth_id.value[0] = prkey_pin[i];
prkey_obj.user_consent = 0;
prkey_obj.flags = SC_PKCS15_CO_FLAG_PRIVATE;
if(field_length > 0)
r = sc_pkcs15emu_add_ec_prkey(p15card, &prkey_obj, &prkey_info);
else
r = sc_pkcs15emu_add_rsa_prkey(p15card, &prkey_obj, &prkey_info);
if (r < 0)
return SC_ERROR_INTERNAL;
}
return SC_SUCCESS;
}
| 1 |
[
"CWE-415",
"CWE-119"
] |
OpenSC
|
360e95d45ac4123255a4c796db96337f332160ad
| 277,667,268,773,819,880,000,000,000,000,000,000,000 | 180 |
fixed out of bounds writes
Thanks to Eric Sesterhenn from X41 D-SEC GmbH
for reporting the problems.
|
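This sample is labeled vulnerable (target = 1, CWE-415/CWE-119): `sc_read_record()` can legally return `sizeof(buff)`, after which `buff[r] = '\0'` writes one byte past the 128-byte array. A minimal sketch of the bounded pattern, assuming the `sc_read_record()` contract visible above; this is a fragment against the function, not the verbatim upstream diff:

```c
/* Reserve one byte for the terminator so buff[r] = '\0' stays in
 * bounds even when the record fills the buffer. Fragment against the
 * function above; error handling abbreviated. */
unsigned char buff[128];
int r = sc_read_record(card, SC_ESTEID_PD_DOCUMENT_NR,
                       buff, sizeof(buff) - 1, SC_RECORD_BY_REC_NR);
if (r < 0 || (size_t)r >= sizeof(buff))
    return SC_ERROR_INTERNAL;  /* read failed or record implausibly long */
buff[r] = '\0';                /* now guaranteed in bounds */
```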
get_sw_value(buf_T *buf)
{
return buf->b_p_sw ? buf->b_p_sw : buf->b_p_ts;
}
| 0 |
[
"CWE-20"
] |
vim
|
d0b5138ba4bccff8a744c99836041ef6322ed39a
| 95,711,094,183,965,560,000,000,000,000,000,000,000 | 4 |
patch 8.0.0056
Problem: When setting 'filetype' there is no check for a valid name.
Solution: Only allow valid characters in 'filetype', 'syntax' and 'keymap'.
|
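The commit message describes restricting 'filetype', 'syntax' and 'keymap' to a safe character set; the sampled `get_sw_value()` is just an unmodified function from the same patch. A sketch of such a validator, assuming an ASCII option value; the helper name and the exact allowed set are illustrative, not necessarily Vim's:

```c
#include <ctype.h>

/* Accept only alphanumerics plus '-', '_' and '.', so an option value
 * can no longer smuggle characters into later autocommand matching. */
static int valid_name(const char *val)
{
    for (const char *s = val; *s != '\0'; ++s)
        if (!isalnum((unsigned char)*s) && *s != '-' && *s != '_' && *s != '.')
            return 0;
    return 1;
}
```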
static int git_tcp_connect_sock(char *host, int flags)
{
struct strbuf error_message = STRBUF_INIT;
int sockfd = -1;
const char *port = STR(DEFAULT_GIT_PORT);
struct addrinfo hints, *ai0, *ai;
int gai;
int cnt = 0;
get_host_and_port(&host, &port);
if (!*port)
port = "<none>";
memset(&hints, 0, sizeof(hints));
if (flags & CONNECT_IPV4)
hints.ai_family = AF_INET;
else if (flags & CONNECT_IPV6)
hints.ai_family = AF_INET6;
hints.ai_socktype = SOCK_STREAM;
hints.ai_protocol = IPPROTO_TCP;
if (flags & CONNECT_VERBOSE)
fprintf(stderr, _("Looking up %s ... "), host);
gai = getaddrinfo(host, port, &hints, &ai);
if (gai)
die(_("unable to look up %s (port %s) (%s)"), host, port, gai_strerror(gai));
if (flags & CONNECT_VERBOSE)
/* TRANSLATORS: this is the end of "Looking up %s ... " */
fprintf(stderr, _("done.\nConnecting to %s (port %s) ... "), host, port);
for (ai0 = ai; ai; ai = ai->ai_next, cnt++) {
sockfd = socket(ai->ai_family,
ai->ai_socktype, ai->ai_protocol);
if ((sockfd < 0) ||
(connect(sockfd, ai->ai_addr, ai->ai_addrlen) < 0)) {
strbuf_addf(&error_message, "%s[%d: %s]: errno=%s\n",
host, cnt, ai_name(ai), strerror(errno));
if (0 <= sockfd)
close(sockfd);
sockfd = -1;
continue;
}
if (flags & CONNECT_VERBOSE)
fprintf(stderr, "%s ", ai_name(ai));
break;
}
freeaddrinfo(ai0);
if (sockfd < 0)
die(_("unable to connect to %s:\n%s"), host, error_message.buf);
enable_keepalive(sockfd);
if (flags & CONNECT_VERBOSE)
/* TRANSLATORS: this is the end of "Connecting to %s (port %s) ... " */
fprintf_ln(stderr, _("done."));
strbuf_release(&error_message);
return sockfd;
}
| 0 |
[] |
git
|
a02ea577174ab8ed18f847cf1693f213e0b9c473
| 183,604,319,394,710,660,000,000,000,000,000,000,000 | 64 |
git_connect_git(): forbid newlines in host and path
When we connect to a git:// server, we send an initial request that
looks something like:
002dgit-upload-pack repo.git\0host=example.com
If the repo path contains a newline, then it's included literally, and
we get:
002egit-upload-pack repo
.git\0host=example.com
This works fine if you really do have a newline in your repository name;
the server side uses the pktline framing to parse the string, not
newlines. However, there are many _other_ protocols in the wild that do
parse on newlines, such as HTTP. So a carefully constructed git:// URL
can actually turn into a valid HTTP request. For example:
git://localhost:1234/%0d%0a%0d%0aGET%20/%20HTTP/1.1 %0d%0aHost:localhost%0d%0a%0d%0a
becomes:
0050git-upload-pack /
GET / HTTP/1.1
Host:localhost
host=localhost:1234
on the wire. Again, this isn't a problem for a real Git server, but it
does mean that feeding a malicious URL to Git (e.g., through a
submodule) can cause it to make unexpected cross-protocol requests.
Since repository names with newlines are presumably quite rare (and
indeed, we already disallow them in git-over-http), let's just disallow
them over this protocol.
Hostnames could likewise inject a newline, but this is unlikely a
problem in practice; we'd try resolving the hostname with a newline in
it, which wouldn't work. Still, it doesn't hurt to err on the side of
caution there, since we would not expect them to work in the first
place.
The ssh and local code paths are unaffected by this patch. In both cases
we're trying to run upload-pack via a shell, and will quote the newline
so that it makes it intact. An attacker can point an ssh url at an
arbitrary port, of course, but unless there's an actual ssh server
there, we'd never get as far as sending our shell command anyway. We
_could_ similarly restrict newlines in those protocols out of caution,
but there seems little benefit to doing so.
The new test here is run alongside the git-daemon tests, which cover the
same protocol, but it shouldn't actually contact the daemon at all. In
theory we could make the test more robust by setting up an actual
repository with a newline in it (so that our clone would succeed if our
new check didn't kick in). But a repo directory with newline in it is
likely not portable across all filesystems. Likewise, we could check
git-daemon's log that it was not contacted at all, but we do not
currently record the log (and anyway, it would make the test racy with
the daemon's log write). We'll just check the client-side stderr to make
sure we hit the expected code path.
Reported-by: Harold Kim <[email protected]>
Signed-off-by: Jeff King <[email protected]>
Signed-off-by: Junio C Hamano <[email protected]>
|
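The long message reduces to one guard before the pktline request is assembled. A self-contained sketch under the assumption that `host` and `path` are the parsed URL components; upstream git would report through `die()` rather than fprintf/exit:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Both strings are spliced verbatim into the on-wire request, so an
 * embedded newline could form a second, attacker-chosen protocol line
 * (the HTTP smuggling scenario in the message above). */
static void check_no_newline(const char *host, const char *path)
{
    if (strchr(host, '\n') || strchr(path, '\n')) {
        fprintf(stderr, "fatal: newline forbidden in git:// host or path\n");
        exit(128);
    }
}
```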
static int is_in(struct ip_mc_list *pmc, struct ip_sf_list *psf, int type,
int gdeleted, int sdeleted)
{
switch (type) {
case IGMPV3_MODE_IS_INCLUDE:
case IGMPV3_MODE_IS_EXCLUDE:
if (gdeleted || sdeleted)
return 0;
if (!(pmc->gsquery && !psf->sf_gsresp)) {
if (pmc->sfmode == MCAST_INCLUDE)
return 1;
/* don't include if this source is excluded
* in all filters
*/
if (psf->sf_count[MCAST_INCLUDE])
return type == IGMPV3_MODE_IS_INCLUDE;
return pmc->sfcount[MCAST_EXCLUDE] ==
psf->sf_count[MCAST_EXCLUDE];
}
return 0;
case IGMPV3_CHANGE_TO_INCLUDE:
if (gdeleted || sdeleted)
return 0;
return psf->sf_count[MCAST_INCLUDE] != 0;
case IGMPV3_CHANGE_TO_EXCLUDE:
if (gdeleted || sdeleted)
return 0;
if (pmc->sfcount[MCAST_EXCLUDE] == 0 ||
psf->sf_count[MCAST_INCLUDE])
return 0;
return pmc->sfcount[MCAST_EXCLUDE] ==
psf->sf_count[MCAST_EXCLUDE];
case IGMPV3_ALLOW_NEW_SOURCES:
if (gdeleted || !psf->sf_crcount)
return 0;
return (pmc->sfmode == MCAST_INCLUDE) ^ sdeleted;
case IGMPV3_BLOCK_OLD_SOURCES:
if (pmc->sfmode == MCAST_INCLUDE)
return gdeleted || (psf->sf_crcount && sdeleted);
return psf->sf_crcount && !gdeleted && !sdeleted;
}
return 0;
}
| 0 |
[
"CWE-362"
] |
linux
|
23d2b94043ca8835bd1e67749020e839f396a1c2
| 292,290,283,488,335,420,000,000,000,000,000,000,000 | 43 |
igmp: Add ip_mc_list lock in ip_check_mc_rcu
I got the panic below when doing a fuzz test:
Kernel panic - not syncing: panic_on_warn set ...
CPU: 0 PID: 4056 Comm: syz-executor.3 Tainted: G B 5.14.0-rc1-00195-gcff5c4254439-dirty #2
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.12.0-59-gc9ba5276e321-prebuilt.qemu.org 04/01/2014
Call Trace:
dump_stack_lvl+0x7a/0x9b
panic+0x2cd/0x5af
end_report.cold+0x5a/0x5a
kasan_report+0xec/0x110
ip_check_mc_rcu+0x556/0x5d0
__mkroute_output+0x895/0x1740
ip_route_output_key_hash_rcu+0x2d0/0x1050
ip_route_output_key_hash+0x182/0x2e0
ip_route_output_flow+0x28/0x130
udp_sendmsg+0x165d/0x2280
udpv6_sendmsg+0x121e/0x24f0
inet6_sendmsg+0xf7/0x140
sock_sendmsg+0xe9/0x180
____sys_sendmsg+0x2b8/0x7a0
___sys_sendmsg+0xf0/0x160
__sys_sendmmsg+0x17e/0x3c0
__x64_sys_sendmmsg+0x9e/0x100
do_syscall_64+0x3b/0x90
entry_SYSCALL_64_after_hwframe+0x44/0xae
RIP: 0033:0x462eb9
Code: f7 d8 64 89 02 b8 ff ff ff ff c3 66 0f 1f 44 00 00 48 89 f8
48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48>
3d 01 f0 ff ff 73 01 c3 48 c7 c1 bc ff ff ff f7 d8 64 89 01 48
RSP: 002b:00007f3df5af1c58 EFLAGS: 00000246 ORIG_RAX: 0000000000000133
RAX: ffffffffffffffda RBX: 000000000073bf00 RCX: 0000000000462eb9
RDX: 0000000000000312 RSI: 0000000020001700 RDI: 0000000000000007
RBP: 0000000000000004 R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000246 R12: 00007f3df5af26bc
R13: 00000000004c372d R14: 0000000000700b10 R15: 00000000ffffffff
It is one use-after-free in ip_check_mc_rcu.
In ip_mc_del_src, the ip_sf_list of pmc has been freed under pmc->lock protection.
But access to ip_sf_list in ip_check_mc_rcu is not protected by the lock.
Signed-off-by: Liu Jian <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
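The message pinpoints the race: `ip_mc_del_src()` frees the `ip_sf_list` under `pmc->lock` while `ip_check_mc_rcu()` walks it with no lock at all. A fragment sketching the fix's locking pattern, reusing the `pmc`/`psf` naming from the function above:

```c
/* RCU protects the ip_mc_list itself, not its source list; take the
 * same spinlock the writer holds while it frees entries. */
spin_lock(&pmc->lock);
for (psf = pmc->sources; psf; psf = psf->sf_next) {
    /* ... inspect psf->sf_count[] without risk of use-after-free ... */
}
spin_unlock(&pmc->lock);
```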
CACHE_LIMITER_FUNC(private_no_expire) /* {{{ */
{
char buf[MAX_STR + 1];
snprintf(buf, sizeof(buf), "Cache-Control: private, max-age=%ld, pre-check=%ld", PS(cache_expire) * 60, PS(cache_expire) * 60); /* SAFE */
ADD_HEADER(buf);
last_modified(TSRMLS_C);
}
| 0 |
[
"CWE-264"
] |
php-src
|
25e8fcc88fa20dc9d4c47184471003f436927cde
| 331,721,343,396,934,960,000,000,000,000,000,000,000 | 9 |
Strict session
|
int kvm_arch_prepare_memory_region(struct kvm *kvm,
const struct kvm_memory_slot *old,
struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
if (change == KVM_MR_CREATE || change == KVM_MR_MOVE)
return kvm_alloc_memslot_metadata(kvm, new);
if (change == KVM_MR_FLAGS_ONLY)
memcpy(&new->arch, &old->arch, sizeof(old->arch));
else if (WARN_ON_ONCE(change != KVM_MR_DELETE))
return -EIO;
return 0;
}
| 0 |
[
"CWE-476"
] |
linux
|
55749769fe608fa3f4a075e42e89d237c8e37637
| 84,536,404,835,989,820,000,000,000,000,000,000,000 | 15 |
KVM: x86: Fix wall clock writes in Xen shared_info not to mark page dirty
When dirty ring logging is enabled, any dirty logging without an active
vCPU context will cause a kernel oops. But we've already declared that
the shared_info page doesn't get dirty tracking anyway, since it would
be kind of insane to mark it dirty every time we deliver an event channel
interrupt. Userspace is supposed to just assume it's always dirty any
time a vCPU can run or event channels are routed.
So stop using the generic kvm_write_wall_clock() and just write directly
through the gfn_to_pfn_cache that we already have set up.
We can make kvm_write_wall_clock() static in x86.c again now, but let's
not remove the 'sec_hi_ofs' argument even though it's not used yet. At
some point we *will* want to use that for KVM guests too.
Fixes: 629b5348841a ("KVM: x86/xen: update wallclock region")
Reported-by: butt3rflyh4ck <[email protected]>
Signed-off-by: David Woodhouse <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
GF_Err lsrc_box_size(GF_Box *s)
{
GF_LASERConfigurationBox *ptr = (GF_LASERConfigurationBox *)s;
ptr->size += ptr->hdr_size;
return GF_OK;
}
| 0 |
[
"CWE-787"
] |
gpac
|
77510778516803b7f7402d7423c6d6bef50254c3
| 171,771,482,482,904,100,000,000,000,000,000,000,000 | 6 |
fixed #2255
|
ssize_t smb_vfs_call_getxattr(struct vfs_handle_struct *handle,
const char *path, const char *name, void *value,
size_t size)
{
VFS_FIND(getxattr);
return handle->fns->getxattr_fn(handle, path, name, value, size);
}
| 0 |
[
"CWE-264"
] |
samba
|
4278ef25f64d5fdbf432ff1534e275416ec9561e
| 278,278,394,244,749,340,000,000,000,000,000,000,000 | 7 |
CVE-2015-5252: s3: smbd: Fix symlink verification (file access outside the share).
Ensure matching component ends in '/' or '\0'.
BUG: https://bugzilla.samba.org/show_bug.cgi?id=11395
Signed-off-by: Jeremy Allison <[email protected]>
Reviewed-by: Volker Lendecke <[email protected]>
|
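"Ensure matching component ends in '/' or '\0'" is the whole fix: a bare prefix comparison also accepts sibling directories that merely share a string prefix. A self-contained sketch of the corrected check, with illustrative names:

```c
#include <string.h>

/* Return 1 iff 'resolved' is 'rootdir' itself or lies strictly below
 * it. Without the final character test, "/srv/share-evil" would pass
 * for rootdir "/srv/share" -- the CVE-2015-5252 escape. */
static int path_inside_share(const char *resolved, const char *rootdir)
{
    size_t len = strlen(rootdir);
    if (strncmp(resolved, rootdir, len) != 0)
        return 0;
    return resolved[len] == '/' || resolved[len] == '\0';
}
```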
static void print_cfs_stats(struct seq_file *m, int cpu)
{
struct cfs_rq *cfs_rq;
rcu_read_lock();
for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
print_cfs_rq(m, cpu, cfs_rq);
rcu_read_unlock();
}
| 0 |
[] |
linux-2.6
|
ac884dec6d4a7df252150af875cffddf8f1d9c15
| 333,231,720,781,053,270,000,000,000,000,000,000,000 | 9 |
sched: fair-group scheduling vs latency
Currently FAIR_GROUP sched grows the scheduler latency outside of
sysctl_sched_latency, invert this so it stays within.
Signed-off-by: Peter Zijlstra <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
|
static void vmx_decache_cr3(struct kvm_vcpu *vcpu)
{
if (enable_ept && is_paging(vcpu))
vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
}
| 0 |
[] |
kvm
|
a642fc305053cc1c6e47e4f4df327895747ab485
| 271,508,063,192,997,200,000,000,000,000,000,000,000 | 6 |
kvm: vmx: handle invvpid vm exit gracefully
On systems with invvpid instruction support (corresponding bit in
IA32_VMX_EPT_VPID_CAP MSR is set) guest invocation of invvpid
causes vm exit, which is currently not handled and results in
propagation of unknown exit to userspace.
Fix this by installing an invvpid vm exit handler.
This is CVE-2014-3646.
Cc: [email protected]
Signed-off-by: Petr Matousek <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
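Per the message, the fix "installs an invvpid vm exit handler"; since the guest is not entitled to the instruction, the handler's natural shape is to reflect #UD. A fragment sketching that shape (see commit a642fc30 for the exact wiring):

```c
/* Handle the previously-unhandled INVVPID exit in the kernel: inject
 * #UD instead of propagating an unknown exit reason to userspace. */
static int handle_invvpid(struct kvm_vcpu *vcpu)
{
    kvm_queue_exception(vcpu, UD_VECTOR);
    return 1;  /* handled; resume the guest */
}
/* registered as: [EXIT_REASON_INVVPID] = handle_invvpid, */
```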
template<typename tz, typename tc>
CImg<T>& draw_line(CImg<tz>& zbuffer,
const int x0, const int y0, const float z0,
const int x1, const int y1, const float z1,
const CImg<tc>& texture,
const int tx0, const int ty0,
const int tx1, const int ty1,
const float opacity=1,
const unsigned int pattern=~0U, const bool init_hatch=true) {
typedef typename cimg::superset<tz,float>::type tzfloat;
if (is_empty() || z0<=0 || z1<=0) return *this;
if (!is_sameXY(zbuffer))
throw CImgArgumentException(_cimg_instance
"draw_line(): Instance and specified Z-buffer (%u,%u,%u,%u,%p) have "
"different dimensions.",
cimg_instance,
zbuffer._width,zbuffer._height,zbuffer._depth,zbuffer._spectrum,zbuffer._data);
if (texture._depth>1 || texture._spectrum<_spectrum)
throw CImgArgumentException(_cimg_instance
"draw_line(): Invalid specified texture (%u,%u,%u,%u,%p).",
cimg_instance,
texture._width,texture._height,texture._depth,texture._spectrum,texture._data);
if (is_overlapped(texture))
return draw_line(zbuffer,x0,y0,z0,x1,y1,z1,+texture,tx0,ty0,tx1,ty1,opacity,pattern,init_hatch);
static unsigned int hatch = ~0U - (~0U>>1);
if (init_hatch) hatch = ~0U - (~0U>>1);
const bool xdir = x0<x1, ydir = y0<y1;
int
nx0 = x0, nx1 = x1, ny0 = y0, ny1 = y1,
&xleft = xdir?nx0:nx1, &yleft = xdir?ny0:ny1,
&xright = xdir?nx1:nx0, &yright = xdir?ny1:ny0,
&xup = ydir?nx0:nx1, &yup = ydir?ny0:ny1,
&xdown = ydir?nx1:nx0, &ydown = ydir?ny1:ny0;
float
Tx0 = tx0/z0, Tx1 = tx1/z1,
Ty0 = ty0/z0, Ty1 = ty1/z1,
dtx = Tx1 - Tx0, dty = Ty1 - Ty0,
tnx0 = Tx0, tnx1 = Tx1, tny0 = Ty0, tny1 = Ty1,
&txleft = xdir?tnx0:tnx1, &tyleft = xdir?tny0:tny1,
&txright = xdir?tnx1:tnx0, &tyright = xdir?tny1:tny0,
&txup = ydir?tnx0:tnx1, &tyup = ydir?tny0:tny1,
&txdown = ydir?tnx1:tnx0, &tydown = ydir?tny1:tny0;
tzfloat
Z0 = 1/(tzfloat)z0, Z1 = 1/(tzfloat)z1,
dz = Z1 - Z0, nz0 = Z0, nz1 = Z1,
&zleft = xdir?nz0:nz1,
&zright = xdir?nz1:nz0,
&zup = ydir?nz0:nz1,
&zdown = ydir?nz1:nz0;
if (xright<0 || xleft>=width()) return *this;
if (xleft<0) {
const float D = (float)xright - xleft;
yleft-=(int)((float)xleft*((float)yright - yleft)/D);
zleft-=(float)xleft*(zright - zleft)/D;
txleft-=(float)xleft*(txright - txleft)/D;
tyleft-=(float)xleft*(tyright - tyleft)/D;
xleft = 0;
}
if (xright>=width()) {
const float d = (float)xright - width(), D = (float)xright - xleft;
yright-=(int)(d*((float)yright - yleft)/D);
zright-=d*(zright - zleft)/D;
txright-=d*(txright - txleft)/D;
tyright-=d*(tyright - tyleft)/D;
xright = width() - 1;
}
if (ydown<0 || yup>=height()) return *this;
if (yup<0) {
const float D = (float)ydown - yup;
xup-=(int)((float)yup*((float)xdown - xup)/D);
zup-=yup*(zdown - zup)/D;
txup-=yup*(txdown - txup)/D;
tyup-=yup*(tydown - tyup)/D;
yup = 0;
}
if (ydown>=height()) {
const float d = (float)ydown - height(), D = (float)ydown - yup;
xdown-=(int)(d*((float)xdown - xup)/D);
zdown-=d*(zdown - zup)/D;
txdown-=d*(txdown - txup)/D;
tydown-=d*(tydown - tyup)/D;
ydown = height() - 1;
}
T *ptrd0 = data(nx0,ny0);
tz *ptrz = zbuffer.data(nx0,ny0);
int dx = xright - xleft, dy = ydown - yup;
const bool steep = dy>dx;
if (steep) cimg::swap(nx0,ny0,nx1,ny1,dx,dy);
const longT
offx = (longT)(nx0<nx1?1:-1)*(steep?width():1),
offy = (longT)(ny0<ny1?1:-1)*(steep?1:width()),
ndx = (longT)(dx>0?dx:1);
const ulongT
whd = (ulongT)_width*_height*_depth,
twh = (ulongT)texture._width*texture._height;
if (opacity>=1) {
if (~pattern) for (int error = dx>>1, x = 0; x<=dx; ++x) {
if (pattern&hatch) {
const tzfloat z = Z0 + x*dz/ndx;
if (z>=(tzfloat)*ptrz) {
*ptrz = (tz)z;
const float tx = Tx0 + x*dtx/ndx, ty = Ty0 + x*dty/ndx;
const tc *col = &texture._atXY((int)(tx/z),(int)(ty/z));
T *ptrd = ptrd0;
cimg_forC(*this,c) { *ptrd = (T)*col; ptrd+=whd; col+=twh; }
}
}
hatch>>=1; if (!hatch) hatch = ~0U - (~0U>>1);
ptrd0+=offx; ptrz+=offx;
if ((error-=dy)<0) { ptrd0+=offy; ptrz+=offy; error+=dx; }
} else for (int error = dx>>1, x = 0; x<=dx; ++x) {
const tzfloat z = Z0 + x*dz/ndx;
if (z>=(tzfloat)*ptrz) {
*ptrz = (tz)z;
const float tx = Tx0 + x*dtx/ndx, ty = Ty0 + x*dty/ndx;
const tc *col = &texture._atXY((int)(tx/z),(int)(ty/z));
T *ptrd = ptrd0;
cimg_forC(*this,c) { *ptrd = (T)*col; ptrd+=whd; col+=twh; }
}
ptrd0+=offx; ptrz+=offx;
if ((error-=dy)<0) { ptrd0+=offy; ptrz+=offy; error+=dx; }
}
} else {
const float nopacity = cimg::abs(opacity), copacity = 1 - std::max(opacity,0.0f);
if (~pattern) for (int error = dx>>1, x = 0; x<=dx; ++x) {
if (pattern&hatch) {
const tzfloat z = Z0 + x*dz/ndx;
if (z>=(tzfloat)*ptrz) {
*ptrz = (tz)z;
const float tx = Tx0 + x*dtx/ndx, ty = Ty0 + x*dty/ndx;
const tc *col = &texture._atXY((int)(tx/z),(int)(ty/z));
T *ptrd = ptrd0;
cimg_forC(*this,c) { *ptrd = (T)(nopacity**col + *ptrd*copacity); ptrd+=whd; col+=twh; }
}
}
hatch>>=1; if (!hatch) hatch = ~0U - (~0U>>1);
ptrd0+=offx; ptrz+=offx;
if ((error-=dy)<0) { ptrd0+=offy; ptrz+=offy; error+=dx; }
} else for (int error = dx>>1, x = 0; x<=dx; ++x) {
const tzfloat z = Z0 + x*dz/ndx;
if (z>=(tzfloat)*ptrz) {
*ptrz = (tz)z;
const float tx = Tx0 + x*dtx/ndx, ty = Ty0 + x*dty/ndx;
const tc *col = &texture._atXY((int)(tx/z),(int)(ty/z));
T *ptrd = ptrd0;
cimg_forC(*this,c) { *ptrd = (T)(nopacity**col + *ptrd*copacity); ptrd+=whd; col+=twh; }
}
ptrd0+=offx; ptrz+=offx;
if ((error-=dy)<0) { ptrd0+=offy; ptrz+=offy; error+=dx; }
}
}
return *this;
| 0 |
[
"CWE-125"
] |
CImg
|
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
| 335,793,274,016,545,240,000,000,000,000,000,000,000 | 153 |
Fix other issues in 'CImg<T>::load_bmp()'.
|
Item *Item_sum_udf_float::copy_or_same(THD* thd)
{
return new (thd->mem_root) Item_sum_udf_float(thd, this);
}
| 0 |
[
"CWE-120"
] |
server
|
eca207c46293bc72dd8d0d5622153fab4d3fccf1
| 208,573,795,754,841,100,000,000,000,000,000,000,000 | 4 |
MDEV-25317 Assertion `scale <= precision' failed in decimal_bin_size And Assertion `scale >= 0 && precision > 0 && scale <= precision' failed in decimal_bin_size_inline/decimal_bin_size.
Precision should be kept below DECIMAL_MAX_SCALE for computations.
It can be bigger in Item_decimal. I'd fix this too, but it changes the
existing behaviour, so it is problematic to fix.
|
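The message states the invariant the assertion enforces. A fragment sketching the clamp it calls for; the placement and variable names are assumptions, not the upstream diff:

```c
/* decimal_bin_size() asserts scale >= 0 && precision > 0 &&
 * scale <= precision, so bound both before computing sizes. */
if (precision > DECIMAL_MAX_SCALE)  /* "kept below DECIMAL_MAX_SCALE" */
    precision = DECIMAL_MAX_SCALE;
if (scale > precision)
    scale = precision;
```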
int tcp_disconnect(struct sock *sk, int flags)
{
struct inet_sock *inet = inet_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
int old_state = sk->sk_state;
if (old_state != TCP_CLOSE)
tcp_set_state(sk, TCP_CLOSE);
/* ABORT function of RFC793 */
if (old_state == TCP_LISTEN) {
inet_csk_listen_stop(sk);
} else if (unlikely(tp->repair)) {
sk->sk_err = ECONNABORTED;
} else if (tcp_need_reset(old_state) ||
(tp->snd_nxt != tp->write_seq &&
(1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
/* The last check adjusts for discrepancy of Linux wrt. RFC
* states
*/
tcp_send_active_reset(sk, gfp_any());
sk->sk_err = ECONNRESET;
} else if (old_state == TCP_SYN_SENT)
sk->sk_err = ECONNRESET;
tcp_clear_xmit_timers(sk);
__skb_queue_purge(&sk->sk_receive_queue);
if (sk->sk_rx_skb_cache) {
__kfree_skb(sk->sk_rx_skb_cache);
sk->sk_rx_skb_cache = NULL;
}
tp->copied_seq = tp->rcv_nxt;
tp->urg_data = 0;
tcp_write_queue_purge(sk);
tcp_fastopen_active_disable_ofo_check(sk);
skb_rbtree_purge(&tp->out_of_order_queue);
inet->inet_dport = 0;
if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
inet_reset_saddr(sk);
sk->sk_shutdown = 0;
sock_reset_flag(sk, SOCK_DONE);
tp->srtt_us = 0;
tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
tp->rcv_rtt_last_tsecr = 0;
tp->write_seq += tp->max_window + 2;
if (tp->write_seq == 0)
tp->write_seq = 1;
icsk->icsk_backoff = 0;
tp->snd_cwnd = 2;
icsk->icsk_probes_out = 0;
icsk->icsk_rto = TCP_TIMEOUT_INIT;
tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
tp->snd_cwnd = TCP_INIT_CWND;
tp->snd_cwnd_cnt = 0;
tp->window_clamp = 0;
tp->delivered_ce = 0;
tcp_set_ca_state(sk, TCP_CA_Open);
tp->is_sack_reneg = 0;
tcp_clear_retrans(tp);
inet_csk_delack_init(sk);
/* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0
* issue in __tcp_select_window()
*/
icsk->icsk_ack.rcv_mss = TCP_MIN_MSS;
memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
__sk_dst_reset(sk);
dst_release(sk->sk_rx_dst);
sk->sk_rx_dst = NULL;
tcp_saved_syn_free(tp);
tp->compressed_ack = 0;
tp->bytes_sent = 0;
tp->bytes_retrans = 0;
tp->duplicate_sack[0].start_seq = 0;
tp->duplicate_sack[0].end_seq = 0;
tp->dsack_dups = 0;
tp->reord_seen = 0;
tp->retrans_out = 0;
tp->sacked_out = 0;
tp->tlp_high_seq = 0;
tp->last_oow_ack_time = 0;
/* There's a bubble in the pipe until at least the first ACK. */
tp->app_limited = ~0U;
tp->rack.mstamp = 0;
tp->rack.advanced = 0;
tp->rack.reo_wnd_steps = 1;
tp->rack.last_delivered = 0;
tp->rack.reo_wnd_persist = 0;
tp->rack.dsack_seen = 0;
tp->syn_data_acked = 0;
tp->rx_opt.saw_tstamp = 0;
tp->rx_opt.dsack = 0;
tp->rx_opt.num_sacks = 0;
/* Clean up fastopen related fields */
tcp_free_fastopen_req(tp);
inet->defer_connect = 0;
WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
if (sk->sk_frag.page) {
put_page(sk->sk_frag.page);
sk->sk_frag.page = NULL;
sk->sk_frag.offset = 0;
}
sk->sk_error_report(sk);
return 0;
}
| 0 |
[
"CWE-190"
] |
net
|
3b4929f65b0d8249f19a50245cd88ed1a2f78cff
| 300,560,759,677,392,240,000,000,000,000,000,000,000 | 113 |
tcp: limit payload size of sacked skbs
Jonathan Looney reported that TCP can trigger the following crash
in tcp_shifted_skb() :
BUG_ON(tcp_skb_pcount(skb) < pcount);
This can happen if the remote peer has advertised the smallest
MSS that linux TCP accepts : 48
An skb can hold 17 fragments, and each fragment can hold 32KB
on x86, or 64KB on PowerPC.
This means that the 16-bit width of TCP_SKB_CB(skb)->tcp_gso_segs
can overflow.
Note that tcp_sendmsg() builds skbs with less than 64KB
of payload, so this problem needs SACK to be enabled.
SACK blocks allow TCP to coalesce multiple skbs in the retransmit
queue, thus filling the 17 fragments to maximal capacity.
CVE-2019-11477 -- u16 overflow of TCP_SKB_CB(skb)->tcp_gso_segs
Fixes: 832d11c5cd07 ("tcp: Try to restore large SKBs while SACK processing")
Signed-off-by: Eric Dumazet <[email protected]>
Reported-by: Jonathan Looney <[email protected]>
Acked-by: Neal Cardwell <[email protected]>
Reviewed-by: Tyler Hicks <[email protected]>
Cc: Yuchung Cheng <[email protected]>
Cc: Bruce Curtis <[email protected]>
Cc: Jonathan Lemon <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
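The overflow is easy to reproduce from the figures quoted in the message alone; a standalone sketch of the arithmetic:

```c
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    const uint64_t frags = 17;          /* fragments per skb */
    const uint64_t frag_bytes = 65536;  /* 64KB per fragment on PowerPC */
    const uint64_t mss = 48;            /* smallest MSS Linux accepts */

    uint64_t skb_bytes = frags * frag_bytes; /* 1,114,112 bytes */
    uint64_t segs = skb_bytes / mss;         /* 23,210 segments */

    /* One skb fits in the u16 tcp_gso_segs, but SACK coalescing of
     * three fully packed skbs does not: 3 * 23,210 = 69,630 > 65,535. */
    printf("segs/skb=%" PRIu64 ", 3 skbs=%" PRIu64 " (u16 max 65535)\n",
           segs, 3 * segs);
    return 0;
}
```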
void initialize() override {
if (use_eds_) {
on_server_init_function_ = [this]() {
AssertionResult result =
fake_upstreams_[1]->waitForHttpConnection(*dispatcher_, eds_connection_);
RELEASE_ASSERT(result, result.message());
result = eds_connection_->waitForNewStream(*dispatcher_, eds_stream_);
RELEASE_ASSERT(result, result.message());
eds_stream_->startGrpcStream();
envoy::service::discovery::v3::DiscoveryRequest discovery_request;
result = eds_stream_->waitForGrpcMessage(*dispatcher_, discovery_request);
RELEASE_ASSERT(result, result.message());
envoy::service::discovery::v3::DiscoveryResponse discovery_response;
discovery_response.set_version_info("1");
discovery_response.set_type_url(Config::TypeUrl::get().ClusterLoadAssignment);
auto cluster_load_assignment =
TestUtility::parseYaml<envoy::config::endpoint::v3::ClusterLoadAssignment>(fmt::format(
R"EOF(
cluster_name: cluster_0
endpoints:
- lb_endpoints:
- endpoint:
address:
socket_address:
address: {}
port_value: {}
metadata:
filter_metadata:
test.namespace:
key: metadata-value
)EOF",
Network::Test::getLoopbackAddressString(std::get<0>(GetParam())),
fake_upstreams_[0]->localAddress()->ip()->port()));
discovery_response.add_resources()->PackFrom(cluster_load_assignment);
eds_stream_->sendGrpcMessage(discovery_response);
// Wait for the next request to make sure the first response was consumed.
result = eds_stream_->waitForGrpcMessage(*dispatcher_, discovery_request);
RELEASE_ASSERT(result, result.message());
};
}
HttpIntegrationTest::initialize();
}
| 0 |
[
"CWE-22"
] |
envoy
|
5333b928d8bcffa26ab19bf018369a835f697585
| 72,624,964,928,140,495,000,000,000,000,000,000,000 | 48 |
Implement handling of escaped slash characters in URL path
Fixes: CVE-2021-29492
Signed-off-by: Yan Avlasov <[email protected]>
|
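CVE-2021-29492 is a confusion between the escaped and decoded forms of a path: route matching sees %2F while a backend may decode it to '/'. A self-contained sketch of the detection step; Envoy itself exposes the reject/unescape policy through a connection-manager option (`path_with_escaped_slashes_action` per upstream docs, named here as an assumption):

```c
#include <string.h>

/* Return 1 if the path contains an escaped slash or backslash
 * (%2F / %5C, either case) that a downstream decoder could turn into
 * a real path separator after routing has already happened. */
static int has_escaped_slash(const char *path)
{
    for (const char *p = path; (p = strchr(p, '%')) != NULL; ++p) {
        if ((p[1] == '2' && (p[2] == 'F' || p[2] == 'f')) ||
            (p[1] == '5' && (p[2] == 'C' || p[2] == 'c')))
            return 1;
    }
    return 0;
}
```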
void MS_CALLBACK msg_cb(int write_p, int version, int content_type, const void *buf, size_t len, SSL *ssl, void *arg)
{
BIO *bio = arg;
const char *str_write_p, *str_version, *str_content_type = "", *str_details1 = "", *str_details2= "";
str_write_p = write_p ? ">>>" : "<<<";
switch (version)
{
case SSL2_VERSION:
str_version = "SSL 2.0";
break;
case SSL3_VERSION:
str_version = "SSL 3.0 ";
break;
case TLS1_VERSION:
str_version = "TLS 1.0 ";
break;
case TLS1_1_VERSION:
str_version = "TLS 1.1 ";
break;
case DTLS1_VERSION:
str_version = "DTLS 1.0 ";
break;
case DTLS1_BAD_VER:
str_version = "DTLS 1.0 (bad) ";
break;
default:
str_version = "???";
}
if (version == SSL2_VERSION)
{
str_details1 = "???";
if (len > 0)
{
switch (((const unsigned char*)buf)[0])
{
case 0:
str_details1 = ", ERROR:";
str_details2 = " ???";
if (len >= 3)
{
unsigned err = (((const unsigned char*)buf)[1]<<8) + ((const unsigned char*)buf)[2];
switch (err)
{
case 0x0001:
str_details2 = " NO-CIPHER-ERROR";
break;
case 0x0002:
str_details2 = " NO-CERTIFICATE-ERROR";
break;
case 0x0004:
str_details2 = " BAD-CERTIFICATE-ERROR";
break;
case 0x0006:
str_details2 = " UNSUPPORTED-CERTIFICATE-TYPE-ERROR";
break;
}
}
break;
case 1:
str_details1 = ", CLIENT-HELLO";
break;
case 2:
str_details1 = ", CLIENT-MASTER-KEY";
break;
case 3:
str_details1 = ", CLIENT-FINISHED";
break;
case 4:
str_details1 = ", SERVER-HELLO";
break;
case 5:
str_details1 = ", SERVER-VERIFY";
break;
case 6:
str_details1 = ", SERVER-FINISHED";
break;
case 7:
str_details1 = ", REQUEST-CERTIFICATE";
break;
case 8:
str_details1 = ", CLIENT-CERTIFICATE";
break;
}
}
}
if (version == SSL3_VERSION ||
version == TLS1_VERSION ||
version == DTLS1_VERSION ||
version == DTLS1_BAD_VER)
{
switch (content_type)
{
case 20:
str_content_type = "ChangeCipherSpec";
break;
case 21:
str_content_type = "Alert";
break;
case 22:
str_content_type = "Handshake";
break;
}
if (content_type == 21) /* Alert */
{
str_details1 = ", ???";
if (len == 2)
{
switch (((const unsigned char*)buf)[0])
{
case 1:
str_details1 = ", warning";
break;
case 2:
str_details1 = ", fatal";
break;
}
str_details2 = " ???";
switch (((const unsigned char*)buf)[1])
{
case 0:
str_details2 = " close_notify";
break;
case 10:
str_details2 = " unexpected_message";
break;
case 20:
str_details2 = " bad_record_mac";
break;
case 21:
str_details2 = " decryption_failed";
break;
case 22:
str_details2 = " record_overflow";
break;
case 30:
str_details2 = " decompression_failure";
break;
case 40:
str_details2 = " handshake_failure";
break;
case 42:
str_details2 = " bad_certificate";
break;
case 43:
str_details2 = " unsupported_certificate";
break;
case 44:
str_details2 = " certificate_revoked";
break;
case 45:
str_details2 = " certificate_expired";
break;
case 46:
str_details2 = " certificate_unknown";
break;
case 47:
str_details2 = " illegal_parameter";
break;
case 48:
str_details2 = " unknown_ca";
break;
case 49:
str_details2 = " access_denied";
break;
case 50:
str_details2 = " decode_error";
break;
case 51:
str_details2 = " decrypt_error";
break;
case 60:
str_details2 = " export_restriction";
break;
case 70:
str_details2 = " protocol_version";
break;
case 71:
str_details2 = " insufficient_security";
break;
case 80:
str_details2 = " internal_error";
break;
case 90:
str_details2 = " user_canceled";
break;
case 100:
str_details2 = " no_renegotiation";
break;
case 110:
str_details2 = " unsupported_extension";
break;
case 111:
str_details2 = " certificate_unobtainable";
break;
case 112:
str_details2 = " unrecognized_name";
break;
case 113:
str_details2 = " bad_certificate_status_response";
break;
case 114:
str_details2 = " bad_certificate_hash_value";
break;
case 115:
str_details2 = " unknown_psk_identity";
break;
}
}
}
if (content_type == 22) /* Handshake */
{
str_details1 = "???";
if (len > 0)
{
switch (((const unsigned char*)buf)[0])
{
case 0:
str_details1 = ", HelloRequest";
break;
case 1:
str_details1 = ", ClientHello";
break;
case 2:
str_details1 = ", ServerHello";
break;
case 3:
str_details1 = ", HelloVerifyRequest";
break;
case 11:
str_details1 = ", Certificate";
break;
case 12:
str_details1 = ", ServerKeyExchange";
break;
case 13:
str_details1 = ", CertificateRequest";
break;
case 14:
str_details1 = ", ServerHelloDone";
break;
case 15:
str_details1 = ", CertificateVerify";
break;
case 16:
str_details1 = ", ClientKeyExchange";
break;
case 20:
str_details1 = ", Finished";
break;
}
}
}
}
BIO_printf(bio, "%s %s%s [length %04lx]%s%s\n", str_write_p, str_version, str_content_type, (unsigned long)len, str_details1, str_details2);
if (len > 0)
{
size_t num, i;
BIO_printf(bio, " ");
num = len;
#if 0
if (num > 16)
num = 16;
#endif
for (i = 0; i < num; i++)
{
if (i % 16 == 0 && i > 0)
BIO_printf(bio, "\n ");
BIO_printf(bio, " %02x", ((const unsigned char*)buf)[i]);
}
if (i < len)
BIO_printf(bio, " ...");
BIO_printf(bio, "\n");
}
(void)BIO_flush(bio);
}
| 1 |
[] |
openssl
|
4817504d069b4c5082161b02a22116ad75f822b1
| 292,758,813,900,585,440,000,000,000,000,000,000,000 | 290 |
PR: 2658
Submitted by: Robin Seggelmann <[email protected]>
Reviewed by: steve
Support for TLS/DTLS heartbeats.
|
fetch_var_cell_from_evbuffer(struct evbuffer *buf, var_cell_t **out,
int linkproto)
{
char *hdr = NULL;
int free_hdr = 0;
size_t n;
size_t buf_len;
uint8_t command;
uint16_t cell_length;
var_cell_t *cell;
int result = 0;
const int wide_circ_ids = linkproto >= MIN_LINK_PROTO_FOR_WIDE_CIRC_IDS;
const int circ_id_len = get_circ_id_size(wide_circ_ids);
const unsigned header_len = get_var_cell_header_size(wide_circ_ids);
*out = NULL;
buf_len = evbuffer_get_length(buf);
if (buf_len < header_len)
return 0;
n = inspect_evbuffer(buf, &hdr, header_len, &free_hdr, NULL);
tor_assert(n >= header_len);
command = get_uint8(hdr + circ_id_len);
if (!(cell_command_is_var_length(command, linkproto))) {
goto done;
}
cell_length = ntohs(get_uint16(hdr + circ_id_len + 1));
if (buf_len < (size_t)(header_len+cell_length)) {
result = 1; /* Not all here yet. */
goto done;
}
cell = var_cell_new(cell_length);
cell->command = command;
if (wide_circ_ids)
cell->circ_id = ntohl(get_uint32(hdr));
else
cell->circ_id = ntohs(get_uint16(hdr));
evbuffer_drain(buf, header_len);
evbuffer_remove(buf, cell->payload, cell_length);
*out = cell;
result = 1;
done:
if (free_hdr && hdr)
tor_free(hdr);
return result;
}
| 0 |
[] |
tor
|
19df037e53331ae528b876f225be08f198e0f8b6
| 26,029,921,911,980,855,000,000,000,000,000,000,000 | 50 |
Log malformed hostnames in socks5 request respecting SafeLogging
|
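"Respecting SafeLogging" means the hostname must pass through tor's scrubbing helpers rather than a raw %s. A one-line fragment of that pattern; the helper and log-domain names follow tor's conventions but are assumptions here:

```c
/* Scrub or escape the attacker-controlled hostname according to the
 * user's SafeLogging setting before it reaches the log file. */
log_warn(LD_APP, "Malformed hostname %s in SOCKS5 request; rejecting.",
         escaped_safe_str_client(req->address));
```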
hook_config (struct t_weechat_plugin *plugin, const char *option,
t_hook_callback_config *callback, void *callback_data)
{
struct t_hook *new_hook;
struct t_hook_config *new_hook_config;
int priority;
const char *ptr_option;
if (!callback)
return NULL;
new_hook = malloc (sizeof (*new_hook));
if (!new_hook)
return NULL;
new_hook_config = malloc (sizeof (*new_hook_config));
if (!new_hook_config)
{
free (new_hook);
return NULL;
}
hook_get_priority_and_name (option, &priority, &ptr_option);
hook_init_data (new_hook, plugin, HOOK_TYPE_CONFIG, priority,
callback_data);
new_hook->hook_data = new_hook_config;
new_hook_config->callback = callback;
new_hook_config->option = strdup ((ptr_option) ? ptr_option :
((option) ? option : ""));
hook_add_to_list (new_hook);
return new_hook;
}
| 0 |
[
"CWE-20"
] |
weechat
|
c265cad1c95b84abfd4e8d861f25926ef13b5d91
| 279,621,123,870,311,600,000,000,000,000,000,000,000 | 34 |
Fix verification of SSL certificates by calling gnutls verify callback (patch #7459)
|
TEST_F(ConnectionManagerUtilityTest, SamplingWithRouteOverride) {
EXPECT_CALL(
runtime_.snapshot_,
featureEnabled("tracing.random_sampling", An<const envoy::type::v3::FractionalPercent&>(), _))
.WillOnce(Return(false));
NiceMock<Router::MockRouteTracing> tracingConfig;
EXPECT_CALL(route_, tracingConfig()).WillRepeatedly(Return(&tracingConfig));
const envoy::type::v3::FractionalPercent percent;
EXPECT_CALL(tracingConfig, getClientSampling()).WillRepeatedly(ReturnRef(percent));
EXPECT_CALL(tracingConfig, getRandomSampling()).WillRepeatedly(ReturnRef(percent));
EXPECT_CALL(tracingConfig, getOverallSampling()).WillRepeatedly(ReturnRef(percent));
Http::TestRequestHeaderMapImpl request_headers{
{"x-request-id", "125a4afb-6f55-44ba-ad80-413f09f48a28"}};
callMutateRequestHeaders(request_headers, Protocol::Http2);
EXPECT_EQ(Tracing::Reason::NotTraceable, request_id_extension_->getTraceReason(request_headers));
}
| 0 |
[
"CWE-22"
] |
envoy
|
5333b928d8bcffa26ab19bf018369a835f697585
| 72,141,285,003,683,550,000,000,000,000,000,000,000 | 19 |
Implement handling of escaped slash characters in URL path
Fixes: CVE-2021-29492
Signed-off-by: Yan Avlasov <[email protected]>
|
static RRebaseInfo *r_rebase_info_new_from_mach0(RBuffer *cache_buf, struct MACH0_(obj_t) *mach0) {
RFileRange *rebase_ranges = NULL;
struct section_t *sections = NULL;
if (!(sections = MACH0_(get_sections) (mach0))) {
return NULL;
}
ut64 starts_offset = 0, starts_size = 0;
int i = 0;
for (; !sections[i].last; i++) {
if (strstr (sections[i].name, "__TEXT.__thread_starts")) {
starts_offset = sections[i].offset;
starts_size = sections[i].size;
break;
}
}
R_FREE (sections);
ut64 kernel_base = 0;
struct MACH0_(segment_command) *seg;
int nsegs = R_MIN (mach0->nsegs, 128);
for (i = 0; i < nsegs; i++) {
char segname[17];
seg = &mach0->segs[i];
r_str_ncpy (segname, seg->segname, 17);
if (!strncmp (segname, "__TEXT", 6) && segname[6] == '\0') {
kernel_base = seg->vmaddr;
break;
}
}
if (starts_offset == 0 || starts_size == 0 || kernel_base == 0) {
return NULL;
}
int n_starts = starts_size / 4;
if (n_starts <= 1) {
return NULL;
}
rebase_ranges = R_NEWS0 (RFileRange, n_starts - 1);
if (rebase_ranges == NULL) {
return NULL;
}
ut64 multiplier = 4;
for (i = 0; i != n_starts; i++) {
ut8 bytes[4];
if (r_buf_read_at (cache_buf, starts_offset + i * 4, bytes, 4) < 4) {
goto beach;
}
if (i == 0) {
multiplier += 4 * (r_read_le32 (bytes) & 1);
continue;
}
rebase_ranges[i - 1].offset = r_read_le32 (bytes);
rebase_ranges[i - 1].size = UT64_MAX;
}
RRebaseInfo *rebase_info = R_NEW0 (RRebaseInfo);
if (rebase_info == NULL) {
goto beach;
}
rebase_info->ranges = rebase_ranges;
rebase_info->n_ranges = n_starts - 1;
rebase_info->multiplier = multiplier;
rebase_info->kernel_base = kernel_base;
return rebase_info;
beach:
R_FREE (rebase_ranges);
return NULL;
}
| 0 |
[
"CWE-476"
] |
radare2
|
feaa4e7f7399c51ee6f52deb84dc3f795b4035d6
| 231,665,564,570,125,630,000,000,000,000,000,000,000 | 79 |
Fix null deref in xnu.kernelcache ##crash
* Reported by @xshad3 via huntr.dev
|
ConnPoolImplBase::tryCreateNewConnection(float global_preconnect_ratio) {
// There are already enough CONNECTING connections for the number of queued streams.
if (!shouldCreateNewConnection(global_preconnect_ratio)) {
ENVOY_LOG(trace, "not creating a new connection, shouldCreateNewConnection returned false.");
return ConnectionResult::ShouldNotConnect;
}
const bool can_create_connection =
host_->cluster().resourceManager(priority_).connections().canCreate();
if (!can_create_connection) {
host_->cluster().stats().upstream_cx_overflow_.inc();
}
// If we are at the connection circuit-breaker limit due to other upstreams having
// too many open connections, and this upstream has no connections, always create one, to
// prevent pending streams being queued to this upstream with no way to be processed.
if (can_create_connection ||
(ready_clients_.empty() && busy_clients_.empty() && connecting_clients_.empty())) {
ENVOY_LOG(debug, "creating a new connection");
ActiveClientPtr client = instantiateActiveClient();
if (client.get() == nullptr) {
ENVOY_LOG(trace, "connection creation failed");
return ConnectionResult::FailedToCreateConnection;
}
ASSERT(client->state() == ActiveClient::State::CONNECTING);
ASSERT(std::numeric_limits<uint64_t>::max() - connecting_stream_capacity_ >=
client->effectiveConcurrentStreamLimit());
ASSERT(client->real_host_description_);
// Increase the connecting capacity to reflect the streams this connection can serve.
state_.incrConnectingAndConnectedStreamCapacity(client->effectiveConcurrentStreamLimit());
connecting_stream_capacity_ += client->effectiveConcurrentStreamLimit();
LinkedList::moveIntoList(std::move(client), owningList(client->state()));
return can_create_connection ? ConnectionResult::CreatedNewConnection
: ConnectionResult::CreatedButRateLimited;
} else {
ENVOY_LOG(trace, "not creating a new connection: connection constrained");
return ConnectionResult::NoConnectionRateLimited;
}
}
| 0 |
[
"CWE-703",
"CWE-674"
] |
envoy
|
4b6dd3b53cd5c6d4d4df378a2fc62c1707522b31
| 169,703,051,411,475,950,000,000,000,000,000,000,000 | 38 |
CVE-2022-23606
When a connection is closed, avoid closing other connections, to prevent deep recursion when a large number of idle connections are closed at the start of a pool drain.
Signed-off-by: Yan Avlasov <[email protected]>
|
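The CVE text describes a close-triggers-close cascade: draining one idle connection re-enters the drain and closes another, recursing once per connection. A self-contained sketch of the usual guard, coalescing re-entrant requests behind a flag instead of recursing (pattern only, not Envoy's code):

```c
struct pool {
    int draining;       /* a drain pass is already on the stack */
    int drain_pending;  /* a close arrived during that pass */
};

static void drain_idle(struct pool *p)
{
    if (p->draining) {        /* re-entered from a close callback */
        p->drain_pending = 1; /* remember, don't recurse */
        return;
    }
    p->draining = 1;
    do {
        p->drain_pending = 0;
        /* ... close idle connections; their callbacks may call
         * drain_idle() again, which now only sets the flag ... */
    } while (p->drain_pending);
    p->draining = 0;
}
```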
TEST_F(Http1ServerConnectionImplTest, WatermarkTest) {
EXPECT_CALL(connection_, bufferLimit()).WillOnce(Return(10));
initialize();
NiceMock<MockRequestDecoder> decoder;
Http::ResponseEncoder* response_encoder = nullptr;
EXPECT_CALL(callbacks_, newStream(_, _))
.WillOnce(Invoke([&](ResponseEncoder& encoder, bool) -> RequestDecoder& {
response_encoder = &encoder;
return decoder;
}));
Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\n\r\n");
auto status = codec_->dispatch(buffer);
Http::MockStreamCallbacks stream_callbacks;
response_encoder->getStream().addCallbacks(stream_callbacks);
// Fake a call from the underlying Network::Connection and verify the stream is notified.
EXPECT_CALL(stream_callbacks, onAboveWriteBufferHighWatermark());
static_cast<ServerConnection*>(codec_.get())
->onUnderlyingConnectionAboveWriteBufferHighWatermark();
EXPECT_CALL(stream_callbacks, onAboveWriteBufferHighWatermark());
EXPECT_CALL(stream_callbacks, onBelowWriteBufferLowWatermark());
TestResponseHeaderMapImpl headers{{":status", "200"}};
response_encoder->encodeHeaders(headers, false);
// Fake out the underlying Network::Connection buffer being drained.
EXPECT_CALL(stream_callbacks, onBelowWriteBufferLowWatermark());
static_cast<ServerConnection*>(codec_.get())
->onUnderlyingConnectionBelowWriteBufferLowWatermark();
}
| 0 |
[
"CWE-770"
] |
envoy
|
7ca28ff7d46454ae930e193d97b7d08156b1ba59
| 236,462,682,603,660,020,000,000,000,000,000,000,000 | 33 |
[http1] Include request URL in request header size computation, and reject partial headers that exceed configured limits (#145)
Signed-off-by: antonio <[email protected]>
|
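The fix title says the request URL now counts against the same budget as the header block, and partial headers over the limit are rejected immediately. A self-contained sketch of that accounting, with illustrative names:

```c
#include <stddef.h>
#include <string.h>

/* Charge the request line's URL to the header budget and reject as
 * soon as the running total exceeds it, even before the header
 * section is complete, so an attacker cannot park oversized state. */
static int headers_within_limit(size_t header_bytes, const char *url,
                                size_t max_bytes)
{
    return header_bytes + strlen(url) <= max_bytes;
}
```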
static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a)
{
struct tcf_hashinfo *hinfo = a->ops->hinfo;
struct hlist_head *head;
struct hlist_node *n;
struct tcf_common *p;
struct nlattr *nest;
int i = 0, n_i = 0;
int ret = -EINVAL;
nest = nla_nest_start(skb, a->order);
if (nest == NULL)
goto nla_put_failure;
if (nla_put_string(skb, TCA_KIND, a->ops->kind))
goto nla_put_failure;
for (i = 0; i < (hinfo->hmask + 1); i++) {
head = &hinfo->htab[tcf_hash(i, hinfo->hmask)];
hlist_for_each_entry_safe(p, n, head, tcfc_head) {
a->priv = p;
ret = tcf_hash_release(a, 0);
if (ret == ACT_P_DELETED) {
module_put(a->ops->owner);
n_i++;
} else if (ret < 0)
goto nla_put_failure;
}
}
if (nla_put_u32(skb, TCA_FCNT, n_i))
goto nla_put_failure;
nla_nest_end(skb, nest);
return n_i;
nla_put_failure:
nla_nest_cancel(skb, nest);
return ret;
}
| 0 |
[
"CWE-264"
] |
net
|
90f62cf30a78721641e08737bda787552428061e
| 235,715,112,932,697,800,000,000,000,000,000,000,000 | 36 |
net: Use netlink_ns_capable to verify the permisions of netlink messages
It is possible by passing a netlink socket to a more privileged
executable and then to fool that executable into writing to the socket
data that happens to be valid netlink message to do something that
privileged executable did not intend to do.
To keep this from happening replace bare capable and ns_capable calls
with netlink_capable, netlink_net_calls and netlink_ns_capable calls.
Which act the same as the previous calls except they verify that the
opener of the socket had the desired permissions as well.
Reported-by: Andy Lutomirski <[email protected]>
Signed-off-by: "Eric W. Biederman" <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
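The message describes the pattern mechanically: every bare `capable()`/`ns_capable()` in a netlink handler becomes a variant that also checks the socket's opener. A fragment of the call this commit introduces:

```c
/* Verify the current task *and* whoever opened the netlink socket, so
 * a privileged process that inherited the fd cannot be fooled into
 * acting on an unprivileged opener's message. */
if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
    return -EPERM;
```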
static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
{
struct page *page;
unsigned int i;
if (!boot_cpu_has_bug(X86_BUG_L1TF)) {
l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED;
return 0;
}
if (!enable_ept) {
l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
return 0;
}
if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) {
u64 msr;
rdmsrl(MSR_IA32_ARCH_CAPABILITIES, msr);
if (msr & ARCH_CAP_SKIP_VMENTRY_L1DFLUSH) {
l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED;
return 0;
}
}
/* If set to auto use the default l1tf mitigation method */
if (l1tf == VMENTER_L1D_FLUSH_AUTO) {
switch (l1tf_mitigation) {
case L1TF_MITIGATION_OFF:
l1tf = VMENTER_L1D_FLUSH_NEVER;
break;
case L1TF_MITIGATION_FLUSH_NOWARN:
case L1TF_MITIGATION_FLUSH:
case L1TF_MITIGATION_FLUSH_NOSMT:
l1tf = VMENTER_L1D_FLUSH_COND;
break;
case L1TF_MITIGATION_FULL:
case L1TF_MITIGATION_FULL_FORCE:
l1tf = VMENTER_L1D_FLUSH_ALWAYS;
break;
}
} else if (l1tf_mitigation == L1TF_MITIGATION_FULL_FORCE) {
l1tf = VMENTER_L1D_FLUSH_ALWAYS;
}
if (l1tf != VMENTER_L1D_FLUSH_NEVER && !vmx_l1d_flush_pages &&
!boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
/*
* This allocation for vmx_l1d_flush_pages is not tied to a VM
* lifetime and so should not be charged to a memcg.
*/
page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
if (!page)
return -ENOMEM;
vmx_l1d_flush_pages = page_address(page);
/*
* Initialize each page with a different pattern in
* order to protect against KSM in the nested
* virtualization case.
*/
for (i = 0; i < 1u << L1D_CACHE_ORDER; ++i) {
memset(vmx_l1d_flush_pages + i * PAGE_SIZE, i + 1,
PAGE_SIZE);
}
}
l1tf_vmx_mitigation = l1tf;
if (l1tf != VMENTER_L1D_FLUSH_NEVER)
static_branch_enable(&vmx_l1d_should_flush);
else
static_branch_disable(&vmx_l1d_should_flush);
if (l1tf == VMENTER_L1D_FLUSH_COND)
static_branch_enable(&vmx_l1d_flush_cond);
else
static_branch_disable(&vmx_l1d_flush_cond);
return 0;
}
| 0 |
[
"CWE-787"
] |
linux
|
04c4f2ee3f68c9a4bf1653d15f1a9a435ae33f7a
| 281,776,645,786,467,650,000,000,000,000,000,000,000 | 80 |
KVM: VMX: Don't use vcpu->run->internal.ndata as an array index
__vmx_handle_exit() uses vcpu->run->internal.ndata as an index for
an array access. Since vcpu->run is (can be) mapped to a user address
space with a writer permission, the 'ndata' could be updated by the
user process at anytime (the user process can set it to outside the
bounds of the array).
So, it is not safe that __vmx_handle_exit() uses the 'ndata' that way.
Fixes: 1aa561b1a4c0 ("kvm: x86: Add "last CPU" to some KVM_EXIT information")
Signed-off-by: Reiji Watanabe <[email protected]>
Reviewed-by: Jim Mattson <[email protected]>
Message-Id: <[email protected]>
Cc: [email protected]
Signed-off-by: Paolo Bonzini <[email protected]>
|
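The described fix is to stop trusting a counter that userspace can rewrite mid-use: build the array through a local index and publish `ndata` once. A fragment of that shape; field names beyond `ndata`/`data` are illustrative:

```c
/* kvm_run is mapped writable into userspace, so run->internal.ndata
 * must never be used as a live array index: accumulate locally,
 * store once after all bounded writes are done. */
u32 ndata = 0;

run->internal.data[ndata++] = exit_reason;
run->internal.data[ndata++] = exit_qualification;
run->internal.ndata = ndata;  /* single, final publication */
```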
static int shorten_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame_ptr, AVPacket *avpkt)
{
AVFrame *frame = data;
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
ShortenContext *s = avctx->priv_data;
int i, input_buf_size = 0;
int ret;
/* allocate internal bitstream buffer */
if (s->max_framesize == 0) {
void *tmp_ptr;
s->max_framesize = 1024; // should hopefully be enough for the first header
tmp_ptr = av_fast_realloc(s->bitstream, &s->allocated_bitstream_size,
s->max_framesize);
if (!tmp_ptr) {
av_log(avctx, AV_LOG_ERROR, "error allocating bitstream buffer\n");
return AVERROR(ENOMEM);
}
s->bitstream = tmp_ptr;
}
/* append current packet data to bitstream buffer */
if (1 && s->max_framesize) { //FIXME truncated
buf_size = FFMIN(buf_size, s->max_framesize - s->bitstream_size);
input_buf_size = buf_size;
if (s->bitstream_index + s->bitstream_size + buf_size >
s->allocated_bitstream_size) {
memmove(s->bitstream, &s->bitstream[s->bitstream_index],
s->bitstream_size);
s->bitstream_index = 0;
}
if (buf)
memcpy(&s->bitstream[s->bitstream_index + s->bitstream_size], buf,
buf_size);
buf = &s->bitstream[s->bitstream_index];
buf_size += s->bitstream_size;
s->bitstream_size = buf_size;
/* do not decode until buffer has at least max_framesize bytes or
* the end of the file has been reached */
if (buf_size < s->max_framesize && avpkt->data) {
*got_frame_ptr = 0;
return input_buf_size;
}
}
/* init and position bitstream reader */
init_get_bits(&s->gb, buf, buf_size * 8);
skip_bits(&s->gb, s->bitindex);
/* process header or next subblock */
if (!s->got_header) {
if ((ret = read_header(s)) < 0)
return ret;
*got_frame_ptr = 0;
goto finish_frame;
}
/* if quit command was read previously, don't decode anything */
if (s->got_quit_command) {
*got_frame_ptr = 0;
return avpkt->size;
}
s->cur_chan = 0;
while (s->cur_chan < s->channels) {
unsigned cmd;
int len;
if (get_bits_left(&s->gb) < 3 + FNSIZE) {
*got_frame_ptr = 0;
break;
}
cmd = get_ur_golomb_shorten(&s->gb, FNSIZE);
if (cmd > FN_VERBATIM) {
av_log(avctx, AV_LOG_ERROR, "unknown shorten function %d\n", cmd);
*got_frame_ptr = 0;
break;
}
if (!is_audio_command[cmd]) {
/* process non-audio command */
switch (cmd) {
case FN_VERBATIM:
len = get_ur_golomb_shorten(&s->gb, VERBATIM_CKSIZE_SIZE);
while (len--)
get_ur_golomb_shorten(&s->gb, VERBATIM_BYTE_SIZE);
break;
case FN_BITSHIFT:
s->bitshift = get_ur_golomb_shorten(&s->gb, BITSHIFTSIZE);
break;
case FN_BLOCKSIZE: {
unsigned blocksize = get_uint(s, av_log2(s->blocksize));
if (blocksize > s->blocksize) {
av_log(avctx, AV_LOG_ERROR,
"Increasing block size is not supported\n");
return AVERROR_PATCHWELCOME;
}
if (!blocksize || blocksize > MAX_BLOCKSIZE) {
av_log(avctx, AV_LOG_ERROR, "invalid or unsupported "
"block size: %d\n", blocksize);
return AVERROR(EINVAL);
}
s->blocksize = blocksize;
break;
}
case FN_QUIT:
s->got_quit_command = 1;
break;
}
if (cmd == FN_BLOCKSIZE || cmd == FN_QUIT) {
*got_frame_ptr = 0;
break;
}
} else {
/* process audio command */
int residual_size = 0;
int channel = s->cur_chan;
int32_t coffset;
/* get Rice code for residual decoding */
if (cmd != FN_ZERO) {
residual_size = get_ur_golomb_shorten(&s->gb, ENERGYSIZE);
/* This is a hack as version 0 differed in the definition
* of get_sr_golomb_shorten(). */
if (s->version == 0)
residual_size--;
}
/* calculate sample offset using means from previous blocks */
if (s->nmean == 0)
coffset = s->offset[channel][0];
else {
int32_t sum = (s->version < 2) ? 0 : s->nmean / 2;
for (i = 0; i < s->nmean; i++)
sum += s->offset[channel][i];
coffset = sum / s->nmean;
if (s->version >= 2)
coffset >>= FFMIN(1, s->bitshift);
}
/* decode samples for this channel */
if (cmd == FN_ZERO) {
for (i = 0; i < s->blocksize; i++)
s->decoded[channel][i] = 0;
} else {
if ((ret = decode_subframe_lpc(s, cmd, channel,
residual_size, coffset)) < 0)
return ret;
}
/* update means with info from the current block */
if (s->nmean > 0) {
int32_t sum = (s->version < 2) ? 0 : s->blocksize / 2;
for (i = 0; i < s->blocksize; i++)
sum += s->decoded[channel][i];
for (i = 1; i < s->nmean; i++)
s->offset[channel][i - 1] = s->offset[channel][i];
if (s->version < 2)
s->offset[channel][s->nmean - 1] = sum / s->blocksize;
else
s->offset[channel][s->nmean - 1] = (sum / s->blocksize) << s->bitshift;
}
/* copy wrap samples for use with next block */
for (i = -s->nwrap; i < 0; i++)
s->decoded[channel][i] = s->decoded[channel][i + s->blocksize];
/* shift samples to add in unused zero bits which were removed
* during encoding */
fix_bitshift(s, s->decoded[channel]);
/* if this is the last channel in the block, output the samples */
s->cur_chan++;
if (s->cur_chan == s->channels) {
/* get output buffer */
frame->nb_samples = s->blocksize;
if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
/* interleave output */
output_buffer((int16_t **)frame->extended_data, s->channels,
s->blocksize, s->decoded);
*got_frame_ptr = 1;
}
}
}
if (s->cur_chan < s->channels)
*got_frame_ptr = 0;
finish_frame:
s->bitindex = get_bits_count(&s->gb) - 8 * (get_bits_count(&s->gb) / 8);
i = get_bits_count(&s->gb) / 8;
if (i > buf_size) {
av_log(s->avctx, AV_LOG_ERROR, "overread: %d\n", i - buf_size);
s->bitstream_size = 0;
s->bitstream_index = 0;
return AVERROR_INVALIDDATA;
}
if (s->bitstream_size) {
s->bitstream_index += i;
s->bitstream_size -= i;
return input_buf_size;
} else
return i;
}
| 1 |
[
"CWE-703"
] |
FFmpeg
|
1713eec29add37b654ec6bf262b843d139c1ffc6
| 266,866,423,995,128,970,000,000,000,000,000,000,000 | 214 |
shorten: pad the internal bitstream buffer
Fixes invalid reads.
Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
CC:[email protected]
|
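"Pad the internal bitstream buffer" means over-allocating so the bitreader's lookahead never touches bytes past the valid data. A fragment against the allocation shown above, using the libavcodec padding constant of that era (FF_INPUT_BUFFER_PADDING_SIZE); the exact placement is an assumption:

```c
/* Allocate with decoder padding and zero the tail so get_bits()
 * lookahead past the last valid byte reads zeros, not heap neighbours. */
tmp_ptr = av_fast_realloc(s->bitstream, &s->allocated_bitstream_size,
                          s->max_framesize + FF_INPUT_BUFFER_PADDING_SIZE);
if (!tmp_ptr)
    return AVERROR(ENOMEM);
s->bitstream = tmp_ptr;
memset(s->bitstream + s->max_framesize, 0, FF_INPUT_BUFFER_PADDING_SIZE);
```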
static void i40e_service_timer(struct timer_list *t)
{
struct i40e_pf *pf = from_timer(pf, t, service_timer);
mod_timer(&pf->service_timer,
round_jiffies(jiffies + pf->service_timer_period));
i40e_service_event_schedule(pf);
}
| 0 |
[
"CWE-400",
"CWE-401"
] |
linux
|
27d461333459d282ffa4a2bdb6b215a59d493a8f
| 199,017,879,288,820,050,000,000,000,000,000,000,000 | 8 |
i40e: prevent memory leak in i40e_setup_macvlans
In i40e_setup_macvlans if i40e_setup_channel fails the allocated memory
for ch should be released.
Signed-off-by: Navid Emamdoost <[email protected]>
Tested-by: Andrew Bowers <[email protected]>
Signed-off-by: Jeff Kirsher <[email protected]>
|
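The message fully specifies the leak: `ch` is allocated, `i40e_setup_channel()` fails, and the error path returns without freeing. A fragment matching that description (a sketch, not the verbatim diff):

```c
/* Free the channel struct on the setup-failure path instead of
 * leaking it on every failed macvlan setup. */
ch = kzalloc(sizeof(*ch), GFP_KERNEL);
if (!ch)
    return -ENOMEM;
if (!i40e_setup_channel(pf, vsi, ch)) {
    dev_info(&pf->pdev->dev, "Failed to setup channel\n");
    kfree(ch);  /* the release the commit adds */
    return -EINVAL;
}
```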
static void jsB_new_RegExp(js_State *J)
{
js_Regexp *old;
const char *pattern;
int flags;
if (js_isregexp(J, 1)) {
if (js_isdefined(J, 2))
js_typeerror(J, "cannot supply flags when creating one RegExp from another");
old = js_toregexp(J, 1);
pattern = old->source;
flags = old->flags;
} else if (js_isundefined(J, 1)) {
pattern = "(?:)";
flags = 0;
} else {
pattern = js_tostring(J, 1);
flags = 0;
}
if (strlen(pattern) == 0)
pattern = "(?:)";
if (js_isdefined(J, 2)) {
const char *s = js_tostring(J, 2);
int g = 0, i = 0, m = 0;
while (*s) {
if (*s == 'g') ++g;
else if (*s == 'i') ++i;
else if (*s == 'm') ++m;
else js_syntaxerror(J, "invalid regular expression flag: '%c'", *s);
++s;
}
if (g > 1) js_syntaxerror(J, "invalid regular expression flag: 'g'");
if (i > 1) js_syntaxerror(J, "invalid regular expression flag: 'i'");
if (m > 1) js_syntaxerror(J, "invalid regular expression flag: 'm'");
if (g) flags |= JS_REGEXP_G;
if (i) flags |= JS_REGEXP_I;
if (m) flags |= JS_REGEXP_M;
}
js_newregexp(J, pattern, flags);
}
| 0 |
[
"CWE-400",
"CWE-674",
"CWE-787"
] |
mujs
|
00d4606c3baf813b7b1c176823b2729bf51002a2
| 45,404,125,818,721,530,000,000,000,000,000,000,000 | 43 |
Bug 700937: Limit recursion in regexp matcher.
Also handle negative return code as an error in the JS bindings.
|
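"Limit recursion in regexp matcher" is the standard depth-budget fix: thread a counter through the recursive match and fail with a regular error instead of overflowing the C stack. A generic sketch; the bound and names are illustrative, not mujs's internals:

```c
#define MAX_MATCH_DEPTH 1000  /* illustrative bound */

/* Backtracking matcher with an explicit depth budget: a crafted
 * pattern now produces an error instead of a stack overflow. */
static int match(const char *re, const char *s, int depth)
{
    if (depth > MAX_MATCH_DEPTH)
        return -1;  /* caller reports "regular expression too complex" */
    /* ... each recursive step passes depth + 1 ... */
    (void)re; (void)s;
    return 0;
}
```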
Luv32fromLuv48(LogLuvState* sp, uint8* op, tmsize_t n)
{
uint32* luv = (uint32*) sp->tbuf;
int16* luv3 = (int16*) op;
if (sp->encode_meth == SGILOGENCODE_NODITHER) {
while (n-- > 0) {
*luv++ = (uint32)luv3[0] << 16 |
(luv3[1]*(uint32)(UVSCALE+.5) >> 7 & 0xff00) |
(luv3[2]*(uint32)(UVSCALE+.5) >> 15 & 0xff);
luv3 += 3;
}
return;
}
while (n-- > 0) {
*luv++ = (uint32)luv3[0] << 16 |
(itrunc(luv3[1]*(UVSCALE/(1<<15)), sp->encode_meth) << 8 & 0xff00) |
(itrunc(luv3[2]*(UVSCALE/(1<<15)), sp->encode_meth) & 0xff);
luv3 += 3;
}
}
target: 0 | cwe: ["CWE-787"] | project: libtiff | commit: aaab5c3c9d2a2c6984f23ccbc79702610439bc65 | hash: 109,022,964,793,335,280,000,000,000,000,000,000,000 | size: 21
message:
* libtiff/tif_luv.c: fix potential out-of-bound writes in decode
functions in non debug builds by replacing assert()s by regular if
checks (bugzilla #2522).
Fix potential out-of-bound reads in case of short input data.
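
The key point in this message is that assert() compiles away under NDEBUG, so in release builds the out-of-bounds write stays reachable; a plain if-check runs in every build. A minimal sketch of the transformation (hypothetical function, not the tif_luv.c code):

#include <stddef.h>

static int decode_into(short *out, size_t out_cap, size_t need)
{
    (void)out;
    if (need > out_cap)   /* was: assert(need <= out_cap); */
        return 0;         /* fail the decode instead of writing past out */
    /* ... write `need` elements into out ... */
    return 1;
}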
int gnutls_x509_crt_get_version(gnutls_x509_crt_t cert)
{
opaque version[5];
int len, result;
if (cert == NULL) {
gnutls_assert();
return GNUTLS_E_INVALID_REQUEST;
}
len = sizeof(version);
if ((result =
asn1_read_value(cert->cert, "tbsCertificate.version", version,
&len)) != ASN1_SUCCESS) {
if (result == ASN1_ELEMENT_NOT_FOUND)
return 1; /* the DEFAULT version */
gnutls_assert();
return _gnutls_asn2err(result);
}
return (int) version[0] + 1;
}
target: 0 | cwe: [] | project: gnutls | commit: 112d537da5f3500f14316db26d18c37d678a5e0e | hash: 14,950,731,787,807,170,000,000,000,000,000,000,000 | size: 23
message:
some changes for 64bit machines.
static int print_attach_list (LIST *lp, char op, char *name)
{
while (lp) {
printf("attachments %c%s %s/%s\n", op, name,
((ATTACH_MATCH *)lp->data)->major,
((ATTACH_MATCH *)lp->data)->minor);
lp = lp->next;
}
return 0;
}
target: 0 | cwe: ["CWE-668"] | project: mutt | commit: 6d0624411a979e2e1d76af4dd97d03f47679ea4a | hash: 175,482,662,465,029,000,000,000,000,000,000,000,000 | size: 11
message:
use a 64-bit random value in temporary filenames.
closes #3158
TO_EDGE_CONN(connection_t *c)
{
tor_assert(c->magic == EDGE_CONNECTION_MAGIC ||
c->magic == ENTRY_CONNECTION_MAGIC);
return DOWNCAST(edge_connection_t, c);
}
target: 0 | cwe: ["CWE-532"] | project: tor | commit: 80c404c4b79f3bcba3fc4585d4c62a62a04f3ed9 | hash: 334,713,317,825,546,500,000,000,000,000,000,000,000 | size: 6
message:
Log warning when connecting to soon-to-be-deprecated v2 onions.
static inline bool svm_nested_virtualize_tpr(struct kvm_vcpu *vcpu)
{
return is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK);
}
target: 0 | cwe: ["CWE-401"] | project: linux | commit: d80b64ff297e40c2b6f7d7abc1b3eba70d22a068 | hash: 6,452,856,159,503,168,000,000,000,000,000,000,000 | size: 4
message:
KVM: SVM: Fix potential memory leak in svm_cpu_init()
When kmalloc memory for sd->sev_vmcbs failed, we forgot to free the page
held by sd->save_area. Also get rid of the var r as '-ENOMEM' is actually
the only possible outcome here.
Reviewed-by: Liran Alon <[email protected]>
Reviewed-by: Vitaly Kuznetsov <[email protected]>
Signed-off-by: Miaohe Lin <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
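
The message names both halves of the change: free the page behind sd->save_area when the sev_vmcbs allocation fails, and return -ENOMEM directly instead of routing it through a variable. A simplified userspace sketch (malloc/free stand in for the kernel page allocator):

#include <errno.h>
#include <stdlib.h>

struct svm_cpu_data { void *save_area; void **sev_vmcbs; };

static int svm_cpu_init_sketch(struct svm_cpu_data *sd, unsigned int max_sev_asid)
{
    sd->save_area = malloc(4096);   /* stands in for alloc_page() */
    if (!sd->save_area)
        return -ENOMEM;
    sd->sev_vmcbs = calloc(max_sev_asid + 1, sizeof(void *));
    if (!sd->sev_vmcbs) {
        free(sd->save_area);        /* the fix: don't leak the page */
        return -ENOMEM;             /* no intermediate `r` needed */
    }
    return 0;
}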
void diff_flush(struct diff_options *options)
{
struct diff_queue_struct *q = &diff_queued_diff;
int i, output_format = options->output_format;
int separator = 0;
/*
* Order: raw, stat, summary, patch
* or: name/name-status/checkdiff (other bits clear)
*/
if (!q->nr)
goto free_queue;
if (output_format & (DIFF_FORMAT_RAW |
DIFF_FORMAT_NAME |
DIFF_FORMAT_NAME_STATUS |
DIFF_FORMAT_CHECKDIFF)) {
for (i = 0; i < q->nr; i++) {
struct diff_filepair *p = q->queue[i];
if (check_pair_status(p))
flush_one_pair(p, options);
}
separator++;
}
if (output_format & (DIFF_FORMAT_DIFFSTAT|DIFF_FORMAT_SHORTSTAT|DIFF_FORMAT_NUMSTAT)) {
struct diffstat_t diffstat;
memset(&diffstat, 0, sizeof(struct diffstat_t));
diffstat.xm.consume = diffstat_consume;
for (i = 0; i < q->nr; i++) {
struct diff_filepair *p = q->queue[i];
if (check_pair_status(p))
diff_flush_stat(p, options, &diffstat);
}
if (output_format & DIFF_FORMAT_NUMSTAT)
show_numstat(&diffstat, options);
if (output_format & DIFF_FORMAT_DIFFSTAT)
show_stats(&diffstat, options);
if (output_format & DIFF_FORMAT_SHORTSTAT)
show_shortstats(&diffstat, options);
free_diffstat_info(&diffstat);
separator++;
}
if (output_format & DIFF_FORMAT_DIRSTAT)
show_dirstat(options);
if (output_format & DIFF_FORMAT_SUMMARY && !is_summary_empty(q)) {
for (i = 0; i < q->nr; i++)
diff_summary(options->file, q->queue[i]);
separator++;
}
if (output_format & DIFF_FORMAT_PATCH) {
if (separator) {
if (options->stat_sep) {
/* attach patch instead of inline */
fputs(options->stat_sep, options->file);
} else {
putc(options->line_termination, options->file);
}
}
for (i = 0; i < q->nr; i++) {
struct diff_filepair *p = q->queue[i];
if (check_pair_status(p))
diff_flush_patch(p, options);
}
}
if (output_format & DIFF_FORMAT_CALLBACK)
options->format_callback(q, options, options->format_callback_data);
for (i = 0; i < q->nr; i++)
diff_free_filepair(q->queue[i]);
free_queue:
free(q->queue);
q->queue = NULL;
q->nr = q->alloc = 0;
if (options->close_file)
fclose(options->file);
}
target: 0 | cwe: ["CWE-119"] | project: git | commit: fd55a19eb1d49ae54008d932a65f79cd6fda45c9 | hash: 308,738,556,650,741,360,000,000,000,000,000,000,000 | size: 82
message:
Fix buffer overflow in git diff
If PATH_MAX on your system is smaller than a stored path, it may cause
buffer overflow and stack corruption in the diff_addremove() and diff_change()
functions when running git-diff.
Signed-off-by: Dmitry Potapov <[email protected]>
Signed-off-by: Junio C Hamano <[email protected]>
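
The overflow class described here comes from building paths in fixed char buf[PATH_MAX] arrays; sizing the concatenation to the actual strings removes it. A sketch of that shape (illustrative helper, not the actual git patch):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* heap-allocate "base/name" instead of assuming it fits in PATH_MAX */
static char *join_path(const char *base, const char *name)
{
    size_t n = strlen(base) + 1 + strlen(name) + 1;
    char *buf = malloc(n);
    if (!buf)
        return NULL;
    snprintf(buf, n, "%s/%s", base, name);
    return buf;
}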
static void qemu_chr_socket_restart_timer(CharDriverState *chr)
{
TCPCharDriver *s = chr->opaque;
assert(s->connected == 0);
s->reconnect_timer = g_timeout_add_seconds(s->reconnect_time,
socket_reconnect_timeout, chr);
}
target: 0 | cwe: ["CWE-416"] | project: qemu | commit: a4afa548fc6dd9842ed86639b4d37d4d1c4ad480 | hash: 31,060,744,818,248,140,000,000,000,000,000,000,000 | size: 7
message:
char: move front end handlers in CharBackend
Since the handlers are associated with a CharBackend, rather than the
CharDriverState, it is more appropriate to store them in CharBackend. This
avoids the handler copy dance in qemu_chr_fe_set_handlers() then
mux_chr_update_read_handler(), by storing the CharBackend pointer
directly.
Also a mux CharDriver should go through mux->backends[focused], since
chr->be will stay NULL. Before that, it was possible to call
chr->handler by mistake with surprising results, for example through
qemu_chr_be_can_write(), which would result in calling the last set
handler front end, not the one with focus.
Signed-off-by: Marc-André Lureau <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
md_fnv1a(unsigned base, const void* data, size_t n)
{
const unsigned char* buf = (const unsigned char*) data;
unsigned hash = base;
size_t i;
for(i = 0; i < n; i++) {
hash ^= buf[i];
hash *= MD_FNV1A_PRIME;
}
return hash;
}
target: 0 | cwe: ["CWE-125", "CWE-908"] | project: md4c | commit: 4fc808d8fe8d8904f8525bb4231d854f45e23a19 | hash: 317,044,534,151,958,360,000,000,000,000,000,000,000 | size: 13
message:
md_analyze_line: Avoid reading 1 byte beyond the input size.
Fixes #155.
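
An overread of exactly one byte almost always means an unguarded lookahead; the fix is to check the index before dereferencing. A minimal sketch (hypothetical helper; the real change is in md_analyze_line):

#include <stddef.h>

/* only touch data[off + 1] after proving it is inside the buffer */
static int is_double_char(const char *data, size_t size, size_t off, char c)
{
    return off + 1 < size && data[off] == c && data[off + 1] == c;
}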
ofputil_decode_table_status(const struct ofp_header *oh,
struct ofputil_table_status *ts)
{
const struct ofp14_table_status *ots;
struct ofpbuf b;
enum ofperr error;
enum ofpraw raw;
ofpbuf_use_const(&b, oh, ntohs(oh->length));
raw = ofpraw_pull_assert(&b);
ots = ofpbuf_pull(&b, sizeof *ots);
if (raw == OFPRAW_OFPT14_TABLE_STATUS) {
if (ots->reason != OFPTR_VACANCY_DOWN
&& ots->reason != OFPTR_VACANCY_UP) {
return OFPERR_OFPBPC_BAD_VALUE;
}
ts->reason = ots->reason;
error = ofputil_decode_table_desc(&b, &ts->desc, oh->version);
return error;
} else {
return OFPERR_OFPBRC_BAD_VERSION;
}
return 0;
}
target: 0 | cwe: ["CWE-772"] | project: ovs | commit: 77ad4225d125030420d897c873e4734ac708c66b | hash: 85,104,724,379,600,280,000,000,000,000,000,000,000 | size: 27
message:
ofp-util: Fix memory leaks on error cases in ofputil_decode_group_mod().
Found by libFuzzer.
Reported-by: Bhargava Shastry <[email protected]>
Signed-off-by: Ben Pfaff <[email protected]>
Acked-by: Justin Pettit <[email protected]>
static JSValue js_sys_mkdir(JSContext *ctx, JSValueConst this_val, int argc, JSValueConst *argv)
{
	return js_sys_file_opt(ctx, this_val, argc, argv, OPT_MKDIR);
}
target: 0 | cwe: ["CWE-787"] | project: gpac | commit: ea1eca00fd92fa17f0e25ac25652622924a9a6a0 | hash: 116,280,238,059,095,600,000,000,000,000,000,000,000 | size: 4
message:
fixed #2138
SYSCALL_DEFINE4(request_key, const char __user *, _type,
const char __user *, _description,
const char __user *, _callout_info,
key_serial_t, destringid)
{
struct key_type *ktype;
struct key *key;
key_ref_t dest_ref;
size_t callout_len;
char type[32], *description, *callout_info;
long ret;
/* pull the type into kernel space */
ret = key_get_type_from_user(type, _type, sizeof(type));
if (ret < 0)
goto error;
/* pull the description into kernel space */
description = strndup_user(_description, PAGE_SIZE);
if (IS_ERR(description)) {
ret = PTR_ERR(description);
goto error;
}
/* pull the callout info into kernel space */
callout_info = NULL;
callout_len = 0;
if (_callout_info) {
callout_info = strndup_user(_callout_info, PAGE_SIZE);
if (IS_ERR(callout_info)) {
ret = PTR_ERR(callout_info);
goto error2;
}
callout_len = strlen(callout_info);
}
/* get the destination keyring if specified */
dest_ref = NULL;
if (destringid) {
dest_ref = lookup_user_key(destringid, KEY_LOOKUP_CREATE,
KEY_WRITE);
if (IS_ERR(dest_ref)) {
ret = PTR_ERR(dest_ref);
goto error3;
}
}
/* find the key type */
ktype = key_type_lookup(type);
if (IS_ERR(ktype)) {
ret = PTR_ERR(ktype);
goto error4;
}
/* do the search */
key = request_key_and_link(ktype, description, callout_info,
callout_len, NULL, key_ref_to_ptr(dest_ref),
KEY_ALLOC_IN_QUOTA);
if (IS_ERR(key)) {
ret = PTR_ERR(key);
goto error5;
}
ret = key->serial;
key_put(key);
error5:
key_type_put(ktype);
error4:
key_ref_put(dest_ref);
error3:
kfree(callout_info);
error2:
kfree(description);
error:
return ret;
} /* end sys_request_key() */
target: 0 | cwe: ["CWE-476"] | project: linux-2.6 | commit: 9d1ac65a9698513d00e5608d93fca0c53f536c14 | hash: 318,666,042,251,779,730,000,000,000,000,000,000,000 | size: 78
message:
KEYS: Fix RCU no-lock warning in keyctl_session_to_parent()
There's an unprotected access to the parent process's credentials in the middle
of keyctl_session_to_parent(). This results in the following RCU warning:
===================================================
[ INFO: suspicious rcu_dereference_check() usage. ]
---------------------------------------------------
security/keys/keyctl.c:1291 invoked rcu_dereference_check() without protection!
other info that might help us debug this:
rcu_scheduler_active = 1, debug_locks = 0
1 lock held by keyctl-session-/2137:
#0: (tasklist_lock){.+.+..}, at: [<ffffffff811ae2ec>] keyctl_session_to_parent+0x60/0x236
stack backtrace:
Pid: 2137, comm: keyctl-session- Not tainted 2.6.36-rc2-cachefs+ #1
Call Trace:
[<ffffffff8105606a>] lockdep_rcu_dereference+0xaa/0xb3
[<ffffffff811ae379>] keyctl_session_to_parent+0xed/0x236
[<ffffffff811af77e>] sys_keyctl+0xb4/0xb6
[<ffffffff81001eab>] system_call_fastpath+0x16/0x1b
The code should take the RCU read lock to make sure the parent's credentials
don't go away, even though it's holding a spinlock and has IRQ disabled.
Signed-off-by: David Howells <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
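
The fix is to bracket the credential access with the RCU read lock so the parent's cred structure cannot be freed mid-inspection. A kernel-style fragment of the pattern (illustrative; the surrounding keyctl logic is omitted):

rcu_read_lock();                 /* pin the parent's credentials */
pcred = __task_cred(parent);     /* RCU-protected dereference */
/* ... compare or copy fields of pcred ... */
rcu_read_unlock();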
compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
unsigned int *size, struct xt_counters *counters,
unsigned int i)
{
struct xt_entry_target *t;
struct compat_ip6t_entry __user *ce;
u_int16_t target_offset, next_offset;
compat_uint_t origsize;
const struct xt_entry_match *ematch;
int ret = 0;
origsize = *size;
ce = (struct compat_ip6t_entry __user *)*dstptr;
if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
copy_to_user(&ce->counters, &counters[i],
sizeof(counters[i])) != 0)
return -EFAULT;
*dstptr += sizeof(struct compat_ip6t_entry);
*size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
xt_ematch_foreach(ematch, e) {
ret = xt_compat_match_to_user(ematch, dstptr, size);
if (ret != 0)
return ret;
}
target_offset = e->target_offset - (origsize - *size);
t = ip6t_get_target(e);
ret = xt_compat_target_to_user(t, dstptr, size);
if (ret)
return ret;
next_offset = e->next_offset - (origsize - *size);
if (put_user(target_offset, &ce->target_offset) != 0 ||
put_user(next_offset, &ce->next_offset) != 0)
return -EFAULT;
return 0;
}
target: 0 | cwe: ["CWE-200"] | project: linux-2.6 | commit: 6a8ab060779779de8aea92ce3337ca348f973f54 | hash: 319,599,852,619,286,920,000,000,000,000,000,000,000 | size: 37
message:
ipv6: netfilter: ip6_tables: fix infoleak to userspace
Structures ip6t_replace, compat_ip6t_replace, and xt_get_revision are
copied from userspace. Fields of these structs that are
zero-terminated strings are not checked. When they are used as an argument
to a format string containing "%s" in request_module(), some sensitive
information is leaked to userspace via the argument of the spawned modprobe
process.
The first bug was introduced before the git epoch; the second was
introduced in 3bc3fe5e (v2.6.25-rc1); the third was introduced by
6b7d31fc (v2.6.15-rc1). To trigger the bug one should have
CAP_NET_ADMIN.
Signed-off-by: Vasiliy Kulikov <[email protected]>
Signed-off-by: Patrick McHardy <[email protected]>
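
The leak goes away once fixed-size name fields copied from userspace are verified to be NUL-terminated before they ever reach a "%s" format. A sketch of such a check (hypothetical helper):

#include <errno.h>
#include <string.h>

/* reject a user-copied name field that has no NUL within its capacity */
static int check_user_name(const char *name, size_t cap)
{
    if (strnlen(name, cap) == cap)
        return -EINVAL;   /* unterminated: "%s" would walk into adjacent memory */
    return 0;
}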
static opj_image_t* bmp8toimage(const OPJ_UINT8* pData, OPJ_UINT32 stride,
opj_image_t* image, OPJ_UINT8 const* const* pLUT)
{
OPJ_UINT32 width, height;
const OPJ_UINT8 *pSrc = NULL;
width = image->comps[0].w;
height = image->comps[0].h;
pSrc = pData + (height - 1U) * stride;
if (image->numcomps == 1U) {
opj_applyLUT8u_8u32s_C1R(pSrc, -(OPJ_INT32)stride, image->comps[0].data,
(OPJ_INT32)width, pLUT[0], width, height);
} else {
OPJ_INT32* pDst[3];
OPJ_INT32 pDstStride[3];
pDst[0] = image->comps[0].data;
pDst[1] = image->comps[1].data;
pDst[2] = image->comps[2].data;
pDstStride[0] = (OPJ_INT32)width;
pDstStride[1] = (OPJ_INT32)width;
pDstStride[2] = (OPJ_INT32)width;
opj_applyLUT8u_8u32s_C1P3R(pSrc, -(OPJ_INT32)stride, pDst, pDstStride, pLUT,
width, height);
}
return image;
}
target: 0 | cwe: ["CWE-119", "CWE-787"] | project: openjpeg | commit: baf0c1ad4572daa89caa3b12985bdd93530f0dd7 | hash: 69,154,879,923,229,630,000,000,000,000,000,000,000 | size: 28
message:
bmp_read_info_header(): reject bmp files with biBitCount == 0 (#983)
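
Rejecting biBitCount == 0 at header-parse time keeps the zero from later feeding divisions and buffer-size computations. A sketch (illustrative struct; the real check sits in bmp_read_info_header):

#include <stdio.h>

struct bmp_info { unsigned int biBitCount; };

static int bmp_header_valid(const struct bmp_info *h)
{
    if (h->biBitCount == 0) {   /* the commit's new rejection */
        fprintf(stderr, "invalid BMP: biBitCount == 0\n");
        return 0;
    }
    return 1;
}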
flatpak_remote_state_new (void)
{
FlatpakRemoteState *state = g_new0 (FlatpakRemoteState, 1);
state->refcount = 1;
state->sideload_repos = g_ptr_array_new_with_free_func ((GDestroyNotify)flatpak_sideload_state_free);
state->subsummaries = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, (GDestroyNotify)variant_maybe_unref);
return state;
}
target: 0 | cwe: ["CWE-74"] | project: flatpak | commit: fb473cad801c6b61706353256cab32330557374a | hash: 263,553,357,457,465,880,000,000,000,000,000,000,000 | size: 9
message:
dir: Pass environment via bwrap --setenv when running apply_extra
This means we can systematically pass the environment variables
through bwrap(1), even if it is setuid and thus is filtering out
security-sensitive environment variables. bwrap ends up being
run with an empty environment instead.
As with the previous commit, this regressed while fixing CVE-2021-21261.
Fixes: 6d1773d2 "run: Convert all environment variables into bwrap arguments"
Signed-off-by: Simon McVittie <[email protected]>
static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
{
return true;
}
target: 0 | cwe: [] | project: linux | commit: 400e22499dd92613821374c8c6c88c7225359980 | hash: 131,618,826,779,284,340,000,000,000,000,000,000,000 | size: 4
message:
mm: don't warn about allocations which stall for too long
Commit 63f53dea0c98 ("mm: warn about allocations which stall for too
long") was a great step for reducing possibility of silent hang up
problem caused by memory allocation stalls. But this commit reverts it,
for it is possible to trigger OOM lockup and/or soft lockups when many
threads concurrently called warn_alloc() (in order to warn about memory
allocation stalls) due to current implementation of printk(), and it is
difficult to obtain useful information due to limitation of synchronous
warning approach.
Current printk() implementation flushes all pending logs using the
context of a thread which called console_unlock(). printk() should be
able to flush all pending logs eventually unless somebody continues
appending to printk() buffer.
Since warn_alloc() started appending to printk() buffer while waiting
for oom_kill_process() to make forward progress when oom_kill_process()
is processing pending logs, it became possible for warn_alloc() to force
oom_kill_process() loop inside printk(). As a result, warn_alloc()
significantly increased possibility of preventing oom_kill_process()
from making forward progress.
---------- Pseudo code start ----------
Before warn_alloc() was introduced:
retry:
if (mutex_trylock(&oom_lock)) {
while (atomic_read(&printk_pending_logs) > 0) {
atomic_dec(&printk_pending_logs);
print_one_log();
}
// Send SIGKILL here.
mutex_unlock(&oom_lock)
}
goto retry;
After warn_alloc() was introduced:
retry:
if (mutex_trylock(&oom_lock)) {
while (atomic_read(&printk_pending_logs) > 0) {
atomic_dec(&printk_pending_logs);
print_one_log();
}
// Send SIGKILL here.
mutex_unlock(&oom_lock)
} else if (waited_for_10seconds()) {
atomic_inc(&printk_pending_logs);
}
goto retry;
---------- Pseudo code end ----------
Although waited_for_10seconds() becomes true once per 10 seconds,
unbounded number of threads can call waited_for_10seconds() at the same
time. Also, since threads doing waited_for_10seconds() keep doing
almost busy loop, the thread doing print_one_log() can use little CPU
resource. Therefore, this situation can be simplified like
---------- Pseudo code start ----------
retry:
if (mutex_trylock(&oom_lock)) {
while (atomic_read(&printk_pending_logs) > 0) {
atomic_dec(&printk_pending_logs);
print_one_log();
}
// Send SIGKILL here.
mutex_unlock(&oom_lock)
} else {
atomic_inc(&printk_pending_logs);
}
goto retry;
---------- Pseudo code end ----------
when printk() is called faster than print_one_log() can process a log.
One of possible mitigation would be to introduce a new lock in order to
make sure that no other series of printk() (either oom_kill_process() or
warn_alloc()) can append to printk() buffer when one series of printk()
(either oom_kill_process() or warn_alloc()) is already in progress.
Such serialization will also help obtaining kernel messages in readable
form.
---------- Pseudo code start ----------
retry:
if (mutex_trylock(&oom_lock)) {
mutex_lock(&oom_printk_lock);
while (atomic_read(&printk_pending_logs) > 0) {
atomic_dec(&printk_pending_logs);
print_one_log();
}
// Send SIGKILL here.
mutex_unlock(&oom_printk_lock);
mutex_unlock(&oom_lock)
} else {
if (mutex_trylock(&oom_printk_lock)) {
atomic_inc(&printk_pending_logs);
mutex_unlock(&oom_printk_lock);
}
}
goto retry;
---------- Pseudo code end ----------
But this commit does not go that direction, for we don't want to
introduce a new lock dependency, and we unlikely be able to obtain
useful information even if we serialized oom_kill_process() and
warn_alloc().
Synchronous approach is prone to unexpected results (e.g. too late [1],
too frequent [2], overlooked [3]). As far as I know, warn_alloc() never
helped with providing information other than "something is going wrong".
I want to consider asynchronous approach which can obtain information
during stalls with possibly relevant threads (e.g. the owner of
oom_lock and kswapd-like threads) and serve as a trigger for actions
(e.g. turn on/off tracepoints, ask libvirt daemon to take a memory dump
of stalling KVM guest for diagnostic purpose).
This commit temporarily loses ability to report e.g. OOM lockup due to
unable to invoke the OOM killer due to !__GFP_FS allocation request.
But asynchronous approach will be able to detect such situation and emit
warning. Thus, let's remove warn_alloc().
[1] https://bugzilla.kernel.org/show_bug.cgi?id=192981
[2] http://lkml.kernel.org/r/CAM_iQpWuPVGc2ky8M-9yukECtS+zKjiDasNymX7rMcBjBFyM_A@mail.gmail.com
[3] commit db73ee0d46379922 ("mm, vmscan: do not loop on too_many_isolated for ever"))
Link: http://lkml.kernel.org/r/1509017339-4802-1-git-send-email-penguin-kernel@I-love.SAKURA.ne.jp
Signed-off-by: Tetsuo Handa <[email protected]>
Reported-by: Cong Wang <[email protected]>
Reported-by: yuwang.yuwang <[email protected]>
Reported-by: Johannes Weiner <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Sergey Senozhatsky <[email protected]>
Cc: Petr Mladek <[email protected]>
Cc: Steven Rostedt <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
void operator()(OpKernelContext* context, const T1* input_data,
int input_batches, int input_height, int input_width,
int input_depth, int input_offset, const T2* filter_data,
int filter_height, int filter_width, int filter_count,
int filter_offset, int stride, Padding padding,
T3* output_data, int output_height, int output_width,
int output_shift, int output_offset, int output_mult) {
if (input_offset < 0) {
// Only log the first few occurrences of this warning.
static int warning_count = 0;
if (warning_count < 10) {
++warning_count;
LOG(WARNING)
<< "For kernel '" << context->op_kernel().name() << "' from input '"
<< context->op_kernel().requested_input(0)
<< "': Zero is not representable in the quantized range used by the"
<< " input. This means QuantizedConv2d has to fall back to a slow"
<< " implementation, since the border of zero values can't be"
<< " represented easily. You should try to construct graphs that"
<< " avoid this situation.";
}
ReferenceConvFunctor<T1, T2, T3> conv_functor;
conv_functor(context, input_data, input_batches, input_height,
input_width, input_depth, input_offset, filter_data,
filter_height, filter_width, filter_count, filter_offset,
stride, padding, output_data, output_height, output_width,
output_shift, output_offset, output_mult);
return;
}
OP_REQUIRES(
context, output_width > 0,
errors::InvalidArgument("output_width must be strictly positive"));
OP_REQUIRES(
context, output_height > 0,
errors::InvalidArgument("output_height must be strictly positive"));
int filter_left_offset;
int filter_top_offset;
if (padding == VALID) {
filter_left_offset =
((output_width - 1) * stride + filter_width - input_width + 1) / 2;
filter_top_offset =
((output_height - 1) * stride + filter_height - input_height + 1) / 2;
} else {
filter_left_offset =
((output_width - 1) * stride + filter_width - input_width) / 2;
filter_top_offset =
((output_height - 1) * stride + filter_height - input_height) / 2;
}
// The im2col buffer has # of patches rows, and # of filters cols.
// It's laid out like this, in row major order in memory:
// < filter value count >
// ^ +---------------------+
// patch | |
// count | |
// v +---------------------+
// Each patch row contains a filter_width x filter_height patch of the
// input, with the depth channel as the most contiguous in memory, followed
// by the width, then the height. This is the standard memory order in the
// image world if it helps to visualize it.
const int filter_value_count = filter_width * filter_height * input_depth;
OP_REQUIRES(context, filter_value_count > 0,
errors::InvalidArgument(
"filter patch must contain at least one element"));
const int64_t patches_per_chunk =
kMaxChunkSize / (filter_value_count * sizeof(T1));
const int64_t chunk_value_count =
(kMaxChunkSize + (sizeof(T1) - 1)) / sizeof(T1);
// TODO(petewarden) - Memory allocation can be very slow on Android. Can we
// optimize this by keeping the scratch buffer around?
// Because memory allocation is very expensive on mobile platforms, try to
// allocate a persistent buffer that will be kept around between calls. We
// use TensorFlow's resource management to ensure that the memory will be
// released when the session is over.
Im2ColBufferResource<T1, chunk_value_count>* im2col_buffer_resource;
std::function<Status(Im2ColBufferResource<T1, chunk_value_count>**)>
creator = [](Im2ColBufferResource<T1, chunk_value_count>** resource) {
#ifdef _MSC_VER
// MSVC complains about the capture of chunk_value_count which oddly
// works fine in conv_ops_using_gemm.cc for example.
// Define chunk_value_count inside the lambda for now.
const int64 chunk_value_count =
(kMaxChunkSize + (sizeof(T1) - 1)) / sizeof(T1);
#endif
*resource = new Im2ColBufferResource<T1, chunk_value_count>();
return Status::OK();
};
OP_REQUIRES_OK(context, context->resource_manager()->LookupOrCreate(
"Conv2d", "im2col_buffer",
&im2col_buffer_resource, creator));
// This means that multiple ops can't be run simultaneously on different
// threads, because we have a single shared resource. The platforms this is
// aimed at have intra-op parallelism as their focus though, so it shouldn't
// be an issue.
mutex_lock lock_buffer(im2col_buffer_resource->mu);
core::ScopedUnref unref_buffer(im2col_buffer_resource);
T1* im2col_buffer = im2col_buffer_resource->data;
const int64_t patch_count = (input_batches * output_height * output_width);
const int64_t chunk_count =
(patch_count + (patches_per_chunk - 1)) / patches_per_chunk;
for (int64_t chunk_index = 0; chunk_index < chunk_count; ++chunk_index) {
const int64_t patch_index_start = chunk_index * patches_per_chunk;
const int64_t patch_index_end =
std::min(patch_index_start + patches_per_chunk, patch_count);
for (int64_t patch_index = patch_index_start;
patch_index < patch_index_end; ++patch_index) {
const int64_t batch = patch_index / (output_height * output_width);
const int64_t out_y = (patch_index / output_width) % output_height;
const int64_t out_x = patch_index % output_width;
const T1* input_batch_start =
input_data + (batch * input_height * input_width * input_depth);
const int in_y_origin = (out_y * stride) - filter_top_offset;
const int in_x_origin = (out_x * stride) - filter_left_offset;
const int patch_index_within_chunk = patch_index % patches_per_chunk;
T1* im2col_patch_start =
im2col_buffer + (patch_index_within_chunk * filter_value_count);
for (int filter_y = 0; filter_y < filter_height; ++filter_y) {
const int in_y = in_y_origin + filter_y;
T1* im2col_row_start =
im2col_patch_start + (filter_y * filter_width * input_depth);
// If we're off the top or the bottom of the input, fill the
// whole row with zeroes.
if ((in_y < 0) || (in_y >= input_height)) {
// On Android, memset and memcpy are significantly faster than the
// more modern std::set and std::copy equivalents.
memset(im2col_row_start, input_offset,
(filter_width * input_depth));
} else {
// What we're doing here is trying to copy and fill the im2col
// buffer as efficiently as possible, using functions to set or
// duplicate values en masse. We know we don't have to worry about
// vertical edges because we dealt with that case above, so we
// just need to handle filters that overlap the left or right
// edges. Here's what that looks like:
//
// < left_zero_count > < center_copy_count > < right_zero_count >
// +------------------+---------------------+--------------------+
// | (filter) | (image) | (filter) |
// +------------------+---------------------+--------------------+
// in_x_origin 0 input_width in_x_end
//
// In reality it's unlikely that a filter patch will be wider
// than an input, but this shows all the edge cases.
// We use memset() to set the left and right sections to zeroes
// and memcpy() to copy over the input data for the center. These
// are preferred to std::fill and std::copy because they're much
// faster on Android.
const int in_x_end = in_x_origin + filter_width;
const int left_zero_count = std::max(0, 0 - in_x_origin);
const int right_zero_count = std::max(0, in_x_end - input_width);
const int center_copy_count =
filter_width - (left_zero_count + right_zero_count);
if (left_zero_count > 0) {
T1* im2col_left_start = im2col_row_start;
memset(im2col_left_start, input_offset,
(left_zero_count * input_depth));
}
if (center_copy_count > 0) {
const T1* input_row_start =
input_batch_start + (in_y * input_width * input_depth) +
(std::max(0, in_x_origin) * input_depth);
T1* im2col_center_start =
im2col_row_start + (left_zero_count * input_depth);
memcpy(im2col_center_start, input_row_start,
(center_copy_count * input_depth));
}
if (right_zero_count > 0) {
T1* im2col_right_start =
im2col_row_start +
((left_zero_count + center_copy_count) * input_depth);
memset(im2col_right_start, input_offset,
(right_zero_count * input_depth));
}
}
}
}
// Now we've assembled a set of image patches into a matrix, apply a
// GEMM matrix multiply of the patches as rows, times the filter
// weights in columns, to get partial results in the output matrix.
const int how_many_patches = patch_index_end - patch_index_start;
const bool transpose_a = false;
const bool transpose_b = false;
const bool transpose_c = false;
const int m = how_many_patches;
const int n = filter_count;
const int k = filter_value_count;
const int lda = filter_value_count;
const int ldb = filter_count;
const int ldc = filter_count;
T3* chunk_output_data = output_data + (patch_index_start * filter_count);
if (meta::IsSupportedAndEnabled() && std::is_same<T1, quint8>() &&
std::is_same<T2, quint8>() && std::is_same<T3, qint32>() &&
(output_offset == 0) && (output_mult == 1) && (output_shift == 0) &&
(transpose_c == false) && (k <= 2048)) {
meta::QuantizedGemm(context, transpose_a, transpose_b, im2col_buffer,
filter_data, chunk_output_data, m, n, k,
-input_offset, -filter_offset, lda, ldb, ldc);
} else if (std::is_same<T1, quint8>() && std::is_same<T2, quint8>() &&
std::is_same<T3, qint32>() && (output_offset == 0) &&
(output_mult == 1) && (output_shift == 0)) {
// The gemmlowp optimized library only works for a particular set of
// data types, so check if we meet those requirements and fall back to a
// slower reference implementation if not.
const uint8* im2col_data_as_uint8 = &(im2col_buffer->value);
const uint8* filter_data_as_uint8 = &(filter_data->value);
int32* output_data_as_int32 = &(chunk_output_data->value);
// All of the transpose_* variables are currently compile-time consts,
// so we could just hard-code these values too, but that would break if
// anybody changed those values in the future (e.g. to match the ability
// of MatMul to specify them as attributes). We're using a verbose
// approach of deriving the order values from the transpose variables to
// be able to catch any changes like that.
static const gemmlowp::MapOrder ResultOrder =
!transpose_c ? gemmlowp::MapOrder::RowMajor
: gemmlowp::MapOrder::ColMajor;
static const gemmlowp::MapOrder LhsOrder =
!transpose_a ? gemmlowp::MapOrder::RowMajor
: gemmlowp::MapOrder::ColMajor;
static const gemmlowp::MapOrder RhsOrder =
!transpose_b ? gemmlowp::MapOrder::RowMajor
: gemmlowp::MapOrder::ColMajor;
gemmlowp::MatrixMap<const std::uint8_t, LhsOrder> lhs(
im2col_data_as_uint8, m, k, lda);
gemmlowp::MatrixMap<const std::uint8_t, RhsOrder> rhs(
filter_data_as_uint8, k, n, ldb);
gemmlowp::MatrixMap<std::int32_t, ResultOrder> result(
output_data_as_int32, m, n, ldc);
const std::tuple<> empty_pipeline = {};
auto& worker_threads =
*(context->device()->tensorflow_cpu_worker_threads());
TensorflowGemmContext context(worker_threads.num_threads,
worker_threads.workers);
gemmlowp::GemmWithOutputPipeline<std::uint8_t, std::int32_t,
gemmlowp::DefaultL8R8BitDepthParams>(
&context, lhs, rhs, &result, -input_offset, -filter_offset,
empty_pipeline);
// Since gemmlowp uses assembly to write to the output, msan won't
// detect the output buffer as written to, so we mark it manually.
TF_ANNOTATE_MEMORY_IS_INITIALIZED(output_data_as_int32,
m * n * sizeof(int32));
} else {
ReferenceGemm<T1, T2, T3>(
transpose_a, transpose_b, transpose_c, m, n, k, im2col_buffer,
input_offset, lda, filter_data, filter_offset, ldb,
chunk_output_data, output_shift, output_offset, output_mult, ldc);
}
}
}
target: 0 | cwe: ["CWE-20", "CWE-476"] | project: tensorflow | commit: 0f0b080ecde4d3dfec158d6f60da34d5e31693c4 | hash: 33,230,209,734,889,010,000,000,000,000,000,000,000 | size: 253
message:
Fix undefined behavior in QuantizedConv2D
Added more input validation and tests. Prior to this, we could get
`nullptr` exceptions when attempting to access 0th elements of 0-sized
inputs, leading to security vulnerability bugs.
Also needed to modify `quantized_conv_ops_test.cc` for consistency.
Previously the CPU kernel did technically support passing tensors
of rank larger than 0 for min/max values. However, the XLA kernels do not.
PiperOrigin-RevId: 445518507
static void ldb_dn_mark_invalid(struct ldb_dn *dn)
{
dn->invalid = true;
}
target: 0 | cwe: ["CWE-200"] | project: samba | commit: 7f51ec8c4ed9ba1f53d722e44fb6fb3cde933b72 | hash: 93,292,605,910,564,180,000,000,000,000,000,000,000 | size: 4
message:
CVE-2015-5330: ldb_dn: simplify and fix ldb_dn_escape_internal()
Previously we relied on NUL terminated strings and jumped back and
forth between copying escaped bytes and memcpy()ing un-escaped chunks.
This simple version is easier to reason about and works with
unterminated strings. It may also be faster as it avoids reading the
string twice (first with strcspn, then with memcpy).
Bug: https://bugzilla.samba.org/show_bug.cgi?id=11599
Signed-off-by: Douglas Bagnall <[email protected]>
Pair-programmed-with: Andrew Bartlett <[email protected]>
Reviewed-by: Ralph Boehme <[email protected]>
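
A single forward pass that escapes as it copies, bounded by an explicit length, is the shape the message describes: no terminating NUL is assumed and the input is read exactly once. A sketch of that shape (the escape set here is illustrative, not ldb's exact rules; dst must hold at least 3*len+1 bytes):

#include <stddef.h>

static size_t escape_value(char *dst, const char *src, size_t len)
{
    size_t i, o = 0;
    for (i = 0; i < len; i++) {
        unsigned char c = (unsigned char)src[i];
        if (c == ',' || c == '=' || c == '\\' || c < 0x20) {
            dst[o++] = '\\';
            dst[o++] = "0123456789ABCDEF"[c >> 4];
            dst[o++] = "0123456789ABCDEF"[c & 0xf];
        } else {
            dst[o++] = (char)c;
        }
    }
    dst[o] = '\0';
    return o;
}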
static int sctp_disconnect(struct sock *sk, int flags)
{
return -EOPNOTSUPP; /* STUB */
}
target: 0 | cwe: ["CWE-617", "CWE-362"] | project: linux | commit: 2dcab598484185dea7ec22219c76dcdd59e3cb90 | hash: 309,291,468,079,556,160,000,000,000,000,000,000,000 | size: 4
message:
sctp: avoid BUG_ON on sctp_wait_for_sndbuf
Alexander Popov reported that an application may trigger a BUG_ON in
sctp_wait_for_sndbuf if the socket tx buffer is full, a thread is
waiting on it to queue more data, and meanwhile another thread peels off
the association being used by the first thread.
This patch replaces the BUG_ON call with proper error handling. It
will return -EPIPE to the original sendmsg call, similarly to what would
have been done if the association wasn't found in the first place.
Acked-by: Alexander Popov <[email protected]>
Signed-off-by: Marcelo Ricardo Leitner <[email protected]>
Reviewed-by: Xin Long <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
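
The replacement the message describes turns a fatal assertion into an ordinary error return. A kernel-style fragment of that shape (simplified; the real change is in sctp_wait_for_sndbuf):

/* was: BUG_ON(sk != asoc->base.sk); -- a hard crash if another thread
 * peeled off the association while this one slept on sndbuf space */
if (sk != asoc->base.sk) {
    err = -EPIPE;   /* report it like a vanished association */
    goto do_error;
}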
inline bool Virtual_column_info::is_equal(const Virtual_column_info* vcol) const
{
return type_handler() == vcol->type_handler()
&& stored_in_db == vcol->is_stored()
&& expr->eq(vcol->expr, true);
}
target: 0 | cwe: ["CWE-617"] | project: server | commit: 807945f2eb5fa22e6f233cc17b85a2e141efe2c8 | hash: 79,590,138,679,955,325,000,000,000,000,000,000,000 | size: 6
message:
MDEV-26402: A SEGV in Item_field::used_tables/update_depend_map_for_order...
When doing condition pushdown from HAVING into WHERE,
Item_equal::create_pushable_equalities() calls
item->set_extraction_flag(IMMUTABLE_FL) for constant items.
Then, Item::cleanup_excluding_immutables_processor() checks for this flag
to see if it should call item->cleanup() or leave the item as-is.
The failure happens when a constant item has a non-constant one inside it,
like:
(tbl.col=0 AND impossible_cond)
item->walk(cleanup_excluding_immutables_processor) works in a bottom-up
way so it
1. will call Item_func_eq(tbl.col=0)->cleanup()
2. will not call Item_cond_and->cleanup (as the AND is constant)
This creates an item tree where a fixed Item has an un-fixed Item inside
it which eventually causes an assertion failure.
Fixed by introducing this rule: instead of just calling
item->set_extraction_flag(IMMUTABLE_FL);
we call Item::walk() to set the flag for all sub-items of the item.
xmlDocPtr soap_xmlParseMemory(const void *buf, size_t buf_size)
{
xmlParserCtxtPtr ctxt = NULL;
xmlDocPtr ret;
/*
xmlInitParser();
*/
ctxt = xmlCreateMemoryParserCtxt(buf, buf_size);
if (ctxt) {
ctxt->options -= XML_PARSE_DTDLOAD;
ctxt->sax->ignorableWhitespace = soap_ignorableWhitespace;
ctxt->sax->comment = soap_Comment;
ctxt->sax->warning = NULL;
ctxt->sax->error = NULL;
/*ctxt->sax->fatalError = NULL;*/
#if LIBXML_VERSION >= 20703
ctxt->options |= XML_PARSE_HUGE;
#endif
xmlParseDocument(ctxt);
if (ctxt->wellFormed) {
ret = ctxt->myDoc;
if (ret->URL == NULL && ctxt->directory != NULL) {
ret->URL = xmlCharStrdup(ctxt->directory);
}
} else {
ret = NULL;
xmlFreeDoc(ctxt->myDoc);
ctxt->myDoc = NULL;
}
xmlFreeParserCtxt(ctxt);
} else {
ret = NULL;
}
/*
xmlCleanupParser();
*/
/*
if (ret) {
cleanup_xml_node((xmlNodePtr)ret);
}
*/
return ret;
}
target: 1 | cwe: ["CWE-200"] | project: php-src | commit: 188c196d4da60bdde9190d2fc532650d17f7af2d | hash: 58,278,029,812,532,210,000,000,000,000,000,000,000 | size: 46
message:
Proper bit reset code
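
The one-line bug behind "Proper bit reset code" is visible in the function above: ctxt->options -= XML_PARSE_DTDLOAD does arithmetic on a bitmask. Subtraction only clears the flag if it happens to be set; if it is already clear, it silently corrupts the neighbouring option bits. The standard fix is a bitwise AND with the complement:

/* wrong: arithmetic on a bitmask */
ctxt->options -= XML_PARSE_DTDLOAD;

/* right: clears the bit unconditionally, leaves the others alone */
ctxt->options &= ~XML_PARSE_DTDLOAD;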
static int check_xflags(unsigned int flags)
{
if (flags & ~(FS_XFLAG_APPEND | FS_XFLAG_IMMUTABLE | FS_XFLAG_NOATIME |
FS_XFLAG_NODUMP | FS_XFLAG_SYNC))
return -EOPNOTSUPP;
return 0;
}
target: 0 | cwe: ["CWE-476", "CWE-284"] | project: linux | commit: 09ba3bc9dd150457c506e4661380a6183af651c1 | hash: 264,579,627,398,758,340,000,000,000,000,000,000,000 | size: 7
message:
btrfs: merge btrfs_find_device and find_device
Both btrfs_find_device() and find_device() do the same thing except
that the latter does not take the seed device into account in the device
scanning context. We can merge them.
Signed-off-by: Anand Jain <[email protected]>
Reviewed-by: David Sterba <[email protected]>
Signed-off-by: David Sterba <[email protected]>
HttpHeader::operator =(const HttpHeader &other)
{
if (this != &other) {
// we do not really care, but the caller probably does
assert(owner == other.owner);
clean();
update(&other); // will update the mask as well
len = other.len;
conflictingContentLength_ = other.conflictingContentLength_;
}
return *this;
}
target: 1 | cwe: ["CWE-444"] | project: squid | commit: fd68382860633aca92065e6c343cfd1b12b126e7 | hash: 143,322,972,738,858,370,000,000,000,000,000,000,000 | size: 12
message:
Improve Transfer-Encoding handling (#702)
Reject messages containing a Transfer-Encoding header with a coding other
than chunked or identity. Squid does not support other codings.
For simplicity and security's sake, also reject messages where
Transfer-Encoding contains unnecessarily complex values that are
technically equivalent to "chunked" or "identity" (e.g., ",,chunked" or
"identity, chunked").
RFC 7230 formally deprecated and removed identity coding, but it is
still used by some agents.
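
A strict allow-list over the coding value is the simplest way to implement the policy above; handling of list values like ",,chunked" (split, trim, re-check each element) is omitted here. A sketch (hypothetical helper):

#include <strings.h>

/* accept only the two codings Squid supports */
static int transfer_encoding_ok(const char *coding)
{
    return strcasecmp(coding, "chunked") == 0 ||
           strcasecmp(coding, "identity") == 0;
}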
int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
struct ext4_map_blocks *map, int flags)
{
struct ext4_ext_path *path = NULL;
struct ext4_extent newex, *ex, *ex2;
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
ext4_fsblk_t newblock = 0;
int free_on_err = 0, err = 0, depth, ret;
unsigned int allocated = 0, offset = 0;
unsigned int allocated_clusters = 0;
struct ext4_allocation_request ar;
ext4_io_end_t *io = ext4_inode_aio(inode);
ext4_lblk_t cluster_offset;
int set_unwritten = 0;
bool map_from_cluster = false;
ext_debug("blocks %u/%u requested for inode %lu\n",
map->m_lblk, map->m_len, inode->i_ino);
trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
/* find extent for this block */
path = ext4_find_extent(inode, map->m_lblk, NULL, 0);
if (IS_ERR(path)) {
err = PTR_ERR(path);
path = NULL;
goto out2;
}
depth = ext_depth(inode);
/*
* consistent leaf must not be empty;
* this situation is possible, though, _during_ tree modification;
* this is why assert can't be put in ext4_find_extent()
*/
if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
EXT4_ERROR_INODE(inode, "bad extent address "
"lblock: %lu, depth: %d pblock %lld",
(unsigned long) map->m_lblk, depth,
path[depth].p_block);
err = -EIO;
goto out2;
}
ex = path[depth].p_ext;
if (ex) {
ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
unsigned short ee_len;
/*
* unwritten extents are treated as holes, except that
* we split out initialized portions during a write.
*/
ee_len = ext4_ext_get_actual_len(ex);
trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len);
/* if found extent covers block, simply return it */
if (in_range(map->m_lblk, ee_block, ee_len)) {
newblock = map->m_lblk - ee_block + ee_start;
/* number of remaining blocks in the extent */
allocated = ee_len - (map->m_lblk - ee_block);
ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk,
ee_block, ee_len, newblock);
/*
* If the extent is initialized check whether the
* caller wants to convert it to unwritten.
*/
if ((!ext4_ext_is_unwritten(ex)) &&
(flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) {
allocated = convert_initialized_extent(
handle, inode, map, &path,
flags, allocated, newblock);
goto out2;
} else if (!ext4_ext_is_unwritten(ex))
goto out;
ret = ext4_ext_handle_unwritten_extents(
handle, inode, map, &path, flags,
allocated, newblock);
if (ret < 0)
err = ret;
else
allocated = ret;
goto out2;
}
}
/*
* requested block isn't allocated yet;
* we couldn't try to create block if create flag is zero
*/
if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
/*
* put just found gap into cache to speed up
* subsequent requests
*/
ext4_ext_put_gap_in_cache(inode, path, map->m_lblk);
goto out2;
}
/*
* Okay, we need to do block allocation.
*/
newex.ee_block = cpu_to_le32(map->m_lblk);
cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
/*
* If we are doing bigalloc, check to see if the extent returned
* by ext4_find_extent() implies a cluster we can use.
*/
if (cluster_offset && ex &&
get_implied_cluster_alloc(inode->i_sb, map, ex, path)) {
ar.len = allocated = map->m_len;
newblock = map->m_pblk;
map_from_cluster = true;
goto got_allocated_blocks;
}
/* find neighbour allocated blocks */
ar.lleft = map->m_lblk;
err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
if (err)
goto out2;
ar.lright = map->m_lblk;
ex2 = NULL;
err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2);
if (err)
goto out2;
/* Check if the extent after searching to the right implies a
* cluster we can use. */
if ((sbi->s_cluster_ratio > 1) && ex2 &&
get_implied_cluster_alloc(inode->i_sb, map, ex2, path)) {
ar.len = allocated = map->m_len;
newblock = map->m_pblk;
map_from_cluster = true;
goto got_allocated_blocks;
}
/*
* See if request is beyond maximum number of blocks we can have in
* a single extent. For an initialized extent this limit is
* EXT_INIT_MAX_LEN and for an unwritten extent this limit is
* EXT_UNWRITTEN_MAX_LEN.
*/
if (map->m_len > EXT_INIT_MAX_LEN &&
!(flags & EXT4_GET_BLOCKS_UNWRIT_EXT))
map->m_len = EXT_INIT_MAX_LEN;
else if (map->m_len > EXT_UNWRITTEN_MAX_LEN &&
(flags & EXT4_GET_BLOCKS_UNWRIT_EXT))
map->m_len = EXT_UNWRITTEN_MAX_LEN;
/* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */
newex.ee_len = cpu_to_le16(map->m_len);
err = ext4_ext_check_overlap(sbi, inode, &newex, path);
if (err)
allocated = ext4_ext_get_actual_len(&newex);
else
allocated = map->m_len;
/* allocate new block */
ar.inode = inode;
ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk);
ar.logical = map->m_lblk;
/*
* We calculate the offset from the beginning of the cluster
* for the logical block number, since when we allocate a
* physical cluster, the physical block should start at the
* same offset from the beginning of the cluster. This is
* needed so that future calls to get_implied_cluster_alloc()
* work correctly.
*/
offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
ar.len = EXT4_NUM_B2C(sbi, offset+allocated);
ar.goal -= offset;
ar.logical -= offset;
if (S_ISREG(inode->i_mode))
ar.flags = EXT4_MB_HINT_DATA;
else
/* disable in-core preallocation for non-regular files */
ar.flags = 0;
if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE)
ar.flags |= EXT4_MB_HINT_NOPREALLOC;
if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
ar.flags |= EXT4_MB_DELALLOC_RESERVED;
newblock = ext4_mb_new_blocks(handle, &ar, &err);
if (!newblock)
goto out2;
ext_debug("allocate new block: goal %llu, found %llu/%u\n",
ar.goal, newblock, allocated);
free_on_err = 1;
allocated_clusters = ar.len;
ar.len = EXT4_C2B(sbi, ar.len) - offset;
if (ar.len > allocated)
ar.len = allocated;
got_allocated_blocks:
/* try to insert new extent into found leaf and return */
ext4_ext_store_pblock(&newex, newblock + offset);
newex.ee_len = cpu_to_le16(ar.len);
/* Mark unwritten */
if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT){
ext4_ext_mark_unwritten(&newex);
map->m_flags |= EXT4_MAP_UNWRITTEN;
/*
* io_end structure was created for every IO write to an
* unwritten extent. To avoid unnecessary conversion,
* here we flag the IO that really needs the conversion.
* For non asycn direct IO case, flag the inode state
* that we need to perform conversion when IO is done.
*/
if (flags & EXT4_GET_BLOCKS_PRE_IO)
set_unwritten = 1;
}
err = 0;
if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0)
err = check_eofblocks_fl(handle, inode, map->m_lblk,
path, ar.len);
if (!err)
err = ext4_ext_insert_extent(handle, inode, &path,
&newex, flags);
if (!err && set_unwritten) {
if (io)
ext4_set_io_unwritten_flag(inode, io);
else
ext4_set_inode_state(inode,
EXT4_STATE_DIO_UNWRITTEN);
}
if (err && free_on_err) {
int fb_flags = flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE ?
EXT4_FREE_BLOCKS_NO_QUOT_UPDATE : 0;
/* free data blocks we just allocated */
/* not a good idea to call discard here directly,
* but otherwise we'd need to call it every free() */
ext4_discard_preallocations(inode);
ext4_free_blocks(handle, inode, NULL, newblock,
EXT4_C2B(sbi, allocated_clusters), fb_flags);
goto out2;
}
/* previous routine could use block we allocated */
newblock = ext4_ext_pblock(&newex);
allocated = ext4_ext_get_actual_len(&newex);
if (allocated > map->m_len)
allocated = map->m_len;
map->m_flags |= EXT4_MAP_NEW;
/*
* Update reserved blocks/metadata blocks after successful
* block allocation which had been deferred till now.
*/
if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
unsigned int reserved_clusters;
/*
* Check how many clusters we had reserved this allocated range
*/
reserved_clusters = get_reserved_cluster_alloc(inode,
map->m_lblk, allocated);
if (map_from_cluster) {
if (reserved_clusters) {
/*
* We have clusters reserved for this range.
* But since we are not doing actual allocation
* and are simply using blocks from previously
* allocated cluster, we should release the
* reservation and not claim quota.
*/
ext4_da_update_reserve_space(inode,
reserved_clusters, 0);
}
} else {
BUG_ON(allocated_clusters < reserved_clusters);
if (reserved_clusters < allocated_clusters) {
struct ext4_inode_info *ei = EXT4_I(inode);
int reservation = allocated_clusters -
reserved_clusters;
/*
* It seems we claimed few clusters outside of
* the range of this allocation. We should give
* it back to the reservation pool. This can
* happen in the following case:
*
* * Suppose s_cluster_ratio is 4 (i.e., each
* cluster has 4 blocks. Thus, the clusters
* are [0-3],[4-7],[8-11]...
* * First comes delayed allocation write for
* logical blocks 10 & 11. Since there were no
* previous delayed allocated blocks in the
* range [8-11], we would reserve 1 cluster
* for this write.
* * Next comes write for logical blocks 3 to 8.
* In this case, we will reserve 2 clusters
* (for [0-3] and [4-7]; and not for [8-11] as
* that range has a delayed allocated blocks.
* Thus total reserved clusters now becomes 3.
* * Now, during the delayed allocation writeout
* time, we will first write blocks [3-8] and
* allocate 3 clusters for writing these
* blocks. Also, we would claim all these
* three clusters above.
* * Now when we come here to writeout the
* blocks [10-11], we would expect to claim
* the reservation of 1 cluster we had made
* (and we would claim it since there are no
* more delayed allocated blocks in the range
* [8-11]. But our reserved cluster count had
* already gone to 0.
*
* Thus, at the step 4 above when we determine
* that there are still some unwritten delayed
* allocated blocks outside of our current
* block range, we should increment the
* reserved clusters count so that when the
* remaining blocks finally gets written, we
* could claim them.
*/
dquot_reserve_block(inode,
EXT4_C2B(sbi, reservation));
spin_lock(&ei->i_block_reservation_lock);
ei->i_reserved_data_blocks += reservation;
spin_unlock(&ei->i_block_reservation_lock);
}
/*
* We will claim quota for all newly allocated blocks.
* We're updating the reserved space *after* the
* correction above so we do not accidentally free
* all the metadata reservation because we might
* actually need it later on.
*/
ext4_da_update_reserve_space(inode, allocated_clusters,
1);
}
}
/*
* Cache the extent and update transaction to commit on fdatasync only
* when it is _not_ an unwritten extent.
*/
if ((flags & EXT4_GET_BLOCKS_UNWRIT_EXT) == 0)
ext4_update_inode_fsync_trans(handle, inode, 1);
else
ext4_update_inode_fsync_trans(handle, inode, 0);
out:
if (allocated > map->m_len)
allocated = map->m_len;
ext4_ext_show_leaf(inode, path);
map->m_flags |= EXT4_MAP_MAPPED;
map->m_pblk = newblock;
map->m_len = allocated;
out2:
ext4_ext_drop_refs(path);
kfree(path);
trace_ext4_ext_map_blocks_exit(inode, flags, map,
err ? err : allocated);
return err ? err : allocated;
}
target: 0 | cwe: ["CWE-17"] | project: linux | commit: 0f2af21aae11972fa924374ddcf52e88347cf5a8 | hash: 238,282,362,326,712,050,000,000,000,000,000,000,000 | size: 364
message:
ext4: allocate entire range in zero range
Currently there is a bug in the zero range code which causes zero range
calls to only allocate the block-aligned portion of the range, while
ignoring the rest in some cases.
In some cases, namely if the end of the range is past i_size, we do
attempt to preallocate the last nonaligned block. However this might
cause the kernel to BUG() in some carefully designed zero range requests
on setups where page size > block size.
Fix this problem by first preallocating the entire range, including
the nonaligned edges, and converting the written extents to unwritten
in the next step. This approach will also give us the advantage of
having the range be as linearly contiguous as possible.
Signed-off-by: Lukas Czerner <[email protected]>
Signed-off-by: Theodore Ts'o <[email protected]>
static char *analysis_fcn_autoname(RzCore *core, RzAnalysisFunction *fcn, int dump, int mode) {
int use_getopt = 0;
int use_isatty = 0;
PJ *pj = NULL;
char *do_call = NULL;
RzAnalysisXRef *xref;
RzListIter *iter;
RzList *xrefs = rz_analysis_function_get_xrefs_from(fcn);
if (mode == 'j') {
// start a new JSON object
pj = pj_new();
pj_a(pj);
}
if (xrefs) {
rz_list_foreach (xrefs, iter, xref) {
RzFlagItem *f = rz_flag_get_i(core->flags, xref->to);
if (f) {
// If dump is true, print all strings referenced by the function
if (dump) {
// take only strings flags
if (!strncmp(f->name, "str.", 4)) {
if (mode == 'j') {
// add new json item
pj_o(pj);
pj_kn(pj, "addr", xref->from);
pj_kn(pj, "ref", xref->to);
pj_ks(pj, "flag", f->name);
pj_end(pj);
} else {
rz_cons_printf("0x%08" PFMT64x " 0x%08" PFMT64x " %s\n", xref->from, xref->to, f->name);
}
}
} else if (do_call) { // break if a proper autoname found and not in dump mode
break;
}
// enter only if a candidate name hasn't found yet
if (!do_call) {
if (blacklisted_word(f->name)) {
continue;
}
if (strstr(f->name, ".isatty")) {
use_isatty = 1;
}
if (strstr(f->name, ".getopt")) {
use_getopt = 1;
}
if (!strncmp(f->name, "method.", 7)) {
free(do_call);
do_call = strdup(f->name + 7);
continue;
}
if (!strncmp(f->name, "str.", 4)) {
free(do_call);
do_call = strdup(f->name + 4);
continue;
}
if (!strncmp(f->name, "sym.imp.", 8)) {
free(do_call);
do_call = strdup(f->name + 8);
continue;
}
if (!strncmp(f->name, "reloc.", 6)) {
free(do_call);
do_call = strdup(f->name + 6);
continue;
}
}
}
}
rz_list_free(xrefs);
}
if (mode == 'j') {
pj_end(pj);
}
if (pj) {
rz_cons_printf("%s\n", pj_string(pj));
pj_free(pj);
}
// TODO: append counter if name already exists
if (use_getopt) {
RzFlagItem *item = rz_flag_get(core->flags, "main");
free(do_call);
// if referenced from entrypoint. this should be main
if (item && item->offset == fcn->addr) {
return strdup("main"); // main?
}
return strdup("parse_args"); // main?
}
if (use_isatty) {
char *ret = rz_str_newf("sub.setup_tty_%s_%" PFMT64x, do_call, fcn->addr);
free(do_call);
return ret;
}
if (do_call) {
char *ret = rz_str_newf("sub.%s_%" PFMT64x, do_call, fcn->addr);
free(do_call);
return ret;
}
return NULL;
}
target: 0 | cwe: ["CWE-703"] | project: rizin | commit: 6ce71d8aa3dafe3cdb52d5d72ae8f4b95916f939 | hash: 302,664,909,197,692,230,000,000,000,000,000,000,000 | size: 100
message:
Initialize retctx,ctx before freeing the inner elements
In rz_core_analysis_type_match the retctx structure was initialized on the
stack only after a "goto out_function", where a field of that structure
was freed. When the goto path is taken, the field is not properly
initialized and it can cause a crash of Rizin or have other effects.
Fixes: CVE-2021-4022
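
The rule the message states is simple: zero the on-stack context before the first goto that can reach the cleanup label, so the label frees NULL instead of stack garbage. A minimal sketch:

#include <stdlib.h>

struct ret_ctx { char *buf; };

static int analyze(int bad_input)
{
    int ret = -1;
    struct ret_ctx retctx = {0};   /* the fix: initialized before any goto */
    if (bad_input)
        goto out;                  /* safe: retctx.buf is NULL here */
    retctx.buf = malloc(64);
    /* ... use retctx ... */
    ret = 0;
out:
    free(retctx.buf);              /* free(NULL) is a no-op */
    return ret;
}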
static void fill_inode_item(struct btrfs_trans_handle *trans,
struct extent_buffer *leaf,
struct btrfs_inode_item *item,
struct inode *inode)
{
btrfs_set_inode_uid(leaf, item, i_uid_read(inode));
btrfs_set_inode_gid(leaf, item, i_gid_read(inode));
btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
btrfs_set_inode_mode(leaf, item, inode->i_mode);
btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item),
inode->i_atime.tv_sec);
btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item),
inode->i_atime.tv_nsec);
btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item),
inode->i_mtime.tv_sec);
btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item),
inode->i_mtime.tv_nsec);
btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item),
inode->i_ctime.tv_sec);
btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
inode->i_ctime.tv_nsec);
btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
btrfs_set_inode_sequence(leaf, item, inode->i_version);
btrfs_set_inode_transid(leaf, item, trans->transid);
btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
btrfs_set_inode_block_group(leaf, item, 0);
}
target: 0 | cwe: ["CWE-310"] | project: linux-2.6 | commit: 9c52057c698fb96f8f07e7a4bcf4801a092bda89 | hash: 63,747,244,072,778,990,000,000,000,000,000,000,000 | size: 34
message:
Btrfs: fix hash overflow handling
The handling for directory crc hash overflows was fairly obscure:
split_leaf returns EOVERFLOW when we try to extend the item and that is
supposed to bubble up to userland. For a while it did so, but along the
way we added better handling of errors and forced the FS readonly if we
hit IO errors during the directory insertion.
Along the way, we started testing only for EEXIST and the EOVERFLOW case
was dropped. The end result is that we may force the FS readonly if we
catch a directory hash bucket overflow.
This fixes a few problem spots. First I add tests for EOVERFLOW in the
places where we can safely just return the error up the chain.
btrfs_rename is harder though, because it tries to insert the new
directory item only after it has already unlinked anything the rename
was going to overwrite. Rather than adding very complex logic, I added
a helper to test for the hash overflow case early while it is still safe
to bail out.
Snapshot and subvolume creation had a similar problem, so they are using
the new helper now too.
Signed-off-by: Chris Mason <[email protected]>
Reported-by: Pascal Junod <[email protected]>
nfsd4_putfh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfsd4_putfh *putfh = &u->putfh;
fh_put(&cstate->current_fh);
cstate->current_fh.fh_handle.fh_size = putfh->pf_fhlen;
memcpy(&cstate->current_fh.fh_handle.fh_base, putfh->pf_fhval,
putfh->pf_fhlen);
return fh_verify(rqstp, &cstate->current_fh, 0, NFSD_MAY_BYPASS_GSS);
}
target: 0 | cwe: ["CWE-476"] | project: linux | commit: 01310bb7c9c98752cc763b36532fab028e0f8f81 | hash: 299,460,339,583,096,460,000,000,000,000,000,000,000 | size: 11
message:
nfsd: COPY and CLONE operations require the saved filehandle to be set
Make sure we have a saved filehandle, otherwise we'll oops with a null
pointer dereference in nfs4_preprocess_stateid_op().
Signed-off-by: Scott Mayhew <[email protected]>
Cc: [email protected]
Signed-off-by: J. Bruce Fields <[email protected]>
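
The guard the message describes is one early check before the saved filehandle is dereferenced. A kernel-style fragment (nfsd field names; simplified):

/* COPY/CLONE use both the current and the saved filehandle; bail out
 * early if no SAVEFH appeared earlier in this compound */
if (!cstate->save_fh.fh_dentry)
    return nfserr_nofilehandle;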
scanner_scan_statement (parser_context_t *context_p, /**< context */
scanner_context_t *scanner_context_p, /**< scanner context */
lexer_token_type_t type, /**< current token type */
scan_stack_modes_t stack_top) /**< current stack top */
{
switch (type)
{
case LEXER_SEMICOLON:
{
scanner_context_p->mode = SCAN_MODE_STATEMENT_END;
return SCAN_KEEP_TOKEN;
}
case LEXER_LEFT_BRACE:
{
#if ENABLED (JERRY_ES2015)
scanner_literal_pool_t *literal_pool_p;
literal_pool_p = scanner_push_literal_pool (context_p,
scanner_context_p,
SCANNER_LITERAL_POOL_BLOCK);
literal_pool_p->source_p = context_p->source_p;
#endif /* ENABLED (JERRY_ES2015) */
scanner_context_p->mode = SCAN_MODE_STATEMENT_OR_TERMINATOR;
parser_stack_push_uint8 (context_p, SCAN_STACK_BLOCK_STATEMENT);
return SCAN_NEXT_TOKEN;
}
case LEXER_KEYW_DO:
{
scanner_context_p->mode = SCAN_MODE_STATEMENT;
parser_stack_push_uint8 (context_p, SCAN_STACK_DO_STATEMENT);
return SCAN_NEXT_TOKEN;
}
case LEXER_KEYW_TRY:
{
lexer_next_token (context_p);
if (context_p->token.type != LEXER_LEFT_BRACE)
{
scanner_raise_error (context_p);
}
#if ENABLED (JERRY_ES2015)
scanner_literal_pool_t *literal_pool_p;
literal_pool_p = scanner_push_literal_pool (context_p,
scanner_context_p,
SCANNER_LITERAL_POOL_BLOCK);
literal_pool_p->source_p = context_p->source_p;
#endif /* ENABLED (JERRY_ES2015) */
scanner_context_p->mode = SCAN_MODE_STATEMENT_OR_TERMINATOR;
parser_stack_push_uint8 (context_p, SCAN_STACK_TRY_STATEMENT);
return SCAN_NEXT_TOKEN;
}
case LEXER_KEYW_DEBUGGER:
{
scanner_context_p->mode = SCAN_MODE_STATEMENT_END;
return SCAN_NEXT_TOKEN;
}
case LEXER_KEYW_IF:
case LEXER_KEYW_WITH:
case LEXER_KEYW_SWITCH:
{
lexer_next_token (context_p);
if (context_p->token.type != LEXER_LEFT_PAREN)
{
scanner_raise_error (context_p);
}
uint8_t mode = SCAN_STACK_STATEMENT_WITH_EXPR;
if (type == LEXER_KEYW_IF)
{
parser_stack_push_uint8 (context_p, SCAN_STACK_IF_STATEMENT);
}
else if (type == LEXER_KEYW_WITH)
{
mode = SCAN_STACK_WITH_EXPRESSION;
}
else if (type == LEXER_KEYW_SWITCH)
{
mode = SCAN_STACK_SWITCH_EXPRESSION;
}
scanner_context_p->mode = SCAN_MODE_PRIMARY_EXPRESSION;
parser_stack_push_uint8 (context_p, mode);
return SCAN_NEXT_TOKEN;
}
case LEXER_KEYW_WHILE:
{
lexer_next_token (context_p);
if (context_p->token.type != LEXER_LEFT_PAREN)
{
scanner_raise_error (context_p);
}
scanner_context_p->mode = SCAN_MODE_PRIMARY_EXPRESSION;
scanner_source_start_t source_start;
source_start.source_p = context_p->source_p;
parser_stack_push (context_p, &source_start, sizeof (scanner_source_start_t));
parser_stack_push_uint8 (context_p, SCAN_STACK_WHILE_EXPRESSION);
return SCAN_NEXT_TOKEN;
}
case LEXER_KEYW_FOR:
{
lexer_next_token (context_p);
if (context_p->token.type != LEXER_LEFT_PAREN)
{
scanner_raise_error (context_p);
}
scanner_for_statement_t for_statement;
for_statement.u.source_p = context_p->source_p;
uint8_t stack_mode = SCAN_STACK_FOR_START;
scan_return_types_t return_type = SCAN_KEEP_TOKEN;
lexer_next_token (context_p);
scanner_context_p->mode = SCAN_MODE_PRIMARY_EXPRESSION;
#if ENABLED (JERRY_ES2015)
const uint8_t *source_p = context_p->source_p;
#endif /* ENABLED (JERRY_ES2015) */
switch (context_p->token.type)
{
case LEXER_SEMICOLON:
{
scanner_context_p->mode = SCAN_MODE_PRIMARY_EXPRESSION_END;
break;
}
case LEXER_KEYW_VAR:
{
scanner_context_p->mode = SCAN_MODE_VAR_STATEMENT;
stack_mode = SCAN_STACK_FOR_VAR_START;
return_type = SCAN_NEXT_TOKEN;
break;
}
#if ENABLED (JERRY_ES2015)
case LEXER_LITERAL:
{
if (!lexer_token_is_let (context_p))
{
break;
}
parser_line_counter_t line = context_p->line;
parser_line_counter_t column = context_p->column;
if (lexer_check_arrow (context_p))
{
context_p->source_p = source_p;
context_p->line = line;
context_p->column = column;
context_p->token.flags &= (uint8_t) ~LEXER_NO_SKIP_SPACES;
break;
}
lexer_next_token (context_p);
type = (lexer_token_type_t) context_p->token.type;
if (type != LEXER_LEFT_SQUARE
&& type != LEXER_LEFT_BRACE
&& (type != LEXER_LITERAL || context_p->token.lit_location.type != LEXER_IDENT_LITERAL))
{
scanner_info_t *info_p = scanner_insert_info (context_p, source_p, sizeof (scanner_info_t));
info_p->type = SCANNER_TYPE_LET_EXPRESSION;
scanner_context_p->mode = SCAN_MODE_POST_PRIMARY_EXPRESSION;
break;
}
scanner_context_p->mode = SCAN_MODE_VAR_STATEMENT;
/* FALLTHRU */
}
case LEXER_KEYW_LET:
case LEXER_KEYW_CONST:
{
scanner_literal_pool_t *literal_pool_p;
literal_pool_p = scanner_push_literal_pool (context_p, scanner_context_p, SCANNER_LITERAL_POOL_BLOCK);
literal_pool_p->source_p = source_p;
if (scanner_context_p->mode == SCAN_MODE_PRIMARY_EXPRESSION)
{
scanner_context_p->mode = SCAN_MODE_VAR_STATEMENT;
return_type = SCAN_NEXT_TOKEN;
}
stack_mode = ((context_p->token.type == LEXER_KEYW_CONST) ? SCAN_STACK_FOR_CONST_START
: SCAN_STACK_FOR_LET_START);
break;
}
#endif /* ENABLED (JERRY_ES2015) */
}
parser_stack_push (context_p, &for_statement, sizeof (scanner_for_statement_t));
parser_stack_push_uint8 (context_p, stack_mode);
return return_type;
}
case LEXER_KEYW_VAR:
{
scanner_context_p->mode = SCAN_MODE_VAR_STATEMENT;
parser_stack_push_uint8 (context_p, SCAN_STACK_VAR);
return SCAN_NEXT_TOKEN;
}
#if ENABLED (JERRY_ES2015)
case LEXER_KEYW_LET:
{
scanner_context_p->mode = SCAN_MODE_VAR_STATEMENT;
parser_stack_push_uint8 (context_p, SCAN_STACK_LET);
return SCAN_NEXT_TOKEN;
}
case LEXER_KEYW_CONST:
{
scanner_context_p->mode = SCAN_MODE_VAR_STATEMENT;
parser_stack_push_uint8 (context_p, SCAN_STACK_CONST);
return SCAN_NEXT_TOKEN;
}
#endif /* ENABLED (JERRY_ES2015) */
case LEXER_KEYW_THROW:
{
scanner_context_p->mode = SCAN_MODE_PRIMARY_EXPRESSION;
return SCAN_NEXT_TOKEN;
}
case LEXER_KEYW_RETURN:
{
lexer_next_token (context_p);
if (!(context_p->token.flags & LEXER_WAS_NEWLINE)
&& context_p->token.type != LEXER_SEMICOLON
&& context_p->token.type != LEXER_EOS
&& context_p->token.type != LEXER_RIGHT_BRACE)
{
scanner_context_p->mode = SCAN_MODE_PRIMARY_EXPRESSION;
return SCAN_KEEP_TOKEN;
}
scanner_context_p->mode = SCAN_MODE_STATEMENT_END;
return SCAN_KEEP_TOKEN;
}
case LEXER_KEYW_BREAK:
case LEXER_KEYW_CONTINUE:
{
lexer_next_token (context_p);
scanner_context_p->mode = SCAN_MODE_STATEMENT_END;
if (!(context_p->token.flags & LEXER_WAS_NEWLINE)
&& context_p->token.type == LEXER_LITERAL
&& context_p->token.lit_location.type == LEXER_IDENT_LITERAL)
{
return SCAN_NEXT_TOKEN;
}
return SCAN_KEEP_TOKEN;
}
case LEXER_KEYW_CASE:
case LEXER_KEYW_DEFAULT:
{
if (stack_top != SCAN_STACK_SWITCH_BLOCK)
{
scanner_raise_error (context_p);
}
scanner_case_info_t *case_info_p;
case_info_p = (scanner_case_info_t *) scanner_malloc (context_p, sizeof (scanner_case_info_t));
*(scanner_context_p->active_switch_statement.last_case_p) = case_info_p;
scanner_context_p->active_switch_statement.last_case_p = &case_info_p->next_p;
case_info_p->next_p = NULL;
scanner_get_location (&case_info_p->location, context_p);
if (type == LEXER_KEYW_DEFAULT)
{
lexer_next_token (context_p);
if (context_p->token.type != LEXER_COLON)
{
scanner_raise_error (context_p);
}
scanner_context_p->mode = SCAN_MODE_STATEMENT_OR_TERMINATOR;
return SCAN_NEXT_TOKEN;
}
scanner_source_start_t source_start;
source_start.source_p = context_p->source_p;
parser_stack_push (context_p, &source_start, sizeof (scanner_source_start_t));
parser_stack_push_uint8 (context_p, SCAN_STACK_CASE_STATEMENT);
scanner_context_p->mode = SCAN_MODE_PRIMARY_EXPRESSION;
return SCAN_NEXT_TOKEN;
}
case LEXER_KEYW_FUNCTION:
{
#if ENABLED (JERRY_ES2015)
uint16_t status_flags = SCANNER_LITERAL_POOL_FUNCTION | SCANNER_LITERAL_POOL_FUNCTION_STATEMENT;
if (scanner_context_p->async_source_p != NULL)
{
scanner_context_p->status_flags |= SCANNER_CONTEXT_THROW_ERR_ASYNC_FUNCTION;
status_flags |= SCANNER_LITERAL_POOL_ASYNC;
}
#endif /* ENABLED (JERRY_ES2015) */
lexer_next_token (context_p);
#if ENABLED (JERRY_ES2015)
if (context_p->token.type == LEXER_MULTIPLY)
{
status_flags |= SCANNER_LITERAL_POOL_GENERATOR;
lexer_next_token (context_p);
}
#endif /* ENABLED (JERRY_ES2015) */
if (context_p->token.type != LEXER_LITERAL
|| context_p->token.lit_location.type != LEXER_IDENT_LITERAL)
{
scanner_raise_error (context_p);
}
lexer_lit_location_t *literal_p = scanner_add_literal (context_p, scanner_context_p);
#if ENABLED (JERRY_ES2015)
const uint8_t mask = (SCANNER_LITERAL_IS_ARG | SCANNER_LITERAL_IS_FUNC | SCANNER_LITERAL_IS_LOCAL);
if ((literal_p->type & SCANNER_LITERAL_IS_LOCAL)
&& (literal_p->type & mask) != (SCANNER_LITERAL_IS_ARG | SCANNER_LITERAL_IS_DESTRUCTURED_ARG)
&& (literal_p->type & mask) != (SCANNER_LITERAL_IS_FUNC | SCANNER_LITERAL_IS_FUNC_DECLARATION))
{
scanner_raise_redeclaration_error (context_p);
}
literal_p->type |= SCANNER_LITERAL_IS_FUNC | SCANNER_LITERAL_IS_FUNC_DECLARATION;
scanner_context_p->status_flags &= (uint16_t) ~SCANNER_CONTEXT_THROW_ERR_ASYNC_FUNCTION;
#else
literal_p->type |= SCANNER_LITERAL_IS_VAR | SCANNER_LITERAL_IS_FUNC;
uint16_t status_flags = SCANNER_LITERAL_POOL_FUNCTION;
#endif /* ENABLED (JERRY_ES2015) */
scanner_push_literal_pool (context_p, scanner_context_p, status_flags);
scanner_context_p->mode = SCAN_MODE_FUNCTION_ARGUMENTS;
parser_stack_push_uint8 (context_p, SCAN_STACK_FUNCTION_STATEMENT);
return SCAN_NEXT_TOKEN;
}
#if ENABLED (JERRY_ES2015)
case LEXER_KEYW_CLASS:
{
scanner_push_class_declaration (context_p, scanner_context_p, SCAN_STACK_CLASS_STATEMENT);
if (context_p->token.type != LEXER_LITERAL || context_p->token.lit_location.type != LEXER_IDENT_LITERAL)
{
scanner_raise_error (context_p);
}
lexer_lit_location_t *literal_p = scanner_add_literal (context_p, scanner_context_p);
scanner_detect_invalid_let (context_p, literal_p);
literal_p->type |= SCANNER_LITERAL_IS_LET;
#if ENABLED (JERRY_ES2015_MODULE_SYSTEM)
if (scanner_context_p->active_literal_pool_p->status_flags & SCANNER_LITERAL_POOL_IN_EXPORT)
{
literal_p->type |= SCANNER_LITERAL_NO_REG;
scanner_context_p->active_literal_pool_p->status_flags &= (uint16_t) ~SCANNER_LITERAL_POOL_IN_EXPORT;
}
#endif /* ENABLED (JERRY_ES2015_MODULE_SYSTEM) */
return SCAN_NEXT_TOKEN;
}
#endif /* ENABLED (JERRY_ES2015) */
#if ENABLED (JERRY_ES2015_MODULE_SYSTEM)
case LEXER_KEYW_IMPORT:
{
if (stack_top != SCAN_STACK_SCRIPT)
{
scanner_raise_error (context_p);
}
context_p->global_status_flags |= ECMA_PARSE_MODULE;
scanner_context_p->mode = SCAN_MODE_STATEMENT_END;
lexer_next_token (context_p);
if (context_p->token.type == LEXER_LITERAL
&& context_p->token.lit_location.type == LEXER_STRING_LITERAL)
{
return SCAN_NEXT_TOKEN;
}
bool parse_imports = true;
if (context_p->token.type == LEXER_LITERAL
&& context_p->token.lit_location.type == LEXER_IDENT_LITERAL)
{
lexer_lit_location_t *literal_p = scanner_add_literal (context_p, scanner_context_p);
#if ENABLED (JERRY_ES2015)
scanner_detect_invalid_let (context_p, literal_p);
literal_p->type |= SCANNER_LITERAL_IS_LOCAL | SCANNER_LITERAL_NO_REG;
#else /* !ENABLED (JERRY_ES2015) */
literal_p->type |= SCANNER_LITERAL_IS_VAR | SCANNER_LITERAL_NO_REG;
#endif /* ENABLED (JERRY_ES2015) */
lexer_next_token (context_p);
if (context_p->token.type == LEXER_COMMA)
{
lexer_next_token (context_p);
}
else
{
parse_imports = false;
}
}
if (parse_imports)
{
if (context_p->token.type == LEXER_MULTIPLY)
{
lexer_next_token (context_p);
if (!lexer_token_is_identifier (context_p, "as", 2))
{
scanner_raise_error (context_p);
}
lexer_next_token (context_p);
if (context_p->token.type != LEXER_LITERAL
&& context_p->token.lit_location.type == LEXER_IDENT_LITERAL)
{
scanner_raise_error (context_p);
}
lexer_lit_location_t *literal_p = scanner_add_literal (context_p, scanner_context_p);
#if ENABLED (JERRY_ES2015)
scanner_detect_invalid_let (context_p, literal_p);
literal_p->type |= SCANNER_LITERAL_IS_LOCAL | SCANNER_LITERAL_NO_REG;
#else /* !ENABLED (JERRY_ES2015) */
literal_p->type |= SCANNER_LITERAL_IS_VAR | SCANNER_LITERAL_NO_REG;
#endif /* ENABLED (JERRY_ES2015) */
lexer_next_token (context_p);
}
else if (context_p->token.type == LEXER_LEFT_BRACE)
{
lexer_next_token (context_p);
while (context_p->token.type != LEXER_RIGHT_BRACE)
{
if (context_p->token.type != LEXER_LITERAL
|| context_p->token.lit_location.type != LEXER_IDENT_LITERAL)
{
scanner_raise_error (context_p);
}
#if ENABLED (JERRY_ES2015)
const uint8_t *source_p = context_p->source_p;
#endif /* ENABLED (JERRY_ES2015) */
if (lexer_check_next_character (context_p, LIT_CHAR_LOWERCASE_A))
{
lexer_next_token (context_p);
if (!lexer_token_is_identifier (context_p, "as", 2))
{
scanner_raise_error (context_p);
}
lexer_next_token (context_p);
if (context_p->token.type != LEXER_LITERAL
&& context_p->token.lit_location.type == LEXER_IDENT_LITERAL)
{
scanner_raise_error (context_p);
}
#if ENABLED (JERRY_ES2015)
source_p = context_p->source_p;
#endif /* ENABLED (JERRY_ES2015) */
}
lexer_lit_location_t *literal_p = scanner_add_literal (context_p, scanner_context_p);
#if ENABLED (JERRY_ES2015)
if (literal_p->type & (SCANNER_LITERAL_IS_ARG
| SCANNER_LITERAL_IS_VAR
| SCANNER_LITERAL_IS_LOCAL))
{
context_p->source_p = source_p;
scanner_raise_redeclaration_error (context_p);
}
if (literal_p->type & SCANNER_LITERAL_IS_FUNC)
{
literal_p->type &= (uint8_t) ~SCANNER_LITERAL_IS_FUNC;
}
literal_p->type |= SCANNER_LITERAL_IS_LOCAL | SCANNER_LITERAL_NO_REG;
#else /* !ENABLED (JERRY_ES2015) */
literal_p->type |= SCANNER_LITERAL_IS_VAR | SCANNER_LITERAL_NO_REG;
#endif /* ENABLED (JERRY_ES2015) */
lexer_next_token (context_p);
if (context_p->token.type != LEXER_RIGHT_BRACE)
{
if (context_p->token.type != LEXER_COMMA)
{
scanner_raise_error (context_p);
}
lexer_next_token (context_p);
}
}
lexer_next_token (context_p);
}
else
{
scanner_raise_error (context_p);
}
}
if (!lexer_token_is_identifier (context_p, "from", 4))
{
scanner_raise_error (context_p);
}
lexer_next_token (context_p);
if (context_p->token.type != LEXER_LITERAL
&& context_p->token.lit_location.type != LEXER_STRING_LITERAL)
{
scanner_raise_error (context_p);
}
return SCAN_NEXT_TOKEN;
}
case LEXER_KEYW_EXPORT:
{
if (stack_top != SCAN_STACK_SCRIPT)
{
scanner_raise_error (context_p);
}
context_p->global_status_flags |= ECMA_PARSE_MODULE;
lexer_next_token (context_p);
if (context_p->token.type == LEXER_KEYW_DEFAULT)
{
lexer_next_token (context_p);
if (context_p->token.type == LEXER_KEYW_FUNCTION)
{
lexer_next_token (context_p);
if (context_p->token.type == LEXER_LITERAL
&& context_p->token.lit_location.type == LEXER_IDENT_LITERAL)
{
lexer_lit_location_t *location_p = scanner_add_literal (context_p, scanner_context_p);
#if ENABLED (JERRY_ES2015)
if (location_p->type & SCANNER_LITERAL_IS_LOCAL
&& !(location_p->type & SCANNER_LITERAL_IS_FUNC))
{
scanner_raise_redeclaration_error (context_p);
}
location_p->type |= SCANNER_LITERAL_IS_FUNC | SCANNER_LITERAL_IS_LET;
#else /* !ENABLED (JERRY_ES2015) */
location_p->type |= SCANNER_LITERAL_IS_VAR | SCANNER_LITERAL_IS_FUNC;
#endif /* ENABLED (JERRY_ES2015) */
lexer_next_token (context_p);
}
else
{
lexer_lit_location_t *location_p;
location_p = scanner_add_custom_literal (context_p,
scanner_context_p->active_literal_pool_p,
&lexer_default_literal);
#if ENABLED (JERRY_ES2015)
location_p->type |= SCANNER_LITERAL_IS_FUNC | SCANNER_LITERAL_IS_LET;
#else /* !ENABLED (JERRY_ES2015) */
location_p->type |= SCANNER_LITERAL_IS_VAR | SCANNER_LITERAL_IS_FUNC;
#endif /* ENABLED (JERRY_ES2015) */
}
scanner_push_literal_pool (context_p, scanner_context_p, SCANNER_LITERAL_POOL_FUNCTION);
parser_stack_push_uint8 (context_p, SCAN_STACK_FUNCTION_STATEMENT);
scanner_context_p->mode = SCAN_MODE_FUNCTION_ARGUMENTS;
return SCAN_KEEP_TOKEN;
}
#if ENABLED (JERRY_ES2015)
if (context_p->token.type == LEXER_KEYW_CLASS)
{
scanner_push_class_declaration (context_p, scanner_context_p, SCAN_STACK_CLASS_STATEMENT);
if (context_p->token.type == LEXER_LITERAL && context_p->token.lit_location.type == LEXER_IDENT_LITERAL)
{
lexer_lit_location_t *literal_p = scanner_add_literal (context_p, scanner_context_p);
scanner_detect_invalid_let (context_p, literal_p);
literal_p->type |= SCANNER_LITERAL_IS_LET | SCANNER_LITERAL_NO_REG;
return SCAN_NEXT_TOKEN;
}
lexer_lit_location_t *literal_p;
literal_p = scanner_add_custom_literal (context_p,
scanner_context_p->active_literal_pool_p,
&lexer_default_literal);
literal_p->type |= SCANNER_LITERAL_IS_LET | SCANNER_LITERAL_NO_REG;
return SCAN_KEEP_TOKEN;
}
#endif /* ENABLED (JERRY_ES2015) */
/* Assignment expression. */
lexer_lit_location_t *location_p;
location_p = scanner_add_custom_literal (context_p,
scanner_context_p->active_literal_pool_p,
&lexer_default_literal);
location_p->type |= SCANNER_LITERAL_IS_VAR;
scanner_context_p->mode = SCAN_MODE_PRIMARY_EXPRESSION;
if (context_p->token.type != LEXER_LITERAL || context_p->token.lit_location.type != LEXER_IDENT_LITERAL)
{
return SCAN_KEEP_TOKEN;
}
location_p = scanner_add_literal (context_p, scanner_context_p);
location_p->type |= SCANNER_LITERAL_IS_VAR;
scanner_context_p->mode = SCAN_MODE_POST_PRIMARY_EXPRESSION;
return SCAN_NEXT_TOKEN;
}
scanner_context_p->mode = SCAN_MODE_STATEMENT_END;
if (context_p->token.type == LEXER_MULTIPLY)
{
lexer_next_token (context_p);
if (!lexer_token_is_identifier (context_p, "from", 4))
{
scanner_raise_error (context_p);
}
lexer_next_token (context_p);
if (context_p->token.type != LEXER_LITERAL
&& context_p->token.lit_location.type == LEXER_STRING_LITERAL)
{
scanner_raise_error (context_p);
}
return SCAN_NEXT_TOKEN;
}
if (context_p->token.type == LEXER_LEFT_BRACE)
{
lexer_next_token (context_p);
while (context_p->token.type != LEXER_RIGHT_BRACE)
{
if (context_p->token.type != LEXER_LITERAL
|| context_p->token.lit_location.type != LEXER_IDENT_LITERAL)
{
scanner_raise_error (context_p);
}
lexer_next_token (context_p);
if (lexer_token_is_identifier (context_p, "as", 2))
{
lexer_next_token (context_p);
if (context_p->token.type != LEXER_LITERAL
&& context_p->token.lit_location.type == LEXER_IDENT_LITERAL)
{
scanner_raise_error (context_p);
}
lexer_next_token (context_p);
}
if (context_p->token.type != LEXER_RIGHT_BRACE)
{
if (context_p->token.type != LEXER_COMMA)
{
scanner_raise_error (context_p);
}
lexer_next_token (context_p);
}
}
lexer_next_token (context_p);
if (!lexer_token_is_identifier (context_p, "from", 4))
{
return SCAN_KEEP_TOKEN;
}
lexer_next_token (context_p);
if (context_p->token.type != LEXER_LITERAL
&& context_p->token.lit_location.type == LEXER_STRING_LITERAL)
{
scanner_raise_error (context_p);
}
return SCAN_NEXT_TOKEN;
}
switch (context_p->token.type)
{
#if ENABLED (JERRY_ES2015)
case LEXER_KEYW_CLASS:
case LEXER_KEYW_LET:
case LEXER_KEYW_CONST:
#endif /* ENABLED (JERRY_ES2015) */
case LEXER_KEYW_VAR:
{
scanner_context_p->active_literal_pool_p->status_flags |= SCANNER_LITERAL_POOL_IN_EXPORT;
break;
}
}
scanner_context_p->mode = SCAN_MODE_STATEMENT;
return SCAN_KEEP_TOKEN;
}
#endif /* ENABLED (JERRY_ES2015_MODULE_SYSTEM) */
default:
{
break;
}
}
scanner_context_p->mode = SCAN_MODE_PRIMARY_EXPRESSION;
if (type == LEXER_LITERAL
&& context_p->token.lit_location.type == LEXER_IDENT_LITERAL)
{
if (JERRY_UNLIKELY (lexer_check_next_character (context_p, LIT_CHAR_COLON)))
{
lexer_consume_next_character (context_p);
scanner_context_p->mode = SCAN_MODE_STATEMENT;
return SCAN_NEXT_TOKEN;
}
JERRY_ASSERT (context_p->token.flags & LEXER_NO_SKIP_SPACES);
#if ENABLED (JERRY_ES2015)
/* The colon needs to be checked first because the parser also checks
* it first, and this check skips the spaces which affects source_p. */
if (JERRY_UNLIKELY (lexer_check_arrow (context_p)))
{
scanner_scan_simple_arrow (context_p, scanner_context_p, context_p->source_p);
return SCAN_KEEP_TOKEN;
}
if (JERRY_UNLIKELY (lexer_token_is_let (context_p)))
{
lexer_lit_location_t let_literal = context_p->token.lit_location;
const uint8_t *source_p = context_p->source_p;
lexer_next_token (context_p);
type = (lexer_token_type_t) context_p->token.type;
if (type == LEXER_LEFT_SQUARE
|| type == LEXER_LEFT_BRACE
|| (type == LEXER_LITERAL && context_p->token.lit_location.type == LEXER_IDENT_LITERAL))
{
scanner_context_p->mode = SCAN_MODE_VAR_STATEMENT;
parser_stack_push_uint8 (context_p, SCAN_STACK_LET);
return SCAN_KEEP_TOKEN;
}
scanner_info_t *info_p = scanner_insert_info (context_p, source_p, sizeof (scanner_info_t));
info_p->type = SCANNER_TYPE_LET_EXPRESSION;
lexer_lit_location_t *lit_location_p = scanner_add_custom_literal (context_p,
scanner_context_p->active_literal_pool_p,
&let_literal);
lit_location_p->type |= SCANNER_LITERAL_IS_USED;
if (scanner_context_p->active_literal_pool_p->status_flags & SCANNER_LITERAL_POOL_IN_WITH)
{
lit_location_p->type |= SCANNER_LITERAL_NO_REG;
}
scanner_context_p->mode = SCAN_MODE_POST_PRIMARY_EXPRESSION;
return SCAN_KEEP_TOKEN;
}
if (JERRY_UNLIKELY (lexer_token_is_async (context_p)))
{
scanner_context_p->async_source_p = context_p->source_p;
if (scanner_check_async_function (context_p, scanner_context_p))
{
scanner_context_p->mode = SCAN_MODE_STATEMENT;
}
return SCAN_KEEP_TOKEN;
}
#endif /* ENABLED (JERRY_ES2015) */
scanner_add_reference (context_p, scanner_context_p);
scanner_context_p->mode = SCAN_MODE_POST_PRIMARY_EXPRESSION;
return SCAN_NEXT_TOKEN;
}
return SCAN_KEEP_TOKEN;
} /* scanner_scan_statement */
| 0 |
[
"CWE-476",
"CWE-703",
"CWE-754"
] |
jerryscript
|
69f8e78c2f8d562bd6d8002b5488f1662ac30d24
| 58,151,765,943,309,890,000,000,000,000,000,000,000 | 822 |
Fix error handling in scanner when in case of OOM (#3793)
This patch fixes #3786 and fixes #3788.
JerryScript-DCO-1.0-Signed-off-by: Robert Fancsik [email protected]
|
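The fix referenced above hardens the scanner's out-of-memory paths. One common shape for that kind of fix — shown here as an assumption-level sketch, not the actual JerryScript code — is to route every allocation through a checked helper that unwinds to a central handler instead of returning NULL to callers that never test for it:

#include <setjmp.h>
#include <stdio.h>
#include <stdlib.h>

static jmp_buf scanner_error_env;   /* central OOM/error landing pad */

static void *scanner_malloc_checked(size_t size)
{
    void *p = malloc(size);
    if (p == NULL)
        longjmp(scanner_error_env, 1);   /* never hand back NULL */
    return p;
}

int main(void)
{
    if (setjmp(scanner_error_env)) {
        fprintf(stderr, "scanner: out of memory\n");
        return 1;
    }
    char *buf = scanner_malloc_checked(64);
    free(buf);
    return 0;
}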
gdImagePtr gdImageCreateFromJpegPtrEx (int size, void *data, int ignore_warning)
{
gdImagePtr im;
gdIOCtx *in = gdNewDynamicCtxEx(size, data, 0);
im = gdImageCreateFromJpegCtxEx(in, ignore_warning);
in->gd_free(in);
return im;
}
| 0 |
[
"CWE-415"
] |
php-src
|
089f7c0bc28d399b0420aa6ef058e4c1c120b2ae
| 16,282,597,878,218,798,000,000,000,000,000,000,000 | 9 |
Sync with upstream
Even though libgd/libgd#492 is not a relevant bug fix for PHP, since
the binding doesn't use the `gdImage*Ptr()` functions at all, we're
porting the fix to stay in sync here.
|
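The upstream libgd issue was a double free (CWE-415) in the gdImage*Ptr() family. The defensive shape of such a fix — freeing a resource exactly once and clearing the pointer — can be sketched in a few lines; the types below are illustrative, not libgd's:

#include <stdlib.h>

struct io_ctx { void *data; };

static void ctx_free(struct io_ctx **ctx)
{
    if (*ctx != NULL) {
        free(*ctx);
        *ctx = NULL;   /* a second call becomes a harmless no-op */
    }
}

int main(void)
{
    struct io_ctx *c = malloc(sizeof *c);
    ctx_free(&c);
    ctx_free(&c);   /* no double free */
    return 0;
}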
static MagickBooleanType SetsRGBImageProfile(Image *image,
ExceptionInfo *exception)
{
static unsigned char
sRGBProfile[] =
{
0x00, 0x00, 0x0c, 0x8c, 0x61, 0x72, 0x67, 0x6c, 0x02, 0x20, 0x00, 0x00,
0x6d, 0x6e, 0x74, 0x72, 0x52, 0x47, 0x42, 0x20, 0x58, 0x59, 0x5a, 0x20,
0x07, 0xde, 0x00, 0x01, 0x00, 0x06, 0x00, 0x16, 0x00, 0x0f, 0x00, 0x3a,
0x61, 0x63, 0x73, 0x70, 0x4d, 0x53, 0x46, 0x54, 0x00, 0x00, 0x00, 0x00,
0x49, 0x45, 0x43, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xd6,
0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0xd3, 0x2d, 0x61, 0x72, 0x67, 0x6c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11,
0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x01, 0x50, 0x00, 0x00, 0x00, 0x99,
0x63, 0x70, 0x72, 0x74, 0x00, 0x00, 0x01, 0xec, 0x00, 0x00, 0x00, 0x67,
0x64, 0x6d, 0x6e, 0x64, 0x00, 0x00, 0x02, 0x54, 0x00, 0x00, 0x00, 0x70,
0x64, 0x6d, 0x64, 0x64, 0x00, 0x00, 0x02, 0xc4, 0x00, 0x00, 0x00, 0x88,
0x74, 0x65, 0x63, 0x68, 0x00, 0x00, 0x03, 0x4c, 0x00, 0x00, 0x00, 0x0c,
0x76, 0x75, 0x65, 0x64, 0x00, 0x00, 0x03, 0x58, 0x00, 0x00, 0x00, 0x67,
0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x03, 0xc0, 0x00, 0x00, 0x00, 0x24,
0x6c, 0x75, 0x6d, 0x69, 0x00, 0x00, 0x03, 0xe4, 0x00, 0x00, 0x00, 0x14,
0x6d, 0x65, 0x61, 0x73, 0x00, 0x00, 0x03, 0xf8, 0x00, 0x00, 0x00, 0x24,
0x77, 0x74, 0x70, 0x74, 0x00, 0x00, 0x04, 0x1c, 0x00, 0x00, 0x00, 0x14,
0x62, 0x6b, 0x70, 0x74, 0x00, 0x00, 0x04, 0x30, 0x00, 0x00, 0x00, 0x14,
0x72, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x44, 0x00, 0x00, 0x00, 0x14,
0x67, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x58, 0x00, 0x00, 0x00, 0x14,
0x62, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x6c, 0x00, 0x00, 0x00, 0x14,
0x72, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c,
0x67, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c,
0x62, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c,
0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f,
0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36,
0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 0x75, 0x69, 0x76,
0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77, 0x77, 0x77,
0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20, 0x31, 0x39,
0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c,
0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x3f, 0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31,
0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 0x75,
0x69, 0x76, 0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77,
0x77, 0x77, 0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20,
0x31, 0x39, 0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66,
0x69, 0x6c, 0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x74, 0x65, 0x78, 0x74, 0x00, 0x00, 0x00, 0x00, 0x43, 0x72, 0x65, 0x61,
0x74, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x47, 0x72, 0x61, 0x65, 0x6d,
0x65, 0x20, 0x57, 0x2e, 0x20, 0x47, 0x69, 0x6c, 0x6c, 0x2e, 0x20, 0x52,
0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x74, 0x6f,
0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x20,
0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x2e, 0x20, 0x4e, 0x6f, 0x20, 0x57,
0x61, 0x72, 0x72, 0x61, 0x6e, 0x74, 0x79, 0x2c, 0x20, 0x55, 0x73, 0x65,
0x20, 0x61, 0x74, 0x20, 0x79, 0x6f, 0x75, 0x72, 0x20, 0x6f, 0x77, 0x6e,
0x20, 0x72, 0x69, 0x73, 0x6b, 0x2e, 0x00, 0x00, 0x64, 0x65, 0x73, 0x63,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20,
0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69,
0x65, 0x63, 0x2e, 0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20, 0x68, 0x74, 0x74,
0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x65, 0x63, 0x2e,
0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e,
0x49, 0x45, 0x43, 0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e,
0x31, 0x20, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47,
0x42, 0x20, 0x63, 0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61,
0x63, 0x65, 0x20, 0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0x49, 0x45, 0x43,
0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x44,
0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47, 0x42, 0x20, 0x63,
0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61, 0x63, 0x65, 0x20,
0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x73, 0x69, 0x67, 0x20, 0x00, 0x00, 0x00, 0x00,
0x43, 0x52, 0x54, 0x20, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36,
0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36,
0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0xa4, 0x7c,
0x00, 0x14, 0x5f, 0x30, 0x00, 0x10, 0xce, 0x02, 0x00, 0x03, 0xed, 0xb2,
0x00, 0x04, 0x13, 0x0a, 0x00, 0x03, 0x5c, 0x67, 0x00, 0x00, 0x00, 0x01,
0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4c, 0x0a, 0x3d,
0x00, 0x50, 0x00, 0x00, 0x00, 0x57, 0x1e, 0xb8, 0x6d, 0x65, 0x61, 0x73,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x02, 0x8f, 0x00, 0x00, 0x00, 0x02, 0x58, 0x59, 0x5a, 0x20,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf3, 0x51, 0x00, 0x01, 0x00, 0x00,
0x00, 0x01, 0x16, 0xcc, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6f, 0xa0,
0x00, 0x00, 0x38, 0xf5, 0x00, 0x00, 0x03, 0x90, 0x58, 0x59, 0x5a, 0x20,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x62, 0x97, 0x00, 0x00, 0xb7, 0x87,
0x00, 0x00, 0x18, 0xd9, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x24, 0x9f, 0x00, 0x00, 0x0f, 0x84, 0x00, 0x00, 0xb6, 0xc4,
0x63, 0x75, 0x72, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
0x00, 0x00, 0x00, 0x05, 0x00, 0x0a, 0x00, 0x0f, 0x00, 0x14, 0x00, 0x19,
0x00, 0x1e, 0x00, 0x23, 0x00, 0x28, 0x00, 0x2d, 0x00, 0x32, 0x00, 0x37,
0x00, 0x3b, 0x00, 0x40, 0x00, 0x45, 0x00, 0x4a, 0x00, 0x4f, 0x00, 0x54,
0x00, 0x59, 0x00, 0x5e, 0x00, 0x63, 0x00, 0x68, 0x00, 0x6d, 0x00, 0x72,
0x00, 0x77, 0x00, 0x7c, 0x00, 0x81, 0x00, 0x86, 0x00, 0x8b, 0x00, 0x90,
0x00, 0x95, 0x00, 0x9a, 0x00, 0x9f, 0x00, 0xa4, 0x00, 0xa9, 0x00, 0xae,
0x00, 0xb2, 0x00, 0xb7, 0x00, 0xbc, 0x00, 0xc1, 0x00, 0xc6, 0x00, 0xcb,
0x00, 0xd0, 0x00, 0xd5, 0x00, 0xdb, 0x00, 0xe0, 0x00, 0xe5, 0x00, 0xeb,
0x00, 0xf0, 0x00, 0xf6, 0x00, 0xfb, 0x01, 0x01, 0x01, 0x07, 0x01, 0x0d,
0x01, 0x13, 0x01, 0x19, 0x01, 0x1f, 0x01, 0x25, 0x01, 0x2b, 0x01, 0x32,
0x01, 0x38, 0x01, 0x3e, 0x01, 0x45, 0x01, 0x4c, 0x01, 0x52, 0x01, 0x59,
0x01, 0x60, 0x01, 0x67, 0x01, 0x6e, 0x01, 0x75, 0x01, 0x7c, 0x01, 0x83,
0x01, 0x8b, 0x01, 0x92, 0x01, 0x9a, 0x01, 0xa1, 0x01, 0xa9, 0x01, 0xb1,
0x01, 0xb9, 0x01, 0xc1, 0x01, 0xc9, 0x01, 0xd1, 0x01, 0xd9, 0x01, 0xe1,
0x01, 0xe9, 0x01, 0xf2, 0x01, 0xfa, 0x02, 0x03, 0x02, 0x0c, 0x02, 0x14,
0x02, 0x1d, 0x02, 0x26, 0x02, 0x2f, 0x02, 0x38, 0x02, 0x41, 0x02, 0x4b,
0x02, 0x54, 0x02, 0x5d, 0x02, 0x67, 0x02, 0x71, 0x02, 0x7a, 0x02, 0x84,
0x02, 0x8e, 0x02, 0x98, 0x02, 0xa2, 0x02, 0xac, 0x02, 0xb6, 0x02, 0xc1,
0x02, 0xcb, 0x02, 0xd5, 0x02, 0xe0, 0x02, 0xeb, 0x02, 0xf5, 0x03, 0x00,
0x03, 0x0b, 0x03, 0x16, 0x03, 0x21, 0x03, 0x2d, 0x03, 0x38, 0x03, 0x43,
0x03, 0x4f, 0x03, 0x5a, 0x03, 0x66, 0x03, 0x72, 0x03, 0x7e, 0x03, 0x8a,
0x03, 0x96, 0x03, 0xa2, 0x03, 0xae, 0x03, 0xba, 0x03, 0xc7, 0x03, 0xd3,
0x03, 0xe0, 0x03, 0xec, 0x03, 0xf9, 0x04, 0x06, 0x04, 0x13, 0x04, 0x20,
0x04, 0x2d, 0x04, 0x3b, 0x04, 0x48, 0x04, 0x55, 0x04, 0x63, 0x04, 0x71,
0x04, 0x7e, 0x04, 0x8c, 0x04, 0x9a, 0x04, 0xa8, 0x04, 0xb6, 0x04, 0xc4,
0x04, 0xd3, 0x04, 0xe1, 0x04, 0xf0, 0x04, 0xfe, 0x05, 0x0d, 0x05, 0x1c,
0x05, 0x2b, 0x05, 0x3a, 0x05, 0x49, 0x05, 0x58, 0x05, 0x67, 0x05, 0x77,
0x05, 0x86, 0x05, 0x96, 0x05, 0xa6, 0x05, 0xb5, 0x05, 0xc5, 0x05, 0xd5,
0x05, 0xe5, 0x05, 0xf6, 0x06, 0x06, 0x06, 0x16, 0x06, 0x27, 0x06, 0x37,
0x06, 0x48, 0x06, 0x59, 0x06, 0x6a, 0x06, 0x7b, 0x06, 0x8c, 0x06, 0x9d,
0x06, 0xaf, 0x06, 0xc0, 0x06, 0xd1, 0x06, 0xe3, 0x06, 0xf5, 0x07, 0x07,
0x07, 0x19, 0x07, 0x2b, 0x07, 0x3d, 0x07, 0x4f, 0x07, 0x61, 0x07, 0x74,
0x07, 0x86, 0x07, 0x99, 0x07, 0xac, 0x07, 0xbf, 0x07, 0xd2, 0x07, 0xe5,
0x07, 0xf8, 0x08, 0x0b, 0x08, 0x1f, 0x08, 0x32, 0x08, 0x46, 0x08, 0x5a,
0x08, 0x6e, 0x08, 0x82, 0x08, 0x96, 0x08, 0xaa, 0x08, 0xbe, 0x08, 0xd2,
0x08, 0xe7, 0x08, 0xfb, 0x09, 0x10, 0x09, 0x25, 0x09, 0x3a, 0x09, 0x4f,
0x09, 0x64, 0x09, 0x79, 0x09, 0x8f, 0x09, 0xa4, 0x09, 0xba, 0x09, 0xcf,
0x09, 0xe5, 0x09, 0xfb, 0x0a, 0x11, 0x0a, 0x27, 0x0a, 0x3d, 0x0a, 0x54,
0x0a, 0x6a, 0x0a, 0x81, 0x0a, 0x98, 0x0a, 0xae, 0x0a, 0xc5, 0x0a, 0xdc,
0x0a, 0xf3, 0x0b, 0x0b, 0x0b, 0x22, 0x0b, 0x39, 0x0b, 0x51, 0x0b, 0x69,
0x0b, 0x80, 0x0b, 0x98, 0x0b, 0xb0, 0x0b, 0xc8, 0x0b, 0xe1, 0x0b, 0xf9,
0x0c, 0x12, 0x0c, 0x2a, 0x0c, 0x43, 0x0c, 0x5c, 0x0c, 0x75, 0x0c, 0x8e,
0x0c, 0xa7, 0x0c, 0xc0, 0x0c, 0xd9, 0x0c, 0xf3, 0x0d, 0x0d, 0x0d, 0x26,
0x0d, 0x40, 0x0d, 0x5a, 0x0d, 0x74, 0x0d, 0x8e, 0x0d, 0xa9, 0x0d, 0xc3,
0x0d, 0xde, 0x0d, 0xf8, 0x0e, 0x13, 0x0e, 0x2e, 0x0e, 0x49, 0x0e, 0x64,
0x0e, 0x7f, 0x0e, 0x9b, 0x0e, 0xb6, 0x0e, 0xd2, 0x0e, 0xee, 0x0f, 0x09,
0x0f, 0x25, 0x0f, 0x41, 0x0f, 0x5e, 0x0f, 0x7a, 0x0f, 0x96, 0x0f, 0xb3,
0x0f, 0xcf, 0x0f, 0xec, 0x10, 0x09, 0x10, 0x26, 0x10, 0x43, 0x10, 0x61,
0x10, 0x7e, 0x10, 0x9b, 0x10, 0xb9, 0x10, 0xd7, 0x10, 0xf5, 0x11, 0x13,
0x11, 0x31, 0x11, 0x4f, 0x11, 0x6d, 0x11, 0x8c, 0x11, 0xaa, 0x11, 0xc9,
0x11, 0xe8, 0x12, 0x07, 0x12, 0x26, 0x12, 0x45, 0x12, 0x64, 0x12, 0x84,
0x12, 0xa3, 0x12, 0xc3, 0x12, 0xe3, 0x13, 0x03, 0x13, 0x23, 0x13, 0x43,
0x13, 0x63, 0x13, 0x83, 0x13, 0xa4, 0x13, 0xc5, 0x13, 0xe5, 0x14, 0x06,
0x14, 0x27, 0x14, 0x49, 0x14, 0x6a, 0x14, 0x8b, 0x14, 0xad, 0x14, 0xce,
0x14, 0xf0, 0x15, 0x12, 0x15, 0x34, 0x15, 0x56, 0x15, 0x78, 0x15, 0x9b,
0x15, 0xbd, 0x15, 0xe0, 0x16, 0x03, 0x16, 0x26, 0x16, 0x49, 0x16, 0x6c,
0x16, 0x8f, 0x16, 0xb2, 0x16, 0xd6, 0x16, 0xfa, 0x17, 0x1d, 0x17, 0x41,
0x17, 0x65, 0x17, 0x89, 0x17, 0xae, 0x17, 0xd2, 0x17, 0xf7, 0x18, 0x1b,
0x18, 0x40, 0x18, 0x65, 0x18, 0x8a, 0x18, 0xaf, 0x18, 0xd5, 0x18, 0xfa,
0x19, 0x20, 0x19, 0x45, 0x19, 0x6b, 0x19, 0x91, 0x19, 0xb7, 0x19, 0xdd,
0x1a, 0x04, 0x1a, 0x2a, 0x1a, 0x51, 0x1a, 0x77, 0x1a, 0x9e, 0x1a, 0xc5,
0x1a, 0xec, 0x1b, 0x14, 0x1b, 0x3b, 0x1b, 0x63, 0x1b, 0x8a, 0x1b, 0xb2,
0x1b, 0xda, 0x1c, 0x02, 0x1c, 0x2a, 0x1c, 0x52, 0x1c, 0x7b, 0x1c, 0xa3,
0x1c, 0xcc, 0x1c, 0xf5, 0x1d, 0x1e, 0x1d, 0x47, 0x1d, 0x70, 0x1d, 0x99,
0x1d, 0xc3, 0x1d, 0xec, 0x1e, 0x16, 0x1e, 0x40, 0x1e, 0x6a, 0x1e, 0x94,
0x1e, 0xbe, 0x1e, 0xe9, 0x1f, 0x13, 0x1f, 0x3e, 0x1f, 0x69, 0x1f, 0x94,
0x1f, 0xbf, 0x1f, 0xea, 0x20, 0x15, 0x20, 0x41, 0x20, 0x6c, 0x20, 0x98,
0x20, 0xc4, 0x20, 0xf0, 0x21, 0x1c, 0x21, 0x48, 0x21, 0x75, 0x21, 0xa1,
0x21, 0xce, 0x21, 0xfb, 0x22, 0x27, 0x22, 0x55, 0x22, 0x82, 0x22, 0xaf,
0x22, 0xdd, 0x23, 0x0a, 0x23, 0x38, 0x23, 0x66, 0x23, 0x94, 0x23, 0xc2,
0x23, 0xf0, 0x24, 0x1f, 0x24, 0x4d, 0x24, 0x7c, 0x24, 0xab, 0x24, 0xda,
0x25, 0x09, 0x25, 0x38, 0x25, 0x68, 0x25, 0x97, 0x25, 0xc7, 0x25, 0xf7,
0x26, 0x27, 0x26, 0x57, 0x26, 0x87, 0x26, 0xb7, 0x26, 0xe8, 0x27, 0x18,
0x27, 0x49, 0x27, 0x7a, 0x27, 0xab, 0x27, 0xdc, 0x28, 0x0d, 0x28, 0x3f,
0x28, 0x71, 0x28, 0xa2, 0x28, 0xd4, 0x29, 0x06, 0x29, 0x38, 0x29, 0x6b,
0x29, 0x9d, 0x29, 0xd0, 0x2a, 0x02, 0x2a, 0x35, 0x2a, 0x68, 0x2a, 0x9b,
0x2a, 0xcf, 0x2b, 0x02, 0x2b, 0x36, 0x2b, 0x69, 0x2b, 0x9d, 0x2b, 0xd1,
0x2c, 0x05, 0x2c, 0x39, 0x2c, 0x6e, 0x2c, 0xa2, 0x2c, 0xd7, 0x2d, 0x0c,
0x2d, 0x41, 0x2d, 0x76, 0x2d, 0xab, 0x2d, 0xe1, 0x2e, 0x16, 0x2e, 0x4c,
0x2e, 0x82, 0x2e, 0xb7, 0x2e, 0xee, 0x2f, 0x24, 0x2f, 0x5a, 0x2f, 0x91,
0x2f, 0xc7, 0x2f, 0xfe, 0x30, 0x35, 0x30, 0x6c, 0x30, 0xa4, 0x30, 0xdb,
0x31, 0x12, 0x31, 0x4a, 0x31, 0x82, 0x31, 0xba, 0x31, 0xf2, 0x32, 0x2a,
0x32, 0x63, 0x32, 0x9b, 0x32, 0xd4, 0x33, 0x0d, 0x33, 0x46, 0x33, 0x7f,
0x33, 0xb8, 0x33, 0xf1, 0x34, 0x2b, 0x34, 0x65, 0x34, 0x9e, 0x34, 0xd8,
0x35, 0x13, 0x35, 0x4d, 0x35, 0x87, 0x35, 0xc2, 0x35, 0xfd, 0x36, 0x37,
0x36, 0x72, 0x36, 0xae, 0x36, 0xe9, 0x37, 0x24, 0x37, 0x60, 0x37, 0x9c,
0x37, 0xd7, 0x38, 0x14, 0x38, 0x50, 0x38, 0x8c, 0x38, 0xc8, 0x39, 0x05,
0x39, 0x42, 0x39, 0x7f, 0x39, 0xbc, 0x39, 0xf9, 0x3a, 0x36, 0x3a, 0x74,
0x3a, 0xb2, 0x3a, 0xef, 0x3b, 0x2d, 0x3b, 0x6b, 0x3b, 0xaa, 0x3b, 0xe8,
0x3c, 0x27, 0x3c, 0x65, 0x3c, 0xa4, 0x3c, 0xe3, 0x3d, 0x22, 0x3d, 0x61,
0x3d, 0xa1, 0x3d, 0xe0, 0x3e, 0x20, 0x3e, 0x60, 0x3e, 0xa0, 0x3e, 0xe0,
0x3f, 0x21, 0x3f, 0x61, 0x3f, 0xa2, 0x3f, 0xe2, 0x40, 0x23, 0x40, 0x64,
0x40, 0xa6, 0x40, 0xe7, 0x41, 0x29, 0x41, 0x6a, 0x41, 0xac, 0x41, 0xee,
0x42, 0x30, 0x42, 0x72, 0x42, 0xb5, 0x42, 0xf7, 0x43, 0x3a, 0x43, 0x7d,
0x43, 0xc0, 0x44, 0x03, 0x44, 0x47, 0x44, 0x8a, 0x44, 0xce, 0x45, 0x12,
0x45, 0x55, 0x45, 0x9a, 0x45, 0xde, 0x46, 0x22, 0x46, 0x67, 0x46, 0xab,
0x46, 0xf0, 0x47, 0x35, 0x47, 0x7b, 0x47, 0xc0, 0x48, 0x05, 0x48, 0x4b,
0x48, 0x91, 0x48, 0xd7, 0x49, 0x1d, 0x49, 0x63, 0x49, 0xa9, 0x49, 0xf0,
0x4a, 0x37, 0x4a, 0x7d, 0x4a, 0xc4, 0x4b, 0x0c, 0x4b, 0x53, 0x4b, 0x9a,
0x4b, 0xe2, 0x4c, 0x2a, 0x4c, 0x72, 0x4c, 0xba, 0x4d, 0x02, 0x4d, 0x4a,
0x4d, 0x93, 0x4d, 0xdc, 0x4e, 0x25, 0x4e, 0x6e, 0x4e, 0xb7, 0x4f, 0x00,
0x4f, 0x49, 0x4f, 0x93, 0x4f, 0xdd, 0x50, 0x27, 0x50, 0x71, 0x50, 0xbb,
0x51, 0x06, 0x51, 0x50, 0x51, 0x9b, 0x51, 0xe6, 0x52, 0x31, 0x52, 0x7c,
0x52, 0xc7, 0x53, 0x13, 0x53, 0x5f, 0x53, 0xaa, 0x53, 0xf6, 0x54, 0x42,
0x54, 0x8f, 0x54, 0xdb, 0x55, 0x28, 0x55, 0x75, 0x55, 0xc2, 0x56, 0x0f,
0x56, 0x5c, 0x56, 0xa9, 0x56, 0xf7, 0x57, 0x44, 0x57, 0x92, 0x57, 0xe0,
0x58, 0x2f, 0x58, 0x7d, 0x58, 0xcb, 0x59, 0x1a, 0x59, 0x69, 0x59, 0xb8,
0x5a, 0x07, 0x5a, 0x56, 0x5a, 0xa6, 0x5a, 0xf5, 0x5b, 0x45, 0x5b, 0x95,
0x5b, 0xe5, 0x5c, 0x35, 0x5c, 0x86, 0x5c, 0xd6, 0x5d, 0x27, 0x5d, 0x78,
0x5d, 0xc9, 0x5e, 0x1a, 0x5e, 0x6c, 0x5e, 0xbd, 0x5f, 0x0f, 0x5f, 0x61,
0x5f, 0xb3, 0x60, 0x05, 0x60, 0x57, 0x60, 0xaa, 0x60, 0xfc, 0x61, 0x4f,
0x61, 0xa2, 0x61, 0xf5, 0x62, 0x49, 0x62, 0x9c, 0x62, 0xf0, 0x63, 0x43,
0x63, 0x97, 0x63, 0xeb, 0x64, 0x40, 0x64, 0x94, 0x64, 0xe9, 0x65, 0x3d,
0x65, 0x92, 0x65, 0xe7, 0x66, 0x3d, 0x66, 0x92, 0x66, 0xe8, 0x67, 0x3d,
0x67, 0x93, 0x67, 0xe9, 0x68, 0x3f, 0x68, 0x96, 0x68, 0xec, 0x69, 0x43,
0x69, 0x9a, 0x69, 0xf1, 0x6a, 0x48, 0x6a, 0x9f, 0x6a, 0xf7, 0x6b, 0x4f,
0x6b, 0xa7, 0x6b, 0xff, 0x6c, 0x57, 0x6c, 0xaf, 0x6d, 0x08, 0x6d, 0x60,
0x6d, 0xb9, 0x6e, 0x12, 0x6e, 0x6b, 0x6e, 0xc4, 0x6f, 0x1e, 0x6f, 0x78,
0x6f, 0xd1, 0x70, 0x2b, 0x70, 0x86, 0x70, 0xe0, 0x71, 0x3a, 0x71, 0x95,
0x71, 0xf0, 0x72, 0x4b, 0x72, 0xa6, 0x73, 0x01, 0x73, 0x5d, 0x73, 0xb8,
0x74, 0x14, 0x74, 0x70, 0x74, 0xcc, 0x75, 0x28, 0x75, 0x85, 0x75, 0xe1,
0x76, 0x3e, 0x76, 0x9b, 0x76, 0xf8, 0x77, 0x56, 0x77, 0xb3, 0x78, 0x11,
0x78, 0x6e, 0x78, 0xcc, 0x79, 0x2a, 0x79, 0x89, 0x79, 0xe7, 0x7a, 0x46,
0x7a, 0xa5, 0x7b, 0x04, 0x7b, 0x63, 0x7b, 0xc2, 0x7c, 0x21, 0x7c, 0x81,
0x7c, 0xe1, 0x7d, 0x41, 0x7d, 0xa1, 0x7e, 0x01, 0x7e, 0x62, 0x7e, 0xc2,
0x7f, 0x23, 0x7f, 0x84, 0x7f, 0xe5, 0x80, 0x47, 0x80, 0xa8, 0x81, 0x0a,
0x81, 0x6b, 0x81, 0xcd, 0x82, 0x30, 0x82, 0x92, 0x82, 0xf4, 0x83, 0x57,
0x83, 0xba, 0x84, 0x1d, 0x84, 0x80, 0x84, 0xe3, 0x85, 0x47, 0x85, 0xab,
0x86, 0x0e, 0x86, 0x72, 0x86, 0xd7, 0x87, 0x3b, 0x87, 0x9f, 0x88, 0x04,
0x88, 0x69, 0x88, 0xce, 0x89, 0x33, 0x89, 0x99, 0x89, 0xfe, 0x8a, 0x64,
0x8a, 0xca, 0x8b, 0x30, 0x8b, 0x96, 0x8b, 0xfc, 0x8c, 0x63, 0x8c, 0xca,
0x8d, 0x31, 0x8d, 0x98, 0x8d, 0xff, 0x8e, 0x66, 0x8e, 0xce, 0x8f, 0x36,
0x8f, 0x9e, 0x90, 0x06, 0x90, 0x6e, 0x90, 0xd6, 0x91, 0x3f, 0x91, 0xa8,
0x92, 0x11, 0x92, 0x7a, 0x92, 0xe3, 0x93, 0x4d, 0x93, 0xb6, 0x94, 0x20,
0x94, 0x8a, 0x94, 0xf4, 0x95, 0x5f, 0x95, 0xc9, 0x96, 0x34, 0x96, 0x9f,
0x97, 0x0a, 0x97, 0x75, 0x97, 0xe0, 0x98, 0x4c, 0x98, 0xb8, 0x99, 0x24,
0x99, 0x90, 0x99, 0xfc, 0x9a, 0x68, 0x9a, 0xd5, 0x9b, 0x42, 0x9b, 0xaf,
0x9c, 0x1c, 0x9c, 0x89, 0x9c, 0xf7, 0x9d, 0x64, 0x9d, 0xd2, 0x9e, 0x40,
0x9e, 0xae, 0x9f, 0x1d, 0x9f, 0x8b, 0x9f, 0xfa, 0xa0, 0x69, 0xa0, 0xd8,
0xa1, 0x47, 0xa1, 0xb6, 0xa2, 0x26, 0xa2, 0x96, 0xa3, 0x06, 0xa3, 0x76,
0xa3, 0xe6, 0xa4, 0x56, 0xa4, 0xc7, 0xa5, 0x38, 0xa5, 0xa9, 0xa6, 0x1a,
0xa6, 0x8b, 0xa6, 0xfd, 0xa7, 0x6e, 0xa7, 0xe0, 0xa8, 0x52, 0xa8, 0xc4,
0xa9, 0x37, 0xa9, 0xa9, 0xaa, 0x1c, 0xaa, 0x8f, 0xab, 0x02, 0xab, 0x75,
0xab, 0xe9, 0xac, 0x5c, 0xac, 0xd0, 0xad, 0x44, 0xad, 0xb8, 0xae, 0x2d,
0xae, 0xa1, 0xaf, 0x16, 0xaf, 0x8b, 0xb0, 0x00, 0xb0, 0x75, 0xb0, 0xea,
0xb1, 0x60, 0xb1, 0xd6, 0xb2, 0x4b, 0xb2, 0xc2, 0xb3, 0x38, 0xb3, 0xae,
0xb4, 0x25, 0xb4, 0x9c, 0xb5, 0x13, 0xb5, 0x8a, 0xb6, 0x01, 0xb6, 0x79,
0xb6, 0xf0, 0xb7, 0x68, 0xb7, 0xe0, 0xb8, 0x59, 0xb8, 0xd1, 0xb9, 0x4a,
0xb9, 0xc2, 0xba, 0x3b, 0xba, 0xb5, 0xbb, 0x2e, 0xbb, 0xa7, 0xbc, 0x21,
0xbc, 0x9b, 0xbd, 0x15, 0xbd, 0x8f, 0xbe, 0x0a, 0xbe, 0x84, 0xbe, 0xff,
0xbf, 0x7a, 0xbf, 0xf5, 0xc0, 0x70, 0xc0, 0xec, 0xc1, 0x67, 0xc1, 0xe3,
0xc2, 0x5f, 0xc2, 0xdb, 0xc3, 0x58, 0xc3, 0xd4, 0xc4, 0x51, 0xc4, 0xce,
0xc5, 0x4b, 0xc5, 0xc8, 0xc6, 0x46, 0xc6, 0xc3, 0xc7, 0x41, 0xc7, 0xbf,
0xc8, 0x3d, 0xc8, 0xbc, 0xc9, 0x3a, 0xc9, 0xb9, 0xca, 0x38, 0xca, 0xb7,
0xcb, 0x36, 0xcb, 0xb6, 0xcc, 0x35, 0xcc, 0xb5, 0xcd, 0x35, 0xcd, 0xb5,
0xce, 0x36, 0xce, 0xb6, 0xcf, 0x37, 0xcf, 0xb8, 0xd0, 0x39, 0xd0, 0xba,
0xd1, 0x3c, 0xd1, 0xbe, 0xd2, 0x3f, 0xd2, 0xc1, 0xd3, 0x44, 0xd3, 0xc6,
0xd4, 0x49, 0xd4, 0xcb, 0xd5, 0x4e, 0xd5, 0xd1, 0xd6, 0x55, 0xd6, 0xd8,
0xd7, 0x5c, 0xd7, 0xe0, 0xd8, 0x64, 0xd8, 0xe8, 0xd9, 0x6c, 0xd9, 0xf1,
0xda, 0x76, 0xda, 0xfb, 0xdb, 0x80, 0xdc, 0x05, 0xdc, 0x8a, 0xdd, 0x10,
0xdd, 0x96, 0xde, 0x1c, 0xde, 0xa2, 0xdf, 0x29, 0xdf, 0xaf, 0xe0, 0x36,
0xe0, 0xbd, 0xe1, 0x44, 0xe1, 0xcc, 0xe2, 0x53, 0xe2, 0xdb, 0xe3, 0x63,
0xe3, 0xeb, 0xe4, 0x73, 0xe4, 0xfc, 0xe5, 0x84, 0xe6, 0x0d, 0xe6, 0x96,
0xe7, 0x1f, 0xe7, 0xa9, 0xe8, 0x32, 0xe8, 0xbc, 0xe9, 0x46, 0xe9, 0xd0,
0xea, 0x5b, 0xea, 0xe5, 0xeb, 0x70, 0xeb, 0xfb, 0xec, 0x86, 0xed, 0x11,
0xed, 0x9c, 0xee, 0x28, 0xee, 0xb4, 0xef, 0x40, 0xef, 0xcc, 0xf0, 0x58,
0xf0, 0xe5, 0xf1, 0x72, 0xf1, 0xff, 0xf2, 0x8c, 0xf3, 0x19, 0xf3, 0xa7,
0xf4, 0x34, 0xf4, 0xc2, 0xf5, 0x50, 0xf5, 0xde, 0xf6, 0x6d, 0xf6, 0xfb,
0xf7, 0x8a, 0xf8, 0x19, 0xf8, 0xa8, 0xf9, 0x38, 0xf9, 0xc7, 0xfa, 0x57,
0xfa, 0xe7, 0xfb, 0x77, 0xfc, 0x07, 0xfc, 0x98, 0xfd, 0x29, 0xfd, 0xba,
0xfe, 0x4b, 0xfe, 0xdc, 0xff, 0x6d, 0xff, 0xff
};
StringInfo
*profile;
MagickBooleanType
status;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (GetImageProfile(image,"icc") != (const StringInfo *) NULL)
return(MagickFalse);
profile=AcquireStringInfo(sizeof(sRGBProfile));
SetStringInfoDatum(profile,sRGBProfile);
status=SetImageProfile(image,"icc",profile,exception);
profile=DestroyStringInfo(profile);
return(status);
}
| 0 |
[
"CWE-190",
"CWE-125"
] |
ImageMagick
|
d8ab7f046587f2e9f734b687ba7e6e10147c294b
| 36,919,084,919,901,760,000,000,000,000,000,000,000 | 292 |
Improve checking of EXIF profile to prevent integer overflow (bug report from Ibrahim el-sayed)
|
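Overflow-safe EXIF offset validation usually rearranges the comparison so no sum can wrap. A minimal sketch of that check (an assumption about the fix's shape, not ImageMagick's actual parser):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool range_ok(size_t offset, size_t length, size_t profile_size)
{
    /* equivalent to offset + length <= profile_size, without overflow */
    return offset <= profile_size && length <= profile_size - offset;
}

int main(void)
{
    /* a huge offset that would wrap a naive "offset + length" check */
    printf("%d\n", range_ok((size_t)-8, 16, 1024));   /* prints 0 */
    return 0;
}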
static void SFDDumpHintList(FILE *sfd,const char *key, StemInfo *h) {
HintInstance *hi;
if ( h==NULL )
return;
fprintf(sfd, "%s", key );
for ( ; h!=NULL; h=h->next ) {
fprintf(sfd, "%g %g", (double) h->start,(double) h->width );
if ( h->ghost ) putc('G',sfd);
if ( h->where!=NULL ) {
putc('<',sfd);
for ( hi=h->where; hi!=NULL; hi=hi->next )
fprintf(sfd, "%g %g%c", (double) hi->begin, (double) hi->end, hi->next?' ':'>');
}
putc(h->next?' ':'\n',sfd);
}
}
| 0 |
[
"CWE-416"
] |
fontforge
|
048a91e2682c1a8936ae34dbc7bd70291ec05410
| 191,601,069,801,655,200,000,000,000,000,000,000,000 | 17 |
Fix for #4084 Use-after-free (heap) in the SFD_GetFontMetaData() function
Fix for #4086 NULL pointer dereference in the SFDGetSpiros() function
Fix for #4088 NULL pointer dereference in the SFD_AssignLookups() function
Add an empty sf->fontname string if it isn't set, fixing #4089, #4090, and many
other potential issues (many downstream calls to strlen() on the value).
|
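The last point in the message — never leave sf->fontname NULL because downstream code calls strlen() on it — can be shown with a simplified stand-in type (not the real SplineFont structure):

#include <stdlib.h>
#include <string.h>

struct splinefont { char *fontname; };

static void ensure_fontname(struct splinefont *sf)
{
    if (sf->fontname == NULL)
        sf->fontname = strdup("");   /* empty, but always a valid string */
}

int main(void)
{
    struct splinefont sf = { NULL };
    ensure_fontname(&sf);
    size_t n = strlen(sf.fontname);   /* safe even when nothing was set */
    free(sf.fontname);
    return (int)n;
}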
e_ews_config_utils_open_connection_for (ESource *source,
CamelEwsSettings *ews_settings,
const gchar *connect_url,
EEwsConfigUtilTryCredentialsFunc try_credentials_func,
gpointer user_data,
GCancellable *cancellable,
GError **perror)
{
EEwsConnection *conn = NULL;
CamelNetworkSettings *network_settings;
GError *local_error = NULL;
g_return_val_if_fail (source != NULL, NULL);
g_return_val_if_fail (ews_settings != NULL, NULL);
network_settings = CAMEL_NETWORK_SETTINGS (ews_settings);
/* use the one from mailer, if there, otherwise open new */
conn = e_ews_connection_find (
connect_url && *connect_url ? connect_url : camel_ews_settings_get_hosturl (ews_settings),
camel_network_settings_get_user (network_settings));
if (conn) {
if (try_credentials_func &&
try_credentials_func (conn, NULL, user_data, cancellable, perror) != E_SOURCE_AUTHENTICATION_ACCEPTED) {
g_clear_object (&conn);
}
return conn;
}
while (!conn && !g_cancellable_is_cancelled (cancellable) && !local_error) {
if (e_ews_connection_utils_get_without_password (ews_settings)) {
ESourceAuthenticationResult result;
gchar *hosturl;
hosturl = camel_ews_settings_dup_hosturl (ews_settings);
conn = e_ews_connection_new (source, connect_url && *connect_url ? connect_url : hosturl, ews_settings);
g_free (hosturl);
e_ews_connection_update_credentials (conn, NULL);
if (try_credentials_func)
result = try_credentials_func (conn, NULL, user_data, cancellable, &local_error);
else
result = e_ews_connection_try_credentials_sync (conn, NULL, NULL, NULL, NULL, cancellable, &local_error);
if (result != E_SOURCE_AUTHENTICATION_ACCEPTED) {
g_clear_object (&conn);
if (result != E_SOURCE_AUTHENTICATION_REJECTED || local_error)
break;
}
}
if (!conn) {
EShell *shell;
TryCredentialsData data;
e_ews_connection_utils_force_off_ntlm_auth_check ();
g_clear_error (&local_error);
shell = e_shell_get_default ();
data.ews_settings = g_object_ref (ews_settings);
data.connect_url = connect_url && *connect_url ? connect_url : NULL;
data.try_credentials_func = try_credentials_func;
data.user_data = user_data;
data.conn = NULL;
e_credentials_prompter_loop_prompt_sync (e_shell_get_credentials_prompter (shell),
source, E_CREDENTIALS_PROMPTER_PROMPT_FLAG_ALLOW_SOURCE_SAVE,
ews_config_utils_try_credentials_sync, &data, cancellable, &local_error);
if (data.conn)
conn = g_object_ref (data.conn);
g_clear_object (&data.ews_settings);
g_clear_object (&data.conn);
}
}
if (local_error)
g_propagate_error (perror, local_error);
return conn;
}
| 0 |
[
"CWE-295"
] |
evolution-ews
|
915226eca9454b8b3e5adb6f2fff9698451778de
| 46,219,290,291,396,540,000,000,000,000,000,000,000 | 84 |
I#27 - SSL Certificates are not validated
This depends on https://gitlab.gnome.org/GNOME/evolution-data-server/commit/6672b8236139bd6ef41ecb915f4c72e2a052dba5 too.
Closes https://gitlab.gnome.org/GNOME/evolution-ews/issues/27
|
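The policy change behind issue #27 is that an unverified TLS peer must fail authentication rather than be silently accepted. A hedged, assumption-level sketch of that rule (the real code threads this through ESource/EEwsConnection, which is omitted here):

#include <stdbool.h>

enum auth_result { AUTH_ACCEPTED, AUTH_REJECTED };

static enum auth_result try_credentials(bool tls_peer_verified)
{
    if (!tls_peer_verified)
        return AUTH_REJECTED;   /* previously this path could be accepted */
    return AUTH_ACCEPTED;
}

int main(void)
{
    return try_credentials(false) == AUTH_REJECTED ? 0 : 1;
}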
void __kprobes sub_preempt_count(int val)
{
#ifdef CONFIG_DEBUG_PREEMPT
/*
* Underflow?
*/
if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
return;
/*
* Is the spinlock portion underflowing?
*/
if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
!(preempt_count() & PREEMPT_MASK)))
return;
#endif
if (preempt_count() == val)
trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
preempt_count() -= val;
}
| 0 |
[
"CWE-703",
"CWE-835"
] |
linux
|
f26f9aff6aaf67e9a430d16c266f91b13a5bff64
| 230,665,976,062,130,400,000,000,000,000,000,000,000 | 20 |
Sched: fix skip_clock_update optimization
idle_balance() drops/retakes rq->lock, leaving the previous task
vulnerable to set_tsk_need_resched(). Clear it after we return
from balancing instead, and in setup_thread_stack() as well, so
no successfully descheduled or never scheduled task has it set.
Need resched confused the skip_clock_update logic, which assumes
that the next call to update_rq_clock() will come nearly immediately
after being set. Make the optimization robust against the waking
a sleeper before it sucessfully deschedules case by checking that
the current task has not been dequeued before setting the flag,
since it is that useless clock update we're trying to save, and
clear unconditionally in schedule() proper instead of conditionally
in put_prev_task().
Signed-off-by: Mike Galbraith <[email protected]>
Reported-by: Bjoern B. Brandenburg <[email protected]>
Tested-by: Yong Zhang <[email protected]>
Signed-off-by: Peter Zijlstra <[email protected]>
Cc: [email protected]
LKML-Reference: <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
|
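The invariant the message restores has two halves: only set the skip flag when the saved clock update is real (the current task is still queued), and clear the flag unconditionally in the scheduler core. A toy model with stand-in types, not the kernel's rq/task_struct:

#include <stdbool.h>

struct rq   { bool skip_clock_update; };
struct task { bool queued; };

static void check_preempt(struct rq *rq, struct task *curr)
{
    /* the update we are trying to save only exists if curr is still
     * on the runqueue; otherwise do not set the flag at all */
    if (curr->queued)
        rq->skip_clock_update = true;
}

static void schedule_core(struct rq *rq)
{
    /* ... pick the next task ... */
    rq->skip_clock_update = false;   /* cleared unconditionally here */
}

int main(void)
{
    struct rq rq = { false };
    struct task t = { true };
    check_preempt(&rq, &t);
    schedule_core(&rq);
    return rq.skip_clock_update;   /* always 0 after schedule_core */
}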
R_API double my_pow(ut64 base, int exp) {
ut8 flag = 0;
ut64 res = 1;
if (exp < 0) {
flag = 1;
exp *= -1;
}
while (exp) {
if (exp & 1) {
res *= base;
}
exp >>= 1;
base *= base;
IFDBG eprintf("Result: %"PFMT64d ", base: %"PFMT64d ", exp: %d\n", res, base, exp);
}
if (flag == 0) {
return 1.0 * res;
}
return (1.0 / res);
}
| 0 |
[
"CWE-787"
] |
radare2
|
9650e3c352f675687bf6c6f65ff2c4a3d0e288fa
| 132,065,880,712,765,940,000,000,000,000,000,000,000 | 20 |
Fix oobread segfault in java arith8.class ##crash
* Reported by Cen Zhang via huntr.dev
|
ippTimeToDate(time_t t) /* I - Time in seconds */
{
struct tm *unixdate; /* UNIX date/time info */
ipp_uchar_t *date = _cupsGlobals()->ipp_date;
/* RFC-2579 date/time data */
/*
* RFC-2579 date/time format is:
*
* Byte(s) Description
* ------- -----------
* 0-1 Year (0 to 65535)
* 2 Month (1 to 12)
* 3 Day (1 to 31)
* 4 Hours (0 to 23)
* 5 Minutes (0 to 59)
* 6 Seconds (0 to 60, 60 = "leap second")
* 7 Deciseconds (0 to 9)
* 8 +/- UTC
* 9 UTC hours (0 to 11)
* 10 UTC minutes (0 to 59)
*/
unixdate = gmtime(&t);
unixdate->tm_year += 1900;
date[0] = (ipp_uchar_t)(unixdate->tm_year >> 8);
date[1] = (ipp_uchar_t)(unixdate->tm_year);
date[2] = (ipp_uchar_t)(unixdate->tm_mon + 1);
date[3] = (ipp_uchar_t)unixdate->tm_mday;
date[4] = (ipp_uchar_t)unixdate->tm_hour;
date[5] = (ipp_uchar_t)unixdate->tm_min;
date[6] = (ipp_uchar_t)unixdate->tm_sec;
date[7] = 0;
date[8] = '+';
date[9] = 0;
date[10] = 0;
return (date);
}
| 0 |
[
"CWE-120"
] |
cups
|
f24e6cf6a39300ad0c3726a41a4aab51ad54c109
| 100,678,836,465,821,180,000,000,000,000,000,000,000 | 41 |
Fix multiple security/disclosure issues:
- CVE-2019-8696 and CVE-2019-8675: Fixed SNMP buffer overflows (rdar://51685251)
- Fixed IPP buffer overflow (rdar://50035411)
- Fixed memory disclosure issue in the scheduler (rdar://51373853)
- Fixed DoS issues in the scheduler (rdar://51373929)
|
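The buffer-overflow fixes listed above come down to bounded copies. A minimal sketch of the discipline (illustrative only; not the actual CUPS SNMP or IPP code):

#include <stdio.h>

static void copy_bounded(char *dst, size_t dstsize, const char *src)
{
    /* snprintf always NUL-terminates and never writes past dstsize */
    snprintf(dst, dstsize, "%s", src);
}

int main(void)
{
    char buf[4];
    copy_bounded(buf, sizeof(buf), "overlong input");
    puts(buf);   /* "ove": truncated instead of overflowed */
    return 0;
}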
jbd2_get_transaction(journal_t *journal, transaction_t *transaction)
{
transaction->t_journal = journal;
transaction->t_state = T_RUNNING;
transaction->t_start_time = ktime_get();
transaction->t_tid = journal->j_transaction_sequence++;
transaction->t_expires = jiffies + journal->j_commit_interval;
spin_lock_init(&transaction->t_handle_lock);
atomic_set(&transaction->t_updates, 0);
atomic_set(&transaction->t_outstanding_credits,
atomic_read(&journal->j_reserved_credits));
atomic_set(&transaction->t_handle_count, 0);
INIT_LIST_HEAD(&transaction->t_inode_list);
INIT_LIST_HEAD(&transaction->t_private_list);
/* Set up the commit timer for the new transaction. */
journal->j_commit_timer.expires = round_jiffies_up(transaction->t_expires);
add_timer(&journal->j_commit_timer);
J_ASSERT(journal->j_running_transaction == NULL);
journal->j_running_transaction = transaction;
transaction->t_max_wait = 0;
transaction->t_start = jiffies;
transaction->t_requested = 0;
return transaction;
}
| 0 |
[
"CWE-787"
] |
linux
|
e09463f220ca9a1a1ecfda84fcda658f99a1f12a
| 295,856,512,080,542,840,000,000,000,000,000,000,000 | 27 |
jbd2: don't mark block as modified if the handle is out of credits
Do not set the b_modified flag in a block's journal head until after
we're sure that jbd2_journal_dirty_metadata() will not abort with an
error due to there not being enough space reserved in the jbd2 handle.
Otherwise, future attempts to modify the buffer may lead a large
number of spurious errors and warnings.
This addresses CVE-2018-10883.
https://bugzilla.kernel.org/show_bug.cgi?id=200071
Signed-off-by: Theodore Ts'o <[email protected]>
Cc: [email protected]
|
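The ordering fix can be reduced to: verify the handle's credits before mutating the journal head. A stand-in sketch (simplified types, not the real jbd2 structures):

#include <errno.h>
#include <stdbool.h>

struct jh { bool b_modified; };

static int dirty_metadata(struct jh *jh, int credits_left)
{
    if (credits_left <= 0)
        return -ENOSPC;        /* abort before touching jh at all */
    jh->b_modified = true;     /* set only once success is guaranteed */
    return 0;
}

int main(void)
{
    struct jh jh = { false };
    (void)dirty_metadata(&jh, 0);
    return jh.b_modified;      /* 0: failed call left no stale state */
}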
bool Scanner::include(const std::string &filename)
{
// get name of the current file (before unreading)
DASSERT(!files.empty());
const std::string &parent = files.back()->escaped_name;
// unread buffer tail: we'll return to it later
// In the buffer nested files go before outer files. In the file stack,
// however, outer files go before nested files (nested are at the top).
// We want to break from the unreading cycle early, therefore we go in
// reverse order of file offsets in buffer and break as soon as the end
// offset is less than cursor (current position).
for (size_t i = 0; i < files.size(); ++i) {
Input *in = files[i];
if (in->so >= cur) {
// unread whole fragment
fseek(in->file, in->so - in->eo, SEEK_CUR);
in->so = in->eo = ENDPOS;
}
else if (in->eo >= cur) {
// fragment on the boundary, unread partially
fseek(in->file, cur - in->eo, SEEK_CUR);
in->eo = cur - 1;
}
else {
// the rest has been consumed already
break;
}
}
// open new file and place it at the top of the stack
if (!open(filename, &parent)) {
return false;
}
// refill buffer (discard everything up to cursor, clear EOF)
lim = cur = mar = ctx = tok = ptr = pos = bot + BSIZE;
eof = NULL;
return fill(BSIZE);
}
| 0 |
[
"CWE-787"
] |
re2c
|
c4603ba5ce229db83a2a4fb93e6d4b4e3ec3776a
| 29,081,583,238,915,933,000,000,000,000,000,000,000 | 40 |
Fix crash in lexer refill (reported by Agostino Sarubbo).
The crash happened in a rare case of a very long lexeme that doesn't fit
into the buffer, forcing buffer reallocation.
The crash was caused by an incorrect calculation of the shift offset
(it was smaller than necessary). As a consequence, the data from buffer
start and up to the beginning of the current lexeme was not discarded
(as it should have been), resulting in less free space for new data than
expected.
|
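The shift-offset arithmetic the fix corrects looks roughly like the sketch below: everything before the start of the current lexeme (tok) may be discarded, and the live tail is moved to the buffer start. Variable names echo the commit message, but the code is an illustration, not re2c's:

#include <stdio.h>
#include <string.h>

static size_t refill_shift(char *bot, char **tok, char **cur)
{
    size_t keep  = (size_t)(*cur - *tok);   /* live bytes to preserve */
    size_t shift = (size_t)(*tok - bot);    /* full discardable prefix */
    memmove(bot, *tok, keep);
    *tok = bot;
    *cur = bot + keep;
    return shift;                           /* free space gained */
}

int main(void)
{
    char buf[16] = "consumed|lexeme";
    char *tok = buf + 9, *cur = buf + 15;
    size_t freed = refill_shift(buf, &tok, &cur);
    printf("%zu %.*s\n", freed, (int)(cur - buf), buf);   /* 9 lexeme */
    return 0;
}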
int32_t ByteArray::Get(int32_t index) {
if (index < 0 || index >= Length())
return -1;
return InternalGet(index) & 0xff;
}
| 0 |
[
"CWE-119",
"CWE-703"
] |
sfntly
|
c56b85408bab232efd7e650f0994272a174e3b92
| 94,767,916,537,540,140,000,000,000,000,000,000,000 | 5 |
Add a bounds check to ByteArray::Get().
|
void CLASS linear_table(unsigned len)
{
int i;
if (len > 0x10000)
len = 0x10000;
else if(len < 1)
return;
read_shorts(curve, len);
for (i = len; i < 0x10000; i++)
curve[i] = curve[i - 1];
maximum = curve[len < 0x1000 ? 0xfff : len - 1];
}
| 0 |
[
"CWE-190"
] |
LibRaw
|
4554e24ce24beaef5d0ef48372801cfd91039076
| 317,880,333,959,918,970,000,000,000,000,000,000,000 | 12 |
parse_qt: possible integer overflow
|
static bool getsockopt_needs_rtnl(int optname)
{
switch (optname) {
case IP_MSFILTER:
case MCAST_MSFILTER:
return true;
}
return false;
}
| 0 |
[
"CWE-476",
"CWE-284"
] |
linux
|
34b2cef20f19c87999fff3da4071e66937db9644
| 267,412,408,575,202,260,000,000,000,000,000,000,000 | 9 |
ipv4: keep skb->dst around in presence of IP options
Andrey Konovalov got crashes in __ip_options_echo() when a NULL skb->dst
is accessed.
ipv4_pktinfo_prepare() should not drop the dst if (evil) IP options
are present.
We could refine the test to the presence of ts_needtime or srr,
but IP options are not often used, so let's be conservative.
Thanks to syzkaller team for finding this bug.
Fixes: d826eb14ecef ("ipv4: PKTINFO doesnt need dst reference")
Signed-off-by: Eric Dumazet <[email protected]>
Reported-by: Andrey Konovalov <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
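The conservative rule in the message — keep skb->dst whenever IP options are present — maps to a simple conditional. Stand-in types below, not the kernel's sk_buff:

#include <stddef.h>

struct ip_opts { unsigned optlen; };
struct pkt     { struct ip_opts opt; void *dst; };

static void pktinfo_prepare(struct pkt *p)
{
    if (p->opt.optlen == 0)
        p->dst = NULL;   /* safe to drop: nothing will echo options */
    /* otherwise keep dst so __ip_options_echo() can dereference it */
}

int main(void)
{
    int route;
    struct pkt p = { { 4 }, &route };
    pktinfo_prepare(&p);
    return p.dst == NULL;   /* 0: dst kept because options are present */
}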
static int session_process_altsvc_frame(nghttp2_session *session) {
nghttp2_inbound_frame *iframe = &session->iframe;
nghttp2_frame *frame = &iframe->frame;
nghttp2_frame_unpack_altsvc_payload(
&frame->ext, nghttp2_get_uint16(iframe->sbuf.pos), iframe->lbuf.pos,
nghttp2_buf_len(&iframe->lbuf));
/* nghttp2_frame_unpack_altsvc_payload steals buffer from
iframe->lbuf */
nghttp2_buf_wrap_init(&iframe->lbuf, NULL, 0);
return nghttp2_session_on_altsvc_received(session, frame);
}
| 0 |
[] |
nghttp2
|
0a6ce87c22c69438ecbffe52a2859c3a32f1620f
| 101,853,864,712,945,730,000,000,000,000,000,000,000 | 14 |
Add nghttp2_option_set_max_outbound_ack
|
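Typical usage of the new option caps how many outgoing ACK-style frames (PING ACK, SETTINGS ACK) may queue up, so a flooding peer cannot exhaust memory. The signature is assumed from the public nghttp2 option API; build against libnghttp2 to compile:

#include <stddef.h>
#include <nghttp2/nghttp2.h>

static nghttp2_option *make_option(void)
{
    nghttp2_option *opt = NULL;
    if (nghttp2_option_new(&opt) != 0)
        return NULL;
    /* refuse to queue more than 1000 pending ACKs */
    nghttp2_option_set_max_outbound_ack(opt, 1000);
    return opt;
}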
txt_add_fragment(gx_device_txtwrite_t *tdev, textw_text_enum_t *penum)
{
text_list_entry_t *unsorted_entry, *t;
/* Create a duplicate entry for the unsorted list */
unsorted_entry = (text_list_entry_t *)gs_malloc(tdev->memory->stable_memory, 1,
sizeof(text_list_entry_t), "txtwrite alloc sorted text state");
if (!unsorted_entry)
return gs_note_error(gs_error_VMerror);
/* Calculate the start and end points of the text */
penum->text_state->start.x = fixed2float(penum->origin.x);
penum->text_state->start.y = fixed2float(penum->origin.y);
penum->text_state->end.x = penum->text_state->start.x + penum->returned.total_width.x;
penum->text_state->end.y = penum->text_state->start.y + penum->returned.total_width.y;
penum->text_state->Unicode_Text_Size = penum->TextBufferIndex;
*unsorted_entry = *(penum->text_state);
/* Update the saved text state with the accumulated Unicode data */
/* The working buffer (penum->TextBuffer) is freed in the text_release method */
penum->text_state->Unicode_Text = (unsigned short *)gs_malloc(tdev->memory->stable_memory,
penum->TextBufferIndex, sizeof(unsigned short), "txtwrite alloc text buffer");
if (!penum->text_state->Unicode_Text)
return gs_note_error(gs_error_VMerror);
memcpy(penum->text_state->Unicode_Text, penum->TextBuffer, penum->TextBufferIndex * sizeof(unsigned short));
penum->text_state->Widths = (float *)gs_malloc(tdev->memory->stable_memory,
penum->TextBufferIndex, sizeof(float), "txtwrite alloc widths array");
if (!penum->text_state->Widths)
return gs_note_error(gs_error_VMerror);
memcpy(penum->text_state->Widths, penum->Widths, penum->TextBufferIndex * sizeof(float));
unsorted_entry->Unicode_Text = (unsigned short *)gs_malloc(tdev->memory->stable_memory,
penum->TextBufferIndex, sizeof(unsigned short), "txtwrite alloc sorted text buffer");
if (!unsorted_entry->Unicode_Text)
return gs_note_error(gs_error_VMerror);
memcpy(unsorted_entry->Unicode_Text, penum->TextBuffer, penum->TextBufferIndex * sizeof(unsigned short));
unsorted_entry->Widths = (float *)gs_malloc(tdev->memory->stable_memory,
penum->TextBufferIndex, sizeof(float), "txtwrite alloc widths array");
if (!unsorted_entry->Widths)
return gs_note_error(gs_error_VMerror);
memcpy(unsorted_entry->Widths, penum->Widths, penum->TextBufferIndex * sizeof(float));
unsorted_entry->FontName = (char *)gs_malloc(tdev->memory->stable_memory,
(strlen(penum->text_state->FontName) + 1), sizeof(unsigned char), "txtwrite alloc sorted text buffer");
if (!unsorted_entry->FontName)
return gs_note_error(gs_error_VMerror);
memcpy(unsorted_entry->FontName, penum->text_state->FontName, strlen(penum->text_state->FontName) * sizeof(unsigned char));
unsorted_entry->FontName[strlen(penum->text_state->FontName)] = 0x00;
/* First add one entry to the unsorted list */
if (!tdev->PageData.unsorted_text_list) {
tdev->PageData.unsorted_text_list = unsorted_entry;
unsorted_entry->next = unsorted_entry->previous = NULL;
} else {
t = tdev->PageData.unsorted_text_list;
while (t->next)
t = t->next;
t->next = unsorted_entry;
unsorted_entry->next = NULL;
unsorted_entry->previous = t;
}
/* Then add the other entry to the sorted list */
return txt_add_sorted_fragment(tdev, penum);
}
| 0 |
[
"CWE-476"
] |
ghostpdl
|
407c98a38c3a6ac1681144ed45cc2f4fc374c91f
| 309,275,222,713,538,170,000,000,000,000,000,000,000 | 68 |
txtwrite - guard against using GS_NO_GLYPH to retrieve Unicode values
Bug 701822 "Segmentation fault at psi/iname.c:296 in names_index_ref"
Avoid using a glyph with the value GS_NO_GLYPH to retrieve a glyph
name or Unicode code point from the glyph ID, as this is not a valid
ID.
|
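Editor's note: the commit message above says the fix is to reject the GS_NO_GLYPH sentinel before it reaches the glyph-name/Unicode lookup. A minimal hedged sketch of that guard in C; gs_glyph, GS_NO_GLYPH's exact value, and lookup_unicode are assumptions standing in for the real Ghostscript interfaces, not the actual patch:

typedef unsigned long gs_glyph;          /* assumed: glyph IDs are integers */
#define GS_NO_GLYPH ((gs_glyph)~0UL)     /* assumed sentinel for "no glyph" */

/* Hypothetical lookup helper standing in for the names_index_ref() path. */
static int lookup_unicode(gs_glyph glyph, unsigned short *out)
{
    *out = (unsigned short)glyph;        /* stub body for the sketch */
    return 0;
}

/* Guarded wrapper: never hand the sentinel to the lookup. */
static int safe_lookup_unicode(gs_glyph glyph, unsigned short *out)
{
    if (glyph == GS_NO_GLYPH)
        return -1;                       /* not a valid ID; caller falls back */
    return lookup_unicode(glyph, out);
}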
int g_dhcpv6_client_set_ia(GDHCPClient *dhcp_client, int index,
int code, uint32_t *T1, uint32_t *T2,
bool add_iaaddr, const char *ia_na)
{
if (code == G_DHCPV6_IA_TA) {
uint8_t ia_options[4];
put_iaid(dhcp_client, index, ia_options);
g_dhcp_client_set_request(dhcp_client, G_DHCPV6_IA_TA);
g_dhcpv6_client_set_send(dhcp_client, G_DHCPV6_IA_TA,
ia_options, sizeof(ia_options));
} else if (code == G_DHCPV6_IA_NA) {
struct in6_addr addr;
g_dhcp_client_set_request(dhcp_client, G_DHCPV6_IA_NA);
/*
* If the caller has specified the IPv6 address it wishes
* to use (ia_na != NULL and address is valid), then send
* the address to the server.
* If caller did not specify the address (ia_na == NULL) and
* if the current address is not set, then we should not send
* the address sub-option.
*/
if (add_iaaddr && ((!ia_na &&
!IN6_IS_ADDR_UNSPECIFIED(&dhcp_client->ia_na))
|| (ia_na &&
inet_pton(AF_INET6, ia_na, &addr) == 1))) {
#define IAADDR_LEN (16+4+4)
uint8_t ia_options[4+4+4+2+2+IAADDR_LEN];
if (ia_na)
memcpy(&dhcp_client->ia_na, &addr,
sizeof(struct in6_addr));
put_iaid(dhcp_client, index, ia_options);
if (T1) {
ia_options[4] = *T1 >> 24;
ia_options[5] = *T1 >> 16;
ia_options[6] = *T1 >> 8;
ia_options[7] = *T1;
} else
memset(&ia_options[4], 0x00, 4);
if (T2) {
ia_options[8] = *T2 >> 24;
ia_options[9] = *T2 >> 16;
ia_options[10] = *T2 >> 8;
ia_options[11] = *T2;
} else
memset(&ia_options[8], 0x00, 4);
create_iaaddr(dhcp_client, &ia_options[12],
IAADDR_LEN);
g_dhcpv6_client_set_send(dhcp_client, G_DHCPV6_IA_NA,
ia_options, sizeof(ia_options));
} else {
uint8_t ia_options[4+4+4];
put_iaid(dhcp_client, index, ia_options);
memset(&ia_options[4], 0x00, 4); /* T1 (4 bytes) */
memset(&ia_options[8], 0x00, 4); /* T2 (4 bytes) */
g_dhcpv6_client_set_send(dhcp_client, G_DHCPV6_IA_NA,
ia_options, sizeof(ia_options));
}
} else
return -EINVAL;
return 0;
}
| 0 |
[] |
connman
|
a74524b3e3fad81b0fd1084ffdf9f2ea469cd9b1
| 294,703,024,338,176,870,000,000,000,000,000,000,000 | 77 |
gdhcp: Avoid leaking stack data via uninitialized variable
Fixes: CVE-2021-26676
|
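Editor's note: CVE-2021-26676 above is the classic "partially filled stack buffer sent to the wire" bug; the usual mitigation is to zero the option buffer before any field is written, so unset bytes cannot leak stale stack contents. A hedged sketch (the buffer layout mirrors the code above; the function itself is illustrative, not the actual patch):

#include <string.h>
#include <stdint.h>

#define IAADDR_LEN (16 + 4 + 4)

/* Sketch of the option-building step: zero the whole buffer up front so
 * bytes the code never explicitly writes are deterministically zero. */
static void build_ia_na_options(uint8_t out[4 + 4 + 4 + 2 + 2 + IAADDR_LEN])
{
    uint8_t ia_options[4 + 4 + 4 + 2 + 2 + IAADDR_LEN];

    memset(ia_options, 0, sizeof(ia_options));  /* the essential line */

    /* ... fill IAID, T1, T2 and the IAADDR sub-option as in the code
     * above; anything left untouched can no longer leak stack data ... */
    memcpy(out, ia_options, sizeof(ia_options));
}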
GF_Err gf_isom_set_cts_packing(GF_ISOFile *the_file, u32 trackNumber, Bool unpack)
{
GF_Err e;
GF_Err stbl_repackCTS(GF_CompositionOffsetBox *ctts);
GF_Err stbl_unpackCTS(GF_SampleTableBox *stbl);
GF_SampleTableBox *stbl;
GF_TrackBox *trak = gf_isom_get_track_from_file(the_file, trackNumber);
if (!trak) return GF_BAD_PARAM;
stbl = trak->Media->information->sampleTable;
if (unpack) {
if (!stbl->CompositionOffset) {
stbl->CompositionOffset = (GF_CompositionOffsetBox *) gf_isom_box_new_parent(&stbl->child_boxes, GF_ISOM_BOX_TYPE_CTTS);
if (!stbl->CompositionOffset) return GF_OUT_OF_MEM;
}
e = stbl_unpackCTS(stbl);
} else {
if (!stbl->CompositionOffset) return GF_OK;
e = stbl_repackCTS(stbl->CompositionOffset);
}
if (e) return e;
return SetTrackDuration(trak);
}
| 0 |
[
"CWE-476"
] |
gpac
|
ebfa346eff05049718f7b80041093b4c5581c24e
| 247,251,770,704,113,720,000,000,000,000,000,000,000 | 24 |
fixed #1706
|
static void gotoxay(struct vc_data *vc, int new_x, int new_y)
{
gotoxy(vc, new_x, vc->vc_decom ? (vc->vc_top + new_y) : new_y);
}
| 0 |
[
"CWE-125"
] |
linux
|
3c4e0dff2095c579b142d5a0693257f1c58b4804
| 306,165,377,031,881,700,000,000,000,000,000,000,000 | 4 |
vt: Disable KD_FONT_OP_COPY
It's buggy:
On Fri, Nov 06, 2020 at 10:30:08PM +0800, Minh Yuan wrote:
> We recently discovered a slab-out-of-bounds read in fbcon in the latest
> kernel ( v5.10-rc2 for now ). The root cause of this vulnerability is that
> "fbcon_do_set_font" did not handle "vc->vc_font.data" and
> "vc->vc_font.height" correctly, and the patch
> <https://lkml.org/lkml/2020/9/27/223> for VT_RESIZEX can't handle this
> issue.
>
> Specifically, we use KD_FONT_OP_SET to set a small font.data for tty6, and
> use KD_FONT_OP_SET again to set a large font.height for tty1. After that,
> we use KD_FONT_OP_COPY to assign tty6's vc_font.data to tty1's vc_font.data
> in "fbcon_do_set_font", while tty1 retains the original larger
> height. Obviously, this will cause an out-of-bounds read, because we can
> access a smaller vc_font.data with a larger vc_font.height.
Further there was only one user ever.
- Android's loadfont, busybox and console-tools only ever use OP_GET
and OP_SET
- fbset documentation only mentions the kernel cmdline font: option,
not anything else.
- systemd used OP_COPY before release 232 published in Nov 2016
Now unfortunately the crucial report seems to have gone down with
gmane, and the commit message doesn't say much. But the pull request
hints at OP_COPY being broken
https://github.com/systemd/systemd/pull/3651
So in other words, this never worked, and the only project which
foolishly every tried to use it, realized that rather quickly too.
Instead of trying to fix security issues here on dead code by adding
missing checks, fix the entire thing by removing the functionality.
Note that systemd code using the OP_COPY function ignored the return
value, so it doesn't matter what we're doing here really - just in
case a lone server somewhere happens to be extremely unlucky and
running an affected old version of systemd. The relevant code from
font_copy_to_all_vcs() in systemd was:
/* copy font from active VT, where the font was uploaded to */
cfo.op = KD_FONT_OP_COPY;
cfo.height = vcs.v_active-1; /* tty1 == index 0 */
(void) ioctl(vcfd, KDFONTOP, &cfo);
Note this just disables the ioctl, garbage collecting the now unused
callbacks is left for -next.
v2: Tetsuo found the old mail, which allowed me to find it on another
archive. Add the link too.
Acked-by: Peilin Ye <[email protected]>
Reported-by: Minh Yuan <[email protected]>
References: https://lists.freedesktop.org/archives/systemd-devel/2016-June/036935.html
References: https://github.com/systemd/systemd/pull/3651
Cc: Greg KH <[email protected]>
Cc: Peilin Ye <[email protected]>
Cc: Tetsuo Handa <[email protected]>
Signed-off-by: Daniel Vetter <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Greg Kroah-Hartman <[email protected]>
|
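Editor's note: had KD_FONT_OP_COPY been kept, the check implied by the report above is a consistency test between the source font's allocated data and the destination's geometry before copying. A hedged, illustrative sketch; struct demo_font and the one-byte-per-row glyph layout are assumptions, not the kernel's actual vc_font structures:

#include <errno.h>
#include <string.h>
#include <stddef.h>

/* Illustrative font record; assumes 8-pixel-wide glyphs stored one byte
 * per row, the classic console font layout. */
struct demo_font {
    unsigned char *data;
    unsigned int height;
    unsigned int charcount;
    size_t datasize;             /* bytes actually allocated for data */
};

/* Refuse the copy when the destination's geometry would read past the
 * source allocation -- the smaller-data/larger-height mismatch above. */
static int font_copy_checked(struct demo_font *dst,
                             const struct demo_font *src)
{
    size_t need = (size_t)dst->charcount * dst->height;

    if (src->datasize < need)
        return -EINVAL;          /* would be an out-of-bounds read */
    memcpy(dst->data, src->data, need);
    return 0;
}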
TEST_F(GrpcHealthCheckerImplTest, GoAwayErrorProbeInProgress) {
// FailureType::Network will be issued; it will render the host unhealthy only if unhealthy_threshold
// is reached.
setupHCWithUnhealthyThreshold(1);
expectSingleHealthcheck(HealthTransition::Changed);
EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _));
EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true));
// GOAWAY with non-NO_ERROR code will result in a healthcheck failure
// and the connection closing.
test_sessions_[0]->codec_client_->raiseGoAway(Http::GoAwayErrorCode::Other);
EXPECT_TRUE(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagGet(
Host::HealthFlag::FAILED_ACTIVE_HC));
EXPECT_EQ(Host::Health::Unhealthy,
cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health());
}
| 0 |
[
"CWE-476"
] |
envoy
|
9b1c3962172a972bc0359398af6daa3790bb59db
| 73,792,627,718,021,860,000,000,000,000,000,000,000 | 17 |
healthcheck: fix grpc inline removal crashes (#749)
Signed-off-by: Matt Klein <[email protected]>
Signed-off-by: Pradeep Rao <[email protected]>
|
int ssl3_send_server_key_exchange(SSL *s)
{
#ifndef OPENSSL_NO_RSA
unsigned char *q;
int j,num;
RSA *rsa;
unsigned char md_buf[MD5_DIGEST_LENGTH+SHA_DIGEST_LENGTH];
unsigned int u;
#endif
#ifndef OPENSSL_NO_DH
DH *dh=NULL,*dhp;
#endif
#ifndef OPENSSL_NO_ECDH
EC_KEY *ecdh=NULL, *ecdhp;
unsigned char *encodedPoint = NULL;
int encodedlen = 0;
int curve_id = 0;
BN_CTX *bn_ctx = NULL;
#endif
EVP_PKEY *pkey;
unsigned char *p,*d;
int al,i;
unsigned long type;
int n;
CERT *cert;
BIGNUM *r[4];
int nr[4],kn;
BUF_MEM *buf;
EVP_MD_CTX md_ctx;
EVP_MD_CTX_init(&md_ctx);
if (s->state == SSL3_ST_SW_KEY_EXCH_A)
{
type=s->s3->tmp.new_cipher->algorithms & SSL_MKEY_MASK;
cert=s->cert;
buf=s->init_buf;
r[0]=r[1]=r[2]=r[3]=NULL;
n=0;
#ifndef OPENSSL_NO_RSA
if (type & SSL_kRSA)
{
rsa=cert->rsa_tmp;
if ((rsa == NULL) && (s->cert->rsa_tmp_cb != NULL))
{
rsa=s->cert->rsa_tmp_cb(s,
SSL_C_IS_EXPORT(s->s3->tmp.new_cipher),
SSL_C_EXPORT_PKEYLENGTH(s->s3->tmp.new_cipher));
if(rsa == NULL)
{
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,SSL_R_ERROR_GENERATING_TMP_RSA_KEY);
goto f_err;
}
RSA_up_ref(rsa);
cert->rsa_tmp=rsa;
}
if (rsa == NULL)
{
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,SSL_R_MISSING_TMP_RSA_KEY);
goto f_err;
}
r[0]=rsa->n;
r[1]=rsa->e;
s->s3->tmp.use_rsa_tmp=1;
}
else
#endif
#ifndef OPENSSL_NO_DH
if (type & SSL_kEDH)
{
dhp=cert->dh_tmp;
if ((dhp == NULL) && (s->cert->dh_tmp_cb != NULL))
dhp=s->cert->dh_tmp_cb(s,
SSL_C_IS_EXPORT(s->s3->tmp.new_cipher),
SSL_C_EXPORT_PKEYLENGTH(s->s3->tmp.new_cipher));
if (dhp == NULL)
{
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,SSL_R_MISSING_TMP_DH_KEY);
goto f_err;
}
if (s->s3->tmp.dh != NULL)
{
DH_free(dh);
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE, ERR_R_INTERNAL_ERROR);
goto err;
}
if ((dh=DHparams_dup(dhp)) == NULL)
{
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,ERR_R_DH_LIB);
goto err;
}
s->s3->tmp.dh=dh;
if ((dhp->pub_key == NULL ||
dhp->priv_key == NULL ||
(s->options & SSL_OP_SINGLE_DH_USE)))
{
if(!DH_generate_key(dh))
{
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,
ERR_R_DH_LIB);
goto err;
}
}
else
{
dh->pub_key=BN_dup(dhp->pub_key);
dh->priv_key=BN_dup(dhp->priv_key);
if ((dh->pub_key == NULL) ||
(dh->priv_key == NULL))
{
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,ERR_R_DH_LIB);
goto err;
}
}
r[0]=dh->p;
r[1]=dh->g;
r[2]=dh->pub_key;
}
else
#endif
#ifndef OPENSSL_NO_ECDH
if (type & SSL_kECDHE)
{
const EC_GROUP *group;
ecdhp=cert->ecdh_tmp;
if ((ecdhp == NULL) && (s->cert->ecdh_tmp_cb != NULL))
{
ecdhp=s->cert->ecdh_tmp_cb(s,
SSL_C_IS_EXPORT(s->s3->tmp.new_cipher),
SSL_C_EXPORT_PKEYLENGTH(s->s3->tmp.new_cipher));
}
if (ecdhp == NULL)
{
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,SSL_R_MISSING_TMP_ECDH_KEY);
goto f_err;
}
if (s->s3->tmp.ecdh != NULL)
{
EC_KEY_free(s->s3->tmp.ecdh);
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE, ERR_R_INTERNAL_ERROR);
goto err;
}
/* Duplicate the ECDH structure. */
if (ecdhp == NULL)
{
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,ERR_R_ECDH_LIB);
goto err;
}
if (!EC_KEY_up_ref(ecdhp))
{
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,ERR_R_ECDH_LIB);
goto err;
}
ecdh = ecdhp;
s->s3->tmp.ecdh=ecdh;
if ((EC_KEY_get0_public_key(ecdh) == NULL) ||
(EC_KEY_get0_private_key(ecdh) == NULL) ||
(s->options & SSL_OP_SINGLE_ECDH_USE))
{
if(!EC_KEY_generate_key(ecdh))
{
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,ERR_R_ECDH_LIB);
goto err;
}
}
if (((group = EC_KEY_get0_group(ecdh)) == NULL) ||
(EC_KEY_get0_public_key(ecdh) == NULL) ||
(EC_KEY_get0_private_key(ecdh) == NULL))
{
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,ERR_R_ECDH_LIB);
goto err;
}
if (SSL_C_IS_EXPORT(s->s3->tmp.new_cipher) &&
(EC_GROUP_get_degree(group) > 163))
{
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,SSL_R_ECGROUP_TOO_LARGE_FOR_CIPHER);
goto err;
}
/* XXX: For now, we only support ephemeral ECDH
* keys over named (not generic) curves. For
* supported named curves, curve_id is non-zero.
*/
if ((curve_id =
nid2curve_id(EC_GROUP_get_curve_name(group)))
== 0)
{
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,SSL_R_UNSUPPORTED_ELLIPTIC_CURVE);
goto err;
}
/* Encode the public key.
* First check the size of encoding and
* allocate memory accordingly.
*/
encodedlen = EC_POINT_point2oct(group,
EC_KEY_get0_public_key(ecdh),
POINT_CONVERSION_UNCOMPRESSED,
NULL, 0, NULL);
encodedPoint = (unsigned char *)
OPENSSL_malloc(encodedlen*sizeof(unsigned char));
bn_ctx = BN_CTX_new();
if ((encodedPoint == NULL) || (bn_ctx == NULL))
{
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,ERR_R_MALLOC_FAILURE);
goto err;
}
encodedlen = EC_POINT_point2oct(group,
EC_KEY_get0_public_key(ecdh),
POINT_CONVERSION_UNCOMPRESSED,
encodedPoint, encodedlen, bn_ctx);
if (encodedlen == 0)
{
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,ERR_R_ECDH_LIB);
goto err;
}
BN_CTX_free(bn_ctx); bn_ctx=NULL;
/* XXX: For now, we only support named (not
* generic) curves in ECDH ephemeral key exchanges.
* In this situation, we need four additional bytes
* to encode the entire ServerECDHParams
* structure.
*/
n = 4 + encodedlen;
/* We'll generate the serverKeyExchange message
* explicitly so we can set these to NULLs
*/
r[0]=NULL;
r[1]=NULL;
r[2]=NULL;
r[3]=NULL;
}
else
#endif /* !OPENSSL_NO_ECDH */
#ifndef OPENSSL_NO_PSK
if (type & SSL_kPSK)
{
/* reserve size for record length and PSK identity hint*/
n+=2+strlen(s->ctx->psk_identity_hint);
}
else
#endif /* !OPENSSL_NO_PSK */
{
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,SSL_R_UNKNOWN_KEY_EXCHANGE_TYPE);
goto f_err;
}
for (i=0; r[i] != NULL; i++)
{
nr[i]=BN_num_bytes(r[i]);
n+=2+nr[i];
}
if (!(s->s3->tmp.new_cipher->algorithms & SSL_aNULL)
&& !(s->s3->tmp.new_cipher->algorithms & SSL_kPSK))
{
if ((pkey=ssl_get_sign_pkey(s,s->s3->tmp.new_cipher))
== NULL)
{
al=SSL_AD_DECODE_ERROR;
goto f_err;
}
kn=EVP_PKEY_size(pkey);
}
else
{
pkey=NULL;
kn=0;
}
if (!BUF_MEM_grow_clean(buf,n+4+kn))
{
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,ERR_LIB_BUF);
goto err;
}
d=(unsigned char *)s->init_buf->data;
p= &(d[4]);
for (i=0; r[i] != NULL; i++)
{
s2n(nr[i],p);
BN_bn2bin(r[i],p);
p+=nr[i];
}
#ifndef OPENSSL_NO_ECDH
if (type & SSL_kECDHE)
{
/* XXX: For now, we only support named (not generic) curves.
* In this situation, the serverKeyExchange message has:
* [1 byte CurveType], [2 byte CurveName]
* [1 byte length of encoded point], followed by
* the actual encoded point itself
*/
*p = NAMED_CURVE_TYPE;
p += 1;
*p = 0;
p += 1;
*p = curve_id;
p += 1;
*p = encodedlen;
p += 1;
memcpy((unsigned char*)p,
(unsigned char *)encodedPoint,
encodedlen);
OPENSSL_free(encodedPoint);
p += encodedlen;
}
#endif
#ifndef OPENSSL_NO_PSK
if (type & SSL_kPSK)
{
/* copy PSK identity hint */
s2n(strlen(s->ctx->psk_identity_hint), p);
strncpy(p, s->ctx->psk_identity_hint, strlen(s->ctx->psk_identity_hint));
p+=strlen(s->ctx->psk_identity_hint);
}
#endif
/* not anonymous */
if (pkey != NULL)
{
/* n is the length of the params, they start at &(d[4])
* and p points to the space at the end. */
#ifndef OPENSSL_NO_RSA
if (pkey->type == EVP_PKEY_RSA)
{
q=md_buf;
j=0;
for (num=2; num > 0; num--)
{
EVP_DigestInit_ex(&md_ctx,(num == 2)
?s->ctx->md5:s->ctx->sha1, NULL);
EVP_DigestUpdate(&md_ctx,&(s->s3->client_random[0]),SSL3_RANDOM_SIZE);
EVP_DigestUpdate(&md_ctx,&(s->s3->server_random[0]),SSL3_RANDOM_SIZE);
EVP_DigestUpdate(&md_ctx,&(d[4]),n);
EVP_DigestFinal_ex(&md_ctx,q,
(unsigned int *)&i);
q+=i;
j+=i;
}
if (RSA_sign(NID_md5_sha1, md_buf, j,
&(p[2]), &u, pkey->pkey.rsa) <= 0)
{
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,ERR_LIB_RSA);
goto err;
}
s2n(u,p);
n+=u+2;
}
else
#endif
#if !defined(OPENSSL_NO_DSA)
if (pkey->type == EVP_PKEY_DSA)
{
/* lets do DSS */
EVP_SignInit_ex(&md_ctx,EVP_dss1(), NULL);
EVP_SignUpdate(&md_ctx,&(s->s3->client_random[0]),SSL3_RANDOM_SIZE);
EVP_SignUpdate(&md_ctx,&(s->s3->server_random[0]),SSL3_RANDOM_SIZE);
EVP_SignUpdate(&md_ctx,&(d[4]),n);
if (!EVP_SignFinal(&md_ctx,&(p[2]),
(unsigned int *)&i,pkey))
{
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,ERR_LIB_DSA);
goto err;
}
s2n(i,p);
n+=i+2;
}
else
#endif
#if !defined(OPENSSL_NO_ECDSA)
if (pkey->type == EVP_PKEY_EC)
{
/* let's do ECDSA */
EVP_SignInit_ex(&md_ctx,EVP_ecdsa(), NULL);
EVP_SignUpdate(&md_ctx,&(s->s3->client_random[0]),SSL3_RANDOM_SIZE);
EVP_SignUpdate(&md_ctx,&(s->s3->server_random[0]),SSL3_RANDOM_SIZE);
EVP_SignUpdate(&md_ctx,&(d[4]),n);
if (!EVP_SignFinal(&md_ctx,&(p[2]),
(unsigned int *)&i,pkey))
{
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,ERR_LIB_ECDSA);
goto err;
}
s2n(i,p);
n+=i+2;
}
else
#endif
{
/* Is this error check actually needed? */
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,SSL_R_UNKNOWN_PKEY_TYPE);
goto f_err;
}
}
*(d++)=SSL3_MT_SERVER_KEY_EXCHANGE;
l2n3(n,d);
/* we should now have things packed up, so lets send
* it off */
s->init_num=n+4;
s->init_off=0;
}
s->state = SSL3_ST_SW_KEY_EXCH_B;
EVP_MD_CTX_cleanup(&md_ctx);
return(ssl3_do_write(s,SSL3_RT_HANDSHAKE));
f_err:
ssl3_send_alert(s,SSL3_AL_FATAL,al);
err:
#ifndef OPENSSL_NO_ECDH
if (encodedPoint != NULL) OPENSSL_free(encodedPoint);
BN_CTX_free(bn_ctx);
#endif
EVP_MD_CTX_cleanup(&md_ctx);
return(-1);
}
| 0 |
[] |
openssl
|
36ca4ba63d083da6f9d4598f18f17a8c32c8eca2
| 58,695,478,510,777,300,000,000,000,000,000,000,000 | 442 |
Implement the Supported Point Formats Extension for ECC ciphersuites
Submitted by: Douglas Stebila
|
PHP_FUNCTION(imagecopymergegray)
{
zval *SIM, *DIM;
zend_long SX, SY, SW, SH, DX, DY, PCT;
gdImagePtr im_dst, im_src;
int srcH, srcW, srcY, srcX, dstY, dstX, pct;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "rrlllllll", &DIM, &SIM, &DX, &DY, &SX, &SY, &SW, &SH, &PCT) == FAILURE) {
return;
}
if ((im_dst = (gdImagePtr)zend_fetch_resource(Z_RES_P(DIM), "Image", le_gd)) == NULL) {
RETURN_FALSE;
}
if ((im_src = (gdImagePtr)zend_fetch_resource(Z_RES_P(SIM), "Image", le_gd)) == NULL) {
RETURN_FALSE;
}
srcX = SX;
srcY = SY;
srcH = SH;
srcW = SW;
dstX = DX;
dstY = DY;
pct = PCT;
gdImageCopyMergeGray(im_dst, im_src, dstX, dstY, srcX, srcY, srcW, srcH, pct);
RETURN_TRUE;
}
| 0 |
[
"CWE-787"
] |
php-src
|
28022c9b1fd937436ab67bb3d61f652c108baf96
| 222,487,620,919,897,500,000,000,000,000,000,000,000 | 30 |
Fix bug#72697 - select_colors write out-of-bounds
(cherry picked from commit b6f13a5ef9d6280cf984826a5de012a32c396cd4)
Conflicts:
ext/gd/gd.c
|
void uwsgi_chown(char *filename, char *owner) {
uid_t new_uid = -1;
uid_t new_gid = -1;
struct group *new_group = NULL;
struct passwd *new_user = NULL;
char *colon = strchr(owner, ':');
if (colon) {
colon[0] = 0;
}
if (is_a_number(owner)) {
new_uid = atoi(owner);
}
else {
new_user = getpwnam(owner);
if (!new_user) {
uwsgi_log("unable to find user %s\n", owner);
exit(1);
}
new_uid = new_user->pw_uid;
}
if (colon) {
colon[0] = ':';
if (is_a_number(colon + 1)) {
new_gid = atoi(colon + 1);
}
else {
new_group = getgrnam(colon + 1);
if (!new_group) {
uwsgi_log("unable to find group %s\n", colon + 1);
exit(1);
}
new_gid = new_group->gr_gid;
}
}
if (chown(filename, new_uid, new_gid)) {
uwsgi_error("chown()");
exit(1);
}
}
| 0 |
[
"CWE-119",
"CWE-703",
"CWE-787"
] |
uwsgi
|
cb4636f7c0af2e97a4eef7a3cdcbd85a71247bfe
| 318,696,793,245,371,400,000,000,000,000,000,000,000 | 46 |
improve uwsgi_expand_path() to sanitize input, avoiding stack corruption and a potential security issue
|
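Editor's note: the commit message above names uwsgi_expand_path(); the general shape of such a fix is to bound-check the input before it is copied into a fixed-size stack buffer. A hedged sketch of the pattern, not the actual uwsgi code:

#include <limits.h>
#include <stdlib.h>
#include <string.h>

#ifndef PATH_MAX
#define PATH_MAX 4096            /* assumption for non-Linux targets */
#endif

/* Expand 'dir' into an absolute path, rejecting inputs that could not
 * fit the fixed-size buffer realpath() writes into. */
static char *expand_path_checked(const char *dir)
{
    char resolved[PATH_MAX];

    if (dir == NULL || strlen(dir) >= PATH_MAX)
        return NULL;             /* reject instead of corrupting the stack */
    if (realpath(dir, resolved) == NULL)
        return NULL;
    return strdup(resolved);
}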
bool slaveOk() const override {
return false;
}
| 0 |
[
"CWE-20"
] |
mongo
|
5c7c6729c37514760fd34da462b6961a2e385417
| 279,478,886,689,775,800,000,000,000,000,000,000,000 | 3 |
SERVER-38275 ban explain with UUID
|
pipe_poll(struct file *filp, poll_table *wait)
{
__poll_t mask;
struct pipe_inode_info *pipe = filp->private_data;
unsigned int head, tail;
/* Epoll has some historical nasty semantics, this enables them */
pipe->poll_usage = 1;
/*
* Reading pipe state only -- no need for acquiring the semaphore.
*
* But because this is racy, the code has to add the
* entry to the poll table _first_ ..
*/
if (filp->f_mode & FMODE_READ)
poll_wait(filp, &pipe->rd_wait, wait);
if (filp->f_mode & FMODE_WRITE)
poll_wait(filp, &pipe->wr_wait, wait);
/*
* .. and only then can you do the racy tests. That way,
* if something changes and you got it wrong, the poll
* table entry will wake you up and fix it.
*/
head = READ_ONCE(pipe->head);
tail = READ_ONCE(pipe->tail);
mask = 0;
if (filp->f_mode & FMODE_READ) {
if (!pipe_empty(head, tail))
mask |= EPOLLIN | EPOLLRDNORM;
if (!pipe->writers && filp->f_version != pipe->w_counter)
mask |= EPOLLHUP;
}
if (filp->f_mode & FMODE_WRITE) {
if (!pipe_full(head, tail, pipe->max_usage))
mask |= EPOLLOUT | EPOLLWRNORM;
/*
* Most Unices do not set EPOLLERR for FIFOs but on Linux they
* behave exactly like pipes for poll().
*/
if (!pipe->readers)
mask |= EPOLLERR;
}
return mask;
}
| 0 |
[
"CWE-362"
] |
linux
|
189b0ddc245139af81198d1a3637cac74f96e13a
| 125,727,059,116,740,850,000,000,000,000,000,000,000 | 49 |
pipe: Fix missing lock in pipe_resize_ring()
pipe_resize_ring() needs to take the pipe->rd_wait.lock spinlock to
prevent post_one_notification() from trying to insert into the ring
whilst the ring is being replaced.
The occupancy check must be done after the lock is taken, and the lock
must be taken after the new ring is allocated.
The bug can lead to an oops looking something like:
BUG: KASAN: use-after-free in post_one_notification.isra.0+0x62e/0x840
Read of size 4 at addr ffff88801cc72a70 by task poc/27196
...
Call Trace:
post_one_notification.isra.0+0x62e/0x840
__post_watch_notification+0x3b7/0x650
key_create_or_update+0xb8b/0xd20
__do_sys_add_key+0x175/0x340
__x64_sys_add_key+0xbe/0x140
do_syscall_64+0x5c/0xc0
entry_SYSCALL_64_after_hwframe+0x44/0xae
Reported by Selim Enes Karaduman @Enesdex working with Trend Micro Zero
Day Initiative.
Fixes: c73be61cede5 ("pipe: Add general notification queue support")
Reported-by: [email protected] # ZDI-CAN-17291
Signed-off-by: David Howells <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
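Editor's note: the ordering the commit message prescribes -- allocate the new ring first, then take pipe->rd_wait.lock, then do the occupancy check and the swap -- can be sketched generically. This is a userspace analogue with pthreads, not the kernel patch:

#include <stdlib.h>
#include <pthread.h>
#include <errno.h>

struct ring {
    void **slots;
    unsigned int size, head, tail;   /* head/tail are free-running counters */
    pthread_mutex_t lock;            /* stands in for pipe->rd_wait.lock */
};

static int ring_resize(struct ring *r, unsigned int new_size)
{
    /* 1. Allocate outside the lock: allocation may block or fail. */
    void **n = calloc(new_size, sizeof(*n));
    if (!n)
        return -ENOMEM;

    pthread_mutex_lock(&r->lock);

    /* 2. Occupancy check only after the lock is held, so a concurrent
     *    producer cannot insert into the ring mid-swap. */
    unsigned int used = r->head - r->tail;
    if (used > new_size) {
        pthread_mutex_unlock(&r->lock);
        free(n);
        return -EBUSY;
    }

    /* 3. Copy live entries and swap rings while still locked. */
    for (unsigned int i = 0; i < used; i++)
        n[i] = r->slots[(r->tail + i) % r->size];
    free(r->slots);
    r->slots = n;
    r->size = new_size;
    r->tail = 0;
    r->head = used;

    pthread_mutex_unlock(&r->lock);
    return 0;
}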
GF_Err mp4s_on_child_box(GF_Box *s, GF_Box *a)
{
GF_MPEGSampleEntryBox *ptr = (GF_MPEGSampleEntryBox *)s;
switch (a->type) {
case GF_ISOM_BOX_TYPE_ESDS:
if (ptr->esd) ERROR_ON_DUPLICATED_BOX(a, ptr)
ptr->esd = (GF_ESDBox *)a;
break;
}
return GF_OK;
}
| 0 |
[
"CWE-787"
] |
gpac
|
388ecce75d05e11fc8496aa4857b91245007d26e
| 293,639,417,012,632,600,000,000,000,000,000,000,000 | 11 |
fixed #1587
|
static int xudc_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
struct xusb_ep *ep = to_xusb_ep(_ep);
struct xusb_req *req = to_xusb_req(_req);
struct xusb_udc *udc = ep->udc;
unsigned long flags;
spin_lock_irqsave(&udc->lock, flags);
/* Make sure it's actually queued on this endpoint */
list_for_each_entry(req, &ep->queue, queue) {
if (&req->usb_req == _req)
break;
}
if (&req->usb_req != _req) {
spin_unlock_irqrestore(&udc->lock, flags);
return -EINVAL;
}
xudc_done(ep, req, -ECONNRESET);
spin_unlock_irqrestore(&udc->lock, flags);
return 0;
}
| 0 |
[
"CWE-20",
"CWE-129"
] |
linux
|
7f14c7227f342d9932f9b918893c8814f86d2a0d
| 14,814,057,677,754,341,000,000,000,000,000,000,000 | 22 |
USB: gadget: validate endpoint index for xilinx udc
Assure that host may not manipulate the index to point
past endpoint array.
Signed-off-by: Szymon Heidrich <[email protected]>
Cc: stable <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
|
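Editor's note: the fix named above is a plain bounds check on the host-supplied endpoint index before it addresses the endpoint array. A hedged sketch with illustrative types (not the xilinx driver's real structures):

#include <stddef.h>

#define DEMO_NUM_EPS 8

struct demo_ep { int in_use; };
struct demo_udc { struct demo_ep ep[DEMO_NUM_EPS]; };

static struct demo_ep *get_ep_checked(struct demo_udc *udc,
                                      unsigned int index)
{
    /* 'index' comes from the host; never trust it to be in range. */
    if (index >= DEMO_NUM_EPS)
        return NULL;
    return &udc->ep[index];
}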
EXPORTED void appendstrlist(struct strlist **l, char *s)
{
struct strlist **tail = l;
while (*tail) tail = &(*tail)->next;
*tail = (struct strlist *)xmalloc(sizeof(struct strlist));
(*tail)->s = xstrdup(s);
(*tail)->p = 0;
(*tail)->next = 0;
}
| 0 |
[
"CWE-732"
] |
cyrus-imapd
|
621f9e41465b521399f691c241181300fab55995
| 101,492,862,638,406,870,000,000,000,000,000,000,000 | 11 |
annotate: don't allow everyone to write shared server entries
|
static int xennet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
struct netfront_info *np = netdev_priv(dev);
if (np->broken)
return -ENODEV;
switch (xdp->command) {
case XDP_SETUP_PROG:
return xennet_xdp_set(dev, xdp->prog, xdp->extack);
default:
return -EINVAL;
}
}
| 0 |
[] |
linux
|
f63c2c2032c2e3caad9add3b82cc6e91c376fd26
| 5,836,469,699,959,389,000,000,000,000,000,000,000 | 14 |
xen-netfront: restore __skb_queue_tail() positioning in xennet_get_responses()
The commit referenced below moved the invocation past the "next" label,
without any explanation. In fact this allows misbehaving backends undue
control over the domain the frontend runs in, as earlier detected errors
require the skb to not be freed (it may be retained for later processing
via xennet_move_rx_slot(), or it may simply be unsafe to have it freed).
This is CVE-2022-33743 / XSA-405.
Fixes: 6c5aa6fc4def ("xen networking: add basic XDP support for xen-netfront")
Signed-off-by: Jan Beulich <[email protected]>
Reviewed-by: Juergen Gross <[email protected]>
Signed-off-by: Juergen Gross <[email protected]>
|
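Editor's note: the positioning bug described above is about where the queueing step sits relative to error handling. A schematic sketch of the restored ordering; queue_tail and rx_checks are stand-ins for __skb_queue_tail() and the RX validation, and the control flow is heavily simplified:

struct pkt;
struct queue;

extern void queue_tail(struct queue *q, struct pkt *p);
extern int  rx_checks(struct pkt *p);

/* The buffer goes on the temporary list *before* the checks, so error
 * paths that bail out still find it there (e.g. to requeue the slot)
 * instead of the packet being freed or reused while errors are pending. */
static int get_responses(struct queue *q, struct pkt *p)
{
    queue_tail(q, p);            /* restored position: before the checks */

    if (rx_checks(p) != 0)
        return -1;               /* p stays queued; it must not be freed */

    return 0;
}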
inline bool Row_definition_list::eq_name(const Spvar_definition *def,
const LEX_CSTRING *name) const
{
return def->field_name.length == name->length && my_strcasecmp(system_charset_info, def->field_name.str, name->str) == 0;
}
| 0 |
[
"CWE-416",
"CWE-703"
] |
server
|
08c7ab404f69d9c4ca6ca7a9cf7eec74c804f917
| 5,020,663,537,975,311,000,000,000,000,000,000,000 | 5 |
MDEV-24176 Server crashes after insert in the table with virtual
column generated using date_format() and if()
vcol_info->expr is allocated on expr_arena at the parsing stage. Since
the expr item is allocated on expr_arena, all its containee items must be
allocated on expr_arena too. Otherwise fix_session_expr() will
encounter a prematurely freed item.
When the table is reopened from the cache, vcol_info contains a stale
expression. We refresh the expression via TABLE::vcol_fix_exprs() but
first we must prepare a proper context (Vcol_expr_context) which meets
some requirements:
1. As noted above, the expr update must be done on expr_arena as there may
be new items created. This was a bug in fix_session_expr_for_read() and
was just not reproduced because there was no second refix. Now refix is done
in more cases, so it does reproduce. Tests affected: vcol.binlog
2. Also name resolution context must be narrowed to the single table.
Tested by: vcol.update main.default vcol.vcol_syntax gcol.gcol_bugfixes
3. sql_mode must be clean and not fail expr update.
sql_mode such as MODE_NO_BACKSLASH_ESCAPES, MODE_NO_ZERO_IN_DATE, etc
must not affect vcol expression update. If the table was created
successfully any further evaluation must not fail. Tests affected:
main.func_like
Reviewed by: Sergei Golubchik <[email protected]>
|
void setRequestedApplicationProtocols(const std::vector<absl::string_view>& protocols) override {
application_protocols_.clear();
for (const auto& protocol : protocols) {
application_protocols_.emplace_back(protocol);
}
}
| 0 |
[
"CWE-400"
] |
envoy
|
542f84c66e9f6479bc31c6f53157c60472b25240
| 12,871,008,308,887,418,000,000,000,000,000,000,000 | 6 |
overload: Runtime configurable global connection limits (#147)
Signed-off-by: Tony Allen <[email protected]>
|
static av_always_inline void snow_horizontal_compose_lift_lead_out(int i, IDWTELEM * dst, IDWTELEM * src, IDWTELEM * ref, int width, int w, int lift_high, int mul, int add, int shift){
for(; i<w; i++){
dst[i] = src[i] - ((mul * (ref[i] + ref[i + 1]) + add) >> shift);
}
if((width^lift_high)&1){
dst[w] = src[w] - ((mul * 2 * ref[w] + add) >> shift);
}
}
| 0 |
[
"CWE-703"
] |
FFmpeg
|
61d59703c91869f4e5cdacd8d6be52f8b89d4ba4
| 128,321,084,506,162,730,000,000,000,000,000,000,000 | 9 |
avcodec/snow: split block clipping checks
Fixes out of array read
Fixes: d4476f68ca1c1c57afbc45806f581963-asan_heap-oob_2266b27_8607_cov_4044577381_snow_chroma_bug.avi
Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <[email protected]>
|
ssize_t enc_untrusted_send(int sockfd, const void *buf, size_t len, int flags) {
return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_sendto,
sockfd, buf, len, flags,
/*dest_addr=*/nullptr,
/*addrlen=*/0);
}
| 0 |
[
"CWE-125"
] |
asylo
|
b1d120a2c7d7446d2cc58d517e20a1b184b82200
| 329,247,936,179,992,520,000,000,000,000,000,000,000 | 6 |
Check for return size in enc_untrusted_read
Check that the returned size does not exceed the requested size. The returned result and
content still cannot be trusted, but this is expected behavior when not
using a secure file system.
PiperOrigin-RevId: 333827386
Change-Id: I0bdec0aec9356ea333dc8c647eba5d2772875f29
|
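Editor's note: the check described above applies to any enclave exit that reports how many bytes the untrusted host wrote into a caller buffer. A hedged sketch; untrusted_read is a hypothetical stand-in for the dispatch call, and aborting versus returning an error is a policy choice:

#include <stddef.h>
#include <stdlib.h>
#include <sys/types.h>

/* Hypothetical untrusted dispatch that fills 'buf' and returns a size. */
extern ssize_t untrusted_read(int fd, void *buf, size_t len);

static ssize_t enclave_read_checked(int fd, void *buf, size_t len)
{
    ssize_t ret = untrusted_read(fd, buf, len);

    /* The host controls 'ret'; a value larger than 'len' would make the
     * caller read past the bytes actually written (or past the buffer). */
    if (ret > (ssize_t)len)
        abort();                 /* or return an error, per policy */
    return ret;
}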
static void sdma_field_flush(struct work_struct *work)
{
unsigned long flags;
struct sdma_engine *sde =
container_of(work, struct sdma_engine, flush_worker);
write_seqlock_irqsave(&sde->head_lock, flags);
if (!__sdma_running(sde))
sdma_flush(sde);
write_sequnlock_irqrestore(&sde->head_lock, flags);
}
| 0 |
[
"CWE-400",
"CWE-401"
] |
linux
|
34b3be18a04ecdc610aae4c48e5d1b799d8689f6
| 165,575,515,232,253,210,000,000,000,000,000,000,000 | 11 |
RDMA/hfi1: Prevent memory leak in sdma_init
In sdma_init, if rhashtable_init fails, the allocated memory for
tmp_sdma_rht should be released.
Fixes: 5a52a7acf7e2 ("IB/hfi1: NULL pointer dereference when freeing rhashtable")
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Navid Emamdoost <[email protected]>
Acked-by: Dennis Dalessandro <[email protected]>
Signed-off-by: Jason Gunthorpe <[email protected]>
|
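Editor's note: the leak fix above is the standard free-on-error-path pattern -- once the init call fails, the just-allocated object must be released before returning. A generic hedged sketch; table_init is an illustrative stand-in for rhashtable_init:

#include <stdlib.h>
#include <errno.h>

struct table { int dummy; };
extern int table_init(struct table *t);   /* stand-in for rhashtable_init */

static int engine_init(struct table **out)
{
    struct table *tmp = malloc(sizeof(*tmp));
    if (!tmp)
        return -ENOMEM;

    if (table_init(tmp) != 0) {
        free(tmp);               /* the fix: don't leak tmp on failure */
        return -EINVAL;
    }

    *out = tmp;
    return 0;
}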
write_applet_private_values_to_gconf (CopyOneSettingValueInfo *info)
{
NMSetting8021x *s_8021x;
g_return_if_fail (info != NULL);
/* Handle values private to the applet that are not supposed to
* be sent to NetworkManager.
*/
s_8021x = NM_SETTING_802_1X (nm_connection_get_setting (info->connection, NM_TYPE_SETTING_802_1X));
if (s_8021x) {
write_ignore_ca_cert_helper (info, NMA_CA_CERT_IGNORE_TAG,
nm_setting_802_1x_get_ca_cert (s_8021x));
write_ignore_ca_cert_helper (info, NMA_PHASE2_CA_CERT_IGNORE_TAG,
nm_setting_802_1x_get_phase2_ca_cert (s_8021x));
/* Binary certificate and key data doesn't get stored in GConf. Instead,
* the path to the certificate gets stored in a special key and the
* certificate is read and stuffed into the setting right before
* the connection is sent to NM
*/
write_one_private_string_value (info, NMA_PATH_CA_CERT_TAG);
write_one_private_string_value (info, NMA_PATH_CLIENT_CERT_TAG);
write_one_private_string_value (info, NMA_PATH_PRIVATE_KEY_TAG);
write_one_private_string_value (info, NMA_PATH_PHASE2_CA_CERT_TAG);
write_one_private_string_value (info, NMA_PATH_PHASE2_CLIENT_CERT_TAG);
write_one_private_string_value (info, NMA_PATH_PHASE2_PRIVATE_KEY_TAG);
write_one_password (info, NMA_PRIVATE_KEY_PASSWORD_TAG);
write_one_password (info, NMA_PHASE2_PRIVATE_KEY_PASSWORD_TAG);
}
}
| 0 |
[
"CWE-310"
] |
network-manager-applet
|
4020594dfbf566f1852f0acb36ad631a9e73a82b
| 300,051,307,808,486,700,000,000,000,000,000,000,000 | 32 |
core: fix CA cert mishandling after cert file deletion (deb #560067) (rh #546793)
If a connection was created with a CA certificate, but the user later
moved or deleted that CA certificate, the applet would simply provide the
connection to NetworkManager without any CA certificate. This could cause
NM to connect to the original network (or a network spoofing the original
network) without verifying the identity of the network as the user
expects.
In the future we can/should do better here by (1) alerting the user that
some connection is now no longer complete by flagging it in the connection
editor or notifying the user somehow, and (2) by using a freakin' cert
store already (not that Linux has one yet).
|
resp_addr_get_rrset(struct resp_addr* addr)
{
return addr ? addr->data : NULL;
}
| 0 |
[
"CWE-190"
] |
unbound
|
02080f6b180232f43b77f403d0c038e9360a460f
| 115,868,760,342,832,380,000,000,000,000,000,000,000 | 4 |
- Fix Integer Overflows in Size Calculations,
reported by X41 D-Sec.
|
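Editor's note: "integer overflows in size calculations" typically means a multiplication or addition that wraps before reaching the allocator. The standard guard, sketched generically (not unbound's actual code):

#include <stdint.h>
#include <stdlib.h>

/* Overflow-checked nmemb*size+extra allocation; returns NULL on wrap. */
static void *alloc_array_plus(size_t nmemb, size_t size, size_t extra)
{
    if (size != 0 && nmemb > SIZE_MAX / size)
        return NULL;             /* nmemb*size would wrap */
    size_t bytes = nmemb * size;
    if (bytes > SIZE_MAX - extra)
        return NULL;             /* + extra would wrap */
    return malloc(bytes + extra);
}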
void streamPropagateGroupID(client *c, robj *key, streamCG *group, robj *groupname) {
robj *argv[7];
argv[0] = shared.xgroup;
argv[1] = shared.setid;
argv[2] = key;
argv[3] = groupname;
argv[4] = createObjectFromStreamID(&group->last_id);
argv[5] = shared.entriesread;
argv[6] = createStringObjectFromLongLong(group->entries_read);
alsoPropagate(c->db->id,argv,7,PROPAGATE_AOF|PROPAGATE_REPL);
decrRefCount(argv[4]);
decrRefCount(argv[6]);
}
| 0 |
[
"CWE-703",
"CWE-401"
] |
redis
|
4a7a4e42db8ff757cdf3f4a824f66426036034ef
| 37,531,282,854,584,387,000,000,000,000,000,000,000 | 15 |
Fix memory leak in streamGetEdgeID (#10753)
si is initialized by streamIteratorStart(), so we should call
streamIteratorStop() on it when done.
regression introduced in #9127 (redis 7.0)
|
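Editor's note: the leak above is a missing destructor call -- every streamIteratorStart() must be paired with streamIteratorStop(). A hedged sketch of the pairing; the type definitions and signatures below only approximate Redis's t_stream.c internals and should be treated as assumptions:

#include <stddef.h>

/* Minimal stand-ins for Redis stream internals (assumptions). */
typedef struct streamID { unsigned long long ms, seq; } streamID;
typedef struct stream { void *rax; } stream;
typedef struct streamIterator { void *ri; } streamIterator;

extern void streamIteratorStart(streamIterator *si, stream *s,
                                streamID *start, streamID *end, int rev);
extern int  streamIteratorGetID(streamIterator *si, streamID *id,
                                long long *numfields);
extern void streamIteratorStop(streamIterator *si);

/* Fetch the first or last entry ID; note that Start/Stop always pair. */
static int get_edge_id(stream *s, int first, streamID *out)
{
    streamIterator si;
    long long numfields;
    int found;

    streamIteratorStart(&si, s, NULL, NULL, first ? 0 : 1);
    found = streamIteratorGetID(&si, out, &numfields);
    streamIteratorStop(&si);     /* the fix: never leak the iterator */
    return found;
}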
_public_ sd_bus *sd_bus_close_unref(sd_bus *bus) {
if (!bus)
return NULL;
sd_bus_close(bus);
return sd_bus_unref(bus);
}
| 0 |
[
"CWE-416"
] |
systemd
|
1068447e6954dc6ce52f099ed174c442cb89ed54
| 183,066,412,597,020,100,000,000,000,000,000,000,000 | 8 |
sd-bus: introduce API for re-enqueuing incoming messages
When authorizing via PolicyKit we want to process incoming method calls
twice: once to process and figure out that we need PK authentication,
and a second time after we aquired PK authentication to actually execute
the operation. With this new call sd_bus_enqueue_for_read() we have a
way to put an incoming message back into the read queue for this
purpose.
This might have other uses too, for example debugging.
|
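Editor's note: the API the message introduces lets a service push a message back onto the bus read queue so it is dispatched again once asynchronous PolicyKit authorization completes. A hedged usage sketch; the handler flow is heavily simplified relative to a real sd-bus service:

#include <errno.h>
#include <systemd/sd-bus.h>

/* Inside a method handler: authorization is still pending, so put the
 * message back on the read queue and return without replying; it will
 * be dispatched again once the PolicyKit answer arrives. */
static int maybe_defer(sd_bus *bus, sd_bus_message *m,
                       int authorized, int auth_pending)
{
    if (authorized)
        return 0;                /* proceed with the real work */
    if (auth_pending)
        return sd_bus_enqueue_for_read(bus, m);
    return -EACCES;
}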
uint32_t
router_get_advertised_bandwidth_capped(const routerinfo_t *router)
{
uint32_t result = router->bandwidthcapacity;
if (result > router->bandwidthrate)
result = router->bandwidthrate;
if (result > DEFAULT_MAX_BELIEVABLE_BANDWIDTH)
result = DEFAULT_MAX_BELIEVABLE_BANDWIDTH;
return result;
| 0 |
[] |
tor
|
1afc2ed956a35b40dfd1d207652af5b50c295da7
| 150,685,103,315,728,370,000,000,000,000,000,000,000 | 9 |
Fix policies.c instance of the "if (r=(a-b)) return r" pattern
I think this one probably can't underflow, since the input ranges
are small. But let's not tempt fate.
This patch also replaces the "cmp" functions here with just "eq"
functions, since nothing actually checked for anything besides 0 and
nonzero.
Related to 21278.
|
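Editor's note: the pattern the message warns about returns the raw difference of two values as a comparison result; if the subtraction can overflow, the sign of the result lies. The unsafe and safe forms, sketched:

/* Unsafe: the subtraction may overflow (e.g. a=INT_MIN, b=1), which is
 * undefined behavior for signed int and flips the apparent sign. */
static int cmp_unsafe(int a, int b)
{
    return a - b;
}

/* Safe comparison: explicit branches, no arithmetic on the inputs. */
static int cmp_safe(int a, int b)
{
    if (a < b) return -1;
    if (a > b) return 1;
    return 0;
}

/* When callers only ever test for equality, an eq function -- as the
 * patch above switched to -- is clearer still. */
static int eq(int a, int b)
{
    return a == b;
}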
static inline int attributes_need_mount(u32 *bmval)
{
if (bmval[0] & ~(FATTR4_WORD0_RDATTR_ERROR | FATTR4_WORD0_LEASE_TIME))
return 1;
if (bmval[1] & ~FATTR4_WORD1_MOUNTED_ON_FILEID)
return 1;
return 0;
}
| 0 |
[
"CWE-20",
"CWE-129"
] |
linux
|
f961e3f2acae94b727380c0b74e2d3954d0edf79
| 335,671,065,975,038,250,000,000,000,000,000,000,000 | 8 |
nfsd: encoders mustn't use uninitialized values in error cases
In error cases, lgp->lg_layout_type may be out of bounds; so we
shouldn't be using it until after the check of nfserr.
This was seen to crash nfsd threads when the server receives a LAYOUTGET
request with a large layout type.
GETDEVICEINFO has the same problem.
Reported-by: Ari Kauppi <[email protected]>
Reviewed-by: Christoph Hellwig <[email protected]>
Cc: [email protected]
Signed-off-by: J. Bruce Fields <[email protected]>
|
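Editor's note: the rule stated above -- check nfserr before touching request-derived values -- reduces to taking the error branch first, because fields like the layout type are only trustworthy on the success path. A hedged sketch with illustrative names (not nfsd's real encoder):

#include <stdint.h>

#define MAX_LAYOUT_TYPES 4       /* illustrative bound */

struct layoutget { uint32_t lg_layout_type; };

extern void encode_layout(uint32_t type);

/* On error, lg_layout_type may be out of bounds and must not be used
 * to index anything; the error reply carries no layout type at all. */
static void encode_layoutget(int nfserr, const struct layoutget *lgp)
{
    if (nfserr)
        return;

    if (lgp->lg_layout_type < MAX_LAYOUT_TYPES)
        encode_layout(lgp->lg_layout_type);
}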
GF_Err mdat_Size(GF_Box *s)
{
GF_MediaDataBox *ptr = (GF_MediaDataBox *)s;
ptr->size += ptr->dataSize;
return GF_OK;
}
| 0 |
[
"CWE-400",
"CWE-401"
] |
gpac
|
d2371b4b204f0a3c0af51ad4e9b491144dd1225c
| 175,060,773,739,890,500,000,000,000,000,000,000,000 | 6 |
prevent dref memleak on invalid input (#1183)
|
SSL_COMP *ssl3_comp_find(STACK_OF(SSL_COMP) *sk, int n)
{
SSL_COMP *ctmp;
int i,nn;
if ((n == 0) || (sk == NULL)) return(NULL);
nn=sk_SSL_COMP_num(sk);
for (i=0; i<nn; i++)
{
ctmp=sk_SSL_COMP_value(sk,i);
if (ctmp->id == n)
return(ctmp);
}
return(NULL);
}
| 0 |
[] |
openssl
|
edc032b5e3f3ebb1006a9c89e0ae00504f47966f
| 181,869,739,695,390,770,000,000,000,000,000,000,000 | 15 |
Add SRP support.
|
cupsdCloseClient(cupsd_client_t *con) /* I - Client to close */
{
int partial; /* Do partial close for SSL? */
cupsdLogClient(con, CUPSD_LOG_DEBUG, "Closing connection.");
/*
* Flush pending writes before closing...
*/
httpFlushWrite(con->http);
partial = 0;
if (con->pipe_pid != 0)
{
/*
* Stop any CGI process...
*/
cupsdEndProcess(con->pipe_pid, 1);
con->pipe_pid = 0;
}
if (con->file >= 0)
{
cupsdRemoveSelect(con->file);
close(con->file);
con->file = -1;
}
/*
* Close the socket and clear the file from the input set for select()...
*/
if (httpGetFd(con->http) >= 0)
{
cupsArrayRemove(ActiveClients, con);
cupsdSetBusyState();
#ifdef HAVE_SSL
/*
* Shutdown encryption as needed...
*/
if (httpIsEncrypted(con->http))
partial = 1;
#endif /* HAVE_SSL */
if (partial)
{
/*
* Only do a partial close so that the encrypted client gets everything.
*/
httpShutdown(con->http);
cupsdAddSelect(httpGetFd(con->http), (cupsd_selfunc_t)cupsdReadClient,
NULL, con);
cupsdLogClient(con, CUPSD_LOG_DEBUG, "Waiting for socket close.");
}
else
{
/*
* Shut the socket down fully...
*/
cupsdRemoveSelect(httpGetFd(con->http));
httpClose(con->http);
con->http = NULL;
}
}
if (!partial)
{
/*
* Free memory...
*/
cupsdRemoveSelect(httpGetFd(con->http));
httpClose(con->http);
if (con->filename)
{
unlink(con->filename);
cupsdClearString(&con->filename);
}
cupsdClearString(&con->command);
cupsdClearString(&con->options);
cupsdClearString(&con->query_string);
if (con->request)
{
ippDelete(con->request);
con->request = NULL;
}
if (con->response)
{
ippDelete(con->response);
con->response = NULL;
}
if (con->language)
{
cupsLangFree(con->language);
con->language = NULL;
}
#ifdef HAVE_AUTHORIZATION_H
if (con->authref)
{
AuthorizationFree(con->authref, kAuthorizationFlagDefaults);
con->authref = NULL;
}
#endif /* HAVE_AUTHORIZATION_H */
/*
* Re-enable new client connections if we are going back under the
* limit...
*/
if (cupsArrayCount(Clients) == MaxClients)
cupsdResumeListening();
/*
* Compact the list of clients as necessary...
*/
cupsArrayRemove(Clients, con);
free(con);
}
return (partial);
}
| 0 |
[
"CWE-290"
] |
cups
|
afa80cb2b457bf8d64f775bed307588610476c41
| 234,637,688,550,274,280,000,000,000,000,000,000,000 | 140 |
Don't treat "localhost.localdomain" as an allowed replacement for localhost, since it isn't.
|
static void FVToggleCharSelected(FontView *fv,int enc) {
int i, j;
if ( fv->v==NULL || fv->colcnt==0 ) /* Can happen in scripts */
return;
i = enc / fv->colcnt;
j = enc - i*fv->colcnt;
i -= fv->rowoff;
/* Normally we should be checking against fv->rowcnt (rather than <=rowcnt) */
/* but every now and then the WM forces us to use a window size which doesn't */
/* fit our expectations (maximized view) and we must be prepared for half */
/* lines */
if ( i>=0 && i<=fv->rowcnt )
FVDrawGlyph(fv->v,fv,enc,true);
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
fontforge
|
626f751752875a0ddd74b9e217b6f4828713573c
| 255,319,132,886,800,260,000,000,000,000,000,000,000 | 16 |
Warn users before discarding their unsaved scripts (#3852)
* Warn users before discarding their unsaved scripts
This closes #3846.
|
static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
struct loaded_vmcs *vmcs02;
u32 msr_entry_idx;
u32 exit_qual;
vmcs02 = nested_get_current_vmcs02(vmx);
if (!vmcs02)
return -ENOMEM;
enter_guest_mode(vcpu);
if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
vmx_switch_vmcs(vcpu, vmcs02);
vmx_segment_cache_clear(vmx);
if (prepare_vmcs02(vcpu, vmcs12, from_vmentry, &exit_qual)) {
leave_guest_mode(vcpu);
vmx_switch_vmcs(vcpu, &vmx->vmcs01);
nested_vmx_entry_failure(vcpu, vmcs12,
EXIT_REASON_INVALID_STATE, exit_qual);
return 1;
}
nested_get_vmcs12_pages(vcpu, vmcs12);
msr_entry_idx = nested_vmx_load_msr(vcpu,
vmcs12->vm_entry_msr_load_addr,
vmcs12->vm_entry_msr_load_count);
if (msr_entry_idx) {
leave_guest_mode(vcpu);
vmx_switch_vmcs(vcpu, &vmx->vmcs01);
nested_vmx_entry_failure(vcpu, vmcs12,
EXIT_REASON_MSR_LOAD_FAIL, msr_entry_idx);
return 1;
}
/*
* Note no nested_vmx_succeed or nested_vmx_fail here. At this point
* we are no longer running L1, and VMLAUNCH/VMRESUME has not yet
* returned as far as L1 is concerned. It will only return (and set
* the success flag) when L2 exits (see nested_vmx_vmexit()).
*/
return 0;
}
| 0 |
[
"CWE-20",
"CWE-617"
] |
linux
|
3a8b0677fc6180a467e26cc32ce6b0c09a32f9bb
| 50,167,333,725,516,310,000,000,000,000,000,000,000 | 49 |
KVM: VMX: Do not BUG() on out-of-bounds guest IRQ
The value of the guest_irq argument to vmx_update_pi_irte() is
ultimately coming from a KVM_IRQFD API call. Do not BUG() in
vmx_update_pi_irte() if the value is out of bounds. (Especially since
KVM as a whole seems to hang after that.)
Instead, print a message only once if we find that we don't have a
route for a certain IRQ (which can be out-of-bounds or within the
array).
This fixes CVE-2017-1000252.
Fixes: efc644048ecde54 ("KVM: x86: Update IRTE for posted-interrupts")
Signed-off-by: Jan H. Schönherr <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
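Editor's note: the pattern described above -- warn once and fail gracefully instead of BUG() on a guest-controlled index -- looks roughly like this in generic C, with pr_warn_once approximated by a static flag:

#include <stdio.h>
#include <errno.h>

#define NR_IRQ_ROUTES 256        /* illustrative routing-table size */

static int update_irte(unsigned int guest_irq)
{
    static int warned;

    /* guest_irq ultimately comes from a KVM_IRQFD call: validate it
     * instead of asserting, so a guest cannot take down the host. */
    if (guest_irq >= NR_IRQ_ROUTES) {
        if (!warned) {
            fprintf(stderr, "no route for irq %u\n", guest_irq);
            warned = 1;
        }
        return -EINVAL;
    }
    /* ... update the posted-interrupt IRTE for guest_irq ... */
    return 0;
}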
static int mov_init(AVFormatContext *s)
{
MOVMuxContext *mov = s->priv_data;
AVDictionaryEntry *global_tcr = av_dict_get(s->metadata, "timecode", NULL, 0);
int i, ret;
mov->fc = s;
/* Default mode == MP4 */
mov->mode = MODE_MP4;
if (s->oformat) {
if (!strcmp("3gp", s->oformat->name)) mov->mode = MODE_3GP;
else if (!strcmp("3g2", s->oformat->name)) mov->mode = MODE_3GP|MODE_3G2;
else if (!strcmp("mov", s->oformat->name)) mov->mode = MODE_MOV;
else if (!strcmp("psp", s->oformat->name)) mov->mode = MODE_PSP;
else if (!strcmp("ipod",s->oformat->name)) mov->mode = MODE_IPOD;
else if (!strcmp("ismv",s->oformat->name)) mov->mode = MODE_ISM;
else if (!strcmp("f4v", s->oformat->name)) mov->mode = MODE_F4V;
}
if (mov->flags & FF_MOV_FLAG_DELAY_MOOV)
mov->flags |= FF_MOV_FLAG_EMPTY_MOOV;
/* Set the FRAGMENT flag if any of the fragmentation methods are
* enabled. */
if (mov->max_fragment_duration || mov->max_fragment_size ||
mov->flags & (FF_MOV_FLAG_EMPTY_MOOV |
FF_MOV_FLAG_FRAG_KEYFRAME |
FF_MOV_FLAG_FRAG_CUSTOM |
FF_MOV_FLAG_FRAG_EVERY_FRAME))
mov->flags |= FF_MOV_FLAG_FRAGMENT;
/* Set other implicit flags immediately */
if (mov->mode == MODE_ISM)
mov->flags |= FF_MOV_FLAG_EMPTY_MOOV | FF_MOV_FLAG_SEPARATE_MOOF |
FF_MOV_FLAG_FRAGMENT;
if (mov->flags & FF_MOV_FLAG_DASH)
mov->flags |= FF_MOV_FLAG_FRAGMENT | FF_MOV_FLAG_EMPTY_MOOV |
FF_MOV_FLAG_DEFAULT_BASE_MOOF;
if (mov->flags & FF_MOV_FLAG_EMPTY_MOOV && s->flags & AVFMT_FLAG_AUTO_BSF) {
av_log(s, AV_LOG_VERBOSE, "Empty MOOV enabled; disabling automatic bitstream filtering\n");
s->flags &= ~AVFMT_FLAG_AUTO_BSF;
}
if (mov->flags & FF_MOV_FLAG_FASTSTART) {
mov->reserved_moov_size = -1;
}
if (mov->use_editlist < 0) {
mov->use_editlist = 1;
if (mov->flags & FF_MOV_FLAG_FRAGMENT &&
!(mov->flags & FF_MOV_FLAG_DELAY_MOOV)) {
// If we can avoid needing an edit list by shifting the
// tracks, prefer that over (trying to) write edit lists
// in fragmented output.
if (s->avoid_negative_ts == AVFMT_AVOID_NEG_TS_AUTO ||
s->avoid_negative_ts == AVFMT_AVOID_NEG_TS_MAKE_ZERO)
mov->use_editlist = 0;
}
}
if (mov->flags & FF_MOV_FLAG_EMPTY_MOOV &&
!(mov->flags & FF_MOV_FLAG_DELAY_MOOV) && mov->use_editlist)
av_log(s, AV_LOG_WARNING, "No meaningful edit list will be written when using empty_moov without delay_moov\n");
if (!mov->use_editlist && s->avoid_negative_ts == AVFMT_AVOID_NEG_TS_AUTO)
s->avoid_negative_ts = AVFMT_AVOID_NEG_TS_MAKE_ZERO;
/* Clear the omit_tfhd_offset flag if default_base_moof is set;
* if the latter is set that's enough and omit_tfhd_offset doesn't
* add anything extra on top of that. */
if (mov->flags & FF_MOV_FLAG_OMIT_TFHD_OFFSET &&
mov->flags & FF_MOV_FLAG_DEFAULT_BASE_MOOF)
mov->flags &= ~FF_MOV_FLAG_OMIT_TFHD_OFFSET;
if (mov->frag_interleave &&
mov->flags & (FF_MOV_FLAG_OMIT_TFHD_OFFSET | FF_MOV_FLAG_SEPARATE_MOOF)) {
av_log(s, AV_LOG_ERROR,
"Sample interleaving in fragments is mutually exclusive with "
"omit_tfhd_offset and separate_moof\n");
return AVERROR(EINVAL);
}
/* Non-seekable output is ok if using fragmentation. If ism_lookahead
* is enabled, we don't support non-seekable output at all. */
if (!(s->pb->seekable & AVIO_SEEKABLE_NORMAL) &&
(!(mov->flags & FF_MOV_FLAG_FRAGMENT) || mov->ism_lookahead)) {
av_log(s, AV_LOG_ERROR, "muxer does not support non seekable output\n");
return AVERROR(EINVAL);
}
mov->nb_streams = s->nb_streams;
if (mov->mode & (MODE_MP4|MODE_MOV|MODE_IPOD) && s->nb_chapters)
mov->chapter_track = mov->nb_streams++;
if (mov->flags & FF_MOV_FLAG_RTP_HINT) {
for (i = 0; i < s->nb_streams; i++)
if (rtp_hinting_needed(s->streams[i]))
mov->nb_streams++;
}
if ( mov->write_tmcd == -1 && (mov->mode == MODE_MOV || mov->mode == MODE_MP4)
|| mov->write_tmcd == 1) {
/* +1 tmcd track for each video stream with a timecode */
for (i = 0; i < s->nb_streams; i++) {
AVStream *st = s->streams[i];
AVDictionaryEntry *t = global_tcr;
if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
(t || (t=av_dict_get(st->metadata, "timecode", NULL, 0)))) {
AVTimecode tc;
ret = mov_check_timecode_track(s, &tc, i, t->value);
if (ret >= 0)
mov->nb_meta_tmcd++;
}
}
/* check if there is already a tmcd track to remux */
if (mov->nb_meta_tmcd) {
for (i = 0; i < s->nb_streams; i++) {
AVStream *st = s->streams[i];
if (st->codecpar->codec_tag == MKTAG('t','m','c','d')) {
av_log(s, AV_LOG_WARNING, "You requested a copy of the original timecode track "
"so timecode metadata are now ignored\n");
mov->nb_meta_tmcd = 0;
}
}
}
mov->nb_streams += mov->nb_meta_tmcd;
}
// Reserve an extra stream for chapters for the case where chapters
// are written in the trailer
mov->tracks = av_mallocz_array((mov->nb_streams + 1), sizeof(*mov->tracks));
if (!mov->tracks)
return AVERROR(ENOMEM);
if (mov->encryption_scheme_str != NULL && strcmp(mov->encryption_scheme_str, "none") != 0) {
if (strcmp(mov->encryption_scheme_str, "cenc-aes-ctr") == 0) {
mov->encryption_scheme = MOV_ENC_CENC_AES_CTR;
if (mov->encryption_key_len != AES_CTR_KEY_SIZE) {
av_log(s, AV_LOG_ERROR, "Invalid encryption key len %d expected %d\n",
mov->encryption_key_len, AES_CTR_KEY_SIZE);
return AVERROR(EINVAL);
}
if (mov->encryption_kid_len != CENC_KID_SIZE) {
av_log(s, AV_LOG_ERROR, "Invalid encryption kid len %d expected %d\n",
mov->encryption_kid_len, CENC_KID_SIZE);
return AVERROR(EINVAL);
}
} else {
av_log(s, AV_LOG_ERROR, "unsupported encryption scheme %s\n",
mov->encryption_scheme_str);
return AVERROR(EINVAL);
}
}
for (i = 0; i < s->nb_streams; i++) {
AVStream *st= s->streams[i];
MOVTrack *track= &mov->tracks[i];
AVDictionaryEntry *lang = av_dict_get(st->metadata, "language", NULL,0);
track->st = st;
track->par = st->codecpar;
track->language = ff_mov_iso639_to_lang(lang?lang->value:"und", mov->mode!=MODE_MOV);
if (track->language < 0)
track->language = 0;
track->mode = mov->mode;
track->tag = mov_find_codec_tag(s, track);
if (!track->tag) {
av_log(s, AV_LOG_ERROR, "Could not find tag for codec %s in stream #%d, "
"codec not currently supported in container\n",
avcodec_get_name(st->codecpar->codec_id), i);
return AVERROR(EINVAL);
}
/* If hinting of this track is enabled by a later hint track,
* this is updated. */
track->hint_track = -1;
track->start_dts = AV_NOPTS_VALUE;
track->start_cts = AV_NOPTS_VALUE;
track->end_pts = AV_NOPTS_VALUE;
track->dts_shift = AV_NOPTS_VALUE;
if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
if (track->tag == MKTAG('m','x','3','p') || track->tag == MKTAG('m','x','3','n') ||
track->tag == MKTAG('m','x','4','p') || track->tag == MKTAG('m','x','4','n') ||
track->tag == MKTAG('m','x','5','p') || track->tag == MKTAG('m','x','5','n')) {
if (st->codecpar->width != 720 || (st->codecpar->height != 608 && st->codecpar->height != 512)) {
av_log(s, AV_LOG_ERROR, "D-10/IMX must use 720x608 or 720x512 video resolution\n");
return AVERROR(EINVAL);
}
track->height = track->tag >> 24 == 'n' ? 486 : 576;
}
if (mov->video_track_timescale) {
track->timescale = mov->video_track_timescale;
} else {
track->timescale = st->time_base.den;
while(track->timescale < 10000)
track->timescale *= 2;
}
if (st->codecpar->width > 65535 || st->codecpar->height > 65535) {
av_log(s, AV_LOG_ERROR, "Resolution %dx%d too large for mov/mp4\n", st->codecpar->width, st->codecpar->height);
return AVERROR(EINVAL);
}
if (track->mode == MODE_MOV && track->timescale > 100000)
av_log(s, AV_LOG_WARNING,
"WARNING codec timebase is very high. If duration is too long,\n"
"file may not be playable by quicktime. Specify a shorter timebase\n"
"or choose different container.\n");
if (track->mode == MODE_MOV &&
track->par->codec_id == AV_CODEC_ID_RAWVIDEO &&
track->tag == MKTAG('r','a','w',' ')) {
enum AVPixelFormat pix_fmt = track->par->format;
if (pix_fmt == AV_PIX_FMT_NONE && track->par->bits_per_coded_sample == 1)
pix_fmt = AV_PIX_FMT_MONOWHITE;
track->is_unaligned_qt_rgb =
pix_fmt == AV_PIX_FMT_RGB24 ||
pix_fmt == AV_PIX_FMT_BGR24 ||
pix_fmt == AV_PIX_FMT_PAL8 ||
pix_fmt == AV_PIX_FMT_GRAY8 ||
pix_fmt == AV_PIX_FMT_MONOWHITE ||
pix_fmt == AV_PIX_FMT_MONOBLACK;
}
if (track->par->codec_id == AV_CODEC_ID_VP9) {
if (track->mode != MODE_MP4) {
av_log(s, AV_LOG_ERROR, "VP9 only supported in MP4.\n");
return AVERROR(EINVAL);
}
} else if (track->par->codec_id == AV_CODEC_ID_AV1) {
/* spec is not finished, so forbid for now */
av_log(s, AV_LOG_ERROR, "AV1 muxing is currently not supported.\n");
return AVERROR_PATCHWELCOME;
} else if (track->par->codec_id == AV_CODEC_ID_VP8) {
/* altref frames handling is not defined in the spec as of version v1.0,
* so just forbid muxing VP8 streams altogether until a new version does */
av_log(s, AV_LOG_ERROR, "VP8 muxing is currently not supported.\n");
return AVERROR_PATCHWELCOME;
}
} else if (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
track->timescale = st->codecpar->sample_rate;
if (!st->codecpar->frame_size && !av_get_bits_per_sample(st->codecpar->codec_id)) {
av_log(s, AV_LOG_WARNING, "track %d: codec frame size is not set\n", i);
track->audio_vbr = 1;
}else if (st->codecpar->codec_id == AV_CODEC_ID_ADPCM_MS ||
st->codecpar->codec_id == AV_CODEC_ID_ADPCM_IMA_WAV ||
st->codecpar->codec_id == AV_CODEC_ID_ILBC){
if (!st->codecpar->block_align) {
av_log(s, AV_LOG_ERROR, "track %d: codec block align is not set for adpcm\n", i);
return AVERROR(EINVAL);
}
track->sample_size = st->codecpar->block_align;
}else if (st->codecpar->frame_size > 1){ /* assume compressed audio */
track->audio_vbr = 1;
}else{
track->sample_size = (av_get_bits_per_sample(st->codecpar->codec_id) >> 3) * st->codecpar->channels;
}
if (st->codecpar->codec_id == AV_CODEC_ID_ILBC ||
st->codecpar->codec_id == AV_CODEC_ID_ADPCM_IMA_QT) {
track->audio_vbr = 1;
}
if (track->mode != MODE_MOV &&
track->par->codec_id == AV_CODEC_ID_MP3 && track->timescale < 16000) {
if (s->strict_std_compliance >= FF_COMPLIANCE_NORMAL) {
av_log(s, AV_LOG_ERROR, "track %d: muxing mp3 at %dhz is not standard, to mux anyway set strict to -1\n",
i, track->par->sample_rate);
return AVERROR(EINVAL);
} else {
av_log(s, AV_LOG_WARNING, "track %d: muxing mp3 at %dhz is not standard in MP4\n",
i, track->par->sample_rate);
}
}
if (track->par->codec_id == AV_CODEC_ID_FLAC ||
track->par->codec_id == AV_CODEC_ID_OPUS) {
if (track->mode != MODE_MP4) {
av_log(s, AV_LOG_ERROR, "%s only supported in MP4.\n", avcodec_get_name(track->par->codec_id));
return AVERROR(EINVAL);
}
if (s->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
av_log(s, AV_LOG_ERROR,
"%s in MP4 support is experimental, add "
"'-strict %d' if you want to use it.\n",
avcodec_get_name(track->par->codec_id), FF_COMPLIANCE_EXPERIMENTAL);
return AVERROR_EXPERIMENTAL;
}
}
} else if (st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE) {
track->timescale = st->time_base.den;
} else if (st->codecpar->codec_type == AVMEDIA_TYPE_DATA) {
track->timescale = st->time_base.den;
} else {
track->timescale = MOV_TIMESCALE;
}
if (!track->height)
track->height = st->codecpar->height;
/* The ism specific timescale isn't mandatory, but is assumed by
* some tools, such as mp4split. */
if (mov->mode == MODE_ISM)
track->timescale = 10000000;
avpriv_set_pts_info(st, 64, 1, track->timescale);
if (mov->encryption_scheme == MOV_ENC_CENC_AES_CTR) {
ret = ff_mov_cenc_init(&track->cenc, mov->encryption_key,
track->par->codec_id == AV_CODEC_ID_H264, s->flags & AVFMT_FLAG_BITEXACT);
if (ret)
return ret;
}
}
enable_tracks(s);
return 0;
}
| 0 |
[
"CWE-125"
] |
FFmpeg
|
95556e27e2c1d56d9e18f5db34d6f756f3011148
| 2,514,535,081,057,900,500,000,000,000,000,000,000 | 314 |
avformat/movenc: Do not pass AVCodecParameters in avpriv_request_sample
Fixes: out of array read
Fixes: ffmpeg_crash_8.avi
Found-by: Thuan Pham, Marcel Böhme, Andrew Santosa and Alexandru Razvan Caciulescu with AFLSmart
Signed-off-by: Michael Niedermayer <[email protected]>
|
xfs_bmapi_reserve_delalloc(
struct xfs_inode *ip,
int whichfork,
xfs_fileoff_t off,
xfs_filblks_t len,
xfs_filblks_t prealloc,
struct xfs_bmbt_irec *got,
struct xfs_iext_cursor *icur,
int eof)
{
struct xfs_mount *mp = ip->i_mount;
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
xfs_extlen_t alen;
xfs_extlen_t indlen;
int error;
xfs_fileoff_t aoff = off;
/*
* Cap the alloc length. Keep track of prealloc so we know whether to
* tag the inode before we return.
*/
alen = XFS_FILBLKS_MIN(len + prealloc, MAXEXTLEN);
if (!eof)
alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff);
if (prealloc && alen >= len)
prealloc = alen - len;
/* Figure out the extent size, adjust alen */
if (whichfork == XFS_COW_FORK) {
struct xfs_bmbt_irec prev;
xfs_extlen_t extsz = xfs_get_cowextsz_hint(ip);
if (!xfs_iext_peek_prev_extent(ifp, icur, &prev))
prev.br_startoff = NULLFILEOFF;
error = xfs_bmap_extsize_align(mp, got, &prev, extsz, 0, eof,
1, 0, &aoff, &alen);
ASSERT(!error);
}
/*
* Make a transaction-less quota reservation for delayed allocation
* blocks. This number gets adjusted later. We return if we haven't
* allocated blocks already inside this loop.
*/
error = xfs_trans_reserve_quota_nblks(NULL, ip, (long)alen, 0,
XFS_QMOPT_RES_REGBLKS);
if (error)
return error;
/*
* Split changing sb for alen and indlen since they could be coming
* from different places.
*/
indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen);
ASSERT(indlen > 0);
error = xfs_mod_fdblocks(mp, -((int64_t)alen), false);
if (error)
goto out_unreserve_quota;
error = xfs_mod_fdblocks(mp, -((int64_t)indlen), false);
if (error)
goto out_unreserve_blocks;
ip->i_delayed_blks += alen;
got->br_startoff = aoff;
got->br_startblock = nullstartblock(indlen);
got->br_blockcount = alen;
got->br_state = XFS_EXT_NORM;
xfs_bmap_add_extent_hole_delay(ip, whichfork, icur, got);
/*
* Tag the inode if blocks were preallocated. Note that COW fork
* preallocation can occur at the start or end of the extent, even when
* prealloc == 0, so we must also check the aligned offset and length.
*/
if (whichfork == XFS_DATA_FORK && prealloc)
xfs_inode_set_eofblocks_tag(ip);
if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len))
xfs_inode_set_cowblocks_tag(ip);
return 0;
out_unreserve_blocks:
xfs_mod_fdblocks(mp, alen, false);
out_unreserve_quota:
if (XFS_IS_QUOTA_ON(mp))
xfs_trans_unreserve_quota_nblks(NULL, ip, (long)alen, 0,
XFS_QMOPT_RES_REGBLKS);
return error;
}
| 0 |
[] |
linux
|
2c4306f719b083d17df2963bc761777576b8ad1b
| 125,084,559,861,947,280,000,000,000,000,000,000,000 | 95 |
xfs: set format back to extents if xfs_bmap_extents_to_btree
If xfs_bmap_extents_to_btree fails in a mode where we call
xfs_iroot_realloc(-1) to de-allocate the root, set the
format back to extents.
Otherwise we can assume we can dereference ifp->if_broot
based on the XFS_DINODE_FMT_BTREE format, and crash.
Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=199423
Signed-off-by: Eric Sandeen <[email protected]>
Reviewed-by: Christoph Hellwig <[email protected]>
Reviewed-by: Darrick J. Wong <[email protected]>
Signed-off-by: Darrick J. Wong <[email protected]>
|
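Editor's note: the invariant the message describes is that the format field must match what the fork actually holds -- if the btree root is torn down on the failure path, the format has to be flipped back to extents before anyone re-reads it. A hedged, filesystem-agnostic sketch (not the XFS structures):

#include <stddef.h>
#include <errno.h>

enum fork_fmt { FMT_EXTENTS, FMT_BTREE };

struct fork {
    enum fork_fmt format;
    void *broot;                 /* only valid when format == FMT_BTREE */
};

extern int build_btree_root(struct fork *f);

static int extents_to_btree(struct fork *f)
{
    f->format = FMT_BTREE;
    if (build_btree_root(f) != 0) {
        /* Failure path: the root is gone, so flip the format back --
         * otherwise later code will dereference f->broot purely
         * because it sees FMT_BTREE, and crash. */
        f->broot = NULL;
        f->format = FMT_EXTENTS;
        return -ENOMEM;
    }
    return 0;
}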
static int svm_check_intercept(struct kvm_vcpu *vcpu,
                               struct x86_instruction_info *info,
                               enum x86_intercept_stage stage,
                               struct x86_exception *exception)
{
    struct vcpu_svm *svm = to_svm(vcpu);
    int vmexit, ret = X86EMUL_CONTINUE;
    struct __x86_intercept icpt_info;
    struct vmcb *vmcb = svm->vmcb;

    if (info->intercept >= ARRAY_SIZE(x86_intercept_map))
        goto out;

    icpt_info = x86_intercept_map[info->intercept];

    if (stage != icpt_info.stage)
        goto out;

    switch (icpt_info.exit_code) {
    case SVM_EXIT_READ_CR0:
        if (info->intercept == x86_intercept_cr_read)
            icpt_info.exit_code += info->modrm_reg;
        break;
    case SVM_EXIT_WRITE_CR0: {
        unsigned long cr0, val;

        if (info->intercept == x86_intercept_cr_write)
            icpt_info.exit_code += info->modrm_reg;

        if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0 ||
            info->intercept == x86_intercept_clts)
            break;

        if (!(vmcb12_is_intercept(&svm->nested.ctl,
                                  INTERCEPT_SELECTIVE_CR0)))
            break;

        cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
        val = info->src_val & ~SVM_CR0_SELECTIVE_MASK;

        if (info->intercept == x86_intercept_lmsw) {
            cr0 &= 0xfUL;
            val &= 0xfUL;
            /* lmsw can't clear PE - catch this here */
            if (cr0 & X86_CR0_PE)
                val |= X86_CR0_PE;
        }

        if (cr0 ^ val)
            icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;

        break;
    }
    case SVM_EXIT_READ_DR0:
    case SVM_EXIT_WRITE_DR0:
        icpt_info.exit_code += info->modrm_reg;
        break;
    case SVM_EXIT_MSR:
        if (info->intercept == x86_intercept_wrmsr)
            vmcb->control.exit_info_1 = 1;
        else
            vmcb->control.exit_info_1 = 0;
        break;
    case SVM_EXIT_PAUSE:
        /*
         * We get this for NOP only, but PAUSE is encoded as REP NOP,
         * so check the REP prefix here.
         */
        if (info->rep_prefix != REPE_PREFIX)
            goto out;
        break;
    case SVM_EXIT_IOIO: {
        u64 exit_info;
        u32 bytes;

        if (info->intercept == x86_intercept_in ||
            info->intercept == x86_intercept_ins) {
            exit_info = ((info->src_val & 0xffff) << 16) |
                        SVM_IOIO_TYPE_MASK;
            bytes = info->dst_bytes;
        } else {
            exit_info = (info->dst_val & 0xffff) << 16;
            bytes = info->src_bytes;
        }

        if (info->intercept == x86_intercept_outs ||
            info->intercept == x86_intercept_ins)
            exit_info |= SVM_IOIO_STR_MASK;

        if (info->rep_prefix)
            exit_info |= SVM_IOIO_REP_MASK;

        bytes = min(bytes, 4u);

        exit_info |= bytes << SVM_IOIO_SIZE_SHIFT;
        exit_info |= (u32)info->ad_bytes << (SVM_IOIO_ASIZE_SHIFT - 1);

        vmcb->control.exit_info_1 = exit_info;
        vmcb->control.exit_info_2 = info->next_rip;

        break;
    }
    default:
        break;
    }

    /* TODO: Advertise NRIPS to guest hypervisor unconditionally */
    if (static_cpu_has(X86_FEATURE_NRIPS))
        vmcb->control.next_rip = info->next_rip;

    vmcb->control.exit_code = icpt_info.exit_code;
    vmexit = nested_svm_exit_handled(svm);

    ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED
                                       : X86EMUL_CONTINUE;

out:
    return ret;
}
target: 0
cwe: ["CWE-703"]
project: linux
commit_id: 6cd88243c7e03845a450795e134b488fc2afb736
hash: 334,881,408,595,264,800,000,000,000,000,000,000,000
size: 119
message:

KVM: x86: do not report a vCPU as preempted outside instruction boundaries

If a vCPU is outside guest mode and is scheduled out, it might be in the
process of making a memory access. A problem occurs if another vCPU uses
the PV TLB flush feature during the period when the vCPU is scheduled
out, and a virtual address has already been translated but has not yet
been accessed, because this is equivalent to using a stale TLB entry.

To avoid this, only report a vCPU as preempted if sure that the guest
is at an instruction boundary. A rescheduling request will be delivered
to the host physical CPU as an external interrupt, so for simplicity
consider any vmexit *not* at an instruction boundary except for external
interrupts.

It would in principle be okay to report the vCPU as preempted also
if it is sleeping in kvm_vcpu_block(): a TLB flush IPI will incur the
vmentry/vmexit overhead unnecessarily, and optimistic spinning is
also unlikely to succeed. However, leave it for later because right
now kvm_vcpu_check_block() is doing memory accesses. Even though the
TLB flush issue only applies to virtual memory addresses, it's very
much preferable to be conservative.

Reported-by: Jann Horn <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
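The commit's core idea can be modeled with a single per-vCPU flag: record at each vmexit whether the guest stopped on an instruction boundary (assumed true only for external-interrupt exits), and consult that flag when the vCPU is scheduled out. A runnable toy model follows; the names (on_vmexit, sched_out, the struct fields) are invented for illustration and are not the KVM internals:

#include <stdbool.h>
#include <stdio.h>

/* Toy model: only advertise a vCPU as preempted when the last exit
 * left the guest at an instruction boundary. */
struct vcpu {
    bool at_instruction_boundary;
    bool preempted_reported;
};

static void on_vmexit(struct vcpu *v, bool external_interrupt)
{
    /* In this model, external interrupts are the only exits
     * guaranteed to land on an instruction boundary. */
    v->at_instruction_boundary = external_interrupt;
}

static void sched_out(struct vcpu *v)
{
    /* Report preemption to the guest only when it is safe to do so. */
    v->preempted_reported = v->at_instruction_boundary;
}

int main(void)
{
    struct vcpu v = { false, false };

    on_vmexit(&v, false);   /* e.g. a nested page fault mid-instruction */
    sched_out(&v);
    printf("mid-instruction exit: reported=%d\n", v.preempted_reported);

    on_vmexit(&v, true);    /* e.g. a host timer interrupt */
    sched_out(&v);
    printf("boundary exit:        reported=%d\n", v.preempted_reported);
    return 0;
}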
static VALUE read_memory(int argc, VALUE *argv, VALUE klass)
{
    VALUE content;
    VALUE parse_options;
    int parse_options_int;
    xmlSchemaParserCtxtPtr ctx;
    xmlSchemaPtr schema;
    VALUE errors;
    VALUE rb_schema;
    int scanned_args = 0;

    scanned_args = rb_scan_args(argc, argv, "11", &content, &parse_options);
    if (scanned_args == 1) {
        parse_options = rb_const_get(rb_const_get(mNokogiriXml, rb_intern("ParseOptions")),
                                     rb_intern("DEFAULT_SCHEMA"));
    }
    parse_options_int = (int)NUM2INT(rb_funcall(parse_options, rb_intern("to_i"), 0));

    ctx = xmlSchemaNewMemParserCtxt((const char *)StringValuePtr(content),
                                    (int)RSTRING_LEN(content));

    errors = rb_ary_new();
    xmlSetStructuredErrorFunc((void *)errors, Nokogiri_error_array_pusher);

#ifdef HAVE_XMLSCHEMASETPARSERSTRUCTUREDERRORS
    xmlSchemaSetParserStructuredErrors(
        ctx,
        Nokogiri_error_array_pusher,
        (void *)errors
    );
#endif

    schema = xmlSchemaParse(ctx);

    xmlSetStructuredErrorFunc(NULL, NULL);
    xmlSchemaFreeParserCtxt(ctx);

    if (NULL == schema) {
        xmlErrorPtr error = xmlGetLastError();
        if (error)
            Nokogiri_error_raise(NULL, error);
        else
            rb_raise(rb_eRuntimeError, "Could not parse document");

        return Qnil;
    }

    rb_schema = Data_Wrap_Struct(klass, 0, dealloc, schema);
    rb_iv_set(rb_schema, "@errors", errors);
    rb_iv_set(rb_schema, "@parse_options", parse_options);

    return rb_schema;
}
target: 0
cwe: ["CWE-611", "CWE-703"]
project: nokogiri
commit_id: 9c87439d9afa14a365ff13e73adc809cb2c3d97b
hash: 334,909,212,581,692,700,000,000,000,000,000,000,000
size: 51
message:

feat: XML::Schema and RelaxNG creation accept optional ParseOptions

I'm trying out a new pattern, which is that the parsed object carries
around the ParseOptions it was created with, which should make some
testing a bit easier.

I'm also not implementing the "config block" pattern in use for
Documents, because I think the UX is weird and I'm hoping to change
everything to use kwargs in a 2.0 release, anyway.
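For reference, the libxml2 calls used by read_memory above can be exercised from plain C. The sketch below parses an XSD from memory and routes parser diagnostics through xmlSchemaSetParserStructuredErrors into a counter, playing the role the Ruby errors array plays in the extension. One caveat: the structured-error callback takes xmlErrorPtr in older libxml2 and const xmlError * in 2.12+, so this assumes an older header; build with cc demo.c $(xml2-config --cflags --libs).

#include <stdio.h>
#include <libxml/xmlschemas.h>

/* Collect parser errors instead of letting libxml2 print to stderr. */
static void push_error(void *userdata, xmlErrorPtr err)
{
    int *count = (int *)userdata;       /* stand-in for the Ruby array */
    (*count)++;
    fprintf(stderr, "schema error: %s", err->message);
}

int main(void)
{
    static const char xsd[] =
        "<xs:schema xmlns:xs=\"http://www.w3.org/2001/XMLSchema\">"
        "  <xs:element name=\"note\" type=\"xs:string\"/>"
        "</xs:schema>";
    int nerrors = 0;

    xmlSchemaParserCtxtPtr ctx =
        xmlSchemaNewMemParserCtxt(xsd, (int)sizeof(xsd) - 1);
    /* Route structured errors to our collector, as read_memory does. */
    xmlSchemaSetParserStructuredErrors(ctx, push_error, &nerrors);

    xmlSchemaPtr schema = xmlSchemaParse(ctx);
    xmlSchemaFreeParserCtxt(ctx);

    if (schema == NULL) {
        fprintf(stderr, "parse failed (%d errors)\n", nerrors);
        return 1;
    }
    puts("schema parsed OK");
    xmlSchemaFree(schema);
    return 0;
}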