func
stringlengths 0
484k
| target
int64 0
1
| cwe
listlengths 0
4
| project
stringclasses 799
values | commit_id
stringlengths 40
40
| hash
float64 1,215,700,430,453,689,100,000,000B
340,281,914,521,452,260,000,000,000,000B
| size
int64 1
24k
| message
stringlengths 0
13.3k
|
---|---|---|---|---|---|---|---|
/*
 * reply_spnego_ntlmssp: send the reply for one leg of a SPNEGO/NTLMSSP
 * session setup exchange.
 *
 * req                - SMB request being answered.
 * vuid               - the (possibly still partial) session uid.
 * auth_ntlmssp_state - in/out NTLMSSP auth context; torn down here once
 *                      the exchange is finished.
 * ntlmssp_blob       - NTLMSSP token to return to the client.
 * nt_status          - result of the NTLMSSP processing so far.
 * OID                - SPNEGO mechanism OID used when wrapping.
 * wrap               - if true, wrap ntlmssp_blob in a SPNEGO response.
 */
static void reply_spnego_ntlmssp(struct smb_request *req,
uint16 vuid,
AUTH_NTLMSSP_STATE **auth_ntlmssp_state,
DATA_BLOB *ntlmssp_blob, NTSTATUS nt_status,
const char *OID,
bool wrap)
{
DATA_BLOB response;
struct auth_serversupplied_info *server_info = NULL;
struct smbd_server_connection *sconn = smbd_server_conn;
if (NT_STATUS_IS_OK(nt_status)) {
server_info = (*auth_ntlmssp_state)->server_info;
} else {
/* Auth failed: possibly map the failure to a guest login
 * (presumably governed by the "map to guest" setting — confirm
 * against do_map_to_guest()). */
nt_status = do_map_to_guest(nt_status,
&server_info,
(*auth_ntlmssp_state)->ntlmssp_state->user,
(*auth_ntlmssp_state)->ntlmssp_state->domain);
}
reply_outbuf(req, 4, 0);
SSVAL(req->outbuf, smb_uid, vuid);
if (NT_STATUS_IS_OK(nt_status)) {
DATA_BLOB nullblob = data_blob_null;
/* The vuid must still be in the partial-auth table; otherwise
 * treat this as a logon failure. */
if (!is_partial_auth_vuid(sconn, vuid)) {
nt_status = NT_STATUS_LOGON_FAILURE;
goto out;
}
/* Install the session key negotiated by NTLMSSP, replacing any
 * previous one. */
data_blob_free(&server_info->user_session_key);
server_info->user_session_key =
data_blob_talloc(
server_info,
(*auth_ntlmssp_state)->ntlmssp_state->session_key.data,
(*auth_ntlmssp_state)->ntlmssp_state->session_key.length);
/* register_existing_vuid keeps the server info */
if (register_existing_vuid(sconn, vuid,
server_info, nullblob,
(*auth_ntlmssp_state)->ntlmssp_state->user) !=
vuid) {
nt_status = NT_STATUS_LOGON_FAILURE;
goto out;
}
/* Ownership of server_info moved to the vuid entry above; clear
 * the auth state's reference so it is not released twice. */
(*auth_ntlmssp_state)->server_info = NULL;
/* current_user_info is changed on new vuid */
reload_services( True );
SSVAL(req->outbuf, smb_vwv3, 0);
if (server_info->guest) {
SSVAL(req->outbuf,smb_vwv2,1);
}
}
out:
/* Either wrap the NTLMSSP token in a SPNEGO response (and free our
 * generated copy afterwards) or send the raw token as-is. */
if (wrap) {
response = spnego_gen_auth_response(ntlmssp_blob,
nt_status, OID);
} else {
response = *ntlmssp_blob;
}
reply_sesssetup_blob(req, response, nt_status);
if (wrap) {
data_blob_free(&response);
}
/* NT_STATUS_MORE_PROCESSING_REQUIRED from our NTLMSSP code tells us,
and the other end, that we are not finished yet. */
if (!NT_STATUS_EQUAL(nt_status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
/* NB. This is *NOT* an error case. JRA */
auth_ntlmssp_end(auth_ntlmssp_state);
if (!NT_STATUS_IS_OK(nt_status)) {
/* Kill the intermediate vuid */
invalidate_vuid(sconn, vuid);
}
}
}
| 0 |
[
"CWE-119"
] |
samba
|
9280051bfba337458722fb157f3082f93cbd9f2b
| 106,907,367,785,920,740,000,000,000,000,000,000,000 | 86 |
s3: Fix an uninitialized variable read
Found by Laurent Gaffie <[email protected]>
Thanks for that,
Volker
Fix bug #7254 (An uninitialized variable read could cause an smbd crash).
|
__releases(dentry->d_inode->i_lock)
{
struct inode *inode = dentry->d_inode;
bool hashed = !d_unhashed(dentry);
if (hashed)
raw_write_seqcount_begin(&dentry->d_seq);
__d_clear_type_and_inode(dentry);
hlist_del_init(&dentry->d_u.d_alias);
if (hashed)
raw_write_seqcount_end(&dentry->d_seq);
spin_unlock(&dentry->d_lock);
spin_unlock(&inode->i_lock);
if (!inode->i_nlink)
fsnotify_inoderemove(inode);
if (dentry->d_op && dentry->d_op->d_iput)
dentry->d_op->d_iput(dentry, inode);
else
iput(inode);
}
| 0 |
[
"CWE-362",
"CWE-399"
] |
linux
|
49d31c2f389acfe83417083e1208422b4091cd9e
| 142,509,946,463,556,180,000,000,000,000,000,000,000 | 20 |
dentry name snapshots
take_dentry_name_snapshot() takes a safe snapshot of dentry name;
if the name is a short one, it gets copied into caller-supplied
structure, otherwise an extra reference to external name is grabbed
(those are never modified). In either case the pointer to stable
string is stored into the same structure.
dentry must be held by the caller of take_dentry_name_snapshot(),
but may be freely dropped afterwards - the snapshot will stay
until destroyed by release_dentry_name_snapshot().
Intended use:
struct name_snapshot s;
take_dentry_name_snapshot(&s, dentry);
...
access s.name
...
release_dentry_name_snapshot(&s);
Replaces fsnotify_oldname_...(), gets used in fsnotify to obtain the name
to pass down with event.
Signed-off-by: Al Viro <[email protected]>
|
/*
 * Control-virtqueue interrupt callback: runs in interrupt context, so
 * defer the actual control-message processing to the workqueue.
 */
static void control_intr(struct virtqueue *vq)
{
	struct ports_device *portdev = vq->vdev->priv;

	schedule_work(&portdev->control_work);
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
linux
|
c4baad50297d84bde1a7ad45e50c73adae4a2192
| 39,035,467,319,540,046,000,000,000,000,000,000,000 | 7 |
virtio-console: avoid DMA from stack
put_chars() stuffs the buffer it gets into an sg, but that buffer may be
on the stack. This breaks with CONFIG_VMAP_STACK=y (for me, it
manifested as printks getting turned into NUL bytes).
Signed-off-by: Omar Sandoval <[email protected]>
Signed-off-by: Michael S. Tsirkin <[email protected]>
Reviewed-by: Amit Shah <[email protected]>
|
ast_for_with_stmt(struct compiling *c, const node *n0, bool is_async)
{
const node * const n = is_async ? CHILD(n0, 1) : n0;
int i, n_items, nch_minus_type, has_type_comment, end_lineno, end_col_offset;
asdl_seq *items, *body;
string type_comment;
REQ(n, with_stmt);
has_type_comment = TYPE(CHILD(n, NCH(n) - 2)) == TYPE_COMMENT;
nch_minus_type = NCH(n) - has_type_comment;
n_items = (nch_minus_type - 2) / 2;
items = _Py_asdl_seq_new(n_items, c->c_arena);
if (!items)
return NULL;
for (i = 1; i < nch_minus_type - 2; i += 2) {
withitem_ty item = ast_for_with_item(c, CHILD(n, i));
if (!item)
return NULL;
asdl_seq_SET(items, (i - 1) / 2, item);
}
body = ast_for_suite(c, CHILD(n, NCH(n) - 1));
if (!body)
return NULL;
get_last_end_pos(body, &end_lineno, &end_col_offset);
if (has_type_comment) {
type_comment = NEW_TYPE_COMMENT(CHILD(n, NCH(n) - 2));
if (!type_comment)
return NULL;
}
else
type_comment = NULL;
if (is_async)
return AsyncWith(items, body, type_comment, LINENO(n0), n0->n_col_offset,
end_lineno, end_col_offset, c->c_arena);
else
return With(items, body, type_comment, LINENO(n), n->n_col_offset,
end_lineno, end_col_offset, c->c_arena);
}
| 0 |
[
"CWE-125"
] |
cpython
|
dcfcd146f8e6fc5c2fc16a4c192a0c5f5ca8c53c
| 185,898,933,456,734,900,000,000,000,000,000,000,000 | 43 |
bpo-35766: Merge typed_ast back into CPython (GH-11645)
|
#ifndef GPAC_DISABLE_ISOM_WRITE
static u32 sgpd_size_entry(u32 grouping_type, void *entry)
{
switch (grouping_type) {
case GF_ISOM_SAMPLE_GROUP_ROLL:
case GF_ISOM_SAMPLE_GROUP_PROL:
return 2;
case GF_ISOM_SAMPLE_GROUP_TELE:
case GF_ISOM_SAMPLE_GROUP_RAP:
case GF_ISOM_SAMPLE_GROUP_SAP:
case GF_ISOM_SAMPLE_GROUP_SYNC:
return 1;
case GF_ISOM_SAMPLE_GROUP_TSCL:
return 20;
case GF_ISOM_SAMPLE_GROUP_LBLI:
return 2;
case GF_ISOM_SAMPLE_GROUP_TSAS:
case GF_ISOM_SAMPLE_GROUP_STSA:
return 0;
case GF_ISOM_SAMPLE_GROUP_SEIG:
{
GF_CENCSampleEncryptionGroupEntry *seig = (GF_CENCSampleEncryptionGroupEntry *)entry;
Bool use_mkey = seig->key_info[0] ? GF_TRUE : GF_FALSE;
if (use_mkey) {
return 3 + seig->key_info_size-1;
}
return seig->key_info_size; //== 3 + (seig->key_info_size-3);
}
case GF_ISOM_SAMPLE_GROUP_OINF:
return gf_isom_oinf_size_entry(entry);
case GF_ISOM_SAMPLE_GROUP_LINF:
return gf_isom_linf_size_entry(entry);
default:
return ((GF_DefaultSampleGroupDescriptionEntry *)entry)->length;
}
| 0 |
[
"CWE-476",
"CWE-787"
] |
gpac
|
b8f8b202d4fc23eb0ab4ce71ae96536ca6f5d3f8
| 299,357,869,328,531,920,000,000,000,000,000,000,000 | 35 |
fixed #1757
|
/*
 * Return 1 if "host" matches the server's ServerName or any of its
 * ServerAlias / wildcard-ServerAlias entries, 0 otherwise.  All name
 * comparisons are case-insensitive.
 */
static int matches_aliases(server_rec *s, const char *host)
{
    int i;
    apr_array_header_t *list;

    /* Exact (case-insensitive) match against the configured ServerName. */
    if (strcasecmp(host, s->server_hostname) == 0) {
        return 1;
    }

    /* Plain ServerAlias entries. */
    list = s->names;
    if (list) {
        char **alias = (char **) list->elts;
        for (i = 0; i < list->nelts; ++i) {
            if (alias[i] && strcasecmp(host, alias[i]) == 0) {
                return 1;
            }
        }
    }

    /* Wildcard ServerAlias entries go through Apache's pattern matcher. */
    list = s->wild_names;
    if (list) {
        char **alias = (char **) list->elts;
        for (i = 0; i < list->nelts; ++i) {
            if (alias[i] && ap_strcasecmp_match(host, alias[i]) == 0) {
                return 1;
            }
        }
    }

    return 0;
}
| 0 |
[] |
httpd
|
ecebcc035ccd8d0e2984fe41420d9e944f456b3c
| 17,938,599,602,176,301,000,000,000,000,000,000,000 | 31 |
Merged r1734009,r1734231,r1734281,r1838055,r1838079,r1840229,r1876664,r1876674,r1876784,r1879078,r1881620,r1887311,r1888871 from trunk:
*) core: Split ap_create_request() from ap_read_request(). [Graham Leggett]
*) core, h2: common ap_parse_request_line() and ap_check_request_header()
code. [Yann Ylavic]
*) core: Add StrictHostCheck to allow unconfigured hostnames to be
rejected. [Eric Covener]
git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1890245 13f79535-47bb-0310-9956-ffa450edef68
|
/*
 * Rewrite the colour-description VUI fields of an AVC configuration.
 * Only the colour-related fields are filled in; the aspect-ratio fields
 * are set to -1 (presumably the "leave unchanged" sentinel understood by
 * gf_avc_change_vui() — confirm against that function).
 */
GF_Err gf_media_avc_change_color(GF_AVCConfig *avcc, s32 fullrange, s32 vidformat, s32 colorprim, s32 transfer, s32 colmatrix)
{
	GF_VUIInfo vui_desc = {0};

	vui_desc.ar_num = -1;
	vui_desc.ar_den = -1;
	vui_desc.fullrange = fullrange;
	vui_desc.video_format = vidformat;
	vui_desc.color_prim = colorprim;
	vui_desc.color_tfc = transfer;
	vui_desc.color_matrix = colmatrix;

	return gf_avc_change_vui(avcc, &vui_desc);
}
| 0 |
[
"CWE-190",
"CWE-787"
] |
gpac
|
51cdb67ff7c5f1242ac58c5aa603ceaf1793b788
| 5,354,776,727,758,470,000,000,000,000,000,000,000 | 13 |
add safety in avc/hevc/vvc sps/pps/vps ID check - cf #1720 #1721 #1722
|
parsestatus(netdissect_options *ndo,
const uint32_t *dp, int *er)
{
int errnum;
ND_TCHECK(dp[0]);
errnum = EXTRACT_32BITS(&dp[0]);
if (er)
*er = errnum;
if (errnum != 0) {
if (!ndo->ndo_qflag)
ND_PRINT((ndo, " ERROR: %s",
tok2str(status2str, "unk %d", errnum)));
nfserr = 1;
}
return (dp + 1);
trunc:
return NULL;
}
| 0 |
[
"CWE-125",
"CWE-787"
] |
tcpdump
|
7a923447fd49a069a0fd3b6c3547438ab5ee2123
| 314,010,798,987,738,780,000,000,000,000,000,000,000 | 20 |
CVE-2017-13001/NFS: Don't copy more data than is in the file handle.
Also, put the buffer on the stack; no reason to make it static. (65
bytes isn't a lot.)
This fixes a buffer over-read discovered by Kamil Frankowicz.
Add a test using the capture file supplied by the reporter(s).
|
session_worker_child_watch (GPid pid,
int status,
GdmSessionWorker *worker)
{
g_debug ("GdmSessionWorker: child (pid:%d) done (%s:%d)",
(int) pid,
WIFEXITED (status) ? "status"
: WIFSIGNALED (status) ? "signal"
: "unknown",
WIFEXITED (status) ? WEXITSTATUS (status)
: WIFSIGNALED (status) ? WTERMSIG (status)
: -1);
if (WIFEXITED (status)) {
int code = WEXITSTATUS (status);
send_dbus_int_method (worker->priv->connection,
"SessionExited",
code);
} else if (WIFSIGNALED (status)) {
int num = WTERMSIG (status);
send_dbus_int_method (worker->priv->connection,
"SessionDied",
num);
}
if (worker->priv->ckc != NULL) {
ck_connector_close_session (worker->priv->ckc, NULL);
ck_connector_unref (worker->priv->ckc);
worker->priv->ckc = NULL;
}
gdm_session_worker_uninitialize_pam (worker, PAM_SUCCESS);
worker->priv->child_pid = -1;
}
| 0 |
[] |
gdm
|
c25ef9245be4e0be2126ef3d075df4401949b570
| 166,718,740,293,424,460,000,000,000,000,000,000,000 | 37 |
Store the face and dmrc files in a cache. Refer to bug #565151.
|
/*
 * vhost_add_used_n - publish an array of used descriptor heads on the
 * virtqueue's used ring.
 *
 * @vq:    virtqueue to update.
 * @heads: array of used elements to publish.
 * @count: number of entries in @heads.
 *
 * The used ring is a power-of-two circular buffer, so a batch that would
 * run past the end is split across two __vhost_add_used_n() calls.
 * Returns 0 on success or a negative error (e.g. -EFAULT when writing the
 * guest-visible used index fails).
 */
int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
unsigned count)
{
int start, n, r;
/* Ring slot of the next used entry; vq->num is a power of two. */
start = vq->last_used_idx & (vq->num - 1);
n = vq->num - start;
/* Batch wraps past the end of the ring: write the first chunk up to
 * the ring boundary, then fall through for the remainder. */
if (n < count) {
r = __vhost_add_used_n(vq, heads, n);
if (r < 0)
return r;
heads += n;
count -= n;
}
r = __vhost_add_used_n(vq, heads, count);
/* Make sure buffer is written before we update index. */
smp_wmb();
if (vhost_put_used_idx(vq)) {
vq_err(vq, "Failed to increment used idx");
return -EFAULT;
}
if (unlikely(vq->log_used)) {
/* Make sure used idx is seen before log. */
smp_wmb();
/* Log used index update. */
log_used(vq, offsetof(struct vring_used, idx),
sizeof vq->used->idx);
if (vq->log_ctx)
eventfd_signal(vq->log_ctx, 1);
}
return r;
}
| 0 |
[
"CWE-120"
] |
linux
|
060423bfdee3f8bc6e2c1bac97de24d5415e2bc4
| 267,523,542,575,055,560,000,000,000,000,000,000,000 | 33 |
vhost: make sure log_num < in_num
The code assumes log_num < in_num everywhere, and that is true as long as
in_num is incremented by descriptor iov count, and log_num by 1. However
this breaks if there's a zero sized descriptor.
As a result, if a malicious guest creates a vring desc with desc.len = 0,
it may cause the host kernel to crash by overflowing the log array. This
bug can be triggered during the VM migration.
There's no need to log when desc.len = 0, so just don't increment log_num
in this case.
Fixes: 3a4d5c94e959 ("vhost_net: a kernel-level virtio server")
Cc: [email protected]
Reviewed-by: Lidong Chen <[email protected]>
Signed-off-by: ruippan <[email protected]>
Signed-off-by: yongduan <[email protected]>
Acked-by: Michael S. Tsirkin <[email protected]>
Reviewed-by: Tyler Hicks <[email protected]>
Signed-off-by: Michael S. Tsirkin <[email protected]>
|
/*
 * Emulate the PowerPC load/store string instructions (lswi, lswx, stswi,
 * stswx), which transfer a byte string between memory and a run of GPRs
 * starting at rT.
 *
 * @regs:     user register state to read and update.
 * @instword: the instruction word being emulated.
 *
 * Returns 0 on success, -EINVAL for an invalid/unhandled form, or
 * -EFAULT if the user-space memory access fails.
 */
static int emulate_string_inst(struct pt_regs *regs, u32 instword)
{
/* Decode the register fields of the instruction. */
u8 rT = (instword >> 21) & 0x1f;
u8 rA = (instword >> 16) & 0x1f;
u8 NB_RB = (instword >> 11) & 0x1f;   /* NB (immediate forms) or rB (indexed forms) */
u32 num_bytes;
unsigned long EA;
int pos = 0;   /* byte position within the current 32-bit register word */
/* Early out if we are an invalid form of lswx */
if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX)
if ((rT == rA) || (rT == NB_RB))
return -EINVAL;
/* Effective-address base: contents of rA, or 0 when rA is register 0. */
EA = (rA == 0) ? 0 : regs->gpr[rA];
switch (instword & PPC_INST_STRING_MASK) {
case PPC_INST_LSWX:
case PPC_INST_STSWX:
/* Indexed forms: byte count comes from XER[25:31].
 * NOTE(review): this adds the rB field value itself, not
 * regs->gpr[NB_RB] — verify against the ISA definition of
 * lswx/stswx (EA = (rA|0) + (rB)). */
EA += NB_RB;
num_bytes = regs->xer & 0x7f;
break;
case PPC_INST_LSWI:
case PPC_INST_STSWI:
/* Immediate forms: NB field gives the count, 0 meaning 32. */
num_bytes = (NB_RB == 0) ? 32 : NB_RB;
break;
default:
return -EINVAL;
}
while (num_bytes != 0)
{
u8 val;
/* Bytes are packed most-significant first within each register. */
u32 shift = 8 * (3 - (pos & 0x3));
/* if process is 32-bit, clear upper 32 bits of EA */
if ((regs->msr & MSR_64BIT) == 0)
EA &= 0xFFFFFFFF;
switch ((instword & PPC_INST_STRING_MASK)) {
case PPC_INST_LSWX:
case PPC_INST_LSWI:
if (get_user(val, (u8 __user *)EA))
return -EFAULT;
/* first time updating this reg,
 * zero it out */
if (pos == 0)
regs->gpr[rT] = 0;
regs->gpr[rT] |= val << shift;
break;
case PPC_INST_STSWI:
case PPC_INST_STSWX:
val = regs->gpr[rT] >> shift;
if (put_user(val, (u8 __user *)EA))
return -EFAULT;
break;
}
/* move EA to next address */
EA += 1;
num_bytes--;
/* manage our position within the register */
if (++pos == 4) {
pos = 0;
/* register numbers wrap from r31 back to r0 */
if (++rT == 32)
rT = 0;
}
}
return 0;
}
| 0 |
[] |
linux
|
5d176f751ee3c6eededd984ad409bff201f436a7
| 126,849,740,765,992,570,000,000,000,000,000,000,000 | 71 |
powerpc: tm: Enable transactional memory (TM) lazily for userspace
Currently the MSR TM bit is always set if the hardware is TM capable.
This adds extra overhead as it means the TM SPRS (TFHAR, TEXASR and
TFAIR) must be swapped for each process regardless of if they use TM.
For processes that don't use TM the TM MSR bit can be turned off
allowing the kernel to avoid the expensive swap of the TM registers.
A TM unavailable exception will occur if a thread does use TM and the
kernel will enable MSR_TM and leave it so for some time afterwards.
Signed-off-by: Cyril Bur <[email protected]>
Signed-off-by: Michael Ellerman <[email protected]>
|
// Wrap the certificate attached to an SSL session, if any, in a JS object.
// SSL_get_certificate() hands back an internal pointer without taking a
// reference, so the certificate is duplicated before being handed off.
// Returns an empty MaybeLocal when no certificate is set.
MaybeLocal<Object> X509Certificate::GetCert(
    Environment* env,
    const SSLPointer& ssl) {
  // Discard any OpenSSL errors queued while we poke at the session.
  ClearErrorOnReturn clear_error_on_return;

  X509* cert = SSL_get_certificate(ssl.get());
  if (cert == nullptr) {
    return MaybeLocal<Object>();
  }

  X509Pointer duplicate(X509_dup(cert));
  return New(env, std::move(duplicate));
}
| 0 |
[
"CWE-295"
] |
node
|
466e5415a2b7b3574ab5403acb87e89a94a980d1
| 47,835,371,061,633,070,000,000,000,000,000,000,000 | 11 |
crypto,tls: implement safe x509 GeneralName format
This change introduces JSON-compatible escaping rules for strings that
include X.509 GeneralName components (see RFC 5280). This non-standard
format avoids ambiguities and prevents injection attacks that could
previously lead to X.509 certificates being accepted even though they
were not valid for the target hostname.
These changes affect the format of subject alternative names and the
format of authority information access. The checkServerIdentity function
has been modified to safely handle the new format, eliminating the
possibility of injecting subject alternative names into the verification
logic.
Because each subject alternative name is only encoded as a JSON string
literal if necessary for security purposes, this change will only be
visible in rare cases.
This addresses CVE-2021-44532.
CVE-ID: CVE-2021-44532
PR-URL: https://github.com/nodejs-private/node-private/pull/300
Reviewed-By: Michael Dawson <[email protected]>
Reviewed-By: Rich Trott <[email protected]>
|
/* Return the stored string of a base64-typed XMLRPC value, or 0 when the
 * value is NULL or of any other type. */
const char* XMLRPC_GetValueBase64(XMLRPC_VALUE value) {
   if (value == NULL || value->type != xmlrpc_base64) {
      return 0;
   }
   return value->str.str;
}
| 0 |
[
"CWE-119"
] |
php-src
|
88412772d295ebf7dd34409534507dc9bcac726e
| 29,424,368,393,754,428,000,000,000,000,000,000,000 | 3 |
Fix bug #68027 - fix date parsing in XMLRPC lib
|
/*
 * Append one resource record (key, TTL, RDLENGTH, RDATA) in wire format
 * to the DNS packet p.
 *
 * p           - packet being built; on any failure it is truncated back
 *               to the size it had on entry.
 * rr          - resource record to serialize.
 * flags       - answer flags; DNS_ANSWER_GOODBYE forces a zero TTL.
 * start       - if non-NULL, receives the offset at which the RR starts.
 * rdata_start - if non-NULL, receives the RDATA offset relative to the
 *               start of the RR.
 *
 * Returns 0 on success or a negative errno (e.g. -ENOSPC when the RDATA
 * would not fit the 16-bit RDLENGTH field).
 */
int dns_packet_append_rr(DnsPacket *p, const DnsResourceRecord *rr, const DnsAnswerFlags flags, size_t *start, size_t *rdata_start) {
size_t saved_size, rdlength_offset, end, rdlength, rds;
uint32_t ttl;
int r;
assert(p);
assert(rr);
/* Remember the packet size so we can roll back on failure. */
saved_size = p->size;
r = dns_packet_append_key(p, rr->key, flags, NULL);
if (r < 0)
goto fail;
ttl = flags & DNS_ANSWER_GOODBYE ? 0 : rr->ttl;
r = dns_packet_append_uint32(p, ttl, NULL);
if (r < 0)
goto fail;
/* Initially we write 0 here */
r = dns_packet_append_uint16(p, 0, &rdlength_offset);
if (r < 0)
goto fail;
rds = p->size - saved_size;
/* Serialize the RDATA according to the record type; records marked
 * unparseable fall through to the raw-blob default case. */
switch (rr->unparseable ? _DNS_TYPE_INVALID : rr->key->type) {
case DNS_TYPE_SRV:
r = dns_packet_append_uint16(p, rr->srv.priority, NULL);
if (r < 0)
goto fail;
r = dns_packet_append_uint16(p, rr->srv.weight, NULL);
if (r < 0)
goto fail;
r = dns_packet_append_uint16(p, rr->srv.port, NULL);
if (r < 0)
goto fail;
r = dns_packet_append_name(p, rr->srv.name, true, false, NULL);
break;
case DNS_TYPE_PTR:
case DNS_TYPE_NS:
case DNS_TYPE_CNAME:
case DNS_TYPE_DNAME:
r = dns_packet_append_name(p, rr->ptr.name, true, false, NULL);
break;
case DNS_TYPE_HINFO:
r = dns_packet_append_string(p, rr->hinfo.cpu, NULL);
if (r < 0)
goto fail;
r = dns_packet_append_string(p, rr->hinfo.os, NULL);
break;
case DNS_TYPE_SPF: /* exactly the same as TXT */
case DNS_TYPE_TXT:
if (!rr->txt.items) {
/* RFC 6763, section 6.1 suggests to generate
 * single empty string for an empty array. */
r = dns_packet_append_raw_string(p, NULL, 0, NULL);
if (r < 0)
goto fail;
} else {
DnsTxtItem *i;
LIST_FOREACH(items, i, rr->txt.items) {
r = dns_packet_append_raw_string(p, i->data, i->length, NULL);
if (r < 0)
goto fail;
}
}
r = 0;
break;
case DNS_TYPE_A:
r = dns_packet_append_blob(p, &rr->a.in_addr, sizeof(struct in_addr), NULL);
break;
case DNS_TYPE_AAAA:
r = dns_packet_append_blob(p, &rr->aaaa.in6_addr, sizeof(struct in6_addr), NULL);
break;
case DNS_TYPE_SOA:
r = dns_packet_append_name(p, rr->soa.mname, true, false, NULL);
if (r < 0)
goto fail;
r = dns_packet_append_name(p, rr->soa.rname, true, false, NULL);
if (r < 0)
goto fail;
r = dns_packet_append_uint32(p, rr->soa.serial, NULL);
if (r < 0)
goto fail;
r = dns_packet_append_uint32(p, rr->soa.refresh, NULL);
if (r < 0)
goto fail;
r = dns_packet_append_uint32(p, rr->soa.retry, NULL);
if (r < 0)
goto fail;
r = dns_packet_append_uint32(p, rr->soa.expire, NULL);
if (r < 0)
goto fail;
r = dns_packet_append_uint32(p, rr->soa.minimum, NULL);
break;
case DNS_TYPE_MX:
r = dns_packet_append_uint16(p, rr->mx.priority, NULL);
if (r < 0)
goto fail;
r = dns_packet_append_name(p, rr->mx.exchange, true, false, NULL);
break;
case DNS_TYPE_LOC:
r = dns_packet_append_uint8(p, rr->loc.version, NULL);
if (r < 0)
goto fail;
r = dns_packet_append_uint8(p, rr->loc.size, NULL);
if (r < 0)
goto fail;
r = dns_packet_append_uint8(p, rr->loc.horiz_pre, NULL);
if (r < 0)
goto fail;
r = dns_packet_append_uint8(p, rr->loc.vert_pre, NULL);
if (r < 0)
goto fail;
r = dns_packet_append_uint32(p, rr->loc.latitude, NULL);
if (r < 0)
goto fail;
r = dns_packet_append_uint32(p, rr->loc.longitude, NULL);
if (r < 0)
goto fail;
r = dns_packet_append_uint32(p, rr->loc.altitude, NULL);
break;
case DNS_TYPE_DS:
r = dns_packet_append_uint16(p, rr->ds.key_tag, NULL);
if (r < 0)
goto fail;
r = dns_packet_append_uint8(p, rr->ds.algorithm, NULL);
if (r < 0)
goto fail;
r = dns_packet_append_uint8(p, rr->ds.digest_type, NULL);
if (r < 0)
goto fail;
r = dns_packet_append_blob(p, rr->ds.digest, rr->ds.digest_size, NULL);
break;
case DNS_TYPE_SSHFP:
r = dns_packet_append_uint8(p, rr->sshfp.algorithm, NULL);
if (r < 0)
goto fail;
r = dns_packet_append_uint8(p, rr->sshfp.fptype, NULL);
if (r < 0)
goto fail;
r = dns_packet_append_blob(p, rr->sshfp.fingerprint, rr->sshfp.fingerprint_size, NULL);
break;
case DNS_TYPE_DNSKEY:
r = dns_packet_append_uint16(p, rr->dnskey.flags, NULL);
if (r < 0)
goto fail;
r = dns_packet_append_uint8(p, rr->dnskey.protocol, NULL);
if (r < 0)
goto fail;
r = dns_packet_append_uint8(p, rr->dnskey.algorithm, NULL);
if (r < 0)
goto fail;
r = dns_packet_append_blob(p, rr->dnskey.key, rr->dnskey.key_size, NULL);
break;
case DNS_TYPE_RRSIG:
r = dns_packet_append_uint16(p, rr->rrsig.type_covered, NULL);
if (r < 0)
goto fail;
r = dns_packet_append_uint8(p, rr->rrsig.algorithm, NULL);
if (r < 0)
goto fail;
r = dns_packet_append_uint8(p, rr->rrsig.labels, NULL);
if (r < 0)
goto fail;
r = dns_packet_append_uint32(p, rr->rrsig.original_ttl, NULL);
if (r < 0)
goto fail;
r = dns_packet_append_uint32(p, rr->rrsig.expiration, NULL);
if (r < 0)
goto fail;
r = dns_packet_append_uint32(p, rr->rrsig.inception, NULL);
if (r < 0)
goto fail;
r = dns_packet_append_uint16(p, rr->rrsig.key_tag, NULL);
if (r < 0)
goto fail;
r = dns_packet_append_name(p, rr->rrsig.signer, false, true, NULL);
if (r < 0)
goto fail;
r = dns_packet_append_blob(p, rr->rrsig.signature, rr->rrsig.signature_size, NULL);
break;
case DNS_TYPE_NSEC:
r = dns_packet_append_name(p, rr->nsec.next_domain_name, false, false, NULL);
if (r < 0)
goto fail;
r = dns_packet_append_types(p, rr->nsec.types, NULL);
if (r < 0)
goto fail;
break;
case DNS_TYPE_NSEC3:
r = dns_packet_append_uint8(p, rr->nsec3.algorithm, NULL);
if (r < 0)
goto fail;
r = dns_packet_append_uint8(p, rr->nsec3.flags, NULL);
if (r < 0)
goto fail;
r = dns_packet_append_uint16(p, rr->nsec3.iterations, NULL);
if (r < 0)
goto fail;
r = dns_packet_append_uint8(p, rr->nsec3.salt_size, NULL);
if (r < 0)
goto fail;
r = dns_packet_append_blob(p, rr->nsec3.salt, rr->nsec3.salt_size, NULL);
if (r < 0)
goto fail;
r = dns_packet_append_uint8(p, rr->nsec3.next_hashed_name_size, NULL);
if (r < 0)
goto fail;
r = dns_packet_append_blob(p, rr->nsec3.next_hashed_name, rr->nsec3.next_hashed_name_size, NULL);
if (r < 0)
goto fail;
r = dns_packet_append_types(p, rr->nsec3.types, NULL);
if (r < 0)
goto fail;
break;
case DNS_TYPE_TLSA:
r = dns_packet_append_uint8(p, rr->tlsa.cert_usage, NULL);
if (r < 0)
goto fail;
r = dns_packet_append_uint8(p, rr->tlsa.selector, NULL);
if (r < 0)
goto fail;
r = dns_packet_append_uint8(p, rr->tlsa.matching_type, NULL);
if (r < 0)
goto fail;
r = dns_packet_append_blob(p, rr->tlsa.data, rr->tlsa.data_size, NULL);
break;
case DNS_TYPE_CAA:
r = dns_packet_append_uint8(p, rr->caa.flags, NULL);
if (r < 0)
goto fail;
r = dns_packet_append_string(p, rr->caa.tag, NULL);
if (r < 0)
goto fail;
r = dns_packet_append_blob(p, rr->caa.value, rr->caa.value_size, NULL);
break;
case DNS_TYPE_OPT:
case DNS_TYPE_OPENPGPKEY:
case _DNS_TYPE_INVALID: /* unparseable */
default:
/* Types we cannot (or need not) parse are copied verbatim. */
r = dns_packet_append_blob(p, rr->generic.data, rr->generic.data_size, NULL);
break;
}
if (r < 0)
goto fail;
/* Let's calculate the actual data size and update the field */
rdlength = p->size - rdlength_offset - sizeof(uint16_t);
if (rdlength > 0xFFFF) {
r = -ENOSPC;
goto fail;
}
/* Seek back to the placeholder, patch in the real RDLENGTH, then
 * restore the packet's end position. */
end = p->size;
p->size = rdlength_offset;
r = dns_packet_append_uint16(p, rdlength, NULL);
if (r < 0)
goto fail;
p->size = end;
if (start)
*start = saved_size;
if (rdata_start)
*rdata_start = rds;
return 0;
fail:
/* Undo everything appended by this call. */
dns_packet_truncate(p, saved_size);
return r;
}
| 0 |
[
"CWE-20",
"CWE-476"
] |
systemd
|
a924f43f30f9c4acaf70618dd2a055f8b0f166be
| 277,024,461,159,139,470,000,000,000,000,000,000,000 | 346 |
resolved: bugfix of null pointer p->question dereferencing (#6020)
See https://bugs.launchpad.net/ubuntu/+source/systemd/+bug/1621396
|
/*
 * Insert or update a sampler record for the given v9 exporter.
 *
 * fs       - flow source the sampler info record is flushed to.
 * exporter - exporter owning the singly linked sampler chain.
 * id       - sampler id reported by the exporter.
 * mode     - sampling mode.
 * interval - sampling interval.
 *
 * If a sampler with the same id already exists it is updated in place
 * (and re-flushed only when mode/interval actually changed); otherwise a
 * new record is appended to the end of the chain.  malloc() failures are
 * logged and the update is silently dropped.
 */
static void InsertSampler( FlowSource_t *fs, exporter_v9_domain_t *exporter, int32_t id, uint16_t mode, uint32_t interval) {
generic_sampler_t *sampler;
dbg_printf("[%u] Insert Sampler: Exporter is 0x%llu\n", exporter->info.id, (long long unsigned)exporter);
if ( !exporter->sampler ) {
// no samplers so far
sampler = (generic_sampler_t *)malloc(sizeof(generic_sampler_t));
if ( !sampler ) {
LogError( "Process_v9: Panic! malloc(): %s line %d: %s", __FILE__, __LINE__, strerror (errno));
return;
}
sampler->info.header.type = SamplerInfoRecordype;
sampler->info.header.size = sizeof(sampler_info_record_t);
sampler->info.exporter_sysid = exporter->info.sysid;
sampler->info.id = id;
sampler->info.mode = mode;
sampler->info.interval = interval;
sampler->next = NULL;
exporter->sampler = sampler;
FlushInfoSampler(fs, &(sampler->info));
LogInfo( "Add new sampler: ID: %i, mode: %u, interval: %u\n",
id, mode, interval);
dbg_printf("Add new sampler: ID: %i, mode: %u, interval: %u\n",
id, mode, interval);
} else {
/* Walk the existing chain looking for a matching id. */
sampler = exporter->sampler;
while ( sampler ) {
// test for update of existing sampler
if ( sampler->info.id == id ) {
// found same sampler id - update record
LogInfo( "Update existing sampler id: %i, mode: %u, interval: %u\n",
id, mode, interval);
dbg_printf("Update existing sampler id: %i, mode: %u, interval: %u\n",
id, mode, interval);
// we update only on changes
if ( mode != sampler->info.mode || interval != sampler->info.interval ) {
FlushInfoSampler(fs, &(sampler->info));
sampler->info.mode = mode;
sampler->info.interval = interval;
} else {
dbg_printf("Sampler unchanged!\n");
}
break;
}
// test for end of chain
if ( sampler->next == NULL ) {
// end of sampler chain - insert new sampler
sampler->next = (generic_sampler_t *)malloc(sizeof(generic_sampler_t));
if ( !sampler->next ) {
LogError( "Process_v9: Panic! malloc(): %s line %d: %s", __FILE__, __LINE__, strerror (errno));
return;
}
sampler = sampler->next;
sampler->info.header.type = SamplerInfoRecordype;
sampler->info.header.size = sizeof(sampler_info_record_t);
sampler->info.exporter_sysid = exporter->info.sysid;
sampler->info.id = id;
sampler->info.mode = mode;
sampler->info.interval = interval;
sampler->next = NULL;
FlushInfoSampler(fs, &(sampler->info));
LogInfo( "Append new sampler: ID: %u, mode: %u, interval: %u\n",
id, mode, interval);
dbg_printf("Append new sampler: ID: %u, mode: %u, interval: %u\n",
id, mode, interval);
break;
}
// advance
sampler = sampler->next;
}
}
} // End of InsertSampler
| 0 |
[] |
nfdump
|
ff0e855bd1f51bed9fc5d8559c64d3cfb475a5d8
| 269,076,380,483,709,600,000,000,000,000,000,000,000 | 85 |
Fix security issues in netflow_v9.c and ipfix.c
|
/*
 * __sk_mem_schedule - charge protocol memory accounting for an
 * allocation of @size bytes on socket @sk.
 *
 * @sk:   socket being charged.
 * @size: requested allocation size in bytes.
 * @kind: SK_MEM_RECV or SK_MEM_SEND, selecting which minimum-buffer
 *        guarantee applies while under memory pressure.
 *
 * Returns 1 if the charge is accepted, 0 if it must be refused — in the
 * latter case all accounting changes made here are undone before return.
 */
int __sk_mem_schedule(struct sock *sk, int size, int kind)
{
struct proto *prot = sk->sk_prot;
int amt = sk_mem_pages(size);
long allocated;
/* Optimistically charge; rolled back under suppress_allocation. */
sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
allocated = sk_memory_allocated_add(sk, amt);
if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
!mem_cgroup_charge_skmem(sk->sk_memcg, amt))
goto suppress_allocation;
/* Under limit. */
if (allocated <= sk_prot_mem_limits(sk, 0)) {
sk_leave_memory_pressure(sk);
return 1;
}
/* Under pressure. */
if (allocated > sk_prot_mem_limits(sk, 1))
sk_enter_memory_pressure(sk);
/* Over hard limit. */
if (allocated > sk_prot_mem_limits(sk, 2))
goto suppress_allocation;
/* guarantee minimum buffer size under pressure */
if (kind == SK_MEM_RECV) {
if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
return 1;
} else { /* SK_MEM_SEND */
if (sk->sk_type == SOCK_STREAM) {
if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
return 1;
} else if (atomic_read(&sk->sk_wmem_alloc) <
prot->sysctl_wmem[0])
return 1;
}
if (sk_has_memory_pressure(sk)) {
int alloc;
if (!sk_under_memory_pressure(sk))
return 1;
/* Allow the charge while aggregate usage per socket still fits
 * within the hard limit spread over all sockets. */
alloc = sk_sockets_allocated_read_positive(sk);
if (sk_prot_mem_limits(sk, 2) > alloc *
sk_mem_pages(sk->sk_wmem_queued +
atomic_read(&sk->sk_rmem_alloc) +
sk->sk_forward_alloc))
return 1;
}
suppress_allocation:
if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
sk_stream_moderate_sndbuf(sk);
/* Fail only if socket is _under_ its sndbuf.
 * In this case we cannot block, so that we have to fail.
 */
if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
return 1;
}
trace_sock_exceed_buf_limit(sk, prot, allocated);
/* Alas. Undo changes. */
sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
sk_memory_allocated_sub(sk, amt);
if (mem_cgroup_sockets_enabled && sk->sk_memcg)
mem_cgroup_uncharge_skmem(sk->sk_memcg, amt);
return 0;
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
linux
|
b98b0bc8c431e3ceb4b26b0dfc8db509518fb290
| 155,640,908,612,088,700,000,000,000,000,000,000,000 | 79 |
net: avoid signed overflows for SO_{SND|RCV}BUFFORCE
CAP_NET_ADMIN users should not be allowed to set negative
sk_sndbuf or sk_rcvbuf values, as it can lead to various memory
corruptions, crashes, OOM...
Note that before commit 82981930125a ("net: cleanups in
sock_setsockopt()"), the bug was even more serious, since SO_SNDBUF
and SO_RCVBUF were vulnerable.
This needs to be backported to all known linux kernels.
Again, many thanks to syzkaller team for discovering this gem.
Signed-off-by: Eric Dumazet <[email protected]>
Reported-by: Andrey Konovalov <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
nm_gconf_get_bytearray_helper (GConfClient *client,
const char *path,
const char *key,
const char *setting,
GByteArray **value)
{
char *gc_key;
GConfValue *gc_value;
GByteArray *array;
gboolean success = FALSE;
g_return_val_if_fail (key != NULL, FALSE);
g_return_val_if_fail (setting != NULL, FALSE);
g_return_val_if_fail (value != NULL, FALSE);
gc_key = g_strdup_printf ("%s/%s/%s", path, setting, key);
if (!(gc_value = gconf_client_get (client, gc_key, NULL)))
goto out;
if (gc_value->type == GCONF_VALUE_LIST
&& gconf_value_get_list_type (gc_value) == GCONF_VALUE_INT)
{
GSList *elt;
array = g_byte_array_new ();
for (elt = gconf_value_get_list (gc_value); elt != NULL; elt = g_slist_next (elt))
{
int i = gconf_value_get_int ((GConfValue *) elt->data);
unsigned char val = (unsigned char) (i & 0xFF);
if (i < 0 || i > 255) {
g_log (G_LOG_DOMAIN, G_LOG_LEVEL_WARNING,
"value %d out-of-range for a byte value", i);
g_byte_array_free (array, TRUE);
goto out;
}
g_byte_array_append (array, (const unsigned char *) &val, sizeof (val));
}
*value = array;
success = TRUE;
}
out:
if (gc_value)
gconf_value_free (gc_value);
g_free (gc_key);
return success;
}
| 0 |
[
"CWE-310"
] |
network-manager-applet
|
4020594dfbf566f1852f0acb36ad631a9e73a82b
| 88,224,582,649,413,790,000,000,000,000,000,000,000 | 50 |
core: fix CA cert mishandling after cert file deletion (deb #560067) (rh #546793)
If a connection was created with a CA certificate, but the user later
moved or deleted that CA certificate, the applet would simply provide the
connection to NetworkManager without any CA certificate. This could cause
NM to connect to the original network (or a network spoofing the original
network) without verifying the identity of the network as the user
expects.
In the future we can/should do better here by (1) alerting the user that
some connection is now no longer complete by flagging it in the connection
editor or notifying the user somehow, and (2) by using a freaking' cert
store already (not that Linux has one yet).
|
/*
 * Import one public keyblock into the key database.
 *
 * ctrl      - session control context.
 * fname     - name of the input file; used for diagnostics only.
 * keyblock  - the parsed keyblock to import.
 * stats     - import statistics; updated in place.
 * fpr/fpr_len - if FPR is not NULL, *FPR receives a newly allocated
 *             fingerprint of the key, but only when this is the first
 *             key of the import run (stats->count == 0); otherwise
 *             *FPR is set to NULL.  The old *FPR is freed.
 * options   - IMPORT_* bit flags controlling cleaning, merging, etc.
 * from_sk   - true if this public key was derived from a secret key;
 *             suppresses the preference check against our own key.
 *
 * Returns 0 or an error code.  Note that a skipped, rejected or
 * unchanged key also yields 0; the details are recorded in STATS.
 */
import_one (ctrl_t ctrl,
            const char *fname, KBNODE keyblock, struct stats_s *stats,
            unsigned char **fpr,size_t *fpr_len,unsigned int options,
            int from_sk )
{
    PKT_public_key *pk;
    PKT_public_key *pk_orig;
    KBNODE node, uidnode;
    KBNODE keyblock_orig = NULL;
    u32 keyid[2];
    int rc = 0;
    int new_key = 0;    /* set when the key was inserted as a new key */
    int mod_key = 0;    /* set when an existing key was modified */
    int same_key = 0;   /* set when the key was found unchanged */
    int non_self = 0;   /* set when non-self-signatures were seen */

    /* get the key and print some info about it */
    node = find_kbnode( keyblock, PKT_PUBLIC_KEY );
    if( !node )
	BUG();

    pk = node->pkt->pkt.public_key;

    keyid_from_pk( pk, keyid );
    uidnode = find_next_kbnode( keyblock, PKT_USER_ID );

    if( opt.verbose && !opt.interactive )
      {
	log_info( "pub %4u%c/%s %s ",
		  nbits_from_pk( pk ),
		  pubkey_letter( pk->pubkey_algo ),
		  keystr_from_pk(pk), datestr_from_pk(pk) );
	if (uidnode)
	  print_utf8_buffer (log_get_stream (),
			     uidnode->pkt->pkt.user_id->name,
			     uidnode->pkt->pkt.user_id->len );
	log_printf ("\n");
      }

    /* A key without any user ID cannot be used; reject it early. */
    if( !uidnode )
      {
	log_error( _("key %s: no user ID\n"), keystr_from_pk(pk));
	return 0;
      }

    /* In interactive mode show the key and ask the user for
       confirmation before doing anything with it.  */
    if (opt.interactive) {
	if(is_status_enabled())
	  print_import_check (pk, uidnode->pkt->pkt.user_id);
	merge_keys_and_selfsig (keyblock);
	tty_printf ("\n");
	show_basic_key_info (keyblock);
	tty_printf ("\n");
	if (!cpr_get_answer_is_yes ("import.okay",
				    "Do you want to import this key? (y/N) "))
	  return 0;
    }

    collapse_uids(&keyblock);

    /* Clean the key that we're about to import, to cut down on things
       that we have to clean later.  This has no practical impact on
       the end result, but does result in less logging which might
       confuse the user. */
    if(options&IMPORT_CLEAN)
      clean_key(keyblock,opt.verbose,options&IMPORT_MINIMAL,NULL,NULL);

    clear_kbnode_flags( keyblock );

    if((options&IMPORT_REPAIR_PKS_SUBKEY_BUG) && fix_pks_corruption(keyblock)
       && opt.verbose)
      log_info(_("key %s: PKS subkey corruption repaired\n"),
	       keystr_from_pk(pk));

    /* Verify the self-signatures; -1 means "nothing usable" which is
       mapped to a plain skip (return 0). */
    rc = chk_self_sigs( fname, keyblock , pk, keyid, &non_self );
    if( rc )
	return rc== -1? 0:rc;

    /* If we allow such a thing, mark unsigned uids as valid */
    if( opt.allow_non_selfsigned_uid )
      for( node=keyblock; node; node = node->next )
	if( node->pkt->pkttype == PKT_USER_ID && !(node->flag & 1) )
	  {
	    char *user=utf8_to_native(node->pkt->pkt.user_id->name,
				      node->pkt->pkt.user_id->len,0);
	    node->flag |= 1;
	    log_info( _("key %s: accepted non self-signed user ID \"%s\"\n"),
		      keystr_from_pk(pk),user);
	    xfree(user);
	  }

    if( !delete_inv_parts( fname, keyblock, keyid, options ) ) {
	log_error( _("key %s: no valid user IDs\n"), keystr_from_pk(pk));
	if( !opt.quiet )
	  log_info(_("this may be caused by a missing self-signature\n"));
	stats->no_user_id++;
	return 0;
    }

    /* do we have this key already in one of our pubrings ? */
    pk_orig = xmalloc_clear( sizeof *pk_orig );
    rc = get_pubkey_fast ( pk_orig, keyid );
    if( rc && rc != G10ERR_NO_PUBKEY && rc != G10ERR_UNU_PUBKEY )
      {
	log_error( _("key %s: public key not found: %s\n"),
		   keystr(keyid), g10_errstr(rc));
      }
    else if ( rc && (opt.import_options&IMPORT_MERGE_ONLY) )
      {
	/* Key is new but we were asked to only merge into existing
	   keys; count and skip it.  */
	if( opt.verbose )
	  log_info( _("key %s: new key - skipped\n"), keystr(keyid));
	rc = 0;
	stats->skipped_new_keys++;
      }
    else if( rc ) { /* insert this key */
	KEYDB_HANDLE hd = keydb_new ();

	rc = keydb_locate_writable (hd, NULL);
	if (rc) {
	    log_error (_("no writable keyring found: %s\n"), g10_errstr (rc));
	    keydb_release (hd);
	    return G10ERR_GENERAL;
	}
	if( opt.verbose > 1 )
	  log_info (_("writing to '%s'\n"), keydb_get_resource_name (hd) );

	rc = keydb_insert_keyblock (hd, keyblock );
	if (rc)
	  log_error (_("error writing keyring '%s': %s\n"),
		     keydb_get_resource_name (hd), g10_errstr(rc));
	else
	  {
	    /* This should not be possible since we delete the
	       ownertrust when a key is deleted, but it can happen if
	       the keyring and trustdb are out of sync.  It can also
	       be made to happen with the trusted-key command. */
	    clear_ownertrusts (pk);
	    if(non_self)
	      revalidation_mark ();
	  }
	keydb_release (hd);

	/* we are ready */
	if( !opt.quiet )
	  {
	    char *p=get_user_id_native (keyid);
	    log_info( _("key %s: public key \"%s\" imported\n"),
		      keystr(keyid),p);
	    xfree(p);
	  }
	if( is_status_enabled() )
	  {
	    char *us = get_long_user_id_string( keyid );
	    write_status_text( STATUS_IMPORTED, us );
	    xfree(us);
	    print_import_ok (pk, 1);
	  }
	stats->imported++;
	if( is_RSA( pk->pubkey_algo ) )
	  stats->imported_rsa++;
	new_key = 1;
    }
    else { /* merge */
	KEYDB_HANDLE hd;
	int n_uids, n_sigs, n_subk, n_sigs_cleaned, n_uids_cleaned;

	/* Compare the original against the new key; just to be sure nothing
	 * weird is going on */
	if( cmp_public_keys( pk_orig, pk ) )
	  {
	    log_error( _("key %s: doesn't match our copy\n"),keystr(keyid));
	    goto leave;
	  }

	/* now read the original keyblock */
	hd = keydb_new ();
	{
	    byte afp[MAX_FINGERPRINT_LEN];
	    size_t an;

	    fingerprint_from_pk (pk_orig, afp, &an);
	    /* Zero-pad the fingerprint to the fixed lookup width.  */
	    while (an < MAX_FINGERPRINT_LEN)
	      afp[an++] = 0;
	    rc = keydb_search_fpr (hd, afp);
	}
	if( rc )
	  {
	    log_error (_("key %s: can't locate original keyblock: %s\n"),
		       keystr(keyid), g10_errstr(rc));
	    keydb_release (hd);
	    goto leave;
	  }
	rc = keydb_get_keyblock (hd, &keyblock_orig );
	if (rc)
	  {
	    log_error (_("key %s: can't read original keyblock: %s\n"),
		       keystr(keyid), g10_errstr(rc));
	    keydb_release (hd);
	    goto leave;
	  }

	/* Make sure the original direct key sigs are all sane.  */
	n_sigs_cleaned = fix_bad_direct_key_sigs (keyblock_orig, keyid);
	if (n_sigs_cleaned)
	  commit_kbnode (&keyblock_orig);

	/* and try to merge the block */
	clear_kbnode_flags( keyblock_orig );
	clear_kbnode_flags( keyblock );
	n_uids = n_sigs = n_subk = n_uids_cleaned = 0;
	rc = merge_blocks( fname, keyblock_orig, keyblock,
			   keyid, &n_uids, &n_sigs, &n_subk );
	if( rc )
	  {
	    keydb_release (hd);
	    goto leave;
	  }

	if(options&IMPORT_CLEAN)
	  clean_key(keyblock_orig,opt.verbose,options&IMPORT_MINIMAL,
		    &n_uids_cleaned,&n_sigs_cleaned);

	/* Only write the keyblock back when the merge or the cleaning
	   actually changed something.  */
	if( n_uids || n_sigs || n_subk || n_sigs_cleaned || n_uids_cleaned) {
	    mod_key = 1;
	    /* keyblock_orig has been updated; write */
	    rc = keydb_update_keyblock (hd, keyblock_orig);
	    if (rc)
	      log_error (_("error writing keyring '%s': %s\n"),
			 keydb_get_resource_name (hd), g10_errstr(rc) );
	    else if(non_self)
	      revalidation_mark ();

	    /* we are ready */
	    if( !opt.quiet )
	      {
		char *p=get_user_id_native(keyid);
		if( n_uids == 1 )
		  log_info( _("key %s: \"%s\" 1 new user ID\n"),
			    keystr(keyid),p);
		else if( n_uids )
		  log_info( _("key %s: \"%s\" %d new user IDs\n"),
			    keystr(keyid),p,n_uids);
		if( n_sigs == 1 )
		  log_info( _("key %s: \"%s\" 1 new signature\n"),
			    keystr(keyid), p);
		else if( n_sigs )
		  log_info( _("key %s: \"%s\" %d new signatures\n"),
			    keystr(keyid), p, n_sigs );
		if( n_subk == 1 )
		  log_info( _("key %s: \"%s\" 1 new subkey\n"),
			    keystr(keyid), p);
		else if( n_subk )
		  log_info( _("key %s: \"%s\" %d new subkeys\n"),
			    keystr(keyid), p, n_subk );
		if(n_sigs_cleaned==1)
		  log_info(_("key %s: \"%s\" %d signature cleaned\n"),
			   keystr(keyid),p,n_sigs_cleaned);
		else if(n_sigs_cleaned)
		  log_info(_("key %s: \"%s\" %d signatures cleaned\n"),
			   keystr(keyid),p,n_sigs_cleaned);
		if(n_uids_cleaned==1)
		  log_info(_("key %s: \"%s\" %d user ID cleaned\n"),
			   keystr(keyid),p,n_uids_cleaned);
		else if(n_uids_cleaned)
		  log_info(_("key %s: \"%s\" %d user IDs cleaned\n"),
			   keystr(keyid),p,n_uids_cleaned);
		xfree(p);
	      }

	    stats->n_uids +=n_uids;
	    stats->n_sigs +=n_sigs;
	    stats->n_subk +=n_subk;
	    stats->n_sigs_cleaned +=n_sigs_cleaned;
	    stats->n_uids_cleaned +=n_uids_cleaned;

	    if (is_status_enabled ())
	      print_import_ok (pk, ((n_uids?2:0)|(n_sigs?4:0)|(n_subk?8:0)));
	}
	else
	  {
	    same_key = 1;
	    if (is_status_enabled ())
	      print_import_ok (pk, 0);

	    if( !opt.quiet )
	      {
		char *p=get_user_id_native(keyid);
		log_info( _("key %s: \"%s\" not changed\n"),keystr(keyid),p);
		xfree(p);
	      }
	    stats->unchanged++;
	  }
	keydb_release (hd); hd = NULL;
    }

  leave:
    if (mod_key || new_key || same_key)
      {
	/* A little explanation for this: we fill in the fingerprint
	   when importing keys as it can be useful to know the
	   fingerprint in certain keyserver-related cases (a keyserver
	   asked for a particular name, but the key doesn't have that
	   name).  However, in cases where we're importing more than
	   one key at a time, we cannot know which key to fingerprint.
	   In these cases, rather than guessing, we do not
	   fingerprinting at all, and we must hope the user ID on the
	   keys are useful.  Note that we need to do this for new
	   keys, merged keys and even for unchanged keys.  This is
	   required because for example the --auto-key-locate feature
	   may import an already imported key and needs to know the
	   fingerprint of the key in all cases.  */
	if (fpr)
	  {
	    xfree (*fpr);
	    /* Note that we need to compare against 0 here because
	       COUNT gets only incremented after returning form this
	       function.  */
	    if (stats->count == 0)
	      *fpr = fingerprint_from_pk (pk, NULL, fpr_len);
	    else
	      *fpr = NULL;
	  }
      }

    /* Now that the key is definitely incorporated into the keydb, we
       need to check if a designated revocation is present or if the
       prefs are not rational so we can warn the user. */
    if (mod_key)
      {
	revocation_present (ctrl, keyblock_orig);
	if (!from_sk && have_secret_key_with_kid (keyid))
	  check_prefs (ctrl, keyblock_orig);
      }
    else if (new_key)
      {
	revocation_present (ctrl, keyblock);
	if (!from_sk && have_secret_key_with_kid (keyid))
	  check_prefs (ctrl, keyblock);
      }

    release_kbnode( keyblock_orig );
    free_public_key( pk_orig );

    return rc;
}
| 0 |
[
"CWE-20"
] |
gnupg
|
f0b33b6fb8e0586e9584a7a409dcc31263776a67
| 317,172,042,889,104,900,000,000,000,000,000,000,000 | 349 |
gpg: Import only packets which are allowed in a keyblock.
* g10/import.c (valid_keyblock_packet): New.
(read_block): Store only valid packets.
--
A corrupted key, which for example included a mangled public key
encrypted packet, used to corrupt the keyring. This change skips all
packets which are not allowed in a keyblock.
GnuPG-bug-id: 1455
(cherry-picked from commit f795a0d59e197455f8723c300eebf59e09853efa)
|
/*
 * Answer a Device Status Report query with the "terminal OK"
 * response sequence (ESC [ 0 n).
 */
static inline void status_report(struct tty_struct *tty)
{
	static const char dsr_ok[] = "\033[0n";

	respond_string(dsr_ok, strlen(dsr_ok), tty->port);
}
| 0 |
[
"CWE-125"
] |
linux
|
3c4e0dff2095c579b142d5a0693257f1c58b4804
| 162,044,434,989,527,990,000,000,000,000,000,000,000 | 6 |
vt: Disable KD_FONT_OP_COPY
It's buggy:
On Fri, Nov 06, 2020 at 10:30:08PM +0800, Minh Yuan wrote:
> We recently discovered a slab-out-of-bounds read in fbcon in the latest
> kernel ( v5.10-rc2 for now ). The root cause of this vulnerability is that
> "fbcon_do_set_font" did not handle "vc->vc_font.data" and
> "vc->vc_font.height" correctly, and the patch
> <https://lkml.org/lkml/2020/9/27/223> for VT_RESIZEX can't handle this
> issue.
>
> Specifically, we use KD_FONT_OP_SET to set a small font.data for tty6, and
> use KD_FONT_OP_SET again to set a large font.height for tty1. After that,
> we use KD_FONT_OP_COPY to assign tty6's vc_font.data to tty1's vc_font.data
> in "fbcon_do_set_font", while tty1 retains the original larger
> height. Obviously, this will cause an out-of-bounds read, because we can
> access a smaller vc_font.data with a larger vc_font.height.
Further there was only one user ever.
- Android's loadfont, busybox and console-tools only ever use OP_GET
and OP_SET
- fbset documentation only mentions the kernel cmdline font: option,
not anything else.
- systemd used OP_COPY before release 232 published in Nov 2016
Now unfortunately the crucial report seems to have gone down with
gmane, and the commit message doesn't say much. But the pull request
hints at OP_COPY being broken
https://github.com/systemd/systemd/pull/3651
So in other words, this never worked, and the only project which
foolishly every tried to use it, realized that rather quickly too.
Instead of trying to fix security issues here on dead code by adding
missing checks, fix the entire thing by removing the functionality.
Note that systemd code using the OP_COPY function ignored the return
value, so it doesn't matter what we're doing here really - just in
case a lone server somewhere happens to be extremely unlucky and
running an affected old version of systemd. The relevant code from
font_copy_to_all_vcs() in systemd was:
/* copy font from active VT, where the font was uploaded to */
cfo.op = KD_FONT_OP_COPY;
cfo.height = vcs.v_active-1; /* tty1 == index 0 */
(void) ioctl(vcfd, KDFONTOP, &cfo);
Note this just disables the ioctl, garbage collecting the now unused
callbacks is left for -next.
v2: Tetsuo found the old mail, which allowed me to find it on another
archive. Add the link too.
Acked-by: Peilin Ye <[email protected]>
Reported-by: Minh Yuan <[email protected]>
References: https://lists.freedesktop.org/archives/systemd-devel/2016-June/036935.html
References: https://github.com/systemd/systemd/pull/3651
Cc: Greg KH <[email protected]>
Cc: Peilin Ye <[email protected]>
Cc: Tetsuo Handa <[email protected]>
Signed-off-by: Daniel Vetter <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Greg Kroah-Hartman <[email protected]>
|
BSONObj spec() {
    // Expression under test: {$or: [false, false, true]}.
    BSONObj expression = BSON("$or" << BSON_ARRAY(false << false << true));
    return expression;
}
| 0 |
[
"CWE-835"
] |
mongo
|
0a076417d1d7fba3632b73349a1fd29a83e68816
| 147,917,278,351,366,200,000,000,000,000,000,000,000 | 3 |
SERVER-38070 fix infinite loop in agg expression
|
static struct page *alloc_fresh_gigantic_page_node(struct hstate *h, int nid)
{
struct page *page;
page = alloc_gigantic_page(nid, h);
if (page) {
prep_compound_gigantic_page(page, huge_page_order(h));
prep_new_huge_page(h, page, nid);
}
return page;
}
| 0 |
[
"CWE-362",
"CWE-119"
] |
linux
|
1e3921471354244f70fe268586ff94a97a6dd4df
| 336,583,989,864,425,260,000,000,000,000,000,000,000 | 12 |
userfaultfd: hugetlbfs: prevent UFFDIO_COPY to fill beyond the end of i_size
This oops:
kernel BUG at fs/hugetlbfs/inode.c:484!
RIP: remove_inode_hugepages+0x3d0/0x410
Call Trace:
hugetlbfs_setattr+0xd9/0x130
notify_change+0x292/0x410
do_truncate+0x65/0xa0
do_sys_ftruncate.constprop.3+0x11a/0x180
SyS_ftruncate+0xe/0x10
tracesys+0xd9/0xde
was caused by the lack of i_size check in hugetlb_mcopy_atomic_pte.
mmap() can still succeed beyond the end of the i_size after vmtruncate
zapped vmas in those ranges, but the faults must not succeed, and that
includes UFFDIO_COPY.
We could differentiate the retval to userland to represent a SIGBUS like
a page fault would do (vs SIGSEGV), but it doesn't seem very useful and
we'd need to pick a random retval as there's no meaningful syscall
retval that would differentiate from SIGSEGV and SIGBUS, there's just
-EFAULT.
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Andrea Arcangeli <[email protected]>
Reviewed-by: Mike Kravetz <[email protected]>
Cc: Mike Rapoport <[email protected]>
Cc: "Dr. David Alan Gilbert" <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
/*
 * Greedy heuristic search for a near-optimal join order.
 *
 * join                 - the join whose plan is being built; on success
 *                        join->best_positions holds the chosen plan
 * remaining_tables     - bitmap of tables still to be placed in the plan
 * search_depth         - lookahead depth for each exhaustive extension step
 * prune_level          - pruning aggressiveness passed through to
 *                        best_extension_by_limited_search
 * use_cond_selectivity - condition-selectivity mode passed through
 *
 * Repeatedly runs a depth-limited exhaustive search and commits the
 * first table of the best extension found, until fewer than
 * search_depth tables remain (at which point the exhaustive result is
 * the complete plan).
 *
 * Returns FALSE on success, TRUE on error (propagated from
 * best_extension_by_limited_search).
 */
greedy_search(JOIN *join,
              table_map remaining_tables,
              uint search_depth,
              uint prune_level,
              uint use_cond_selectivity)
{
  double record_count= 1.0;
  double read_time= 0.0;
  uint idx= join->const_tables; // index into 'join->best_ref'
  uint best_idx;
  uint size_remain;    // cardinality of remaining_tables
  POSITION best_pos;
  JOIN_TAB *best_table; // the next plan node to be added to the curr QEP
  // ==join->tables or # tables in the sj-mat nest we're optimizing
  uint n_tables __attribute__((unused));
  DBUG_ENTER("greedy_search");

  /* number of tables that remain to be optimized */
  n_tables= size_remain= my_count_bits(remaining_tables &
                                       (join->emb_sjm_nest?
                                         (join->emb_sjm_nest->sj_inner_tables &
                                          ~join->const_table_map)
                                         :
                                         ~(table_map)0));

  do {
    /* Find the extension of the current QEP with the lowest cost */
    join->best_read= DBL_MAX;
    if (best_extension_by_limited_search(join, remaining_tables, idx, record_count,
                                         read_time, search_depth, prune_level,
                                         use_cond_selectivity))
      DBUG_RETURN(TRUE);
    /*
      'best_read < DBL_MAX' means that optimizer managed to find
      some plan and updated 'best_positions' array accordingly.
    */
    DBUG_ASSERT(join->best_read < DBL_MAX);

    if (size_remain <= search_depth)
    {
      /*
        'join->best_positions' contains a complete optimal extension of the
        current partial QEP.
      */
      DBUG_EXECUTE("opt", print_plan(join, n_tables,
                                     record_count, read_time, read_time,
                                     "optimal"););
      DBUG_RETURN(FALSE);
    }

    /* select the first table in the optimal extension as most promising */
    best_pos= join->best_positions[idx];
    best_table= best_pos.table;
    /*
      Each subsequent loop of 'best_extension_by_limited_search' uses
      'join->positions' for cost estimates, therefore we have to update its
      value.
    */
    join->positions[idx]= best_pos;

    /*
      Update the interleaving state after extending the current partial plan
      with a new table.
      We are doing this here because best_extension_by_limited_search reverts
      the interleaving state to the one of the non-extended partial plan
      on exit.
    */
    bool is_interleave_error __attribute__((unused))=
      check_interleaving_with_nj (best_table);
    /* This has been already checked by best_extension_by_limited_search */
    DBUG_ASSERT(!is_interleave_error);

    /* find the position of 'best_table' in 'join->best_ref' */
    best_idx= idx;
    JOIN_TAB *pos= join->best_ref[best_idx];
    while (pos && best_table != pos)
      pos= join->best_ref[++best_idx];
    DBUG_ASSERT((pos != NULL)); // should always find 'best_table'
    /* move 'best_table' at the first free position in the array of joins */
    swap_variables(JOIN_TAB*, join->best_ref[idx], join->best_ref[best_idx]);

    /* compute the cost of the new plan extended with 'best_table' */
    record_count= COST_MULT(record_count, join->positions[idx].records_read);
    read_time= COST_ADD(read_time,
                        COST_ADD(join->positions[idx].read_time,
                                 record_count / (double) TIME_FOR_COMPARE));

    remaining_tables&= ~(best_table->table->map);
    --size_remain;
    ++idx;

    DBUG_EXECUTE("opt", print_plan(join, idx,
                                   record_count, read_time, read_time,
                                   "extended"););
  } while (TRUE);
}
| 0 |
[] |
server
|
ff77a09bda884fe6bf3917eb29b9d3a2f53f919b
| 74,631,955,048,978,390,000,000,000,000,000,000,000 | 97 |
MDEV-22464 Server crash on UPDATE with nested subquery
Uninitialized ref_pointer_array[] because setup_fields() got empty
fields list. mysql_multi_update() for some reason does that by
substituting the fields list with empty total_list for the
mysql_select() call (looks like wrong merge since total_list is not
used anywhere else and is always empty). The fix would be to return
back the original fields list. But this fails update_use_source.test
case:
--error ER_BAD_FIELD_ERROR
update v1 set t1c1=2 order by 1;
Actually not failing the above seems to be ok.
The other fix would be to keep resolve_in_select_list false (and that
keeps outer context from being resolved in
Item_ref::fix_fields()). This fix is more consistent with how SELECT
behaves:
--error ER_SUBQUERY_NO_1_ROW
select a from t1 where a= (select 2 from t1 having (a = 3));
So this patch implements this fix.
|
/*
 * Convert an incoming NT DACL into two canonical ACE lists:
 * one for the file/access ACL (*ppfile_ace) and, for directories,
 * one for the default/inherited ACL (*ppdir_ace).
 *
 * fsp             - the open file/directory the ACL is being set on
 * pst             - stat info; supplies st_uid/st_gid for Creator
 *                   Owner/Group and *_OBJ classification
 * pfile_owner_sid - SID of the file owner (for *_OBJ fixups)
 * pfile_grp_sid   - SID of the file group (for *_OBJ fixups)
 * ppfile_ace      - out: head of the file ACE list (may be NULL)
 * ppdir_ace       - out: head of the directory ACE list (may be NULL)
 * dacl            - the NT DACL to convert; its entries may be
 *                   modified in place (mask/flag normalization)
 *
 * Returns True on success (the caller owns the returned lists),
 * False on malformed input, allocation failure, or an unmappable SID.
 * ACE ordering is preserved because NT ACLs are order dependent.
 */
static bool create_canon_ace_lists(files_struct *fsp, SMB_STRUCT_STAT *pst,
				   DOM_SID *pfile_owner_sid,
				   DOM_SID *pfile_grp_sid,
				   canon_ace **ppfile_ace, canon_ace **ppdir_ace,
				   SEC_ACL *dacl)
{
	bool all_aces_are_inherit_only = (fsp->is_directory ? True : False);
	canon_ace *file_ace = NULL;
	canon_ace *dir_ace = NULL;
	canon_ace *current_ace = NULL;
	bool got_dir_allow = False;
	bool got_file_allow = False;
	int i, j;

	*ppfile_ace = NULL;
	*ppdir_ace = NULL;

	/*
	 * Convert the incoming ACL into a more regular form.
	 */

	for(i = 0; i < dacl->num_aces; i++) {
		SEC_ACE *psa = &dacl->aces[i];

		/* Only plain ALLOW/DENY ACEs can be mapped to POSIX. */
		if((psa->type != SEC_ACE_TYPE_ACCESS_ALLOWED) && (psa->type != SEC_ACE_TYPE_ACCESS_DENIED)) {
			DEBUG(3,("create_canon_ace_lists: unable to set anything but an ALLOW or DENY ACE.\n"));
			return False;
		}

		if (nt4_compatible_acls()) {
			/*
			 * The security mask may be UNIX_ACCESS_NONE which should map into
			 * no permissions (we overload the WRITE_OWNER bit for this) or it
			 * should be one of the ALL/EXECUTE/READ/WRITE bits. Arrange for this
			 * to be so. Any other bits override the UNIX_ACCESS_NONE bit.
			 */

			/*
			 * Convert GENERIC bits to specific bits.
			 */

			se_map_generic(&psa->access_mask, &file_generic_mapping);

			psa->access_mask &= (UNIX_ACCESS_NONE|FILE_ALL_ACCESS);

			if(psa->access_mask != UNIX_ACCESS_NONE)
				psa->access_mask &= ~UNIX_ACCESS_NONE;
		}
	}

	/*
	 * Deal with the fact that NT 4.x re-writes the canonical format
	 * that we return for default ACLs. If a directory ACE is identical
	 * to a inherited directory ACE then NT changes the bits so that the
	 * first ACE is set to OI|IO and the second ACE for this SID is set
	 * to CI. We need to repair this. JRA.
	 */

	for(i = 0; i < dacl->num_aces; i++) {
		SEC_ACE *psa1 = &dacl->aces[i];

		for (j = i + 1; j < dacl->num_aces; j++) {
			SEC_ACE *psa2 = &dacl->aces[j];

			if (psa1->access_mask != psa2->access_mask)
				continue;

			if (!sid_equal(&psa1->trustee, &psa2->trustee))
				continue;

			/*
			 * Ok - permission bits and SIDs are equal.
			 * Check if flags were re-written.
			 */

			if (psa1->flags & SEC_ACE_FLAG_INHERIT_ONLY) {

				psa1->flags |= (psa2->flags & (SEC_ACE_FLAG_CONTAINER_INHERIT|SEC_ACE_FLAG_OBJECT_INHERIT));
				psa2->flags &= ~(SEC_ACE_FLAG_CONTAINER_INHERIT|SEC_ACE_FLAG_OBJECT_INHERIT);

			} else if (psa2->flags & SEC_ACE_FLAG_INHERIT_ONLY) {

				psa2->flags |= (psa1->flags & (SEC_ACE_FLAG_CONTAINER_INHERIT|SEC_ACE_FLAG_OBJECT_INHERIT));
				psa1->flags &= ~(SEC_ACE_FLAG_CONTAINER_INHERIT|SEC_ACE_FLAG_OBJECT_INHERIT);
			}
		}
	}

	for(i = 0; i < dacl->num_aces; i++) {
		SEC_ACE *psa = &dacl->aces[i];

		/*
		 * Create a cannon_ace entry representing this NT DACL ACE.
		 */

		if ((current_ace = SMB_MALLOC_P(canon_ace)) == NULL) {
			free_canon_ace_list(file_ace);
			free_canon_ace_list(dir_ace);
			DEBUG(0,("create_canon_ace_lists: malloc fail.\n"));
			return False;
		}

		ZERO_STRUCTP(current_ace);

		sid_copy(&current_ace->trustee, &psa->trustee);

		/*
		 * Try and work out if the SID is a user or group
		 * as we need to flag these differently for POSIX.
		 * Note what kind of a POSIX ACL this should map to.
		 */

		if( sid_equal(&current_ace->trustee, &global_sid_World)) {
			current_ace->owner_type = WORLD_ACE;
			current_ace->unix_ug.world = -1;
			current_ace->type = SMB_ACL_OTHER;
		} else if (sid_equal(&current_ace->trustee, &global_sid_Creator_Owner)) {
			current_ace->owner_type = UID_ACE;
			current_ace->unix_ug.uid = pst->st_uid;
			current_ace->type = SMB_ACL_USER_OBJ;

			/*
			 * The Creator Owner entry only specifies inheritable permissions,
			 * never access permissions. WinNT doesn't always set the ACE to
			 * INHERIT_ONLY, though.
			 */

			if (nt4_compatible_acls())
				psa->flags |= SEC_ACE_FLAG_INHERIT_ONLY;
		} else if (sid_equal(&current_ace->trustee, &global_sid_Creator_Group)) {
			current_ace->owner_type = GID_ACE;
			current_ace->unix_ug.gid = pst->st_gid;
			current_ace->type = SMB_ACL_GROUP_OBJ;

			/*
			 * The Creator Group entry only specifies inheritable permissions,
			 * never access permissions. WinNT doesn't always set the ACE to
			 * INHERIT_ONLY, though.
			 */

			if (nt4_compatible_acls())
				psa->flags |= SEC_ACE_FLAG_INHERIT_ONLY;
		} else if (sid_to_uid( &current_ace->trustee, &current_ace->unix_ug.uid)) {
			current_ace->owner_type = UID_ACE;
			/* If it's the owning user, this is a user_obj, not
			 * a user. */
			if (current_ace->unix_ug.uid == pst->st_uid) {
				current_ace->type = SMB_ACL_USER_OBJ;
			} else {
				current_ace->type = SMB_ACL_USER;
			}
		} else if (sid_to_gid( &current_ace->trustee, &current_ace->unix_ug.gid)) {
			current_ace->owner_type = GID_ACE;
			/* If it's the primary group, this is a group_obj, not
			 * a group. */
			if (current_ace->unix_ug.gid == pst->st_gid) {
				current_ace->type = SMB_ACL_GROUP_OBJ;
			} else {
				current_ace->type = SMB_ACL_GROUP;
			}
		} else {
			/*
			 * Silently ignore map failures in non-mappable SIDs (NT Authority, BUILTIN etc).
			 */

			if (non_mappable_sid(&psa->trustee)) {
				DEBUG(10, ("create_canon_ace_lists: ignoring "
					   "non-mappable SID %s\n",
					   sid_string_dbg(&psa->trustee)));
				SAFE_FREE(current_ace);
				continue;
			}

			free_canon_ace_list(file_ace);
			free_canon_ace_list(dir_ace);
			DEBUG(0, ("create_canon_ace_lists: unable to map SID "
				  "%s to uid or gid.\n",
				  sid_string_dbg(&current_ace->trustee)));
			SAFE_FREE(current_ace);
			return False;
		}

		/*
		 * Map the given NT permissions into a UNIX mode_t containing only
		 * S_I(R|W|X)USR bits.
		 */

		current_ace->perms |= map_nt_perms( &psa->access_mask, S_IRUSR);
		current_ace->attr = (psa->type == SEC_ACE_TYPE_ACCESS_ALLOWED) ? ALLOW_ACE : DENY_ACE;
		current_ace->inherited = ((psa->flags & SEC_ACE_FLAG_INHERITED_ACE) ? True : False);

		/*
		 * Now add the created ace to either the file list, the directory
		 * list, or both. We *MUST* preserve the order here (hence we use
		 * DLIST_ADD_END) as NT ACLs are order dependent.
		 */

		if (fsp->is_directory) {

			/*
			 * We can only add to the default POSIX ACE list if the ACE is
			 * designed to be inherited by both files and directories.
			 */

			if ((psa->flags & (SEC_ACE_FLAG_OBJECT_INHERIT|SEC_ACE_FLAG_CONTAINER_INHERIT)) ==
			    (SEC_ACE_FLAG_OBJECT_INHERIT|SEC_ACE_FLAG_CONTAINER_INHERIT)) {

				DLIST_ADD_END(dir_ace, current_ace, canon_ace *);

				/*
				 * Note if this was an allow ace. We can't process
				 * any further deny ace's after this.
				 */

				if (current_ace->attr == ALLOW_ACE)
					got_dir_allow = True;

				if ((current_ace->attr == DENY_ACE) && got_dir_allow) {
					DEBUG(0,("create_canon_ace_lists: malformed ACL in inheritable ACL ! \
Deny entry after Allow entry. Failing to set on file %s.\n", fsp->fsp_name ));
					free_canon_ace_list(file_ace);
					free_canon_ace_list(dir_ace);
					return False;
				}

				if( DEBUGLVL( 10 )) {
					dbgtext("create_canon_ace_lists: adding dir ACL:\n");
					print_canon_ace( current_ace, 0);
				}

				/*
				 * If this is not an inherit only ACE we need to add a duplicate
				 * to the file acl.
				 */

				if (!(psa->flags & SEC_ACE_FLAG_INHERIT_ONLY)) {
					canon_ace *dup_ace = dup_canon_ace(current_ace);

					if (!dup_ace) {
						DEBUG(0,("create_canon_ace_lists: malloc fail !\n"));
						free_canon_ace_list(file_ace);
						free_canon_ace_list(dir_ace);
						return False;
					}

					/*
					 * We must not free current_ace here as its
					 * pointer is now owned by the dir_ace list.
					 */
					current_ace = dup_ace;
				} else {
					/*
					 * We must not free current_ace here as its
					 * pointer is now owned by the dir_ace list.
					 */
					current_ace = NULL;
				}
			}
		}

		/*
		 * Only add to the file ACL if not inherit only.
		 */

		if (current_ace && !(psa->flags & SEC_ACE_FLAG_INHERIT_ONLY)) {
			DLIST_ADD_END(file_ace, current_ace, canon_ace *);

			/*
			 * Note if this was an allow ace. We can't process
			 * any further deny ace's after this.
			 */

			if (current_ace->attr == ALLOW_ACE)
				got_file_allow = True;

			if ((current_ace->attr == DENY_ACE) && got_file_allow) {
				DEBUG(0,("create_canon_ace_lists: malformed ACL in file ACL ! \
Deny entry after Allow entry. Failing to set on file %s.\n", fsp->fsp_name ));
				free_canon_ace_list(file_ace);
				free_canon_ace_list(dir_ace);
				return False;
			}

			if( DEBUGLVL( 10 )) {
				dbgtext("create_canon_ace_lists: adding file ACL:\n");
				print_canon_ace( current_ace, 0);
			}

			all_aces_are_inherit_only = False;

			/*
			 * We must not free current_ace here as its
			 * pointer is now owned by the file_ace list.
			 */
			current_ace = NULL;
		}

		/*
		 * Free if ACE was not added.
		 */

		SAFE_FREE(current_ace);
	}

	if (fsp->is_directory && all_aces_are_inherit_only) {
		/*
		 * Windows 2000 is doing one of these weird 'inherit acl'
		 * traverses to conserve NTFS ACL resources. Just pretend
		 * there was no DACL sent. JRA.
		 */

		DEBUG(10,("create_canon_ace_lists: Win2k inherit acl traverse. Ignoring DACL.\n"));
		free_canon_ace_list(file_ace);
		free_canon_ace_list(dir_ace);
		file_ace = NULL;
		dir_ace = NULL;
	} else {
		/*
		 * Check if we have SMB_ACL_USER_OBJ and SMB_ACL_GROUP_OBJ entries in each
		 * ACL. If we don't have them, check if any SMB_ACL_USER/SMB_ACL_GROUP
		 * entries can be converted to *_OBJ. Usually we will already have these
		 * entries in the Default ACL, and the Access ACL will not have them.
		 */
		if (file_ace) {
			check_owning_objs(file_ace, pfile_owner_sid, pfile_grp_sid);
		}
		if (dir_ace) {
			check_owning_objs(dir_ace, pfile_owner_sid, pfile_grp_sid);
		}
	}

	*ppfile_ace = file_ace;
	*ppdir_ace = dir_ace;

	return True;
}
| 0 |
[
"CWE-264"
] |
samba
|
d6c28913f3109d1327a3d1369b6eafd3874b2dca
| 204,546,760,932,536,100,000,000,000,000,000,000,000 | 335 |
Bug 6488: acl_group_override() call in posix acls references an uninitialized variable.
(cherry picked from commit f92195e3a1baaddda47a5d496f9488c8445b41ad)
|
/*
 * Unreference the function "name" (decrement its reference count).
 * Does nothing for a NULL name or a name that does not carry a
 * reference count.  For a numbered (anonymous) function the function
 * is expected to exist; if not, this is an internal error — except
 * while freeing all memory at exit, where it may already be gone.
 */
func_unref(char_u *name)
{
    ufunc_T *fp = NULL;

    if (name == NULL || !func_name_refcount(name))
	return;
    fp = find_func(name, FALSE, NULL);
    if (fp == NULL && numbered_function(name))
    {
#ifdef EXITFREE
	/* While freeing everything the function may have been freed first. */
	if (!entered_free_all_mem)
#endif
	    internal_error("func_unref()");
    }
    /* NOTE(review): fp may still be NULL here; presumably
     * func_ptr_unref() tolerates a NULL argument — verify. */
    func_ptr_unref(fp);
}
| 0 |
[
"CWE-416"
] |
vim
|
9c23f9bb5fe435b28245ba8ac65aa0ca6b902c04
| 297,241,851,629,683,800,000,000,000,000,000,000,000 | 16 |
patch 8.2.3902: Vim9: double free with nested :def function
Problem: Vim9: double free with nested :def function.
Solution: Pass "line_to_free" from compile_def_function() and make sure
cmdlinep is valid.
|
/*
 * Validate a requested regulator mode against the machine constraints,
 * degrading *mode towards more power hungry (lower valued) modes until
 * a supported one is found.
 *
 * Returns 0 with *mode set to the mode to use, -EINVAL for a bad or
 * unsupportable mode, -ENODEV when no constraints are set, or -EPERM
 * when mode changes are not permitted.
 */
static int regulator_mode_constrain(struct regulator_dev *rdev, int *mode)
{
	/* Reject anything that is not one of the four defined modes. */
	if (*mode != REGULATOR_MODE_FAST &&
	    *mode != REGULATOR_MODE_NORMAL &&
	    *mode != REGULATOR_MODE_IDLE &&
	    *mode != REGULATOR_MODE_STANDBY) {
		rdev_err(rdev, "invalid mode %x specified\n", *mode);
		return -EINVAL;
	}

	if (!rdev->constraints) {
		rdev_err(rdev, "no constraints\n");
		return -ENODEV;
	}

	if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_MODE)) {
		rdev_err(rdev, "operation not allowed\n");
		return -EPERM;
	}

	/*
	 * The modes are bitmasks, the most power hungry modes having the
	 * lowest values.  If the requested mode isn't supported, try the
	 * next lower (hungrier) ones in turn.
	 */
	for (; *mode; *mode >>= 1) {
		if (rdev->constraints->valid_modes_mask & *mode)
			return 0;
	}

	return -EINVAL;
}
| 0 |
[
"CWE-416"
] |
linux
|
60a2362f769cf549dc466134efe71c8bf9fbaaba
| 56,162,298,589,733,400,000,000,000,000,000,000,000 | 33 |
regulator: core: Fix regualtor_ena_gpio_free not to access pin after freeing
After freeing pin from regulator_ena_gpio_free, loop can access
the pin. So this patch fixes not to access pin after freeing.
Signed-off-by: Seung-Woo Kim <[email protected]>
Signed-off-by: Mark Brown <[email protected]>
|
GetQualifiedName(
    TPMI_DH_OBJECT   handle,         // IN: handle of the object
    TPM2B_NAME      *qualifiedName   // OUT: qualified name of the object
    )
{
    OBJECT      *obj;

    switch(HandleGetType(handle))
    {
	case TPM_HT_PERMANENT:
	    // Permanent handles: the name is the handle value itself.
	    qualifiedName->t.size = sizeof(TPM_HANDLE);
	    UINT32_TO_BYTE_ARRAY(handle, qualifiedName->t.name);
	    break;
	case TPM_HT_TRANSIENT:
	    obj = HandleToObject(handle);
	    if(obj != NULL && obj->publicArea.nameAlg != TPM_ALG_NULL)
		// Copy the qualified name stored in the object.
		*qualifiedName = obj->qualifiedName;
	    else
		// No object or no name algorithm: report an empty name.
		qualifiedName->t.size = 0;
	    break;
	default:
	    FAIL(FATAL_ERROR_INTERNAL);
    }
}
| 0 |
[
"CWE-119"
] |
libtpms
|
ea62fd9679f8c6fc5e79471b33cfbd8227bfed72
| 175,592,929,396,997,130,000,000,000,000,000,000,000 | 25 |
tpm2: Initialize a whole OBJECT before using it
Initialize a whole OBJECT before using it. This is necessary since
an OBJECT may also be used as a HASH_OBJECT via the ANY_OBJECT
union and that HASH_OBJECT can leave bad size inidicators in TPM2B
buffer in the OBJECT. To get rid of this problem we reset the whole
OBJECT to 0 before using it. This is as if the memory for the
OBJECT was just initialized.
Signed-off-by: Stefan Berger <[email protected]>
|
/*
 * JROT: Jump Relative On True.
 * Pops an offset (args[0]) and a flag (args[1]); branches by the
 * offset only when the flag is non-zero.
 */
static void Ins_JROT( INS_ARG )
{
  if ( args[1] == 0 )
    return;

  CUR.IP += (Int)(args[0]);
  CUR.step_ins = FALSE;

  /* Same clamp as JMPR: keep IP from running past the code, checking
   * code[] only when IP is in range (short-circuit order matters). */
  if ( CUR.IP > CUR.codeSize ||
       ( CUR.code[CUR.IP] != 0x2D && CUR.code[CUR.IP - 1] == 0x2D ) )
    CUR.IP -= 1;
}
| 0 |
[
"CWE-125"
] |
ghostpdl
|
c7c55972758a93350882c32147801a3485b010fe
| 300,588,853,822,862,780,000,000,000,000,000,000,000 | 13 |
Bug 698024: bounds check zone pointer in Ins_MIRP()
|
/* Extract the CPU revision from the packed CpuRev_BoardRev field. */
static int ti_cpu_rev(struct edge_ti_manuf_descriptor *desc)
{
	int rev = TI_GET_CPU_REVISION(desc->CpuRev_BoardRev);

	return rev;
}
| 0 |
[
"CWE-191"
] |
linux
|
654b404f2a222f918af9b0cd18ad469d0c941a8e
| 236,801,734,292,674,980,000,000,000,000,000,000,000 | 4 |
USB: serial: io_ti: fix information leak in completion handler
Add missing sanity check to the bulk-in completion handler to avoid an
integer underflow that can be triggered by a malicious device.
This avoids leaking 128 kB of memory content from after the URB transfer
buffer to user space.
Fixes: 8c209e6782ca ("USB: make actual_length in struct urb field u32")
Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
Cc: stable <[email protected]> # 2.6.30
Signed-off-by: Johan Hovold <[email protected]>
|
/*
 * Compute the file offset at which the packet for the track's current edit
 * unit ends, resyncing the track's sample counter if the stream position has
 * drifted past it.
 *
 * Returns the next edit unit's offset (> current_offset) on success, or -1
 * on failure / when the track cannot be located in an index table.
 * On resync, recurses exactly once (resync == 0 on the second call).
 */
static int64_t mxf_set_current_edit_unit(MXFContext *mxf, AVStream *st, int64_t current_offset, int resync)
{
    int64_t next_ofs = -1;
    MXFTrack *track = st->priv_data;
    /* current edit unit, derived from the running sample count */
    int64_t edit_unit = av_rescale_q(track->sample_count, st->time_base, av_inv_q(track->edit_rate));
    int64_t new_edit_unit;
    MXFIndexTable *t = mxf_find_index_table(mxf, track->index_sid);

    if (!t || track->wrapping == UnknownWrapped)
        return -1;

    /* Offset of the unit after the current packet; fall back to the end of
     * the essence container for the very last packet. */
    if (mxf_edit_unit_absolute_offset(mxf, t, edit_unit + track->edit_units_per_packet, track->edit_rate, NULL, &next_ofs, NULL, 0) < 0 &&
        (next_ofs = mxf_essence_container_end(mxf, t->body_sid)) <= 0) {
        av_log(mxf->fc, AV_LOG_ERROR, "unable to compute the size of the last packet\n");
        return -1;
    }

    /* check if the next edit unit offset (next_ofs) starts ahead of current_offset */
    if (next_ofs > current_offset)
        return next_ofs;

    if (!resync) {
        av_log(mxf->fc, AV_LOG_ERROR, "cannot find current edit unit for stream %d, invalid index?\n", st->index);
        return -1;
    }

    /* Sync lost: find the edit unit containing current_offset and rebuild
     * the sample counter from it, then retry once with resync disabled. */
    if (mxf_get_next_track_edit_unit(mxf, track, current_offset + 1, &new_edit_unit) < 0 || new_edit_unit <= 0) {
        av_log(mxf->fc, AV_LOG_ERROR, "failed to find next track edit unit in stream %d\n", st->index);
        return -1;
    }

    new_edit_unit--;
    track->sample_count = mxf_compute_sample_count(mxf, st, new_edit_unit);
    av_log(mxf->fc, AV_LOG_WARNING, "edit unit sync lost on stream %d, jumping from %"PRId64" to %"PRId64"\n", st->index, edit_unit, new_edit_unit);

    return mxf_set_current_edit_unit(mxf, st, current_offset, 0);
}
| 0 |
[
"CWE-125"
] |
FFmpeg
|
bab0716c7f4793ec42e05a5aa7e80d82a0dd4e75
| 137,062,788,660,454,150,000,000,000,000,000,000,000 | 37 |
avformat/mxfdec: Fix av_log context
Fixes: out of array access
Fixes: mxf-crash-1c2e59bf07a34675bfb3ada5e1ec22fa9f38f923
Found-by: Paul Ch <[email protected]>
Signed-off-by: Michael Niedermayer <[email protected]>
|
/**
  Execute the subselect through its current engine.

  If the engine is replaced during execution (lazy subquery optimization,
  or the original engine deciding a different engine is more efficient),
  the subquery is re-executed once with the new engine.

  @retval false  success
  @retval true   error, or the query was killed before execution
*/
bool Item_subselect::exec()
{
  /* Remember the engine so we can detect a mid-execution engine swap. */
  subselect_engine *org_engine= engine;

  DBUG_ENTER("Item_subselect::exec");
  DBUG_ASSERT(fixed);
  DBUG_ASSERT(!eliminated);

  /*
    Do not execute subselect in case of a fatal error
    or if the query has been killed.
  */
  if (thd->is_error() || thd->killed)
    DBUG_RETURN(true);

  DBUG_ASSERT(!thd->lex->context_analysis_only);
  /*
    Simulate a failure in sub-query execution. Used to test e.g.
    out of memory or query being killed conditions.
  */
  DBUG_EXECUTE_IF("subselect_exec_fail", DBUG_RETURN(true););

  bool res= engine->exec();

#ifndef DBUG_OFF
  ++exec_counter;
#endif
  if (engine != org_engine)
  {
    /*
      If the subquery engine changed during execution due to lazy subquery
      optimization, or because the original engine found a more efficient other
      engine, re-execute the subquery with the new engine.
    */
    DBUG_RETURN(exec());
  }
  DBUG_RETURN(res);
}
| 0 |
[
"CWE-89"
] |
server
|
5100b20b15edd93200f34a79d25f1b14e46a677e
| 248,122,995,292,450,250,000,000,000,000,000,000,000 | 38 |
MDEV-26047: MariaDB server crash at Item_subselect::init_expr_cache_tracker
The cause of crash:
remove_redundant_subquery_clauses() removes redundant item expressions.
The primary goal of this is to remove the subquery items.
The removal process unlinks the subquery from SELECT_LEX tree, but does
not remove it from SELECT_LEX:::ref_pointer_array or from JOIN::all_fields.
Then, setup_subquery_caches() tries to wrap the subquery item in an
expression cache, which fails, the first reason for failure being that
the item doesn't have a query plan.
Solution: do not wrap eliminated items with expression cache.
(also added an assert to check that we do not attempt to execute them).
This may look like an incomplete fix: why don't we remove any mention
of eliminated item everywhere? The difficulties here are:
* items can be "un-removed" (see set_fake_select_as_master_processor)
* it's difficult to remove an element from ref_pointer_array: Item_ref
objects refer to elements of that array, so one can't shift elements in
it. Replacing eliminated subselect with a dummy Item doesn't look like a
good idea, either.
|
/* Add all code points of character type `ctype` (optionally negated via
 * `not`) to the character class `cc`.
 *
 * Prefers the encoding's own ctype code-range table when available; in
 * ASCII mode the added range is capped at ASCII_LIMIT.  Otherwise falls
 * back to probing each single-byte code point with ONIGENC_IS_CODE_CTYPE.
 * Returns 0 on success or an ONIGERR_* / encoding error code. */
add_ctype_to_cc(CClassNode* cc, int ctype, int not, ScanEnv* env)
{
#define ASCII_LIMIT 127

  int c, r;
  int ascii_mode;
  int is_single;
  const OnigCodePoint *ranges;
  OnigCodePoint limit;
  OnigCodePoint sb_out;
  OnigEncoding enc = env->enc;

  ascii_mode = IS_ASCII_MODE_CTYPE_OPTION(ctype, env->options);

  /* Fast path: encoding supplies a precomputed code-range table. */
  r = ONIGENC_GET_CTYPE_CODE_RANGE(enc, ctype, &sb_out, &ranges);
  if (r == 0) {
    if (ascii_mode == 0)
      r = add_ctype_to_cc_by_range(cc, ctype, not, env->enc, sb_out, ranges);
    else
      r = add_ctype_to_cc_by_range_limit(cc, ctype, not, env->enc, sb_out,
                                         ranges, ASCII_LIMIT);
    return r;
  }
  else if (r != ONIG_NO_SUPPORT_CONFIG) {
    return r;
  }

  /* Fallback: probe single-byte code points one by one. */
  r = 0;
  is_single = ONIGENC_IS_SINGLEBYTE(enc);
  limit = ascii_mode ? ASCII_LIMIT : SINGLE_BYTE_SIZE;

  switch (ctype) {
  case ONIGENC_CTYPE_ALPHA:
  case ONIGENC_CTYPE_BLANK:
  case ONIGENC_CTYPE_CNTRL:
  case ONIGENC_CTYPE_DIGIT:
  case ONIGENC_CTYPE_LOWER:
  case ONIGENC_CTYPE_PUNCT:
  case ONIGENC_CTYPE_SPACE:
  case ONIGENC_CTYPE_UPPER:
  case ONIGENC_CTYPE_XDIGIT:
  case ONIGENC_CTYPE_ASCII:
  case ONIGENC_CTYPE_ALNUM:
    if (not != 0) {
      /* Negated: set every single-byte code that is NOT of this ctype,
       * everything above the limit, and (for multibyte encodings) all
       * multibyte code points. */
      for (c = 0; c < (int )limit; c++) {
        if (is_single != 0 || ONIGENC_CODE_TO_MBCLEN(enc, c) == 1) {
          if (! ONIGENC_IS_CODE_CTYPE(enc, (OnigCodePoint )c, ctype))
            BITSET_SET_BIT(cc->bs, c);
        }
      }
      for (c = limit; c < SINGLE_BYTE_SIZE; c++) {
        if (is_single != 0 || ONIGENC_CODE_TO_MBCLEN(enc, c) == 1)
          BITSET_SET_BIT(cc->bs, c);
      }
      if (is_single == 0)
        ADD_ALL_MULTI_BYTE_RANGE(enc, cc->mbuf);
    }
    else {
      for (c = 0; c < (int )limit; c++) {
        if (is_single != 0 || ONIGENC_CODE_TO_MBCLEN(enc, c) == 1) {
          if (ONIGENC_IS_CODE_CTYPE(enc, (OnigCodePoint )c, ctype))
            BITSET_SET_BIT(cc->bs, c);
        }
      }
    }
    break;

  case ONIGENC_CTYPE_GRAPH:
  case ONIGENC_CTYPE_PRINT:
  case ONIGENC_CTYPE_WORD:
    /* These ctypes can include multibyte code points, hence the extra
     * ADD_ALL_MULTI_BYTE_RANGE handling in both polarities. */
    if (not != 0) {
      for (c = 0; c < (int )limit; c++) {
        /* check invalid code point */
        if ((is_single != 0 || ONIGENC_CODE_TO_MBCLEN(enc, c) == 1)
            && ! ONIGENC_IS_CODE_CTYPE(enc, (OnigCodePoint )c, ctype))
          BITSET_SET_BIT(cc->bs, c);
      }
      for (c = limit; c < SINGLE_BYTE_SIZE; c++) {
        if (is_single != 0 || ONIGENC_CODE_TO_MBCLEN(enc, c) == 1)
          BITSET_SET_BIT(cc->bs, c);
      }
      if (ascii_mode != 0 && is_single == 0)
        ADD_ALL_MULTI_BYTE_RANGE(enc, cc->mbuf);
    }
    else {
      for (c = 0; c < (int )limit; c++) {
        if ((is_single != 0 || ONIGENC_CODE_TO_MBCLEN(enc, c) == 1)
            && ONIGENC_IS_CODE_CTYPE(enc, (OnigCodePoint )c, ctype))
          BITSET_SET_BIT(cc->bs, c);
      }
      if (ascii_mode == 0 && is_single == 0)
        ADD_ALL_MULTI_BYTE_RANGE(enc, cc->mbuf);
    }
    break;

  default:
    return ONIGERR_PARSER_BUG;
    break;
  }

  return r;
}
| 0 |
[
"CWE-125"
] |
oniguruma
|
aa0188eaedc056dca8374ac03d0177429b495515
| 15,064,257,956,128,108,000,000,000,000,000,000,000 | 103 |
fix #163: heap-buffer-overflow in gb18030_mbc_enc_len
|
/*
  Copy one pixel's channel values from `source` into `destination`,
  mapping each source channel index to its destination channel slot.
  When `source` is NULL, the destination is filled from the image's
  background color instead and MagickFalse is returned; otherwise
  MagickTrue.
*/
static inline MagickBooleanType CopyPixel(const Image *image,
  const Quantum *source,Quantum *destination)
{
  if (source == (const Quantum *) NULL)
    {
      /* No source pixel: synthesize one from the background color. */
      destination[RedPixelChannel]=ClampToQuantum(image->background_color.red);
      destination[GreenPixelChannel]=ClampToQuantum(
        image->background_color.green);
      destination[BluePixelChannel]=ClampToQuantum(
        image->background_color.blue);
      destination[BlackPixelChannel]=ClampToQuantum(
        image->background_color.black);
      destination[AlphaPixelChannel]=ClampToQuantum(
        image->background_color.alpha);
      return(MagickFalse);
    }
  ssize_t
    i = 0;

  while (i < (ssize_t) GetPixelChannels(image))
    {
      PixelChannel channel = GetPixelChannelChannel(image,i);
      destination[channel]=source[i];
      i++;
    }
  return(MagickTrue);
}
| 0 |
[
"CWE-476"
] |
ImageMagick
|
76f94fa2d9ae5d96e15929b6b6ce0c866fc44c69
| 85,710,530,237,998,830,000,000,000,000,000,000,000 | 26 |
https://github.com/ImageMagick/ImageMagick/issues/746
https://github.com/ImageMagick/ImageMagick/issues/741
|
/*
 * rsi_load_9116_bootup_params() - send the boot-parameters frame (9116).
 * @common: driver private structure
 *
 * Allocates an skb, fills in the 20 MHz or 40 MHz bootup parameter set
 * depending on the configured channel width, and queues the frame as an
 * internal management frame.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, or the error from
 * rsi_send_internal_mgmt_frame().
 */
static int rsi_load_9116_bootup_params(struct rsi_common *common)
{
	struct sk_buff *skb;
	struct rsi_boot_params_9116 *boot_params;

	rsi_dbg(MGMT_TX_ZONE, "%s: Sending boot params frame\n", __func__);
	skb = dev_alloc_skb(sizeof(struct rsi_boot_params_9116));
	if (!skb)
		return -ENOMEM;

	/*
	 * Zero the full 9116 parameter block.  The previous code cleared
	 * only sizeof(struct rsi_boot_params) — a different struct than the
	 * one allocated above — leaving part of the frame uninitialized.
	 */
	memset(skb->data, 0, sizeof(struct rsi_boot_params_9116));
	boot_params = (struct rsi_boot_params_9116 *)skb->data;

	if (common->channel_width == BW_40MHZ) {
		memcpy(&boot_params->bootup_params,
		       &boot_params_9116_40,
		       sizeof(struct bootup_params_9116));
		rsi_dbg(MGMT_TX_ZONE, "%s: Packet 40MHZ <=== %d\n", __func__,
			UMAC_CLK_40BW);
		boot_params->umac_clk = cpu_to_le16(UMAC_CLK_40BW);
	} else {
		memcpy(&boot_params->bootup_params,
		       &boot_params_9116_20,
		       sizeof(struct bootup_params_9116));
		/* 20 MHz: clock selection depends on the validity marker. */
		if (boot_params_20.valid != cpu_to_le32(VALID_20)) {
			boot_params->umac_clk = cpu_to_le16(UMAC_CLK_20BW);
			rsi_dbg(MGMT_TX_ZONE,
				"%s: Packet 20MHZ <=== %d\n", __func__,
				UMAC_CLK_20BW);
		} else {
			boot_params->umac_clk = cpu_to_le16(UMAC_CLK_40MHZ);
			rsi_dbg(MGMT_TX_ZONE,
				"%s: Packet 20MHZ <=== %d\n", __func__,
				UMAC_CLK_40MHZ);
		}
	}

	rsi_set_len_qno(&boot_params->desc_dword0.len_qno,
			sizeof(struct bootup_params_9116), RSI_WIFI_MGMT_Q);
	boot_params->desc_dword0.frame_type = BOOTUP_PARAMS_REQUEST;
	skb_put(skb, sizeof(struct rsi_boot_params_9116));

	return rsi_send_internal_mgmt_frame(common, skb);
}
| 0 |
[
"CWE-400",
"CWE-401"
] |
linux
|
d563131ef23cbc756026f839a82598c8445bc45f
| 109,013,052,552,468,400,000,000,000,000,000,000,000 | 43 |
rsi: release skb if rsi_prepare_beacon fails
In rsi_send_beacon, if rsi_prepare_beacon fails the allocated skb should
be released.
Signed-off-by: Navid Emamdoost <[email protected]>
Signed-off-by: Kalle Valo <[email protected]>
|
// Emits a navigation lifecycle event to JavaScript with the navigation's
// URL, same-document flag, main-frame flag, and — when resolvable — the
// render process id and routing id of the (speculative or current) frame
// host. Returns the Emit() result (whether a listener prevented default).
//
// Fix: FrameTreeNode::GloballyFindByID() may return nullptr (the node can
// be gone by the time the event is dispatched); the old code dereferenced
// it unconditionally. Process/routing ids fall back to -1 in that case.
bool WebContents::EmitNavigationEvent(
    const std::string& event,
    content::NavigationHandle* navigation_handle) {
  bool is_main_frame = navigation_handle->IsInMainFrame();
  int frame_tree_node_id = navigation_handle->GetFrameTreeNodeId();
  content::FrameTreeNode* frame_tree_node =
      content::FrameTreeNode::GloballyFindByID(frame_tree_node_id);
  int frame_process_id = -1, frame_routing_id = -1;
  if (frame_tree_node) {
    content::RenderFrameHostManager* render_manager =
        frame_tree_node->render_manager();
    content::RenderFrameHost* frame_host = nullptr;
    if (render_manager) {
      // Prefer the speculative host (in-flight cross-process navigation).
      frame_host = render_manager->speculative_frame_host();
      if (!frame_host)
        frame_host = render_manager->current_frame_host();
    }
    if (frame_host) {
      frame_process_id = frame_host->GetProcess()->GetID();
      frame_routing_id = frame_host->GetRoutingID();
    }
  }
  bool is_same_document = navigation_handle->IsSameDocument();
  auto url = navigation_handle->GetURL();
  return Emit(event, url, is_same_document, is_main_frame, frame_process_id,
              frame_routing_id);
}
| 0 |
[] |
electron
|
e9fa834757f41c0b9fe44a4dffe3d7d437f52d34
| 72,983,471,035,902,830,000,000,000,000,000,000,000 | 25 |
fix: ensure ElectronBrowser mojo service is only bound to appropriate render frames (#33344)
* fix: ensure ElectronBrowser mojo service is only bound to authorized render frames
Notes: no-notes
* refactor: extract electron API IPC to its own mojo interface
* fix: just check main frame not primary main frame
Co-authored-by: Samuel Attard <[email protected]>
Co-authored-by: Samuel Attard <[email protected]>
|
static int fuse_dentry_delete(const struct dentry *dentry)
{
return time_before64(fuse_dentry_time(dentry), get_jiffies_64());
}
| 0 |
[
"CWE-459"
] |
linux
|
5d069dbe8aaf2a197142558b6fb2978189ba3454
| 157,637,742,357,114,120,000,000,000,000,000,000,000 | 4 |
fuse: fix bad inode
Jan Kara's analysis of the syzbot report (edited):
The reproducer opens a directory on FUSE filesystem, it then attaches
dnotify mark to the open directory. After that a fuse_do_getattr() call
finds that attributes returned by the server are inconsistent, and calls
make_bad_inode() which, among other things does:
inode->i_mode = S_IFREG;
This then confuses dnotify which doesn't tear down its structures
properly and eventually crashes.
Avoid calling make_bad_inode() on a live inode: switch to a private flag on
the fuse inode. Also add the test to ops which the bad_inode_ops would
have caught.
This bug goes back to the initial merge of fuse in 2.6.14...
Reported-by: [email protected]
Signed-off-by: Miklos Szeredi <[email protected]>
Tested-by: Jan Kara <[email protected]>
Cc: <[email protected]>
|
/**
 * \brief Handle a packet for a TCP session currently in SYN_RECV state.
 *
 * Dispatches on TCP flags: RST (with OS-policy-aware evasion handling),
 * FIN, retransmitted SYN/ACK, retransmitted SYN, and the 3WHS-completing
 * ACK, which may move the session to ESTABLISHED (including 4WHS,
 * async-oneside, midstream and TCP Fast Open variants).
 *
 * \retval  0 packet handled
 * \retval -1 packet rejected (stream event set where applicable)
 */
static int StreamTcpPacketStateSynRecv(ThreadVars *tv, Packet *p,
        StreamTcpThread *stt, TcpSession *ssn, PacketQueue *pq)
{
    if (ssn == NULL)
        return -1;

    if (p->tcph->th_flags & TH_RST) {
        if (!StreamTcpValidateRst(ssn, p))
            return -1;

        uint8_t reset = TRUE;
        /* After receiving the RST in SYN_RECV state and if detection
           evasion flags have been set, then the following operating
           systems will not close the connection. As they consider the
           packet as a stray packet not belonging to the current
           session; for more information check
           http://www.packetstan.com/2010/06/recently-ive-been-on-campaign-to-make.html */
        if (ssn->flags & STREAMTCP_FLAG_DETECTION_EVASION_ATTEMPT) {
            if (PKT_IS_TOSERVER(p)) {
                if ((ssn->server.os_policy == OS_POLICY_LINUX) ||
                        (ssn->server.os_policy == OS_POLICY_OLD_LINUX) ||
                        (ssn->server.os_policy == OS_POLICY_SOLARIS))
                {
                    reset = FALSE;
                    SCLogDebug("Detection evasion has been attempted, so"
                            " not resetting the connection !!");
                }
            } else {
                if ((ssn->client.os_policy == OS_POLICY_LINUX) ||
                        (ssn->client.os_policy == OS_POLICY_OLD_LINUX) ||
                        (ssn->client.os_policy == OS_POLICY_SOLARIS))
                {
                    reset = FALSE;
                    SCLogDebug("Detection evasion has been attempted, so"
                            " not resetting the connection !!");
                }
            }
        }

        if (reset == TRUE) {
            StreamTcpPacketSetState(p, ssn, TCP_CLOSED);
            SCLogDebug("ssn %p: Reset received and state changed to "
                    "TCP_CLOSED", ssn);

            if (ssn->flags & STREAMTCP_FLAG_TIMESTAMP) {
                StreamTcpHandleTimestamp(ssn, p);
            }
        }

    } else if (p->tcph->th_flags & TH_FIN) {
        /* FIN is handled in the same way as in TCP_ESTABLISHED case */;
        if (ssn->flags & STREAMTCP_FLAG_TIMESTAMP) {
            if (!StreamTcpValidateTimestamp(ssn, p))
                return -1;
        }

        if ((StreamTcpHandleFin(tv, stt, ssn, p, pq)) == -1)
            return -1;

    /* SYN/ACK */
    } else if ((p->tcph->th_flags & (TH_SYN|TH_ACK)) == (TH_SYN|TH_ACK)) {
        SCLogDebug("ssn %p: SYN/ACK packet on state SYN_RECV. resent", ssn);

        if (PKT_IS_TOSERVER(p)) {
            SCLogDebug("ssn %p: SYN/ACK-pkt to server in SYN_RECV state", ssn);

            StreamTcpSetEvent(p, STREAM_3WHS_SYNACK_TOSERVER_ON_SYN_RECV);
            return -1;
        }

        /* Check if the SYN/ACK packets ACK matches the earlier
         * received SYN/ACK packet. */
        if (!(SEQ_EQ(TCP_GET_ACK(p), ssn->client.last_ack))) {
            SCLogDebug("ssn %p: ACK mismatch, packet ACK %" PRIu32 " != "
                    "%" PRIu32 " from stream", ssn, TCP_GET_ACK(p),
                    ssn->client.isn + 1);

            StreamTcpSetEvent(p, STREAM_3WHS_SYNACK_RESEND_WITH_DIFFERENT_ACK);
            return -1;
        }

        /* Check if the SYN/ACK packet SEQ the earlier
         * received SYN/ACK packet, server resend with different ISN. */
        /* NOTE(review): the debug message below prints ssn->client.isn while
         * the comparison is against ssn->server.isn — log text only. */
        if (!(SEQ_EQ(TCP_GET_SEQ(p), ssn->server.isn))) {
            SCLogDebug("ssn %p: SEQ mismatch, packet SEQ %" PRIu32 " != "
                    "%" PRIu32 " from stream", ssn, TCP_GET_SEQ(p),
                    ssn->client.isn);

            if (StreamTcp3whsQueueSynAck(ssn, p) == -1)
                return -1;
            SCLogDebug("ssn %p: queued different SYN/ACK", ssn);
        }

    } else if (p->tcph->th_flags & TH_SYN) {
        SCLogDebug("ssn %p: SYN packet on state SYN_RECV... resent", ssn);

        if (PKT_IS_TOCLIENT(p)) {
            SCLogDebug("ssn %p: SYN-pkt to client in SYN_RECV state", ssn);

            StreamTcpSetEvent(p, STREAM_3WHS_SYN_TOCLIENT_ON_SYN_RECV);
            return -1;
        }

        if (!(SEQ_EQ(TCP_GET_SEQ(p), ssn->client.isn))) {
            SCLogDebug("ssn %p: SYN with different SEQ on SYN_RECV state", ssn);

            StreamTcpSetEvent(p, STREAM_3WHS_SYN_RESEND_DIFF_SEQ_ON_SYN_RECV);
            return -1;
        }

    } else if (p->tcph->th_flags & TH_ACK) {
        /* If we queued alternative SYN/ACKs, see if this ACK matches one of
         * them and adopt that handshake's parameters. */
        if (ssn->queue_len) {
            SCLogDebug("ssn %p: checking ACK against queued SYN/ACKs", ssn);
            TcpStateQueue *q = StreamTcp3whsFindSynAckByAck(ssn, p);
            if (q != NULL) {
                SCLogDebug("ssn %p: here we update state against queued SYN/ACK", ssn);
                StreamTcp3whsSynAckUpdate(ssn, p, /* using queue to update state */q);
            } else {
                SCLogDebug("ssn %p: none found, now checking ACK against original SYN/ACK (state)", ssn);
            }
        }

        /* If the timestamp option is enabled for both the streams, then
         * validate the received packet timestamp value against the
         * stream->last_ts. If the timestamp is valid then process the
         * packet normally otherwise the drop the packet (RFC 1323)*/
        if (ssn->flags & STREAMTCP_FLAG_TIMESTAMP) {
            if (!(StreamTcpValidateTimestamp(ssn, p))) {
                return -1;
            }
        }

        /* 4WHS: the final ACK travels toclient instead of toserver. */
        if ((ssn->flags & STREAMTCP_FLAG_4WHS) && PKT_IS_TOCLIENT(p)) {
            SCLogDebug("ssn %p: ACK received on 4WHS session",ssn);

            if (!(SEQ_EQ(TCP_GET_SEQ(p), ssn->server.next_seq))) {
                SCLogDebug("ssn %p: 4WHS wrong seq nr on packet", ssn);
                StreamTcpSetEvent(p, STREAM_4WHS_WRONG_SEQ);
                return -1;
            }

            if (StreamTcpValidateAck(ssn, &ssn->client, p) == -1) {
                SCLogDebug("ssn %p: 4WHS invalid ack nr on packet", ssn);
                StreamTcpSetEvent(p, STREAM_4WHS_INVALID_ACK);
                return -1;
            }

            SCLogDebug("4WHS normal pkt");
            SCLogDebug("ssn %p: pkt (%" PRIu32 ") is to client: SEQ "
                    "%" PRIu32 ", ACK %" PRIu32 "", ssn, p->payload_len,
                    TCP_GET_SEQ(p), TCP_GET_ACK(p));

            if (ssn->flags & STREAMTCP_FLAG_TIMESTAMP) {
                StreamTcpHandleTimestamp(ssn, p);
            }

            StreamTcpUpdateLastAck(ssn, &ssn->client, TCP_GET_ACK(p));
            StreamTcpUpdateNextSeq(ssn, &ssn->server, (ssn->server.next_seq + p->payload_len));
            ssn->client.window = TCP_GET_WINDOW(p) << ssn->client.wscale;
            ssn->client.next_win = ssn->client.last_ack + ssn->client.window;

            StreamTcpPacketSetState(p, ssn, TCP_ESTABLISHED);
            SCLogDebug("ssn %p: =~ ssn state is now TCP_ESTABLISHED", ssn);

            StreamTcpReassembleHandleSegment(tv, stt->ra_ctx, ssn,
                    &ssn->server, p, pq);
            SCLogDebug("ssn %p: ssn->client.next_win %" PRIu32 ", "
                    "ssn->client.last_ack %"PRIu32"", ssn,
                    ssn->client.next_win, ssn->client.last_ack);
            return 0;
        }

        bool ack_indicates_missed_3whs_ack_packet = false;
        /* Check if the ACK received is in right direction. But when we have
         * picked up a mid stream session after missing the initial SYN pkt,
         * in this case the ACK packet can arrive from either client (normal
         * case) or from server itself (asynchronous streams). Therefore
         *  the check has been avoided in this case */
        if (PKT_IS_TOCLIENT(p)) {
            /* special case, handle 4WHS, so SYN/ACK in the opposite
             * direction */
            if (ssn->flags & STREAMTCP_FLAG_MIDSTREAM_SYNACK) {
                SCLogDebug("ssn %p: ACK received on midstream SYN/ACK "
                        "pickup session",ssn);
                /* fall through */
            } else if (ssn->flags & STREAMTCP_FLAG_TCP_FAST_OPEN) {
                SCLogDebug("ssn %p: ACK received on TFO session",ssn);
                /* fall through */

            } else {
                /* if we missed traffic between the S/SA and the current
                 * 'wrong direction' ACK, we could end up here. In IPS
                 * reject it. But in IDS mode we continue.
                 *
                 * IPS rejects as it should see all packets, so pktloss
                 * should lead to retransmissions. As this can also be
                 * pattern for MOTS/MITM injection attacks, we need to be
                 * careful.
                 */
                if (StreamTcpInlineMode()) {
                    if (p->payload_len > 0 &&
                            SEQ_EQ(TCP_GET_ACK(p), ssn->client.last_ack) &&
                            SEQ_EQ(TCP_GET_SEQ(p), ssn->server.next_seq)) {
                        /* packet loss is possible but unlikely here */
                        SCLogDebug("ssn %p: possible data injection", ssn);
                        StreamTcpSetEvent(p, STREAM_3WHS_ACK_DATA_INJECT);
                        return -1;
                    }

                    SCLogDebug("ssn %p: ACK received in the wrong direction",
                            ssn);
                    StreamTcpSetEvent(p, STREAM_3WHS_ACK_IN_WRONG_DIR);
                    return -1;
                }
                ack_indicates_missed_3whs_ack_packet = true;
            }
        }

        SCLogDebug("ssn %p: pkt (%" PRIu32 ") is to server: SEQ %" PRIu32 ""
                ", ACK %" PRIu32 "", ssn, p->payload_len, TCP_GET_SEQ(p),
                TCP_GET_ACK(p));

        /* Check both seq and ack number before accepting the packet and
           changing to ESTABLISHED state */
        if ((SEQ_EQ(TCP_GET_SEQ(p), ssn->client.next_seq)) &&
                SEQ_EQ(TCP_GET_ACK(p), ssn->server.next_seq)) {
            SCLogDebug("normal pkt");

            /* process the packet normal, No Async streams :) */

            if (ssn->flags & STREAMTCP_FLAG_TIMESTAMP) {
                StreamTcpHandleTimestamp(ssn, p);
            }

            StreamTcpUpdateLastAck(ssn, &ssn->server, TCP_GET_ACK(p));
            StreamTcpUpdateNextSeq(ssn, &ssn->client, (ssn->client.next_seq + p->payload_len));
            ssn->server.window = TCP_GET_WINDOW(p) << ssn->server.wscale;

            ssn->server.next_win = ssn->server.last_ack + ssn->server.window;

            if (ssn->flags & STREAMTCP_FLAG_MIDSTREAM) {
                ssn->client.window = TCP_GET_WINDOW(p) << ssn->client.wscale;
                ssn->client.next_win = ssn->client.last_ack + ssn->client.window;
                ssn->server.next_win = ssn->server.last_ack +
                    ssn->server.window;
                if (!(ssn->flags & STREAMTCP_FLAG_MIDSTREAM_SYNACK)) {
                    /* window scaling for midstream pickups, we can't do much
                     * other than assume that it's set to the max value: 14 */
                    ssn->server.wscale = TCP_WSCALE_MAX;
                    ssn->client.wscale = TCP_WSCALE_MAX;
                    ssn->flags |= STREAMTCP_FLAG_SACKOK;
                }
            }

            StreamTcpPacketSetState(p, ssn, TCP_ESTABLISHED);
            SCLogDebug("ssn %p: =~ ssn state is now TCP_ESTABLISHED", ssn);

            StreamTcpReassembleHandleSegment(tv, stt->ra_ctx, ssn,
                    &ssn->client, p, pq);

            /* If asynchronous stream handling is allowed then set the session,
               if packet's seq number is equal the expected seq no.*/
        } else if (stream_config.async_oneside == TRUE &&
                (SEQ_EQ(TCP_GET_SEQ(p), ssn->server.next_seq)))
        {
            /*set the ASYNC flag used to indicate the session as async stream
              and helps in relaxing the windows checks.*/
            ssn->flags |= STREAMTCP_FLAG_ASYNC;
            ssn->server.next_seq += p->payload_len;
            ssn->server.last_ack = TCP_GET_SEQ(p);

            ssn->client.window = TCP_GET_WINDOW(p) << ssn->client.wscale;
            ssn->client.last_ack = TCP_GET_ACK(p);

            if (ssn->flags & STREAMTCP_FLAG_TIMESTAMP) {
                StreamTcpHandleTimestamp(ssn, p);
            }

            if (ssn->flags & STREAMTCP_FLAG_MIDSTREAM) {
                ssn->server.window = TCP_GET_WINDOW(p);
                ssn->client.next_win = ssn->server.last_ack +
                    ssn->server.window;
                /* window scaling for midstream pickups, we can't do much
                 * other than assume that it's set to the max value: 14 */
                ssn->server.wscale = TCP_WSCALE_MAX;
                ssn->client.wscale = TCP_WSCALE_MAX;
                ssn->flags |= STREAMTCP_FLAG_SACKOK;
            }

            SCLogDebug("ssn %p: synrecv => Asynchronous stream, packet SEQ"
                    " %" PRIu32 ", payload size %" PRIu32 " (%" PRIu32 "), "
                    "ssn->server.next_seq %" PRIu32 "\n"
                    , ssn, TCP_GET_SEQ(p), p->payload_len, TCP_GET_SEQ(p)
                    + p->payload_len, ssn->server.next_seq);

            StreamTcpPacketSetState(p, ssn, TCP_ESTABLISHED);
            SCLogDebug("ssn %p: =~ ssn state is now TCP_ESTABLISHED", ssn);

            StreamTcpReassembleHandleSegment(tv, stt->ra_ctx, ssn,
                    &ssn->server, p, pq);

            /* Upon receiving the packet with correct seq number and wrong
               ACK number, it causes the other end to send RST. But some target
               system (Linux & solaris) does not RST the connection, so it is
               likely to avoid the detection */
        } else if (SEQ_EQ(TCP_GET_SEQ(p), ssn->client.next_seq)){
            ssn->flags |= STREAMTCP_FLAG_DETECTION_EVASION_ATTEMPT;
            SCLogDebug("ssn %p: wrong ack nr on packet, possible evasion!!",
                    ssn);

            StreamTcpSetEvent(p, STREAM_3WHS_RIGHT_SEQ_WRONG_ACK_EVASION);
            return -1;

            /* if we get a packet with a proper ack, but a seq that is beyond
             * next_seq but in-window, we probably missed some packets */
        } else if (SEQ_GT(TCP_GET_SEQ(p), ssn->client.next_seq) &&
                SEQ_LEQ(TCP_GET_SEQ(p),ssn->client.next_win) &&
                SEQ_EQ(TCP_GET_ACK(p), ssn->server.next_seq))
        {
            SCLogDebug("ssn %p: ACK for missing data", ssn);

            if (ssn->flags & STREAMTCP_FLAG_TIMESTAMP) {
                StreamTcpHandleTimestamp(ssn, p);
            }

            StreamTcpUpdateLastAck(ssn, &ssn->server, TCP_GET_ACK(p));

            ssn->client.next_seq = TCP_GET_SEQ(p) + p->payload_len;
            SCLogDebug("ssn %p: ACK for missing data: ssn->client.next_seq %u", ssn, ssn->client.next_seq);
            ssn->server.window = TCP_GET_WINDOW(p) << ssn->server.wscale;

            ssn->server.next_win = ssn->server.last_ack + ssn->server.window;

            if (ssn->flags & STREAMTCP_FLAG_MIDSTREAM) {
                ssn->client.window = TCP_GET_WINDOW(p);
                ssn->server.next_win = ssn->server.last_ack +
                    ssn->server.window;
                /* window scaling for midstream pickups, we can't do much
                 * other than assume that it's set to the max value: 14 */
                ssn->server.wscale = TCP_WSCALE_MAX;
                ssn->client.wscale = TCP_WSCALE_MAX;
                ssn->flags |= STREAMTCP_FLAG_SACKOK;
            }

            StreamTcpPacketSetState(p, ssn, TCP_ESTABLISHED);
            SCLogDebug("ssn %p: =~ ssn state is now TCP_ESTABLISHED", ssn);

            StreamTcpReassembleHandleSegment(tv, stt->ra_ctx, ssn,
                    &ssn->client, p, pq);

            /* toclient packet: after having missed the 3whs's final ACK */
        } else if ((ack_indicates_missed_3whs_ack_packet ||
                (ssn->flags & STREAMTCP_FLAG_TCP_FAST_OPEN)) &&
                SEQ_EQ(TCP_GET_ACK(p), ssn->client.last_ack) &&
                SEQ_EQ(TCP_GET_SEQ(p), ssn->server.next_seq))
        {
            if (ack_indicates_missed_3whs_ack_packet) {
                SCLogDebug("ssn %p: packet fits perfectly after a missed 3whs-ACK", ssn);
            } else {
                SCLogDebug("ssn %p: (TFO) expected packet fits perfectly after SYN/ACK", ssn);
            }

            StreamTcpUpdateNextSeq(ssn, &ssn->server, (TCP_GET_SEQ(p) + p->payload_len));
            ssn->server.window = TCP_GET_WINDOW(p) << ssn->server.wscale;
            ssn->server.next_win = ssn->server.last_ack + ssn->server.window;

            StreamTcpPacketSetState(p, ssn, TCP_ESTABLISHED);
            SCLogDebug("ssn %p: =~ ssn state is now TCP_ESTABLISHED", ssn);

            StreamTcpReassembleHandleSegment(tv, stt->ra_ctx, ssn,
                    &ssn->server, p, pq);

        } else {
            SCLogDebug("ssn %p: wrong seq nr on packet", ssn);

            StreamTcpSetEvent(p, STREAM_3WHS_WRONG_SEQ_WRONG_ACK);
            return -1;
        }

        SCLogDebug("ssn %p: ssn->server.next_win %" PRIu32 ", "
                "ssn->server.last_ack %"PRIu32"", ssn,
                ssn->server.next_win, ssn->server.last_ack);
    } else {
        SCLogDebug("ssn %p: default case", ssn);
    }

    return 0;
}
| 0 |
[
"CWE-436",
"CWE-94"
] |
suricata
|
fa692df37a796c3330c81988d15ef1a219afc006
| 89,595,187,401,309,560,000,000,000,000,000,000,000 | 390 |
stream: reject broken ACK packets
Fix evasion possibility by rejecting packets with a broken ACK field.
These packets have a non-0 ACK field, but do not have a ACK flag set.
Bug #3324.
Reported-by: Nicolas Adba
|
/*
 * Per-tick pass over a context's active events: unthrottle events that hit
 * MAX_INTERRUPTS last tick, and re-tune the sampling period of events in
 * frequency mode so they converge on attr.sample_freq.
 *
 * Called from the timer tick with @ctx pinned to this CPU; takes ctx->lock
 * and disables the context's PMU around the whole walk.
 */
static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
					   int needs_unthr)
{
	struct perf_event *event;
	struct hw_perf_event *hwc;
	u64 now, period = TICK_NSEC;
	s64 delta;

	/*
	 * only need to iterate over all events iff:
	 * - context have events in frequency mode (needs freq adjust)
	 * - there are events to unthrottle on this cpu
	 */
	if (!(ctx->nr_freq || needs_unthr))
		return;

	raw_spin_lock(&ctx->lock);
	perf_pmu_disable(ctx->pmu);

	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (event->state != PERF_EVENT_STATE_ACTIVE)
			continue;

		if (!event_filter_match(event))
			continue;

		/* Each event's own PMU may differ from ctx->pmu. */
		perf_pmu_disable(event->pmu);

		hwc = &event->hw;

		/* Unthrottle: interrupt budget refilled, restart counting. */
		if (hwc->interrupts == MAX_INTERRUPTS) {
			hwc->interrupts = 0;
			perf_log_throttle(event, 1);
			event->pmu->start(event, 0);
		}

		if (!event->attr.freq || !event->attr.sample_freq)
			goto next;

		/*
		 * stop the event and update event->count
		 */
		event->pmu->stop(event, PERF_EF_UPDATE);

		now = local64_read(&event->count);
		delta = now - hwc->freq_count_stamp;
		hwc->freq_count_stamp = now;

		/*
		 * restart the event
		 * reload only if value has changed
		 * we have stopped the event so tell that
		 * to perf_adjust_period() to avoid stopping it
		 * twice.
		 */
		if (delta > 0)
			perf_adjust_period(event, period, delta, false);

		event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
	next:
		perf_pmu_enable(event->pmu);
	}

	perf_pmu_enable(ctx->pmu);
	raw_spin_unlock(&ctx->lock);
}
| 0 |
[
"CWE-284",
"CWE-264"
] |
linux
|
f63a8daa5812afef4f06c962351687e1ff9ccb2b
| 156,424,769,149,939,790,000,000,000,000,000,000,000 | 66 |
perf: Fix event->ctx locking
There have been a few reported issues wrt. the lack of locking around
changing event->ctx. This patch tries to address those.
It avoids the whole rwsem thing; and while it appears to work, please
give it some thought in review.
What I did fail at is sensible runtime checks on the use of
event->ctx, the RCU use makes it very hard.
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Paul E. McKenney <[email protected]>
Cc: Jiri Olsa <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
Cc: Linus Torvalds <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
|
int RESTArgs::get_time(struct req_state *s, const string& name,
const utime_t& def_val, utime_t *val, bool *existed)
{
bool exists;
string sval = s->info.args.get(name, &exists);
if (existed)
*existed = exists;
if (!exists) {
*val = def_val;
return 0;
}
uint64_t epoch, nsec;
int r = utime_t::parse_date(sval, &epoch, &nsec);
if (r < 0)
return r;
*val = utime_t(epoch, nsec);
return 0;
}
| 0 |
[
"CWE-770"
] |
ceph
|
ab29bed2fc9f961fe895de1086a8208e21ddaddc
| 278,400,805,941,135,300,000,000,000,000,000,000,000 | 24 |
rgw: fix issues with 'enforce bounds' patch
The patch to enforce bounds on max-keys/max-uploads/max-parts had a few
issues that would prevent us from compiling it. Instead of changing the
code provided by the submitter, we're addressing them in a separate
commit to maintain the DCO.
Signed-off-by: Joao Eduardo Luis <[email protected]>
Signed-off-by: Abhishek Lekshmanan <[email protected]>
(cherry picked from commit 29bc434a6a81a2e5c5b8cfc4c8d5c82ca5bf538a)
mimic specific fixes:
As the largeish change from master g_conf() isn't in mimic yet, use the g_conf
global structure, also make rgw_op use the value from req_info ceph context as
we do for all the requests
|
/* Fill a TX buffer descriptor: split the DMA address into hi/lo words and
 * pack length/flags and mss/vlan into their shifted fields. */
static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
				 dma_addr_t mapping, u32 len, u32 flags,
				 u32 mss, u32 vlan)
{
	u64 addr = (u64) mapping;

	txbd->addr_hi = (u32) (addr >> 32);
	txbd->addr_lo = (u32) (addr & 0xffffffff);
	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
}
| 0 |
[
"CWE-476",
"CWE-119"
] |
linux
|
715230a44310a8cf66fbfb5a46f9a62a9b2de424
| 59,973,208,221,855,645,000,000,000,000,000,000,000 | 9 |
tg3: fix length overflow in VPD firmware parsing
Commit 184b89044fb6e2a74611dafa69b1dce0d98612c6 ("tg3: Use VPD fw version
when present") introduced VPD parsing that contained a potential length
overflow.
Limit the hardware's reported firmware string length (max 255 bytes) to
stay inside the driver's firmware string length (32 bytes). On overflow,
truncate the formatted firmware string instead of potentially overwriting
portions of the tg3 struct.
http://cansecwest.com/slides/2013/PrivateCore%20CSW%202013.pdf
Signed-off-by: Kees Cook <[email protected]>
Reported-by: Oded Horovitz <[email protected]>
Reported-by: Brad Spengler <[email protected]>
Cc: [email protected]
Cc: Matt Carlson <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
**/
CImg<Tdouble> get_stats(const unsigned int variance_method=1) const {
if (is_empty()) return CImg<doubleT>();
const T *const p_end = end(), *pm = _data, *pM = _data;
double S = 0, S2 = 0, P = 1;
const ulongT siz = size();
T m = *pm, M = *pM;
cimg_pragma_openmp(parallel reduction(+:S,S2) reduction(*:P) cimg_openmp_if(siz>=131072)) {
const T *lpm = _data, *lpM = _data;
T lm = *lpm, lM = *lpM;
cimg_pragma_openmp(for)
for (const T *ptrs = _data; ptrs<p_end; ++ptrs) {
const T val = *ptrs;
const double _val = (double)val;
if (val<lm) { lm = val; lpm = ptrs; }
if (val>lM) { lM = val; lpM = ptrs; }
S+=_val;
S2+=_val*_val;
P*=_val;
}
cimg_pragma_openmp(critical(get_stats)) {
if (lm<m || (lm==m && lpm<pm)) { m = lm; pm = lpm; }
if (lM>M || (lM==M && lpM<pM)) { M = lM; pM = lpM; }
}
}
const double
mean_value = S/siz,
_variance_value = variance_method==0?(S2 - S*S/siz)/siz:
(variance_method==1?(siz>1?(S2 - S*S/siz)/(siz - 1):0):
variance(variance_method)),
variance_value = _variance_value>0?_variance_value:0;
int
xm = 0, ym = 0, zm = 0, cm = 0,
xM = 0, yM = 0, zM = 0, cM = 0;
contains(*pm,xm,ym,zm,cm);
contains(*pM,xM,yM,zM,cM);
return CImg<Tdouble>(1,14).fill((double)m,(double)M,mean_value,variance_value,
(double)xm,(double)ym,(double)zm,(double)cm,
(double)xM,(double)yM,(double)zM,(double)cM,
S,P);
| 0 |
[
"CWE-125"
] |
CImg
|
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
| 173,738,050,637,744,120,000,000,000,000,000,000,000 | 42 |
Fix other issues in 'CImg<T>::load_bmp()'.
|
/* LSM hook stub (security disabled): delegate scheduler-change permission
 * straight to the capability check. */
static inline int security_task_setscheduler(struct task_struct *p,
					     int policy,
					     struct sched_param *lp)
{
	int rc;

	rc = cap_task_setscheduler(p, policy, lp);
	return rc;
}
| 0 |
[] |
linux-2.6
|
ee18d64c1f632043a02e6f5ba5e045bb26a5465f
| 269,955,825,830,391,300,000,000,000,000,000,000,000 | 6 |
KEYS: Add a keyctl to install a process's session keyring on its parent [try #6]
Add a keyctl to install a process's session keyring onto its parent. This
replaces the parent's session keyring. Because the COW credential code does
not permit one process to change another process's credentials directly, the
change is deferred until userspace next starts executing again. Normally this
will be after a wait*() syscall.
To support this, three new security hooks have been provided:
cred_alloc_blank() to allocate unset security creds, cred_transfer() to fill in
the blank security creds and key_session_to_parent() - which asks the LSM if
the process may replace its parent's session keyring.
The replacement may only happen if the process has the same ownership details
as its parent, and the process has LINK permission on the session keyring, and
the session keyring is owned by the process, and the LSM permits it.
Note that this requires alteration to each architecture's notify_resume path.
This has been done for all arches barring blackfin, m68k* and xtensa, all of
which need assembly alteration to support TIF_NOTIFY_RESUME. This allows the
replacement to be performed at the point the parent process resumes userspace
execution.
This allows the userspace AFS pioctl emulation to fully emulate newpag() and
the VIOCSETTOK and VIOCSETTOK2 pioctls, all of which require the ability to
alter the parent process's PAG membership. However, since kAFS doesn't use
PAGs per se, but rather dumps the keys into the session keyring, the session
keyring of the parent must be replaced if, for example, VIOCSETTOK is passed
the newpag flag.
This can be tested with the following program:
#include <stdio.h>
#include <stdlib.h>
#include <keyutils.h>
#define KEYCTL_SESSION_TO_PARENT 18
#define OSERROR(X, S) do { if ((long)(X) == -1) { perror(S); exit(1); } } while(0)
int main(int argc, char **argv)
{
key_serial_t keyring, key;
long ret;
keyring = keyctl_join_session_keyring(argv[1]);
OSERROR(keyring, "keyctl_join_session_keyring");
key = add_key("user", "a", "b", 1, keyring);
OSERROR(key, "add_key");
ret = keyctl(KEYCTL_SESSION_TO_PARENT);
OSERROR(ret, "KEYCTL_SESSION_TO_PARENT");
return 0;
}
Compiled and linked with -lkeyutils, you should see something like:
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: _ses
355907932 --alswrv 4043 -1 \_ keyring: _uid.4043
[dhowells@andromeda ~]$ /tmp/newpag
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: _ses
1055658746 --alswrv 4043 4043 \_ user: a
[dhowells@andromeda ~]$ /tmp/newpag hello
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: hello
340417692 --alswrv 4043 4043 \_ user: a
Where the test program creates a new session keyring, sticks a user key named
'a' into it and then installs it on its parent.
Signed-off-by: David Howells <[email protected]>
Signed-off-by: James Morris <[email protected]>
|
/*
 * v9fs_stat2inode_dotl - populate a VFS inode from a 9P2000.L stat reply.
 * @stat:  attribute set returned by the server (st_result_mask says which
 *         fields are valid)
 * @inode: inode to update
 * @flags: V9FS_STAT2INODE_KEEP_ISIZE suppresses updating i_size (e.g. while
 *         local writes are in flight)
 *
 * When the full basic attribute set is present, all fields are copied in
 * one go; otherwise each field is copied only if its result-mask bit is
 * set.  Clears V9FS_INO_INVALID_ATTR on completion.
 */
v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode,
		     unsigned int flags)
{
	umode_t mode;
	struct v9fs_inode *v9inode = V9FS_I(inode);

	if ((stat->st_result_mask & P9_STATS_BASIC) == P9_STATS_BASIC) {
		inode->i_atime.tv_sec = stat->st_atime_sec;
		inode->i_atime.tv_nsec = stat->st_atime_nsec;
		inode->i_mtime.tv_sec = stat->st_mtime_sec;
		inode->i_mtime.tv_nsec = stat->st_mtime_nsec;
		inode->i_ctime.tv_sec = stat->st_ctime_sec;
		inode->i_ctime.tv_nsec = stat->st_ctime_nsec;
		inode->i_uid = stat->st_uid;
		inode->i_gid = stat->st_gid;
		set_nlink(inode, stat->st_nlink);
		/* Keep the file-type bits of i_mode; take permissions from the server. */
		mode = stat->st_mode & S_IALLUGO;
		mode |= inode->i_mode & ~S_IALLUGO;
		inode->i_mode = mode;
		if (!(flags & V9FS_STAT2INODE_KEEP_ISIZE))
			v9fs_i_size_write(inode, stat->st_size);
		inode->i_blocks = stat->st_blocks;
	} else {
		/* Partial reply: honour only the fields the server marked valid. */
		if (stat->st_result_mask & P9_STATS_ATIME) {
			inode->i_atime.tv_sec = stat->st_atime_sec;
			inode->i_atime.tv_nsec = stat->st_atime_nsec;
		}
		if (stat->st_result_mask & P9_STATS_MTIME) {
			inode->i_mtime.tv_sec = stat->st_mtime_sec;
			inode->i_mtime.tv_nsec = stat->st_mtime_nsec;
		}
		if (stat->st_result_mask & P9_STATS_CTIME) {
			inode->i_ctime.tv_sec = stat->st_ctime_sec;
			inode->i_ctime.tv_nsec = stat->st_ctime_nsec;
		}
		if (stat->st_result_mask & P9_STATS_UID)
			inode->i_uid = stat->st_uid;
		if (stat->st_result_mask & P9_STATS_GID)
			inode->i_gid = stat->st_gid;
		if (stat->st_result_mask & P9_STATS_NLINK)
			set_nlink(inode, stat->st_nlink);
		if (stat->st_result_mask & P9_STATS_MODE) {
			inode->i_mode = stat->st_mode;
			/* Device nodes need their file operations rewired. */
			if ((S_ISBLK(inode->i_mode)) ||
			    (S_ISCHR(inode->i_mode)))
				init_special_inode(inode, inode->i_mode,
						   inode->i_rdev);
		}
		if (stat->st_result_mask & P9_STATS_RDEV)
			inode->i_rdev = new_decode_dev(stat->st_rdev);
		if (!(flags & V9FS_STAT2INODE_KEEP_ISIZE) &&
		    stat->st_result_mask & P9_STATS_SIZE)
			v9fs_i_size_write(inode, stat->st_size);
		if (stat->st_result_mask & P9_STATS_BLOCKS)
			inode->i_blocks = stat->st_blocks;
	}
	if (stat->st_result_mask & P9_STATS_GEN)
		inode->i_generation = stat->st_gen;

	/* Currently we don't support P9_STATS_BTIME and P9_STATS_DATA_VERSION
	 * because the inode structure does not have fields for them.
	 */
	v9inode->cache_validity &= ~V9FS_INO_INVALID_ATTR;
}
| 0 |
[
"CWE-835"
] |
linux
|
5e3cc1ee1405a7eb3487ed24f786dec01b4cbe1f
| 332,481,863,261,389,840,000,000,000,000,000,000,000 | 66 |
9p: use inode->i_lock to protect i_size_write() under 32-bit
Use inode->i_lock to protect i_size_write(), else i_size_read() in
generic_fillattr() may loop infinitely in read_seqcount_begin() when
multiple processes invoke v9fs_vfs_getattr() or v9fs_vfs_getattr_dotl()
simultaneously under 32-bit SMP environment, and a soft lockup will be
triggered as show below:
watchdog: BUG: soft lockup - CPU#5 stuck for 22s! [stat:2217]
Modules linked in:
CPU: 5 PID: 2217 Comm: stat Not tainted 5.0.0-rc1-00005-g7f702faf5a9e #4
Hardware name: Generic DT based system
PC is at generic_fillattr+0x104/0x108
LR is at 0xec497f00
pc : [<802b8898>] lr : [<ec497f00>] psr: 200c0013
sp : ec497e20 ip : ed608030 fp : ec497e3c
r10: 00000000 r9 : ec497f00 r8 : ed608030
r7 : ec497ebc r6 : ec497f00 r5 : ee5c1550 r4 : ee005780
r3 : 0000052d r2 : 00000000 r1 : ec497f00 r0 : ed608030
Flags: nzCv IRQs on FIQs on Mode SVC_32 ISA ARM Segment none
Control: 10c5387d Table: ac48006a DAC: 00000051
CPU: 5 PID: 2217 Comm: stat Not tainted 5.0.0-rc1-00005-g7f702faf5a9e #4
Hardware name: Generic DT based system
Backtrace:
[<8010d974>] (dump_backtrace) from [<8010dc88>] (show_stack+0x20/0x24)
[<8010dc68>] (show_stack) from [<80a1d194>] (dump_stack+0xb0/0xdc)
[<80a1d0e4>] (dump_stack) from [<80109f34>] (show_regs+0x1c/0x20)
[<80109f18>] (show_regs) from [<801d0a80>] (watchdog_timer_fn+0x280/0x2f8)
[<801d0800>] (watchdog_timer_fn) from [<80198658>] (__hrtimer_run_queues+0x18c/0x380)
[<801984cc>] (__hrtimer_run_queues) from [<80198e60>] (hrtimer_run_queues+0xb8/0xf0)
[<80198da8>] (hrtimer_run_queues) from [<801973e8>] (run_local_timers+0x28/0x64)
[<801973c0>] (run_local_timers) from [<80197460>] (update_process_times+0x3c/0x6c)
[<80197424>] (update_process_times) from [<801ab2b8>] (tick_nohz_handler+0xe0/0x1bc)
[<801ab1d8>] (tick_nohz_handler) from [<80843050>] (arch_timer_handler_virt+0x38/0x48)
[<80843018>] (arch_timer_handler_virt) from [<80180a64>] (handle_percpu_devid_irq+0x8c/0x240)
[<801809d8>] (handle_percpu_devid_irq) from [<8017ac20>] (generic_handle_irq+0x34/0x44)
[<8017abec>] (generic_handle_irq) from [<8017b344>] (__handle_domain_irq+0x6c/0xc4)
[<8017b2d8>] (__handle_domain_irq) from [<801022e0>] (gic_handle_irq+0x4c/0x88)
[<80102294>] (gic_handle_irq) from [<80101a30>] (__irq_svc+0x70/0x98)
[<802b8794>] (generic_fillattr) from [<8056b284>] (v9fs_vfs_getattr_dotl+0x74/0xa4)
[<8056b210>] (v9fs_vfs_getattr_dotl) from [<802b8904>] (vfs_getattr_nosec+0x68/0x7c)
[<802b889c>] (vfs_getattr_nosec) from [<802b895c>] (vfs_getattr+0x44/0x48)
[<802b8918>] (vfs_getattr) from [<802b8a74>] (vfs_statx+0x9c/0xec)
[<802b89d8>] (vfs_statx) from [<802b9428>] (sys_lstat64+0x48/0x78)
[<802b93e0>] (sys_lstat64) from [<80101000>] (ret_fast_syscall+0x0/0x28)
[[email protected]: updated comment to not refer to a function
in another subsystem]
Link: http://lkml.kernel.org/r/[email protected]
Cc: [email protected]
Fixes: 7549ae3e81cc ("9p: Use the i_size_[read, write]() macros instead of using inode->i_size directly.")
Reported-by: Xing Gaopeng <[email protected]>
Signed-off-by: Hou Tao <[email protected]>
Signed-off-by: Dominique Martinet <[email protected]>
|
/*
 * Scan forward from @sk along its hash chain for the next UDP socket that
 * should receive a multicast datagram with the given local/remote
 * address+port pair arriving on device index @dif.
 * Returns the matching socket, or NULL when the chain is exhausted.
 */
static inline struct sock *udp_v4_mcast_next(struct net *net, struct sock *sk,
					     __be16 loc_port, __be32 loc_addr,
					     __be16 rmt_port, __be32 rmt_addr,
					     int dif)
{
	struct hlist_nulls_node *node;
	struct sock *s = sk;
	unsigned short hnum = ntohs(loc_port);

	sk_nulls_for_each_from(s, node) {
		struct inet_sock *inet = inet_sk(s);

		/*
		 * Skip sockets that belong to another netns, are bound to a
		 * different local port, are connected to a different peer,
		 * are bound to a different local address, are IPv6-only, or
		 * are bound to a different device.
		 */
		if (!net_eq(sock_net(s), net) ||
		    udp_sk(s)->udp_port_hash != hnum ||
		    (inet->inet_daddr && inet->inet_daddr != rmt_addr) ||
		    (inet->inet_dport != rmt_port && inet->inet_dport) ||
		    (inet->inet_rcv_saddr &&
		     inet->inet_rcv_saddr != loc_addr) ||
		    ipv6_only_sock(s) ||
		    (s->sk_bound_dev_if && s->sk_bound_dev_if != dif))
			continue;
		/* Honour per-socket multicast source filters. */
		if (!ip_mc_sf_allow(s, loc_addr, rmt_addr, dif))
			continue;
		goto found;
	}
	s = NULL;
found:
	return s;
}
| 0 |
[
"CWE-400"
] |
linux-2.6
|
c377411f2494a931ff7facdbb3a6839b1266bcf6
| 144,625,016,311,581,400,000,000,000,000,000,000,000 | 29 |
net: sk_add_backlog() take rmem_alloc into account
Current socket backlog limit is not enough to really stop DDOS attacks,
because user thread spend many time to process a full backlog each
round, and user might crazy spin on socket lock.
We should add backlog size and receive_queue size (aka rmem_alloc) to
pace writers, and let user run without being slow down too much.
Introduce a sk_rcvqueues_full() helper, to avoid taking socket lock in
stress situations.
Under huge stress from a multiqueue/RPS enabled NIC, a single flow udp
receiver can now process ~200.000 pps (instead of ~100 pps before the
patch) on a 8 core machine.
Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
/*
 * Boot-time setup of the five built-in Smack labels (huh, hat, floor,
 * star, web): initialise each label's rule-list lock and rule list,
 * then insert the labels into the global known-labels list.
 */
static __init void init_smack_known_list(void)
{
	/*
	 * Initialize rule list locks
	 */
	mutex_init(&smack_known_huh.smk_rules_lock);
	mutex_init(&smack_known_hat.smk_rules_lock);
	mutex_init(&smack_known_floor.smk_rules_lock);
	mutex_init(&smack_known_star.smk_rules_lock);
	mutex_init(&smack_known_web.smk_rules_lock);
	/*
	 * Initialize rule lists
	 */
	INIT_LIST_HEAD(&smack_known_huh.smk_rules);
	INIT_LIST_HEAD(&smack_known_hat.smk_rules);
	INIT_LIST_HEAD(&smack_known_star.smk_rules);
	INIT_LIST_HEAD(&smack_known_floor.smk_rules);
	INIT_LIST_HEAD(&smack_known_web.smk_rules);
	/*
	 * Create the known labels list
	 */
	smk_insert_entry(&smack_known_huh);
	smk_insert_entry(&smack_known_hat);
	smk_insert_entry(&smack_known_star);
	smk_insert_entry(&smack_known_floor);
	smk_insert_entry(&smack_known_web);
}
| 0 |
[
"CWE-416"
] |
linux
|
a3727a8bac0a9e77c70820655fd8715523ba3db7
| 59,290,961,850,121,730,000,000,000,000,000,000,000 | 27 |
selinux,smack: fix subjective/objective credential use mixups
Jann Horn reported a problem with commit eb1231f73c4d ("selinux:
clarify task subjective and objective credentials") where some LSM
hooks were attempting to access the subjective credentials of a task
other than the current task. Generally speaking, it is not safe to
access another task's subjective credentials and doing so can cause
a number of problems.
Further, while looking into the problem, I realized that Smack was
suffering from a similar problem brought about by a similar commit
1fb057dcde11 ("smack: differentiate between subjective and objective
task credentials").
This patch addresses this problem by restoring the use of the task's
objective credentials in those cases where the task is other than the
current executing task. Not only does this resolve the problem
reported by Jann, it is arguably the correct thing to do in these
cases.
Cc: [email protected]
Fixes: eb1231f73c4d ("selinux: clarify task subjective and objective credentials")
Fixes: 1fb057dcde11 ("smack: differentiate between subjective and objective task credentials")
Reported-by: Jann Horn <[email protected]>
Acked-by: Eric W. Biederman <[email protected]>
Acked-by: Casey Schaufler <[email protected]>
Signed-off-by: Paul Moore <[email protected]>
|
GF_Err gnrm_box_size(GF_Box *s)
{
	/*
	 * Size computation for a generic (unknown) sample entry: force the
	 * box type to 'gnrm' and grow the declared size by the 8 fixed
	 * sample-entry bytes plus the opaque payload length.
	 */
	GF_GenericSampleEntryBox *entry = (GF_GenericSampleEntryBox *)s;

	s->type = GF_ISOM_BOX_TYPE_GNRM;
	entry->size += 8 + entry->data_size;
	return GF_OK;
}
| 0 |
[
"CWE-787"
] |
gpac
|
388ecce75d05e11fc8496aa4857b91245007d26e
| 49,695,693,548,336,950,000,000,000,000,000,000,000 | 7 |
fixed #1587
|
void dpy_gfx_copy(QemuConsole *con, int src_x, int src_y,
                  int dst_x, int dst_y, int w, int h)
{
    /*
     * Propagate a rectangle-copy on console @con to every display change
     * listener attached to it.  Listeners that lack a native copy
     * callback fall back to a plain update of the destination rectangle.
     */
    DisplayState *ds = con->ds;
    DisplayChangeListener *listener;

    if (!qemu_console_is_visible(con)) {
        return;
    }
    QLIST_FOREACH(listener, &ds->listeners, next) {
        /* A listener without an explicit console follows the active one. */
        QemuConsole *target = listener->con ? listener->con : active_console;

        if (target != con) {
            continue;
        }
        if (listener->ops->dpy_gfx_copy) {
            listener->ops->dpy_gfx_copy(listener, src_x, src_y,
                                        dst_x, dst_y, w, h);
        } else { /* TODO */
            listener->ops->dpy_gfx_update(listener, dst_x, dst_y, w, h);
        }
    }
}
| 0 |
[
"CWE-416"
] |
qemu
|
a4afa548fc6dd9842ed86639b4d37d4d1c4ad480
| 145,332,591,131,920,800,000,000,000,000,000,000,000 | 20 |
char: move front end handlers in CharBackend
Since the hanlders are associated with a CharBackend, rather than the
CharDriverState, it is more appropriate to store in CharBackend. This
avoids the handler copy dance in qemu_chr_fe_set_handlers() then
mux_chr_update_read_handler(), by storing the CharBackend pointer
directly.
Also a mux CharDriver should go through mux->backends[focused], since
chr->be will stay NULL. Before that, it was possible to call
chr->handler by mistake with surprising results, for ex through
qemu_chr_be_can_write(), which would result in calling the last set
handler front end, not the one with focus.
Signed-off-by: Marc-André Lureau <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
/// Synchronously write \p buffers to the connection's socket.
/// On error the failure is logged and check_destroy() is invoked so the
/// connection can be torn down if it is already marked for destruction.
///
/// NOTE(review): the lambda is passed as asio::write's completion
/// condition, which is expected to return std::size_t (0 == "transfer
/// complete").  Returning bool here means success (!ec) maps to 0 —
/// confirm that the intended "write everything" semantics actually hold.
inline void do_write_sync(std::vector<asio::const_buffer>& buffers)
{
    asio::write(adaptor_.socket(), buffers, [&](asio::error_code ec, std::size_t) {
        if (!ec)
        {
            return false;
        }
        else
        {
            CROW_LOG_ERROR << ec << " - happened while sending buffers";
            CROW_LOG_DEBUG << this << " from write (sync)(2)";
            check_destroy();
            return true;
        }
    });
}
| 0 |
[
"CWE-416"
] |
Crow
|
fba01dc76d6ea940ad7c8392e8f39f9647241d8e
| 248,755,917,875,383,470,000,000,000,000,000,000,000 | 17 |
Prevent HTTP pipelining which Crow doesn't support.
|
/*
 * Ensure that line <y> of window <p> has the side arrays (attributes,
 * fonts, extended fonts, colors, extended colors) needed to store
 * character <mc>.  The arrays are allocated lazily, only once a
 * character actually uses the corresponding feature.  If an allocation
 * fails, the feature is turned off for this character and the current
 * rendition (with a warning where applicable) so output can continue
 * with reduced rendition instead of failing.
 */
MFixLine(p, y, mc)
struct win *p;
int y;
struct mchar *mc;
{
  struct mline *ml = &p->w_mlines[y];
  if (mc->attr && ml->attr == null)
    {
      if ((ml->attr = (unsigned char *)calloc(p->w_width + 1, 1)) == 0)
	{
	  /* fall back to the shared all-zero row and disable attributes */
	  ml->attr = null;
	  mc->attr = p->w_rend.attr = 0;
	  WMsg(p, 0, "Warning: no space for attr - turned off");
	}
    }
#ifdef FONT
  if (mc->font && ml->font == null)
    {
      if ((ml->font = (unsigned char *)calloc(p->w_width + 1, 1)) == 0)
	{
	  /* reset charset state as well, since fonts are now unusable */
	  ml->font = null;
	  p->w_FontL = p->w_charsets[p->w_ss ? p->w_ss : p->w_Charset] = 0;
	  p->w_FontR = p->w_charsets[p->w_ss ? p->w_ss : p->w_CharsetR] = 0;
	  mc->font = mc->fontx = p->w_rend.font = 0;
	  WMsg(p, 0, "Warning: no space for font - turned off");
	}
    }
  if (mc->fontx && ml->fontx == null)
    {
      if ((ml->fontx = (unsigned char *)calloc(p->w_width + 1, 1)) == 0)
	{
	  ml->fontx = null;
	  mc->fontx = 0;
	}
    }
#endif
#ifdef COLOR
  if (mc->color && ml->color == null)
    {
      if ((ml->color = (unsigned char *)calloc(p->w_width + 1, 1)) == 0)
	{
	  ml->color = null;
	  mc->color = p->w_rend.color = 0;
	  WMsg(p, 0, "Warning: no space for color - turned off");
	}
    }
# ifdef COLORS256
  if (mc->colorx && ml->colorx == null)
    {
      if ((ml->colorx = (unsigned char *)calloc(p->w_width + 1, 1)) == 0)
	{
	  ml->colorx = null;
	  mc->colorx = p->w_rend.colorx = 0;
	  WMsg(p, 0, "Warning: no space for extended colors - turned off");
	}
    }
# endif
#endif
}
| 0 |
[] |
screen
|
c5db181b6e017cfccb8d7842ce140e59294d9f62
| 206,615,001,610,372,340,000,000,000,000,000,000,000 | 59 |
ansi: add support for xterm OSC 11
It allows for getting and setting the background color. Notably, Vim uses
OSC 11 to learn whether it's running on a light or dark colored terminal
and choose a color scheme accordingly.
Tested with gnome-terminal and xterm. When called with "?" argument the
current background color is returned:
$ echo -ne "\e]11;?\e\\"
$ 11;rgb:2323/2727/2929
Signed-off-by: Lubomir Rintel <[email protected]>
(cherry picked from commit 7059bff20a28778f9d3acf81cad07b1388d02309)
Signed-off-by: Amadeusz Sławiński <[email protected]>
|
/*
 * Parse <str> (of length <len>) into a freshly allocated php_http_url_t.
 * The parse state is allocated as a single block: the struct header plus
 * a scratch buffer of 3 * len bytes (state->maxlen) into which the URL
 * components are written by the parse_* stages.
 *
 * NOTE(review): safety depends on every parse_* stage staying within
 * maxlen and leaving state->ptr and the write offset consistent for the
 * next stage.  A soft failure in parse_scheme() that falls through into
 * path parsing without resetting the offset can overflow this buffer
 * (bug #71719) — the fix belongs in parse_scheme(); verify it there.
 *
 * Returns NULL (state freed, E_WARNING raised) if any component fails
 * to parse.
 */
php_http_url_t *php_http_url_parse(const char *str, size_t len, unsigned flags TSRMLS_DC)
{
	size_t maxlen = 3 * len;
	struct parse_state *state = ecalloc(1, sizeof(*state) + maxlen);

	state->end = str + len;
	state->ptr = str;
	state->flags = flags;
	state->maxlen = maxlen;
	TSRMLS_SET_CTX(state->ts);

	if (!parse_scheme(state)) {
		php_error_docref(NULL TSRMLS_CC, E_WARNING, "Failed to parse URL scheme: '%s'", state->ptr);
		efree(state);
		return NULL;
	}
	if (!parse_hier(state)) {
		efree(state);
		return NULL;
	}
	if (!parse_query(state)) {
		php_error_docref(NULL TSRMLS_CC, E_WARNING, "Failed to parse URL query: '%s'", state->ptr);
		efree(state);
		return NULL;
	}
	if (!parse_fragment(state)) {
		php_error_docref(NULL TSRMLS_CC, E_WARNING, "Failed to parse URL fragment: '%s'", state->ptr);
		efree(state);
		return NULL;
	}

	/* The state block doubles as the returned URL object. */
	return (php_http_url_t *) state;
}
| 1 |
[
"CWE-119"
] |
ext-http
|
3724cd76a28be1d6049b5537232e97ac567ae1f5
| 238,300,026,944,275,850,000,000,000,000,000,000,000 | 36 |
fix bug #71719 (Buffer overflow in HTTP url parsing functions)
The parser's offset was not reset when we softfail in scheme
parsing and continue to parse a path.
Thanks to hlt99 at blinkenshell dot org for the report.
|
/*
 * qsort() comparator ordering records ascending by their first field,
 * a long.  The previous implementation returned the raw difference of
 * the two longs truncated to int, which yields a wrong sign when the
 * subtraction overflows or when the difference's low 32 bits flip sign
 * on truncation (e.g. LONG_MIN vs. a positive key).  Use an explicit
 * three-way comparison returning the canonical -1 / 0 / 1 instead.
 */
static int sort_dvi_down(const void *p1, const void *p2)
{
	const long a = ((const long *)p1)[0];
	const long b = ((const long *)p2)[0];

	return (a > b) - (a < b);
}
| 0 |
[
"CWE-20"
] |
evince
|
d4139205b010ed06310d14284e63114e88ec6de2
| 189,625,051,641,516,400,000,000,000,000,000,000,000 | 4 |
backends: Fix several security issues in the dvi-backend.
See CVE-2010-2640, CVE-2010-2641, CVE-2010-2642 and CVE-2010-2643.
|
/*
 * Read @cnt dwords from firmware RAM at address @a into @p.
 *
 * Takes the RAM hardware semaphore (with a recovery attempt if the
 * initial poll times out), programs the MIF address register, then
 * pulls one dword per MIF command, polling for completion between
 * reads.  The completion-poll register differs on chip revision B1.
 * The semaphore is always released before returning.
 *
 * Returns 0 on success, -ETIME if the semaphore cannot be obtained,
 * or the error from the last completion poll.
 */
int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
				  u32 *p, u32 cnt)
{
	int err = 0;
	u32 val;

	err = readx_poll_timeout_atomic(hw_atl_sem_ram_get,
					self, val, val == 1U,
					1U, 10000U);

	if (err < 0) {
		bool is_locked;

		/* Release and re-acquire once before giving up. */
		hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
		is_locked = hw_atl_sem_ram_get(self);
		if (!is_locked) {
			err = -ETIME;
			goto err_exit;
		}
	}

	aq_hw_write_reg(self, HW_ATL_MIF_ADDR, a);

	/* ++cnt then --cnt in the condition: loop exactly cnt times,
	 * bailing out early if a completion poll fails. */
	for (++cnt; --cnt && !err;) {
		aq_hw_write_reg(self, HW_ATL_MIF_CMD, 0x00008000U);

		if (ATL_HW_IS_CHIP_FEATURE(self, REVISION_B1))
			err = readx_poll_timeout_atomic(hw_atl_utils_mif_addr_get,
							self, val, val != a,
							1U, 1000U);
		else
			err = readx_poll_timeout_atomic(hw_atl_utils_mif_cmd_get,
							self, val,
							!(val & 0x100),
							1U, 1000U);

		*(p++) = aq_hw_read_reg(self, HW_ATL_MIF_VAL);
		a += 4;
	}

	hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);

err_exit:
	return err;
}
| 0 |
[
"CWE-787"
] |
net
|
b922f622592af76b57cbc566eaeccda0b31a3496
| 120,807,791,984,516,860,000,000,000,000,000,000,000 | 45 |
atlantic: Fix OOB read and write in hw_atl_utils_fw_rpc_wait
This bug report shows up when running our research tools. The
reports is SOOB read, but it seems SOOB write is also possible
a few lines below.
In details, fw.len and sw.len are inputs coming from io. A len
over the size of self->rpc triggers SOOB. The patch fixes the
bugs by adding sanity checks.
The bugs are triggerable with compromised/malfunctioning devices.
They are potentially exploitable given they first leak up to
0xffff bytes and able to overwrite the region later.
The patch is tested with QEMU emulater.
This is NOT tested with a real device.
Attached is the log we found by fuzzing.
BUG: KASAN: slab-out-of-bounds in
hw_atl_utils_fw_upload_dwords+0x393/0x3c0 [atlantic]
Read of size 4 at addr ffff888016260b08 by task modprobe/213
CPU: 0 PID: 213 Comm: modprobe Not tainted 5.6.0 #1
Call Trace:
dump_stack+0x76/0xa0
print_address_description.constprop.0+0x16/0x200
? hw_atl_utils_fw_upload_dwords+0x393/0x3c0 [atlantic]
? hw_atl_utils_fw_upload_dwords+0x393/0x3c0 [atlantic]
__kasan_report.cold+0x37/0x7c
? aq_hw_read_reg_bit+0x60/0x70 [atlantic]
? hw_atl_utils_fw_upload_dwords+0x393/0x3c0 [atlantic]
kasan_report+0xe/0x20
hw_atl_utils_fw_upload_dwords+0x393/0x3c0 [atlantic]
hw_atl_utils_fw_rpc_call+0x95/0x130 [atlantic]
hw_atl_utils_fw_rpc_wait+0x176/0x210 [atlantic]
hw_atl_utils_mpi_create+0x229/0x2e0 [atlantic]
? hw_atl_utils_fw_rpc_wait+0x210/0x210 [atlantic]
? hw_atl_utils_initfw+0x9f/0x1c8 [atlantic]
hw_atl_utils_initfw+0x12a/0x1c8 [atlantic]
aq_nic_ndev_register+0x88/0x650 [atlantic]
? aq_nic_ndev_init+0x235/0x3c0 [atlantic]
aq_pci_probe+0x731/0x9b0 [atlantic]
? aq_pci_func_init+0xc0/0xc0 [atlantic]
local_pci_probe+0xd3/0x160
pci_device_probe+0x23f/0x3e0
Reported-by: Brendan Dolan-Gavitt <[email protected]>
Signed-off-by: Zekun Shen <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
/*
 * Top-level dissector for one BLIP frame.
 *
 * Wire layout: <message-number varint> <frame-flags varint>
 * [properties-length varint + properties] [body] <checksum>.
 * ACK frames carry no properties/body and are handled separately;
 * compressed frames are inflated first (when zlib support is built in).
 */
dissect_blip(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, _U_ void *data)
{

	proto_tree *blip_tree;
	gint        offset = 0;

	/* Set the protcol column to say BLIP */
	col_set_str(pinfo->cinfo, COL_PROTOCOL, "BLIP");
	/* Clear out stuff in the info column */
	col_clear(pinfo->cinfo,COL_INFO);

	// ------------------------------------- Setup BLIP tree -----------------------------------------------------------

	/* Add a subtree to dissection.  See WSDG 9.2.2. Dissecting the details of the protocol */
	proto_item *blip_item = proto_tree_add_item(tree, proto_blip, tvb, offset, -1, ENC_NA);

	blip_tree = proto_item_add_subtree(blip_item, ett_blip);

	// ------------------------ BLIP Frame Header: Message Number VarInt -----------------------------------------------

	// This gets the message number as a var int in order to find out how much to bump
	// the offset for the next proto_tree item
	guint64 value_message_num;
	guint varint_message_num_length = tvb_get_varint(
			tvb,
			offset,
			FT_VARINT_MAX_LEN,
			&value_message_num,
			ENC_VARINT_PROTOBUF);

	proto_tree_add_item(blip_tree, hf_blip_message_number, tvb, offset, varint_message_num_length, ENC_VARINT_PROTOBUF);

	offset += varint_message_num_length;

	// ------------------------ BLIP Frame Header: Frame Flags VarInt --------------------------------------------------

	// This gets the message number as a var int in order to find out how much to bump
	// the offset for the next proto_tree item
	guint64 value_frame_flags;
	guint varint_frame_flags_length = tvb_get_varint(
			tvb,
			offset,
			FT_VARINT_MAX_LEN,
			&value_frame_flags,
			ENC_VARINT_PROTOBUF);

	// Mask off the low 3 bits (the message-type field) before display.
	// NOTE(review): the cast truncates to 8 bits; flag bits above bit 7
	// would be lost here -- confirm the flags always fit one byte.
	guint64 masked = value_frame_flags & ~0x07;
	proto_tree_add_uint(blip_tree, hf_blip_frame_flags, tvb, offset, varint_frame_flags_length, (guint8)masked);

	offset += varint_frame_flags_length;

	const gchar* msg_type = get_message_type(value_frame_flags);
	gchar* msg_num = wmem_strdup_printf(wmem_packet_scope(), "#%" G_GUINT64_FORMAT, value_message_num);
	gchar* col_info = wmem_strconcat(wmem_packet_scope(), msg_type, msg_num, NULL);
	col_add_str(pinfo->cinfo, COL_INFO, col_info);

	// If it's an ACK message, handle that separately, since there are no properties etc.
	if (is_ack_message(value_frame_flags) == TRUE) {
		return handle_ack_message(tvb, pinfo, blip_tree, offset, value_frame_flags);
	}


	// ------------------------------------- Conversation Tracking -----------------------------------------------------

	blip_conversation_entry_t *conversation_entry_ptr = get_blip_conversation(pinfo);

	// Is this the first frame in a blip message with multiple frames?
	gboolean first_frame_in_msg = is_first_frame_in_msg(
			conversation_entry_ptr,
			pinfo,
			value_frame_flags,
			value_message_num
	);

	tvbuff_t* tvb_to_use = tvb;
	gboolean compressed = is_compressed(value_frame_flags);

	if(compressed) {
#ifdef HAVE_ZLIB
		// decompress() returns NULL on failure; bail out gracefully then.
		tvb_to_use = decompress(pinfo, blip_tree, tvb, offset, tvb_reported_length_remaining(tvb, offset) - BLIP_BODY_CHECKSUM_SIZE);
		if(!tvb_to_use) {
			return tvb_reported_length(tvb);
		}
#else /* ! HAVE_ZLIB */
		proto_tree_add_string(tree, hf_blip_message_body, tvb, offset, tvb_reported_length_remaining(tvb, offset), "<decompression support is not available>");
		return tvb_reported_length(tvb);
#endif /* ! HAVE_ZLIB */

		// The decompressed tvb starts at its own offset 0.
		offset = 0;
	}

	// Is this the first frame in a message?
	if (first_frame_in_msg == TRUE) {

		// ------------------------ BLIP Frame Header: Properties Length VarInt --------------------------------------------------

		// WARNING: this only works because this code assumes that ALL MESSAGES FIT INTO ONE FRAME, which is absolutely not true.
		// In other words, as soon as there is a message that spans two frames, this code will break.
		guint64 value_properties_length;
		guint value_properties_length_varint_length = tvb_get_varint(
				tvb_to_use,
				offset,
				FT_VARINT_MAX_LEN,
				&value_properties_length,
				ENC_VARINT_PROTOBUF);

		proto_tree_add_item(blip_tree, hf_blip_properties_length, tvb_to_use, offset, value_properties_length_varint_length, ENC_VARINT_PROTOBUF);

		offset += value_properties_length_varint_length;

		// ------------------------ BLIP Frame: Properties --------------------------------------------------

		// WARNING: this only works because this code assumes that ALL MESSAGES FIT INTO ONE FRAME, which is absolutely not true.
		// In other words, as soon as there is a message that spans two frames, this code will break.

		// At this point, the length of the properties is known and is stored in value_properties_length.
		// This reads the entire properties out of the tvb and into a buffer (buf).
		guint8* buf = tvb_get_string_enc(wmem_packet_scope(), tvb_to_use, offset, (gint) value_properties_length, ENC_UTF_8);

		// "Profile\0subChanges\0continuous\0true\0foo\0bar" -> "Profile:subChanges:continuous:true:foo:bar"
		// Iterate over buf and change all the \0 null characters to ':', since otherwise trying to set a header
		// field to this buffer via proto_tree_add_item() will end up only printing it up to the first null character,
		// for example "Profile", even though there are many more properties that follow.
		for (int i = 0; i < (int) value_properties_length; i++) {
			if (i < (int) (value_properties_length - 1)) {
				if (buf[i] == '\0') {  // TODO: I don't even know if this is actually a safe assumption in a UTF-8 encoded string
					buf[i] = ':';
				}
			}
		}

		if(value_properties_length > 0) {
			proto_tree_add_string(blip_tree, hf_blip_properties, tvb_to_use, offset, (int)value_properties_length, (const char *)buf);
		}

		// Bump the offset by the length of the properties
		offset += (gint)value_properties_length;
	}

	// ------------------------ BLIP Frame: Message Body --------------------------------------------------

	// WS_DLL_PUBLIC gint tvb_reported_length_remaining(const tvbuff_t *tvb, const gint offset);
	gint reported_length_remaining = tvb_reported_length_remaining(tvb_to_use, offset);

	// Don't read in the trailing checksum at the end
	if (!compressed && reported_length_remaining >= BLIP_BODY_CHECKSUM_SIZE) {
		reported_length_remaining -= BLIP_BODY_CHECKSUM_SIZE;
	}

	if(reported_length_remaining > 0) {
		proto_tree_add_item(blip_tree, hf_blip_message_body, tvb_to_use, offset, reported_length_remaining, ENC_UTF_8|ENC_NA);
	}

	// The checksum always sits in the last bytes of the *original* tvb.
	proto_tree_add_item(blip_tree, hf_blip_checksum, tvb, tvb_reported_length(tvb) - BLIP_BODY_CHECKSUM_SIZE, BLIP_BODY_CHECKSUM_SIZE, ENC_BIG_ENDIAN);

	// -------------------------------------------- Etc ----------------------------------------------------------------

	return tvb_captured_length(tvb);
}
| 0 |
[
"CWE-476"
] |
wireshark
|
4a948427100b6c109f4ec7b4361f0d2aec5e5c3f
| 126,311,140,020,855,400,000,000,000,000,000,000,000 | 162 |
BLIP: Fix decompression buffer bug
Until now, mistakenly, the buffer for decompressing compressed BLIP messages
has been statically allocated as 16 Kb, but that is not valid behavior.
16 Kb is the maximum size of a _compressed_ frame. In theory, due to the
ability to zipbomb, there is virtually no upper bound on what the maximum
size of an uncompressed frame could be. However, to keep sanity, it has
been made into a preference with a reasonable default that is not likely to
be exceeded (64 Kb). The behavior before for this was that wireshark would
crash because the dissector would return NULL for a decompressed buffer due
to error and then try to deference it later. A null check has been added,
so that the behavior is now that the packet will show
'<Error decompressing message>' instead, and log why it couldn't handle the
compressed message. Closes #16866.
|
/*
 * Receive one framed message from socket <s>: a struct net_hdr (message
 * type plus 32-bit big-endian payload length) followed by the payload,
 * which is copied into <arg>.  On input *len is the capacity of <arg>;
 * on success it is updated to the actual payload length and the message
 * type is returned.  Returns -1 on read error or malformed header.
 */
int net_get(int s, void *arg, int *len)
{
	struct net_hdr nh;
	int plen;

	if (net_read_exact(s, &nh, sizeof(nh)) == -1)
	{
		return -1;
	}

	plen = ntohl(nh.nh_len);

	/*
	 * The length field comes from the peer and must be validated, not
	 * assert()ed: assert() aborts the whole process on hostile input
	 * and disappears entirely under NDEBUG, which would let a
	 * malicious server drive net_read_exact() past the caller's
	 * buffer with a negative or oversized length.
	 */
	if (plen <= 0 || plen > *len)
	{
		printf("Invalid PLEN %d type %d len %d\n",
		       plen, nh.nh_type, *len);
		return -1;
	}

	*len = plen;
	if ((*len) && (net_read_exact(s, arg, *len) == -1))
	{
		return -1;
	}

	return nh.nh_type;
}
| 0 |
[
"CWE-20",
"CWE-787"
] |
aircrack-ng
|
88702a3ce4c28a973bf69023cd0312f412f6193e
| 48,368,137,250,891,780,000,000,000,000,000,000,000 | 24 |
OSdep: Fixed segmentation fault that happens with a malicious server sending a negative length (Closes #16 on GitHub).
git-svn-id: http://svn.aircrack-ng.org/trunk@2419 28c6078b-6c39-48e3-add9-af49d547ecab
|
/*
 * Read the free-list block array (AGFL) for allocation group @agno into
 * a buffer, verified on read by xfs_agfl_buf_ops.  On success *bpp
 * holds the buffer (reference type tagged XFS_AGFL_REF) and 0 is
 * returned; otherwise the error from xfs_trans_read_buf is returned.
 */
xfs_alloc_read_agfl(
	xfs_mount_t	*mp,		/* mount point structure */
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_agnumber_t	agno,		/* allocation group number */
	xfs_buf_t	**bpp)		/* buffer for the ag free block array */
{
	xfs_buf_t	*bp;		/* return value */
	int		error;

	ASSERT(agno != NULLAGNUMBER);
	error = xfs_trans_read_buf(
			mp, tp, mp->m_ddev_targp,
			XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)),
			XFS_FSS_TO_BB(mp, 1), 0, &bp, &xfs_agfl_buf_ops);
	if (error)
		return error;
	xfs_buf_set_ref(bp, XFS_AGFL_REF);
	*bpp = bp;
	return 0;
}
| 0 |
[
"CWE-400",
"CWE-703",
"CWE-835"
] |
linux
|
d0c7feaf87678371c2c09b3709400be416b2dc62
| 23,169,877,829,617,473,000,000,000,000,000,000,000 | 20 |
xfs: add agf freeblocks verify in xfs_agf_verify
We recently used fuzz(hydra) to test XFS and automatically generate
tmp.img(XFS v5 format, but some metadata is wrong)
xfs_repair information(just one AG):
agf_freeblks 0, counted 3224 in ag 0
agf_longest 536874136, counted 3224 in ag 0
sb_fdblocks 613, counted 3228
Test as follows:
mount tmp.img tmpdir
cp file1M tmpdir
sync
In 4.19-stable, sync will stuck, the reason is:
xfs_mountfs
xfs_check_summary_counts
if ((!xfs_sb_version_haslazysbcount(&mp->m_sb) ||
XFS_LAST_UNMOUNT_WAS_CLEAN(mp)) &&
!xfs_fs_has_sickness(mp, XFS_SICK_FS_COUNTERS))
return 0; -->just return, incore sb_fdblocks still be 613
xfs_initialize_perag_data
cp file1M tmpdir -->ok(write file to pagecache)
sync -->stuck(write pagecache to disk)
xfs_map_blocks
xfs_iomap_write_allocate
while (count_fsb != 0) {
nimaps = 0;
while (nimaps == 0) { --> endless loop
nimaps = 1;
xfs_bmapi_write(..., &nimaps) --> nimaps becomes 0 again
xfs_bmapi_write
xfs_bmap_alloc
xfs_bmap_btalloc
xfs_alloc_vextent
xfs_alloc_fix_freelist
xfs_alloc_space_available -->fail(agf_freeblks is 0)
In linux-next, sync not stuck, cause commit c2b3164320b5 ("xfs:
use the latest extent at writeback delalloc conversion time") remove
the above while, dmesg is as follows:
[ 55.250114] XFS (loop0): page discard on page ffffea0008bc7380, inode 0x1b0c, offset 0.
Users do not know why this page is discard, the better soultion is:
1. Like xfs_repair, make sure sb_fdblocks is equal to counted
(xfs_initialize_perag_data did this, who is not called at this mount)
2. Add agf verify, if fail, will tell users to repair
This patch use the second soultion.
Signed-off-by: Zheng Bin <[email protected]>
Signed-off-by: Ren Xudong <[email protected]>
Reviewed-by: Darrick J. Wong <[email protected]>
Signed-off-by: Darrick J. Wong <[email protected]>
|
static inline uint32_t htx_free_data_space(const struct htx *htx)
{
uint32_t free = htx_free_space(htx);
if (free < sizeof(struct htx_blk))
return 0;
return (free - sizeof(struct htx_blk));
}
| 0 |
[
"CWE-190"
] |
haproxy
|
3b69886f7dcc3cfb3d166309018e6cfec9ce2c95
| 140,756,956,588,875,590,000,000,000,000,000,000,000 | 8 |
BUG/MAJOR: htx: fix missing header name length check in htx_add_header/trailer
Ori Hollander of JFrog Security reported that htx_add_header() and
htx_add_trailer() were missing a length check on the header name. While
this does not allow to overwrite any memory area, it results in bits of
the header name length to slip into the header value length and may
result in forging certain header names on the input. The sad thing here
is that a FIXME comment was present suggesting to add the required length
checks :-(
The injected headers are visible to the HTTP internals and to the config
rules, so haproxy will generally stay synchronized with the server. But
there is one exception which is the content-length header field, because
it is already deduplicated on the input, but before being indexed. As
such, injecting a content-length header after the deduplication stage
may be abused to present a different, shorter one on the other side and
help build a request smuggling attack, or even maybe a response splitting
attack. CVE-2021-40346 was assigned to this problem.
As a mitigation measure, it is sufficient to verify that no more than
one such header is present in any message, which is normally the case
thanks to the duplicate checks:
http-request deny if { req.hdr_cnt(content-length) gt 1 }
http-response deny if { res.hdr_cnt(content-length) gt 1 }
This must be backported to all HTX-enabled versions, hence as far as 2.0.
In 2.3 and earlier, the functions are in src/htx.c instead.
Many thanks to Ori for his work and his responsible report!
|
i_apply_case_fold(OnigCodePoint from, OnigCodePoint to[],
int to_len, void* arg)
{
IApplyCaseFoldArg* iarg;
ScanEnv* env;
CClassNode* cc;
CClassNode* asc_cc;
BitSetRef bs;
int add_flag, r;
iarg = (IApplyCaseFoldArg* )arg;
env = iarg->env;
cc = iarg->cc;
asc_cc = iarg->asc_cc;
bs = cc->bs;
if (IS_NULL(asc_cc)) {
add_flag = 0;
}
else if (ONIGENC_IS_ASCII_CODE(from) == ONIGENC_IS_ASCII_CODE(*to)) {
add_flag = 1;
}
else {
add_flag = onig_is_code_in_cc(env->enc, from, asc_cc);
if (IS_NCCLASS_NOT(asc_cc))
add_flag = !add_flag;
}
if (to_len == 1) {
int is_in = onig_is_code_in_cc(env->enc, from, cc);
#ifdef CASE_FOLD_IS_APPLIED_INSIDE_NEGATIVE_CCLASS
if ((is_in != 0 && !IS_NCCLASS_NOT(cc)) ||
(is_in == 0 && IS_NCCLASS_NOT(cc))) {
if (add_flag) {
if (ONIGENC_MBC_MINLEN(env->enc) > 1 || *to >= SINGLE_BYTE_SIZE) {
r = add_code_range0(&(cc->mbuf), env, *to, *to, 0);
if (r < 0) return r;
}
else {
BITSET_SET_BIT(bs, *to);
}
}
}
#else
if (is_in != 0) {
if (add_flag) {
if (ONIGENC_MBC_MINLEN(env->enc) > 1 || *to >= SINGLE_BYTE_SIZE) {
if (IS_NCCLASS_NOT(cc)) clear_not_flag_cclass(cc, env->enc);
r = add_code_range0(&(cc->mbuf), env, *to, *to, 0);
if (r < 0) return r;
}
else {
if (IS_NCCLASS_NOT(cc)) {
BITSET_CLEAR_BIT(bs, *to);
}
else {
BITSET_SET_BIT(bs, *to);
}
}
}
}
#endif /* CASE_FOLD_IS_APPLIED_INSIDE_NEGATIVE_CCLASS */
}
else {
int r, i, len;
UChar buf[ONIGENC_CODE_TO_MBC_MAXLEN];
Node *snode = NULL_NODE;
if (onig_is_code_in_cc(env->enc, from, cc)
#ifdef CASE_FOLD_IS_APPLIED_INSIDE_NEGATIVE_CCLASS
&& !IS_NCCLASS_NOT(cc)
#endif
) {
for (i = 0; i < to_len; i++) {
len = ONIGENC_CODE_TO_MBC(env->enc, to[i], buf);
if (i == 0) {
snode = onig_node_new_str(buf, buf + len);
CHECK_NULL_RETURN_MEMERR(snode);
/* char-class expanded multi-char only
compare with string folded at match time. */
NSTRING_SET_AMBIG(snode);
}
else {
r = onig_node_str_cat(snode, buf, buf + len);
if (r < 0) {
onig_node_free(snode);
return r;
}
}
}
*(iarg->ptail) = onig_node_new_alt(snode, NULL_NODE);
CHECK_NULL_RETURN_MEMERR(*(iarg->ptail));
iarg->ptail = &(NCDR((*(iarg->ptail))));
}
}
return 0;
}
| 0 |
[
"CWE-476"
] |
Onigmo
|
00cc7e28a3ed54b3b512ef3b58ea737a57acf1f9
| 139,951,535,107,238,130,000,000,000,000,000,000,000 | 100 |
Fix SEGV in onig_error_code_to_str() (Fix #132)
When onig_new(ONIG_SYNTAX_PERL) fails with ONIGERR_INVALID_GROUP_NAME,
onig_error_code_to_str() crashes.
onig_scan_env_set_error_string() should have been used when returning
ONIGERR_INVALID_GROUP_NAME.
|
static int tipc_sendmsg(struct socket *sock,
struct msghdr *m, size_t dsz)
{
struct sock *sk = sock->sk;
int ret;
lock_sock(sk);
ret = __tipc_sendmsg(sock, m, dsz);
release_sock(sk);
return ret;
}
| 0 |
[
"CWE-703"
] |
linux
|
45e093ae2830cd1264677d47ff9a95a71f5d9f9c
| 85,441,286,997,262,940,000,000,000,000,000,000,000 | 12 |
tipc: check nl sock before parsing nested attributes
Make sure the socket for which the user is listing publication exists
before parsing the socket netlink attributes.
Prior to this patch a call without any socket caused a NULL pointer
dereference in tipc_nl_publ_dump().
Tested-and-reported-by: Baozeng Ding <[email protected]>
Signed-off-by: Richard Alpe <[email protected]>
Acked-by: Jon Maloy <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
png_check_chunk_name(png_structrp png_ptr, png_uint_32 chunk_name)
{
int i;
png_debug(1, "in png_check_chunk_name");
for (i=1; i<=4; ++i)
{
int c = chunk_name & 0xff;
if (c < 65 || c > 122 || (c > 90 && c < 97))
png_chunk_error(png_ptr, "invalid chunk type");
chunk_name >>= 8;
}
}
| 0 |
[
"CWE-120"
] |
libpng
|
a901eb3ce6087e0afeef988247f1a1aa208cb54d
| 251,515,594,105,549,600,000,000,000,000,000,000,000 | 16 |
[libpng16] Prevent reading over-length PLTE chunk (Cosmin Truta).
|
int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
{
struct ext4_group_info *this_grp;
struct ext4_buddy e4b;
struct page *page;
int ret = 0;
might_sleep();
mb_debug(1, "init group %u\n", group);
this_grp = ext4_get_group_info(sb, group);
/*
* This ensures that we don't reinit the buddy cache
* page which map to the group from which we are already
* allocating. If we are looking at the buddy cache we would
* have taken a reference using ext4_mb_load_buddy and that
* would have pinned buddy page to page cache.
* The call to ext4_mb_get_buddy_page_lock will mark the
* page accessed.
*/
ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b, gfp);
if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) {
/*
* somebody initialized the group
* return without doing anything
*/
goto err;
}
page = e4b.bd_bitmap_page;
ret = ext4_mb_init_cache(page, NULL, gfp);
if (ret)
goto err;
if (!PageUptodate(page)) {
ret = -EIO;
goto err;
}
if (e4b.bd_buddy_page == NULL) {
/*
* If both the bitmap and buddy are in
* the same page we don't need to force
* init the buddy
*/
ret = 0;
goto err;
}
/* init buddy cache */
page = e4b.bd_buddy_page;
ret = ext4_mb_init_cache(page, e4b.bd_bitmap, gfp);
if (ret)
goto err;
if (!PageUptodate(page)) {
ret = -EIO;
goto err;
}
err:
ext4_mb_put_buddy_page_lock(&e4b);
return ret;
}
| 0 |
[
"CWE-416"
] |
linux
|
8844618d8aa7a9973e7b527d038a2a589665002c
| 231,005,119,026,378,000,000,000,000,000,000,000,000 | 60 |
ext4: only look at the bg_flags field if it is valid
The bg_flags field in the block group descripts is only valid if the
uninit_bg or metadata_csum feature is enabled. We were not
consistently looking at this field; fix this.
Also block group #0 must never have uninitialized allocation bitmaps,
or need to be zeroed, since that's where the root inode, and other
special inodes are set up. Check for these conditions and mark the
file system as corrupted if they are detected.
This addresses CVE-2018-10876.
https://bugzilla.kernel.org/show_bug.cgi?id=199403
Signed-off-by: Theodore Ts'o <[email protected]>
Cc: [email protected]
|
fill4 (unsigned short data[/*n*/], int n)
{
for (int i = 0; i < n; ++i)
data[i] = i & USHRT_MAX;
}
| 0 |
[
"CWE-190"
] |
openexr
|
51a92d67f53c08230734e74564c807043cbfe41e
| 86,003,367,165,073,430,000,000,000,000,000,000,000 | 5 |
check for valid Huf code lengths (#849)
* check for valid Huf code lengths
* test non-fast huf decoder in testHuf
Signed-off-by: Peter Hillman <[email protected]>
|
static void cmd_window_rsize(const char *data)
{
char rsizestr[MAX_INT_STRLEN];
int rsize;
if (!is_numeric(data, 0)) return;
rsize = atoi(data);
rsize -= MAIN_WINDOW_TEXT_WIDTH(WINDOW_MAIN(active_win));
if (rsize == 0) return;
ltoa(rsizestr, rsize < 0 ? -rsize : rsize);
if (rsize < 0)
cmd_window_rshrink(rsizestr);
else
cmd_window_rgrow(rsizestr);
}
| 0 |
[
"CWE-476"
] |
irssi
|
5b5bfef03596d95079c728f65f523570dd7b03aa
| 256,882,537,519,056,140,000,000,000,000,000,000,000 | 17 |
check the error condition of mainwindow_create
|
bool Arg_comparator::set_cmp_func_string()
{
THD *thd= current_thd;
func= is_owner_equal_func() ? &Arg_comparator::compare_e_string :
&Arg_comparator::compare_string;
if (compare_type() == STRING_RESULT &&
(*a)->result_type() == STRING_RESULT &&
(*b)->result_type() == STRING_RESULT)
{
/*
We must set cmp_collation here as we may be called from for an automatic
generated item, like in natural join
*/
if (owner->agg_arg_charsets_for_comparison(&m_compare_collation, a, b))
return true;
if ((*a)->type() == Item::FUNC_ITEM &&
((Item_func *) (*a))->functype() == Item_func::JSON_EXTRACT_FUNC)
{
func= is_owner_equal_func() ? &Arg_comparator::compare_e_json_str:
&Arg_comparator::compare_json_str;
return 0;
}
else if ((*b)->type() == Item::FUNC_ITEM &&
((Item_func *) (*b))->functype() == Item_func::JSON_EXTRACT_FUNC)
{
func= is_owner_equal_func() ? &Arg_comparator::compare_e_json_str:
&Arg_comparator::compare_str_json;
return 0;
}
}
a= cache_converted_constant(thd, a, &a_cache, compare_type_handler());
b= cache_converted_constant(thd, b, &b_cache, compare_type_handler());
return false;
}
| 0 |
[
"CWE-617"
] |
server
|
807945f2eb5fa22e6f233cc17b85a2e141efe2c8
| 329,199,827,716,717,650,000,000,000,000,000,000,000 | 36 |
MDEV-26402: A SEGV in Item_field::used_tables/update_depend_map_for_order...
When doing condition pushdown from HAVING into WHERE,
Item_equal::create_pushable_equalities() calls
item->set_extraction_flag(IMMUTABLE_FL) for constant items.
Then, Item::cleanup_excluding_immutables_processor() checks for this flag
to see if it should call item->cleanup() or leave the item as-is.
The failure happens when a constant item has a non-constant one inside it,
like:
(tbl.col=0 AND impossible_cond)
item->walk(cleanup_excluding_immutables_processor) works in a bottom-up
way so it
1. will call Item_func_eq(tbl.col=0)->cleanup()
2. will not call Item_cond_and->cleanup (as the AND is constant)
This creates an item tree where a fixed Item has an un-fixed Item inside
it which eventually causes an assertion failure.
Fixed by introducing this rule: instead of just calling
item->set_extraction_flag(IMMUTABLE_FL);
we call Item::walk() to set the flag for all sub-items of the item.
|
static int do_qib_user_sdma_queue_create(struct file *fp)
{
struct qib_filedata *fd = fp->private_data;
struct qib_ctxtdata *rcd = fd->rcd;
struct qib_devdata *dd = rcd->dd;
if (dd->flags & QIB_HAS_SEND_DMA) {
fd->pq = qib_user_sdma_queue_create(&dd->pcidev->dev,
dd->unit,
rcd->ctxt,
fd->subctxt);
if (!fd->pq)
return -ENOMEM;
}
return 0;
}
| 0 |
[
"CWE-284",
"CWE-264"
] |
linux
|
e6bd18f57aad1a2d1ef40e646d03ed0f2515c9e3
| 210,038,865,497,004,370,000,000,000,000,000,000,000 | 18 |
IB/security: Restrict use of the write() interface
The drivers/infiniband stack uses write() as a replacement for
bi-directional ioctl(). This is not safe. There are ways to
trigger write calls that result in the return structure that
is normally written to user space being shunted off to user
specified kernel memory instead.
For the immediate repair, detect and deny suspicious accesses to
the write API.
For long term, update the user space libraries and the kernel API
to something that doesn't present the same security vulnerabilities
(likely a structured ioctl() interface).
The impacted uAPI interfaces are generally only available if
hardware from drivers/infiniband is installed in the system.
Reported-by: Jann Horn <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Signed-off-by: Jason Gunthorpe <[email protected]>
[ Expanded check to all known write() entry points ]
Cc: [email protected]
Signed-off-by: Doug Ledford <[email protected]>
|
static bool stack_slot_scratched(const struct bpf_verifier_env *env, u64 regno)
{
return (env->scratched_stack_slots >> regno) & 1;
}
| 0 |
[
"CWE-787"
] |
linux
|
64620e0a1e712a778095bd35cbb277dc2259281f
| 174,859,003,093,025,400,000,000,000,000,000,000,000 | 4 |
bpf: Fix out of bounds access for ringbuf helpers
Both bpf_ringbuf_submit() and bpf_ringbuf_discard() have ARG_PTR_TO_ALLOC_MEM
in their bpf_func_proto definition as their first argument. They both expect
the result from a prior bpf_ringbuf_reserve() call which has a return type of
RET_PTR_TO_ALLOC_MEM_OR_NULL.
Meaning, after a NULL check in the code, the verifier will promote the register
type in the non-NULL branch to a PTR_TO_MEM and in the NULL branch to a known
zero scalar. Generally, pointer arithmetic on PTR_TO_MEM is allowed, so the
latter could have an offset.
The ARG_PTR_TO_ALLOC_MEM expects a PTR_TO_MEM register type. However, the non-
zero result from bpf_ringbuf_reserve() must be fed into either bpf_ringbuf_submit()
or bpf_ringbuf_discard() but with the original offset given it will then read
out the struct bpf_ringbuf_hdr mapping.
The verifier missed to enforce a zero offset, so that out of bounds access
can be triggered which could be used to escalate privileges if unprivileged
BPF was enabled (disabled by default in kernel).
Fixes: 457f44363a88 ("bpf: Implement BPF ring buffer and verifier support for it")
Reported-by: <[email protected]> (SecCoder Security Lab)
Signed-off-by: Daniel Borkmann <[email protected]>
Acked-by: John Fastabend <[email protected]>
Acked-by: Alexei Starovoitov <[email protected]>
|
c_pdf14trans_equal(const gs_composite_t * pct0, const gs_composite_t * pct1)
{
return false;
}
| 0 |
[
"CWE-416"
] |
ghostpdl
|
90fd0c7ca3efc1ddff64a86f4104b13b3ac969eb
| 271,919,930,069,348,930,000,000,000,000,000,000,000 | 4 |
Bug 697456. Dont create new ctx when pdf14 device reenabled
This bug had yet another weird case where the user created a
file that pushed the pdf14 device twice. We were in that case,
creating a new ctx and blowing away the original one with out
proper clean up. To avoid, only create a new one when we need it.
|
static inline int dma_submit_error(dma_cookie_t cookie)
{
return cookie < 0 ? cookie : 0;
}
| 0 |
[] |
linux
|
7bced397510ab569d31de4c70b39e13355046387
| 117,646,721,342,459,300,000,000,000,000,000,000,000 | 4 |
net_dma: simple removal
Per commit "77873803363c net_dma: mark broken" net_dma is no longer used
and there is no plan to fix it.
This is the mechanical removal of bits in CONFIG_NET_DMA ifdef guards.
Reverting the remainder of the net_dma induced changes is deferred to
subsequent patches.
Marked for stable due to Roman's report of a memory leak in
dma_pin_iovec_pages():
https://lkml.org/lkml/2014/9/3/177
Cc: Dave Jiang <[email protected]>
Cc: Vinod Koul <[email protected]>
Cc: David Whipple <[email protected]>
Cc: Alexander Duyck <[email protected]>
Cc: <[email protected]>
Reported-by: Roman Gushchin <[email protected]>
Acked-by: David S. Miller <[email protected]>
Signed-off-by: Dan Williams <[email protected]>
|
static int imap_open_mailbox (CONTEXT* ctx)
{
IMAP_DATA *idata;
IMAP_STATUS* status;
char buf[LONG_STRING];
char bufout[LONG_STRING*2];
int count = 0;
IMAP_MBOX mx, pmx;
int rc;
const char *condstore;
if (imap_parse_path (ctx->path, &mx))
{
mutt_error (_("%s is an invalid IMAP path"), ctx->path);
return -1;
}
/* we require a connection which isn't currently in IMAP_SELECTED state */
if (!(idata = imap_conn_find (&(mx.account), MUTT_IMAP_CONN_NOSELECT)))
goto fail_noidata;
/* once again the context is new */
ctx->data = idata;
/* Clean up path and replace the one in the ctx */
imap_fix_path (idata, mx.mbox, buf, sizeof (buf));
if (!*buf)
strfcpy (buf, "INBOX", sizeof (buf));
FREE(&(idata->mailbox));
idata->mailbox = safe_strdup (buf);
imap_qualify_path (buf, sizeof (buf), &mx, idata->mailbox);
FREE (&(ctx->path));
FREE (&(ctx->realpath));
ctx->path = safe_strdup (buf);
ctx->realpath = safe_strdup (ctx->path);
idata->ctx = ctx;
/* clear mailbox status */
idata->status = 0;
memset (idata->ctx->rights, 0, sizeof (idata->ctx->rights));
idata->newMailCount = 0;
idata->max_msn = 0;
if (!ctx->quiet)
mutt_message (_("Selecting %s..."), idata->mailbox);
imap_munge_mbox_name (idata, buf, sizeof(buf), idata->mailbox);
/* pipeline ACL test */
if (mutt_bit_isset (idata->capabilities, ACL))
{
snprintf (bufout, sizeof (bufout), "MYRIGHTS %s", buf);
imap_exec (idata, bufout, IMAP_CMD_QUEUE);
}
/* assume we have all rights if ACL is unavailable */
else
{
mutt_bit_set (idata->ctx->rights, MUTT_ACL_LOOKUP);
mutt_bit_set (idata->ctx->rights, MUTT_ACL_READ);
mutt_bit_set (idata->ctx->rights, MUTT_ACL_SEEN);
mutt_bit_set (idata->ctx->rights, MUTT_ACL_WRITE);
mutt_bit_set (idata->ctx->rights, MUTT_ACL_INSERT);
mutt_bit_set (idata->ctx->rights, MUTT_ACL_POST);
mutt_bit_set (idata->ctx->rights, MUTT_ACL_CREATE);
mutt_bit_set (idata->ctx->rights, MUTT_ACL_DELETE);
}
/* pipeline the postponed count if possible */
pmx.mbox = NULL;
if (mx_is_imap (Postponed) && !imap_parse_path (Postponed, &pmx)
&& mutt_account_match (&pmx.account, &mx.account))
imap_status (Postponed, 1);
FREE (&pmx.mbox);
#if USE_HCACHE
if (mutt_bit_isset (idata->capabilities, CONDSTORE) &&
option (OPTIMAPCONDSTORE))
condstore = " (CONDSTORE)";
else
#endif
condstore = "";
snprintf (bufout, sizeof (bufout), "%s %s%s",
ctx->readonly ? "EXAMINE" : "SELECT",
buf, condstore);
idata->state = IMAP_SELECTED;
imap_cmd_start (idata, bufout);
status = imap_mboxcache_get (idata, idata->mailbox, 1);
do
{
char *pc;
if ((rc = imap_cmd_step (idata)) != IMAP_CMD_CONTINUE)
break;
pc = idata->buf + 2;
/* Obtain list of available flags here, may be overridden by a
* PERMANENTFLAGS tag in the OK response */
if (ascii_strncasecmp ("FLAGS", pc, 5) == 0)
{
/* don't override PERMANENTFLAGS */
if (!idata->flags)
{
dprint (3, (debugfile, "Getting mailbox FLAGS\n"));
if ((pc = imap_get_flags (&(idata->flags), pc)) == NULL)
goto fail;
}
}
/* PERMANENTFLAGS are massaged to look like FLAGS, then override FLAGS */
else if (ascii_strncasecmp ("OK [PERMANENTFLAGS", pc, 18) == 0)
{
dprint (3, (debugfile, "Getting mailbox PERMANENTFLAGS\n"));
/* safe to call on NULL */
mutt_free_list (&(idata->flags));
/* skip "OK [PERMANENT" so syntax is the same as FLAGS */
pc += 13;
if ((pc = imap_get_flags (&(idata->flags), pc)) == NULL)
goto fail;
}
/* save UIDVALIDITY for the header cache */
else if (ascii_strncasecmp ("OK [UIDVALIDITY", pc, 14) == 0)
{
dprint (3, (debugfile, "Getting mailbox UIDVALIDITY\n"));
pc += 3;
pc = imap_next_word (pc);
if (mutt_atoui (pc, &idata->uid_validity) < 0)
goto fail;
status->uidvalidity = idata->uid_validity;
}
else if (ascii_strncasecmp ("OK [UIDNEXT", pc, 11) == 0)
{
dprint (3, (debugfile, "Getting mailbox UIDNEXT\n"));
pc += 3;
pc = imap_next_word (pc);
if (mutt_atoui (pc, &idata->uidnext) < 0)
goto fail;
status->uidnext = idata->uidnext;
}
else if (ascii_strncasecmp ("OK [HIGHESTMODSEQ", pc, 17) == 0)
{
dprint (3, (debugfile, "Getting mailbox HIGHESTMODSEQ\n"));
pc += 3;
pc = imap_next_word (pc);
if (mutt_atoull (pc, &idata->modseq) < 0)
goto fail;
status->modseq = idata->modseq;
}
else if (ascii_strncasecmp ("OK [NOMODSEQ", pc, 12) == 0)
{
dprint (3, (debugfile, "Mailbox has NOMODSEQ set\n"));
status->modseq = idata->modseq = 0;
}
else
{
pc = imap_next_word (pc);
if (!ascii_strncasecmp ("EXISTS", pc, 6))
{
count = idata->newMailCount;
idata->newMailCount = 0;
}
}
}
while (rc == IMAP_CMD_CONTINUE);
if (rc == IMAP_CMD_NO)
{
char *s;
s = imap_next_word (idata->buf); /* skip seq */
s = imap_next_word (s); /* Skip response */
mutt_error ("%s", s);
mutt_sleep (2);
goto fail;
}
if (rc != IMAP_CMD_OK)
goto fail;
/* check for READ-ONLY notification */
if (!ascii_strncasecmp (imap_get_qualifier (idata->buf), "[READ-ONLY]", 11) &&
!mutt_bit_isset (idata->capabilities, ACL))
{
dprint (2, (debugfile, "Mailbox is read-only.\n"));
ctx->readonly = 1;
}
#ifdef DEBUG
/* dump the mailbox flags we've found */
if (debuglevel > 2)
{
if (!idata->flags)
dprint (3, (debugfile, "No folder flags found\n"));
else
{
LIST* t = idata->flags;
dprint (3, (debugfile, "Mailbox flags: "));
t = t->next;
while (t)
{
dprint (3, (debugfile, "[%s] ", t->data));
t = t->next;
}
dprint (3, (debugfile, "\n"));
}
}
#endif
if (!(mutt_bit_isset(idata->ctx->rights, MUTT_ACL_DELETE) ||
mutt_bit_isset(idata->ctx->rights, MUTT_ACL_SEEN) ||
mutt_bit_isset(idata->ctx->rights, MUTT_ACL_WRITE) ||
mutt_bit_isset(idata->ctx->rights, MUTT_ACL_INSERT)))
ctx->readonly = 1;
ctx->hdrmax = count;
ctx->hdrs = safe_calloc (count, sizeof (HEADER *));
ctx->v2r = safe_calloc (count, sizeof (int));
ctx->msgcount = 0;
if (count && (imap_read_headers (idata, 1, count, 1) < 0))
{
mutt_error _("Error opening mailbox");
mutt_sleep (1);
goto fail;
}
imap_disallow_reopen (ctx);
dprint (2, (debugfile, "imap_open_mailbox: msgcount is %d\n", ctx->msgcount));
FREE (&mx.mbox);
return 0;
fail:
if (idata->state == IMAP_SELECTED)
idata->state = IMAP_AUTHENTICATED;
fail_noidata:
FREE (&mx.mbox);
return -1;
}
| 0 |
[
"CWE-200",
"CWE-319"
] |
mutt
|
3e88866dc60b5fa6aaba6fd7c1710c12c1c3cd01
| 157,975,209,057,246,970,000,000,000,000,000,000,000 | 244 |
Prevent possible IMAP MITM via PREAUTH response.
This is similar to CVE-2014-2567 and CVE-2020-12398. STARTTLS is not
allowed in the Authenticated state, so previously Mutt would
implicitly mark the connection as authenticated and skip any
encryption checking/enabling.
No credentials are exposed, but it does allow messages to be sent to
an attacker, via postpone or fcc'ing for instance.
Reuse the $ssl_starttls quadoption "in reverse" to prompt to abort the
connection if it is unencrypted.
Thanks very much to Damian Poddebniak and Fabian Ising from the
Münster University of Applied Sciences for reporting this issue, and
their help in testing the fix.
|
cmsBool PreOptimize(cmsPipeline* Lut)
{
cmsBool AnyOpt = FALSE, Opt;
do {
Opt = FALSE;
// Remove all identities
Opt |= _Remove1Op(Lut, cmsSigIdentityElemType);
// Remove XYZ2Lab followed by Lab2XYZ
Opt |= _Remove2Op(Lut, cmsSigXYZ2LabElemType, cmsSigLab2XYZElemType);
// Remove Lab2XYZ followed by XYZ2Lab
Opt |= _Remove2Op(Lut, cmsSigLab2XYZElemType, cmsSigXYZ2LabElemType);
// Remove V4 to V2 followed by V2 to V4
Opt |= _Remove2Op(Lut, cmsSigLabV4toV2, cmsSigLabV2toV4);
// Remove V2 to V4 followed by V4 to V2
Opt |= _Remove2Op(Lut, cmsSigLabV2toV4, cmsSigLabV4toV2);
// Remove float pcs Lab conversions
Opt |= _Remove2Op(Lut, cmsSigLab2FloatPCS, cmsSigFloatPCS2Lab);
// Remove float pcs Lab conversions
Opt |= _Remove2Op(Lut, cmsSigXYZ2FloatPCS, cmsSigFloatPCS2XYZ);
// Simplify matrix.
Opt |= _MultiplyMatrix(Lut);
if (Opt) AnyOpt = TRUE;
} while (Opt);
return AnyOpt;
}
| 0 |
[
"CWE-125"
] |
Little-CMS
|
d41071eb8cfea7aa10a9262c12bd95d5d9d81c8f
| 298,420,975,397,289,500,000,000,000,000,000,000,000 | 38 |
Contributed fixes from Oracle
Two minor glitches
|
compat_do_replace(struct net *net, void __user *user, unsigned int len)
{
int ret;
struct compat_ipt_replace tmp;
struct xt_table_info *newinfo;
void *loc_cpu_entry;
struct ipt_entry *iter;
if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
return -EFAULT;
/* overflow check */
if (tmp.size >= INT_MAX / num_possible_cpus())
return -ENOMEM;
if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
return -ENOMEM;
tmp.name[sizeof(tmp.name)-1] = 0;
newinfo = xt_alloc_table_info(tmp.size);
if (!newinfo)
return -ENOMEM;
/* choose the copy that is on our node/cpu */
loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
tmp.size) != 0) {
ret = -EFAULT;
goto free_newinfo;
}
ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
&newinfo, &loc_cpu_entry, tmp.size,
tmp.num_entries, tmp.hook_entry,
tmp.underflow);
if (ret != 0)
goto free_newinfo;
duprintf("compat_do_replace: Translated table\n");
ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
tmp.num_counters, compat_ptr(tmp.counters));
if (ret)
goto free_newinfo_untrans;
return 0;
free_newinfo_untrans:
xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
cleanup_entry(iter, net);
free_newinfo:
xt_free_table_info(newinfo);
return ret;
}
| 0 |
[
"CWE-200"
] |
linux-2.6
|
78b79876761b86653df89c48a7010b5cbd41a84a
| 235,254,525,974,751,800,000,000,000,000,000,000,000 | 52 |
netfilter: ip_tables: fix infoleak to userspace
Structures ipt_replace, compat_ipt_replace, and xt_get_revision are
copied from userspace. Fields of these structs that are
zero-terminated strings are not checked. When they are used as argument
to a format string containing "%s" in request_module(), some sensitive
information is leaked to userspace via argument of spawned modprobe
process.
The first and the third bugs were introduced before the git epoch; the
second was introduced in 2722971c (v2.6.17-rc1). To trigger the bug
one should have CAP_NET_ADMIN.
Signed-off-by: Vasiliy Kulikov <[email protected]>
Signed-off-by: Patrick McHardy <[email protected]>
|
_outWithCheckOption(StringInfo str, const WithCheckOption *node)
{
WRITE_NODE_TYPE("WITHCHECKOPTION");
WRITE_STRING_FIELD(viewname);
WRITE_NODE_FIELD(qual);
WRITE_BOOL_FIELD(cascaded);
}
| 0 |
[
"CWE-362"
] |
postgres
|
5f173040e324f6c2eebb90d86cf1b0cdb5890f0a
| 200,337,381,261,472,550,000,000,000,000,000,000,000 | 8 |
Avoid repeated name lookups during table and index DDL.
If the name lookups come to different conclusions due to concurrent
activity, we might perform some parts of the DDL on a different table
than other parts. At least in the case of CREATE INDEX, this can be
used to cause the permissions checks to be performed against a
different table than the index creation, allowing for a privilege
escalation attack.
This changes the calling convention for DefineIndex, CreateTrigger,
transformIndexStmt, transformAlterTableStmt, CheckIndexCompatible
(in 9.2 and newer), and AlterTable (in 9.1 and older). In addition,
CheckRelationOwnership is removed in 9.2 and newer and the calling
convention is changed in older branches. A field has also been added
to the Constraint node (FkConstraint in 8.4). Third-party code calling
these functions or using the Constraint node will require updating.
Report by Andres Freund. Patch by Robert Haas and Andres Freund,
reviewed by Tom Lane.
Security: CVE-2014-0062
|
RequestXLogSwitch(void)
{
XLogRecPtr RecPtr;
XLogRecData rdata;
/* XLOG SWITCH, alone among xlog record types, has no data */
rdata.buffer = InvalidBuffer;
rdata.data = NULL;
rdata.len = 0;
rdata.next = NULL;
RecPtr = XLogInsert(RM_XLOG_ID, XLOG_SWITCH, &rdata);
return RecPtr;
}
| 0 |
[
"CWE-119"
] |
postgres
|
01824385aead50e557ca1af28640460fa9877d51
| 252,198,097,040,820,000,000,000,000,000,000,000,000 | 15 |
Prevent potential overruns of fixed-size buffers.
Coverity identified a number of places in which it couldn't prove that a
string being copied into a fixed-size buffer would fit. We believe that
most, perhaps all of these are in fact safe, or are copying data that is
coming from a trusted source so that any overrun is not really a security
issue. Nonetheless it seems prudent to forestall any risk by using
strlcpy() and similar functions.
Fixes by Peter Eisentraut and Jozef Mlich based on Coverity reports.
In addition, fix a potential null-pointer-dereference crash in
contrib/chkpass. The crypt(3) function is defined to return NULL on
failure, but chkpass.c didn't check for that before using the result.
The main practical case in which this could be an issue is if libc is
configured to refuse to execute unapproved hashing algorithms (e.g.,
"FIPS mode"). This ideally should've been a separate commit, but
since it touches code adjacent to one of the buffer overrun changes,
I included it in this commit to avoid last-minute merge issues.
This issue was reported by Honza Horak.
Security: CVE-2014-0065 for buffer overruns, CVE-2014-0066 for crypt()
|
static void gx_ttfExport__SetWidth(ttfExport *self, FloatPoint *p)
{
gx_ttfExport *e = (gx_ttfExport *)self;
e->w.x = float2fixed(p->x);
e->w.y = float2fixed(p->y);
}
| 0 |
[
"CWE-125"
] |
ghostpdl
|
937ccd17ac65935633b2ebc06cb7089b91e17e6b
| 186,571,058,284,412,100,000,000,000,000,000,000,000 | 7 |
Bug 698056: make bounds check in gx_ttfReader__Read more robust
|
TEST_P(DownstreamProtocolIntegrationTest, HittingDecoderFilterLimit) {
config_helper_.addFilter("{ name: envoy.http_dynamo_filter, config: {} }");
config_helper_.setBufferLimits(1024, 1024);
initialize();
codec_client_ = makeHttpConnection(lookupPort("http"));
// Envoy will likely connect and proxy some unspecified amount of data before
// hitting the buffer limit and disconnecting. Ignore this if it happens.
fake_upstreams_[0]->set_allow_unexpected_disconnects(true);
auto response =
codec_client_->makeRequestWithBody(Http::TestHeaderMapImpl{{":method", "POST"},
{":path", "/dynamo/url"},
{":scheme", "http"},
{":authority", "host"},
{"x-forwarded-for", "10.0.0.1"},
{"x-envoy-retry-on", "5xx"}},
1024 * 65);
response->waitForEndStream();
// With HTTP/1 there's a possible race where if the connection backs up early,
// the 413-and-connection-close may be sent while the body is still being
// sent, resulting in a write error and the connection being closed before the
// response is read.
if (downstream_protocol_ == Http::CodecClient::Type::HTTP2) {
ASSERT_TRUE(response->complete());
}
if (response->complete()) {
EXPECT_EQ("413", response->headers().Status()->value().getStringView());
}
}
| 0 |
[
"CWE-400",
"CWE-703"
] |
envoy
|
afc39bea36fd436e54262f150c009e8d72db5014
| 120,679,909,009,473,940,000,000,000,000,000,000,000 | 31 |
Track byteSize of HeaderMap internally.
Introduces a cached byte size updated internally in HeaderMap. The value
is stored as an optional, and is cleared whenever a non-const pointer or
reference to a HeaderEntry is accessed. The cached value can be set with
refreshByteSize() which performs an iteration over the HeaderMap to sum
the size of each key and value in the HeaderMap.
Signed-off-by: Asra Ali <[email protected]>
|
generate_cache_handler (LDAPOp *op,
LDAPMessage *res)
{
LDAPGetContactListOp *contact_list_op = (LDAPGetContactListOp *) op;
EBookBackendLDAP *bl = E_BOOK_BACKEND_LDAP (op->backend);
LDAPMessage *e;
gint msg_type;
EDataBookView *book_view;
GTimeVal start, end;
gulong diff;
if (enable_debug) {
printf ("generate_cache_handler ... \n");
g_get_current_time (&start);
}
g_rec_mutex_lock (&eds_ldap_handler_lock);
if (!bl->priv->ldap) {
g_rec_mutex_unlock (&eds_ldap_handler_lock);
ldap_op_finished (op);
if (enable_debug)
printf ("generate_cache_handler ... ldap handler is NULL \n");
return;
}
g_rec_mutex_unlock (&eds_ldap_handler_lock);
book_view = find_book_view (bl);
msg_type = ldap_msgtype (res);
if (msg_type == LDAP_RES_SEARCH_ENTRY) {
g_rec_mutex_lock (&eds_ldap_handler_lock);
if (bl->priv->ldap)
e = ldap_first_entry (bl->priv->ldap, res);
else
e = NULL;
g_rec_mutex_unlock (&eds_ldap_handler_lock);
while (e != NULL) {
EContact *contact = build_contact_from_entry (bl, e, NULL, NULL);
if (contact)
contact_list_op->contacts = g_slist_prepend (contact_list_op->contacts, contact);
g_rec_mutex_lock (&eds_ldap_handler_lock);
if (bl->priv->ldap)
e = ldap_next_entry (bl->priv->ldap, e);
else
e = NULL;
g_rec_mutex_unlock (&eds_ldap_handler_lock);
}
} else {
GSList *l;
gint contact_num = 0;
gchar *status_msg;
GTimeVal now;
gchar *update_str;
e_file_cache_clean (E_FILE_CACHE (bl->priv->cache));
e_file_cache_freeze_changes (E_FILE_CACHE (bl->priv->cache));
for (l = contact_list_op->contacts; l; l = g_slist_next (l)) {
EContact *contact = l->data;
contact_num++;
if (book_view) {
status_msg = g_strdup_printf (
_("Downloading contacts (%d)..."),
contact_num);
book_view_notify_status (bl, book_view, status_msg);
g_free (status_msg);
}
e_book_backend_cache_add_contact (bl->priv->cache, contact);
e_book_backend_notify_update (op->backend, contact);
}
e_book_backend_cache_set_populated (bl->priv->cache);
g_get_current_time (&now);
update_str = g_time_val_to_iso8601 (&now);
e_book_backend_cache_set_time (bl->priv->cache, update_str);
g_free (update_str);
e_file_cache_thaw_changes (E_FILE_CACHE (bl->priv->cache));
e_book_backend_notify_complete (op->backend);
ldap_op_finished (op);
if (enable_debug) {
g_get_current_time (&end);
diff = end.tv_sec * 1000 + end.tv_usec / 1000;
diff -= start.tv_sec * 1000 + start.tv_usec / 1000;
printf (
"generate_cache_handler ... completed in %ld.%03ld seconds\n",
diff / 1000,diff % 1000);
}
}
}
| 0 |
[] |
evolution-data-server
|
34bad61738e2127736947ac50e0c7969cc944972
| 199,634,041,656,446,800,000,000,000,000,000,000,000 | 92 |
Bug 796174 - strcat() considered unsafe for buffer overflow
|
static RList *methods(RBinFile *arch) {
RBinDexObj *bin;
if (!arch || !arch->o || !arch->o->bin_obj) {
return NULL;
}
bin = (RBinDexObj*) arch->o->bin_obj;
if (!bin->methods_list) {
dex_loadcode (arch, bin);
}
return bin->methods_list;
}
| 0 |
[
"CWE-125"
] |
radare2
|
ead645853a63bf83d8386702cad0cf23b31d7eeb
| 239,610,675,773,967,460,000,000,000,000,000,000,000 | 11 |
fix #6857
|
#ifdef CONFIG_KVM_COMPAT
static long kvm_vcpu_compat_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
struct kvm_vcpu *vcpu = filp->private_data;
void __user *argp = compat_ptr(arg);
int r;
if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead)
return -EIO;
switch (ioctl) {
case KVM_SET_SIGNAL_MASK: {
struct kvm_signal_mask __user *sigmask_arg = argp;
struct kvm_signal_mask kvm_sigmask;
sigset_t sigset;
if (argp) {
r = -EFAULT;
if (copy_from_user(&kvm_sigmask, argp,
sizeof(kvm_sigmask)))
goto out;
r = -EINVAL;
if (kvm_sigmask.len != sizeof(compat_sigset_t))
goto out;
r = -EFAULT;
if (get_compat_sigset(&sigset,
(compat_sigset_t __user *)sigmask_arg->sigset))
goto out;
r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
} else
r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL);
break;
}
default:
r = kvm_vcpu_ioctl(filp, ioctl, arg);
}
out:
return r;
| 0 |
[
"CWE-459"
] |
linux
|
683412ccf61294d727ead4a73d97397396e69a6b
| 46,693,594,551,161,520,000,000,000,000,000,000,000 | 40 |
KVM: SEV: add cache flush to solve SEV cache incoherency issues
Flush the CPU caches when memory is reclaimed from an SEV guest (where
reclaim also includes it being unmapped from KVM's memslots). Due to lack
of coherency for SEV encrypted memory, failure to flush results in silent
data corruption if userspace is malicious/broken and doesn't ensure SEV
guest memory is properly pinned and unpinned.
Cache coherency is not enforced across the VM boundary in SEV (AMD APM
vol.2 Section 15.34.7). Confidential cachelines, generated by confidential
VM guests have to be explicitly flushed on the host side. If a memory page
containing dirty confidential cachelines was released by VM and reallocated
to another user, the cachelines may corrupt the new user at a later time.
KVM takes a shortcut by assuming all confidential memory remain pinned
until the end of VM lifetime. Therefore, KVM does not flush cache at
mmu_notifier invalidation events. Because of this incorrect assumption and
the lack of cache flushing, malicous userspace can crash the host kernel:
creating a malicious VM and continuously allocates/releases unpinned
confidential memory pages when the VM is running.
Add cache flush operations to mmu_notifier operations to ensure that any
physical memory leaving the guest VM get flushed. In particular, hook
mmu_notifier_invalidate_range_start and mmu_notifier_release events and
flush cache accordingly. The hook after releasing the mmu lock to avoid
contention with other vCPUs.
Cc: [email protected]
Suggested-by: Sean Christpherson <[email protected]>
Reported-by: Mingwei Zhang <[email protected]>
Signed-off-by: Mingwei Zhang <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
void opj_j2k_copy_tile_quantization_parameters( opj_j2k_t *p_j2k )
{
OPJ_UINT32 i;
opj_cp_t *l_cp = NULL;
opj_tcp_t *l_tcp = NULL;
opj_tccp_t *l_ref_tccp = NULL;
opj_tccp_t *l_copied_tccp = NULL;
OPJ_UINT32 l_size;
/* preconditions */
assert(p_j2k != 00);
l_cp = &(p_j2k->m_cp);
l_tcp = p_j2k->m_specific_param.m_decoder.m_state == J2K_STATE_TPH ?
&l_cp->tcps[p_j2k->m_current_tile_number] :
p_j2k->m_specific_param.m_decoder.m_default_tcp;
l_ref_tccp = &l_tcp->tccps[0];
l_copied_tccp = l_ref_tccp + 1;
l_size = OPJ_J2K_MAXBANDS * sizeof(opj_stepsize_t);
for (i=1;i<p_j2k->m_private_image->numcomps;++i) {
l_copied_tccp->qntsty = l_ref_tccp->qntsty;
l_copied_tccp->numgbits = l_ref_tccp->numgbits;
memcpy(l_copied_tccp->stepsizes,l_ref_tccp->stepsizes,l_size);
++l_copied_tccp;
}
}
| 0 |
[] |
openjpeg
|
0fa5a17c98c4b8f9ee2286f4f0a50cf52a5fccb0
| 279,362,825,210,585,200,000,000,000,000,000,000,000 | 28 |
[trunk] Correct potential double free on malloc failure in opj_j2k_copy_default_tcp_and_create_tcp (fixes issue 492)
|
static int pf_detect(void)
{
struct pf_unit *pf = units;
int k, unit;
printk("%s: %s version %s, major %d, cluster %d, nice %d\n",
name, name, PF_VERSION, major, cluster, nice);
par_drv = pi_register_driver(name);
if (!par_drv) {
pr_err("failed to register %s driver\n", name);
return -1;
}
k = 0;
if (pf_drive_count == 0) {
if (pi_init(pf->pi, 1, -1, -1, -1, -1, -1, pf_scratch, PI_PF,
verbose, pf->name)) {
if (!pf_probe(pf) && pf->disk) {
pf->present = 1;
k++;
} else
pi_release(pf->pi);
}
} else
for (unit = 0; unit < PF_UNITS; unit++, pf++) {
int *conf = *drives[unit];
if (!conf[D_PRT])
continue;
if (pi_init(pf->pi, 0, conf[D_PRT], conf[D_MOD],
conf[D_UNI], conf[D_PRO], conf[D_DLY],
pf_scratch, PI_PF, verbose, pf->name)) {
if (pf->disk && !pf_probe(pf)) {
pf->present = 1;
k++;
} else
pi_release(pf->pi);
}
}
if (k)
return 0;
printk("%s: No ATAPI disk detected\n", name);
for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
if (!pf->disk)
continue;
blk_cleanup_queue(pf->disk->queue);
pf->disk->queue = NULL;
blk_mq_free_tag_set(&pf->tag_set);
put_disk(pf->disk);
}
pi_unregister_driver(par_drv);
return -1;
}
| 0 |
[
"CWE-476",
"CWE-125"
] |
linux
|
58ccd2d31e502c37e108b285bf3d343eb00c235b
| 27,863,968,266,425,910,000,000,000,000,000,000,000 | 54 |
paride/pf: Fix potential NULL pointer dereference
Syzkaller report this:
pf: pf version 1.04, major 47, cluster 64, nice 0
pf: No ATAPI disk detected
kasan: CONFIG_KASAN_INLINE enabled
kasan: GPF could be caused by NULL-ptr deref or user memory access
general protection fault: 0000 [#1] SMP KASAN PTI
CPU: 0 PID: 9887 Comm: syz-executor.0 Tainted: G C 5.1.0-rc3+ #8
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1ubuntu1 04/01/2014
RIP: 0010:pf_init+0x7af/0x1000 [pf]
Code: 46 77 d2 48 89 d8 48 c1 e8 03 80 3c 28 00 74 08 48 89 df e8 03 25 a6 d2 4c 8b 23 49 8d bc 24 80 05 00 00 48 89 f8 48 c1 e8 03 <80> 3c 28 00 74 05 e8 e6 24 a6 d2 49 8b bc 24 80 05 00 00 e8 79 34
RSP: 0018:ffff8881abcbf998 EFLAGS: 00010202
RAX: 00000000000000b0 RBX: ffffffffc1e4a8a8 RCX: ffffffffaec50788
RDX: 0000000000039b10 RSI: ffffc9000153c000 RDI: 0000000000000580
RBP: dffffc0000000000 R08: ffffed103ee44e59 R09: ffffed103ee44e59
R10: 0000000000000001 R11: ffffed103ee44e58 R12: 0000000000000000
R13: ffffffffc1e4b028 R14: 0000000000000000 R15: 0000000000000020
FS: 00007f1b78a91700(0000) GS:ffff8881f7200000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 00007f6d72b207f8 CR3: 00000001d5790004 CR4: 00000000007606f0
DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
PKRU: 55555554
Call Trace:
? 0xffffffffc1e50000
do_one_initcall+0xbc/0x47d init/main.c:901
do_init_module+0x1b5/0x547 kernel/module.c:3456
load_module+0x6405/0x8c10 kernel/module.c:3804
__do_sys_finit_module+0x162/0x190 kernel/module.c:3898
do_syscall_64+0x9f/0x450 arch/x86/entry/common.c:290
entry_SYSCALL_64_after_hwframe+0x49/0xbe
RIP: 0033:0x462e99
Code: f7 d8 64 89 02 b8 ff ff ff ff c3 66 0f 1f 44 00 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 bc ff ff ff f7 d8 64 89 01 48
RSP: 002b:00007f1b78a90c58 EFLAGS: 00000246 ORIG_RAX: 0000000000000139
RAX: ffffffffffffffda RBX: 000000000073bf00 RCX: 0000000000462e99
RDX: 0000000000000000 RSI: 0000000020000180 RDI: 0000000000000003
RBP: 00007f1b78a90c70 R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000246 R12: 00007f1b78a916bc
R13: 00000000004bcefa R14: 00000000006f6fb0 R15: 0000000000000004
Modules linked in: pf(+) paride gpio_tps65218 tps65218 i2c_cht_wc ati_remote dc395x act_meta_skbtcindex act_ife ife ecdh_generic rc_xbox_dvd sky81452_regulator v4l2_fwnode leds_blinkm snd_usb_hiface comedi(C) aes_ti slhc cfi_cmdset_0020 mtd cfi_util sx8654 mdio_gpio of_mdio fixed_phy mdio_bitbang libphy alcor_pci matrix_keymap hid_uclogic usbhid scsi_transport_fc videobuf2_v4l2 videobuf2_dma_sg snd_soc_pcm179x_spi snd_soc_pcm179x_codec i2c_demux_pinctrl mdev snd_indigodj isl6405 mii enc28j60 cmac adt7316_i2c(C) adt7316(C) fmc_trivial fmc nf_reject_ipv4 authenc rc_dtt200u rtc_ds1672 dvb_usb_dibusb_mc dvb_usb_dibusb_mc_common dib3000mc dibx000_common dvb_usb_dibusb_common dvb_usb dvb_core videobuf2_common videobuf2_vmalloc videobuf2_memops regulator_haptic adf7242 mac802154 ieee802154 s5h1409 da9034_ts snd_intel8x0m wmi cx24120 usbcore sdhci_cadence sdhci_pltfm sdhci mmc_core joydev i2c_algo_bit scsi_transport_iscsi iscsi_boot_sysfs ves1820 lockd grace nfs_acl auth_rpcgss sunrp
c
ip_vs snd_soc_adau7002 snd_cs4281 snd_rawmidi gameport snd_opl3_lib snd_seq_device snd_hwdep snd_ac97_codec ad7418 hid_primax hid snd_soc_cs4265 snd_soc_core snd_pcm_dmaengine snd_pcm snd_timer ac97_bus snd_compress snd soundcore ti_adc108s102 eeprom_93cx6 i2c_algo_pca mlxreg_hotplug st_pressure st_sensors industrialio_triggered_buffer kfifo_buf industrialio v4l2_common videodev media snd_soc_adau_utils rc_pinnacle_grey rc_core pps_gpio leds_lm3692x nandcore ledtrig_pattern iptable_security iptable_raw iptable_mangle iptable_nat nf_nat nf_conntrack nf_defrag_ipv6 nf_defrag_ipv4 iptable_filter bpfilter ip6_vti ip_vti ip_gre ipip sit tunnel4 ip_tunnel hsr veth netdevsim vxcan batman_adv cfg80211 rfkill chnl_net caif nlmon dummy team bonding vcan bridge stp llc ip6_gre gre ip6_tunnel tunnel6 tun mousedev ppdev tpm kvm_intel kvm irqbypass crct10dif_pclmul crc32_pclmul crc32c_intel ghash_clmulni_intel aesni_intel ide_pci_generic aes_x86_64 piix crypto_simd input_leds psmouse cryp
td
glue_helper ide_core intel_agp serio_raw intel_gtt agpgart ata_generic i2c_piix4 pata_acpi parport_pc parport rtc_cmos floppy sch_fq_codel ip_tables x_tables sha1_ssse3 sha1_generic ipv6 [last unloaded: paride]
Dumping ftrace buffer:
(ftrace buffer empty)
---[ end trace 7a818cf5f210d79e ]---
If alloc_disk fails in pf_init_units, pf->disk will be
NULL, however in pf_detect and pf_exit, it's not check
this before free.It may result a NULL pointer dereference.
Also when register_blkdev failed, blk_cleanup_queue() and
blk_mq_free_tag_set() should be called to free resources.
Reported-by: Hulk Robot <[email protected]>
Fixes: 6ce59025f118 ("paride/pf: cleanup queues when detection fails")
Signed-off-by: YueHaibing <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
|
static void init()
{
gs.filenum = -1;
gs.image = 0;
gs.altimage = 0;
gs.nexttmid = 0;
gs.vp.width = 0;
gs.vp.height = 0;
gs.vp.data = 0;
gs.viewportwidth = -1;
gs.viewportheight = -1;
}
| 0 |
[
"CWE-119"
] |
jasper
|
65536647d380571d1a9a6c91fa03775fb5bbd256
| 312,822,512,407,783,480,000,000,000,000,000,000,000 | 12 |
A new experimental memory allocator has been introduced. The allocator
is experimental in the sense that its API is not considered stable and
the allocator may change or disappear entirely in future versions of
the code. This new allocator tracks how much memory is being used by
jas_malloc and friends. A maximum upper bound on the memory usage can be
set via the experimental API provided and a default value can be set at
build time as well. Such functionality may be useful in run-time
environments where the user wants to be able to limit the amount of
memory used by JasPer. This allocator is not used by default.
Note: This feature needs C11 functionality.
Note: The memory allocator is not thread safe in its current form.
A new --memory-limit CLI option has been added to the jasper, imginfo,
imgcmp, and jiv programs. The option is only available when the code is
built with the new memory allocator.
The support for my old debug memory allocator from the 1990s has been
purged from the code. The debug memory allocator is probably not
a very useful thing with the advent of GCC/Clang code sanitizers.
The safe size_t integer functions no longer set their result upon failure.
A safe subtract operation was also added.
|
__perf_event_account_interrupt(struct perf_event *event, int throttle)
{
struct hw_perf_event *hwc = &event->hw;
int ret = 0;
u64 seq;
seq = __this_cpu_read(perf_throttled_seq);
if (seq != hwc->interrupts_seq) {
hwc->interrupts_seq = seq;
hwc->interrupts = 1;
} else {
hwc->interrupts++;
if (unlikely(throttle
&& hwc->interrupts >= max_samples_per_tick)) {
__this_cpu_inc(perf_throttled_count);
tick_dep_set_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS);
hwc->interrupts = MAX_INTERRUPTS;
perf_log_throttle(event, 0);
ret = 1;
}
}
if (event->attr.freq) {
u64 now = perf_clock();
s64 delta = now - hwc->freq_time_stamp;
hwc->freq_time_stamp = now;
if (delta > 0 && delta < 2*TICK_NSEC)
perf_adjust_period(event, delta, hwc->last_period, true);
}
return ret;
}
| 0 |
[
"CWE-190"
] |
linux
|
1572e45a924f254d9570093abde46430c3172e3d
| 171,196,423,173,487,500,000,000,000,000,000,000,000 | 34 |
perf/core: Fix the perf_cpu_time_max_percent check
Use "proc_dointvec_minmax" instead of "proc_dointvec" to check the input
value from user-space.
If not, we can set a big value and some vars will overflow like
"sysctl_perf_event_sample_rate" which will cause a lot of unexpected
problems.
Signed-off-by: Tan Xiaojun <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: <[email protected]>
Cc: <[email protected]>
Cc: Alexander Shishkin <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
Cc: Jiri Olsa <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Stephane Eranian <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Vince Weaver <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
|
static void check_suspend_mode(GuestSuspendMode mode, Error **errp)
{
SYSTEM_POWER_CAPABILITIES sys_pwr_caps;
Error *local_err = NULL;
ZeroMemory(&sys_pwr_caps, sizeof(sys_pwr_caps));
if (!GetPwrCapabilities(&sys_pwr_caps)) {
error_setg(&local_err, QERR_QGA_COMMAND_FAILED,
"failed to determine guest suspend capabilities");
goto out;
}
switch (mode) {
case GUEST_SUSPEND_MODE_DISK:
if (!sys_pwr_caps.SystemS4) {
error_setg(&local_err, QERR_QGA_COMMAND_FAILED,
"suspend-to-disk not supported by OS");
}
break;
case GUEST_SUSPEND_MODE_RAM:
if (!sys_pwr_caps.SystemS3) {
error_setg(&local_err, QERR_QGA_COMMAND_FAILED,
"suspend-to-ram not supported by OS");
}
break;
default:
error_setg(&local_err, QERR_INVALID_PARAMETER_VALUE, "mode",
"GuestSuspendMode");
}
out:
error_propagate(errp, local_err);
}
| 0 |
[
"CWE-190"
] |
qemu
|
141b197408ab398c4f474ac1a728ab316e921f2b
| 295,794,687,519,341,200,000,000,000,000,000,000,000 | 33 |
qga: check bytes count read by guest-file-read
While reading file content via 'guest-file-read' command,
'qmp_guest_file_read' routine allocates buffer of count+1
bytes. It could overflow for large values of 'count'.
Add check to avoid it.
Reported-by: Fakhri Zulkifli <[email protected]>
Signed-off-by: Prasad J Pandit <[email protected]>
Cc: [email protected]
Signed-off-by: Michael Roth <[email protected]>
|
ConnectionHandlerImpl::findActiveTcpListenerByAddress(const Network::Address::Instance& address) {
// This is a linear operation, may need to add a map<address, listener> to improve performance.
// However, linear performance might be adequate since the number of listeners is small.
// We do not return stopped listeners.
auto listener_it = std::find_if(
listeners_.begin(), listeners_.end(),
[&address](
const std::pair<Network::Address::InstanceConstSharedPtr, ActiveListenerDetails>& p) {
return p.second.tcp_listener_.has_value() && p.second.listener_->listener() != nullptr &&
p.first->type() == Network::Address::Type::Ip && *(p.first) == address;
});
// If there is exact address match, return the corresponding listener.
if (listener_it != listeners_.end()) {
return listener_it->second.tcp_listener_;
}
// Otherwise, we need to look for the wild card match, i.e., 0.0.0.0:[address_port].
// We do not return stopped listeners.
// TODO(wattli): consolidate with previous search for more efficiency.
listener_it = std::find_if(
listeners_.begin(), listeners_.end(),
[&address](
const std::pair<Network::Address::InstanceConstSharedPtr, ActiveListenerDetails>& p) {
return p.second.tcp_listener_.has_value() && p.second.listener_->listener() != nullptr &&
p.first->type() == Network::Address::Type::Ip &&
p.first->ip()->port() == address.ip()->port() && p.first->ip()->isAnyAddress();
});
return (listener_it != listeners_.end()) ? listener_it->second.tcp_listener_ : absl::nullopt;
}
| 0 |
[
"CWE-835"
] |
envoy
|
c8de199e2971f79cbcbc6b5eadc8c566b28705d1
| 17,796,200,842,816,350,000,000,000,000,000,000,000 | 30 |
listener: clean up accept filter before creating connection (#8922)
Signed-off-by: Yuchen Dai <[email protected]>
|
static int ext4_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
int ret;
bool allocated = false;
/*
* If the cluster containing lblk is shared with a delayed,
* written, or unwritten extent in a bigalloc file system, it's
* already been accounted for and does not need to be reserved.
* A pending reservation must be made for the cluster if it's
* shared with a written or unwritten extent and doesn't already
* have one. Written and unwritten extents can be purged from the
* extents status tree if the system is under memory pressure, so
* it's necessary to examine the extent tree if a search of the
* extents status tree doesn't get a match.
*/
if (sbi->s_cluster_ratio == 1) {
ret = ext4_da_reserve_space(inode);
if (ret != 0) /* ENOSPC */
goto errout;
} else { /* bigalloc */
if (!ext4_es_scan_clu(inode, &ext4_es_is_delonly, lblk)) {
if (!ext4_es_scan_clu(inode,
&ext4_es_is_mapped, lblk)) {
ret = ext4_clu_mapped(inode,
EXT4_B2C(sbi, lblk));
if (ret < 0)
goto errout;
if (ret == 0) {
ret = ext4_da_reserve_space(inode);
if (ret != 0) /* ENOSPC */
goto errout;
} else {
allocated = true;
}
} else {
allocated = true;
}
}
}
ret = ext4_es_insert_delayed_block(inode, lblk, allocated);
errout:
return ret;
}
| 0 |
[
"CWE-416",
"CWE-401"
] |
linux
|
4ea99936a1630f51fc3a2d61a58ec4a1c4b7d55a
| 45,833,282,472,158,680,000,000,000,000,000,000,000 | 47 |
ext4: add more paranoia checking in ext4_expand_extra_isize handling
It's possible to specify a non-zero s_want_extra_isize via debugging
option, and this can cause bad things(tm) to happen when using a file
system with an inode size of 128 bytes.
Add better checking when the file system is mounted, as well as when
we are actually doing the trying to do the inode expansion.
Link: https://lore.kernel.org/r/[email protected]
Reported-by: [email protected]
Reported-by: [email protected]
Reported-by: [email protected]
Signed-off-by: Theodore Ts'o <[email protected]>
Cc: [email protected]
|
static const char *cache_id(const char *id)
{
static char clean[SHORT_STRING];
mutt_str_strfcpy(clean, id, sizeof(clean));
mutt_file_sanitize_filename(clean, true);
return clean;
}
| 0 |
[
"CWE-241",
"CWE-824"
] |
neomutt
|
93b8ac558752d09e1c56d4f1bc82631316fa9c82
| 96,140,334,141,722,470,000,000,000,000,000,000,000 | 7 |
Ensure UID in fetch_uidl
|
f_ch_readraw(typval_T *argvars, typval_T *rettv)
{
common_channel_read(argvars, rettv, TRUE, FALSE);
}
| 0 |
[
"CWE-78"
] |
vim
|
8c62a08faf89663e5633dc5036cd8695c80f1075
| 207,944,479,045,949,330,000,000,000,000,000,000,000 | 4 |
patch 8.1.0881: can execute shell commands in rvim through interfaces
Problem: Can execute shell commands in rvim through interfaces.
Solution: Disable using interfaces in restricted mode. Allow for writing
file with writefile(), histadd() and a few others.
|
static void *_umm_malloc( size_t size ) {
unsigned short int blocks;
unsigned short int blockSize = 0;
unsigned short int bestSize;
unsigned short int bestBlock;
unsigned short int cf;
if (umm_heap == NULL) {
umm_init();
}
/*
* the very first thing we do is figure out if we're being asked to allocate
* a size of 0 - and if we are we'll simply return a null pointer. if not
* then reduce the size by 1 byte so that the subsequent calculations on
* the number of blocks to allocate are easier...
*/
if( 0 == size ) {
DBG_LOG_DEBUG( "malloc a block of 0 bytes -> do nothing\n" );
return( (void *)NULL );
}
/* Protect the critical section... */
UMM_CRITICAL_ENTRY();
blocks = umm_blocks( size );
/*
* Now we can scan through the free list until we find a space that's big
* enough to hold the number of blocks we need.
*
* This part may be customized to be a best-fit, worst-fit, or first-fit
* algorithm
*/
cf = UMM_NFREE(0);
bestBlock = UMM_NFREE(0);
bestSize = 0x7FFF;
while( cf ) {
blockSize = (UMM_NBLOCK(cf) & UMM_BLOCKNO_MASK) - cf;
DBG_LOG_TRACE( "Looking at block %6i size %6i\n", cf, blockSize );
#if defined UMM_FIRST_FIT
/* This is the first block that fits! */
if( (blockSize >= blocks) )
break;
#elif defined UMM_BEST_FIT
if( (blockSize >= blocks) && (blockSize < bestSize) ) {
bestBlock = cf;
bestSize = blockSize;
}
#endif
cf = UMM_NFREE(cf);
}
if( 0x7FFF != bestSize ) {
cf = bestBlock;
blockSize = bestSize;
}
if( UMM_NBLOCK(cf) & UMM_BLOCKNO_MASK && blockSize >= blocks ) {
/*
* This is an existing block in the memory heap, we just need to split off
* what we need, unlink it from the free list and mark it as in use, and
* link the rest of the block back into the freelist as if it was a new
* block on the free list...
*/
if( blockSize == blocks ) {
/* It's an exact fit and we don't neet to split off a block. */
DBG_LOG_DEBUG( "Allocating %6i blocks starting at %6i - exact\n", blocks, cf );
/* Disconnect this block from the FREE list */
umm_disconnect_from_free_list( cf );
umm_stat.free_entries_cnt--;
} else {
/* It's not an exact fit and we need to split off a block. */
DBG_LOG_DEBUG( "Allocating %6i blocks starting at %6i - existing\n", blocks, cf );
/*
* split current free block `cf` into two blocks. The first one will be
* returned to user, so it's not free, and the second one will be free.
*/
umm_make_new_block( cf, blocks,
0/*`cf` is not free*/,
UMM_FREELIST_MASK/*new block is free*/);
/*
* `umm_make_new_block()` does not update the free pointers (it affects
* only free flags), but effectively we've just moved beginning of the
* free block from `cf` to `cf + blocks`. So we have to adjust pointers
* to and from adjacent free blocks.
*/
/* previous free block */
UMM_NFREE( UMM_PFREE(cf) ) = cf + blocks;
UMM_PFREE( cf + blocks ) = UMM_PFREE(cf);
/* next free block */
UMM_PFREE( UMM_NFREE(cf) ) = cf + blocks;
UMM_NFREE( cf + blocks ) = UMM_NFREE(cf);
}
umm_stat.free_blocks_cnt -= blocks;
} else {
/* Out of memory */
/* If application has provided OOM-callback, call it */
#if defined(UMM_OOM_CB)
UMM_OOM_CB(size, blocks);
#endif
DBG_LOG_DEBUG( "Can't allocate %5i blocks\n", blocks );
/* Release the critical section... */
UMM_CRITICAL_EXIT();
return( (void *)NULL );
}
/* Release the critical section... */
UMM_CRITICAL_EXIT();
return( (void *)&UMM_DATA(cf) );
}
| 0 |
[
"CWE-190"
] |
mongoose-os
|
b338266c0492cccdb8d8a93ee0b9217bc5a04036
| 274,588,988,909,952,950,000,000,000,000,000,000,000 | 136 |
Fix umm_malloc()
|
insert_op1 (op, loc, arg, end)
re_opcode_t op;
unsigned char *loc;
int arg;
unsigned char *end;
{
register unsigned char *pfrom = end;
register unsigned char *pto = end + 3;
while (pfrom != loc)
*--pto = *--pfrom;
store_op1 (op, loc, arg);
}
| 0 |
[
"CWE-190",
"CWE-252"
] |
glibc
|
2864e767053317538feafa815046fff89e5a16be
| 142,216,545,576,025,040,000,000,000,000,000,000,000 | 14 |
Update.
1999-11-09 Ulrich Drepper <[email protected]>
* elf/dl-load.c (_dl_dst_count): Allow $ORIGIN to point to
directory with the reference since this is as secure as using the
object with the dependency.
(_dl_dst_substitute): Likewise.
* elf/dl-load.c (_dl_dst_count): Change strings in first two
strncmp calls to allow reuse.
(_dl_dst_substitute): Likewise.
1999-11-01 Arnold D. Robbins <[email protected]>
* posix/regex.c (init_syntax_once): move below definition of
ISALNUM etc., then use ISALNUM to init the table, so that
the word ops will work if i18n'ed.
(SYNTAX): And subscript with 0xFF for 8bit character sets.
1999-11-09 Andreas Jaeger <[email protected]>
* sysdeps/unix/getlogin_r.c (getlogin_r): Sync with getlogin
implementation for ttyname_r call; fix inverted condition; return
ut_user. Closes PR libc/1438.
1999-11-09 Ulrich Drepper <[email protected]>
* timezone/checktab.awk: Update from tzcode1999h.
* timezone/africa: Update from tzdata1999i.
* timezone/asia: Likewise.
* timezone/australasia: Likewise.
* timezone/backward: Likewise.
* timezone/europe: Likewise.
* timezone/northamerica: Likewise.
* timezone/southamerica: Likewise.
* timezone/iso3166.tab: Likewise.
* timezone/zone.tab: Likewise.
* sysdeps/unix/sysv/linux/bits/resource.h: Define values also as
macros. Patch by [email protected] [PR libc/1439].
1999-11-09 Andreas Jaeger <[email protected]>
* posix/Makefile (tests): Added tst-getlogin.
* posix/tst-getlogin.c: New file, contains simple tests for
getlogin and getlogin_r.
1999-11-09 Andreas Schwab <[email protected]>
* misc/syslog.c: For LOG_PERROR only append a newline if
necessary.
|
static int cn_printf(struct core_name *cn, const char *fmt, ...)
{
char *cur;
int need;
int ret;
va_list arg;
va_start(arg, fmt);
need = vsnprintf(NULL, 0, fmt, arg);
va_end(arg);
if (likely(need < cn->size - cn->used - 1))
goto out_printf;
ret = expand_corename(cn);
if (ret)
goto expand_fail;
out_printf:
cur = cn->corename + cn->used;
va_start(arg, fmt);
vsnprintf(cur, need + 1, fmt, arg);
va_end(arg);
cn->used += need;
return 0;
expand_fail:
return ret;
}
| 0 |
[
"CWE-264"
] |
linux
|
259e5e6c75a910f3b5e656151dc602f53f9d7548
| 148,281,047,635,214,370,000,000,000,000,000,000,000 | 29 |
Add PR_{GET,SET}_NO_NEW_PRIVS to prevent execve from granting privs
With this change, calling
prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)
disables privilege granting operations at execve-time. For example, a
process will not be able to execute a setuid binary to change their uid
or gid if this bit is set. The same is true for file capabilities.
Additionally, LSM_UNSAFE_NO_NEW_PRIVS is defined to ensure that
LSMs respect the requested behavior.
To determine if the NO_NEW_PRIVS bit is set, a task may call
prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0);
It returns 1 if set and 0 if it is not set. If any of the arguments are
non-zero, it will return -1 and set errno to -EINVAL.
(PR_SET_NO_NEW_PRIVS behaves similarly.)
This functionality is desired for the proposed seccomp filter patch
series. By using PR_SET_NO_NEW_PRIVS, it allows a task to modify the
system call behavior for itself and its child tasks without being
able to impact the behavior of a more privileged task.
Another potential use is making certain privileged operations
unprivileged. For example, chroot may be considered "safe" if it cannot
affect privileged tasks.
Note, this patch causes execve to fail when PR_SET_NO_NEW_PRIVS is
set and AppArmor is in use. It is fixed in a subsequent patch.
Signed-off-by: Andy Lutomirski <[email protected]>
Signed-off-by: Will Drewry <[email protected]>
Acked-by: Eric Paris <[email protected]>
Acked-by: Kees Cook <[email protected]>
v18: updated change desc
v17: using new define values as per 3.4
Signed-off-by: James Morris <[email protected]>
|
xdr_chrand_arg(XDR *xdrs, chrand_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_krb5_principal(xdrs, &objp->princ)) {
return (FALSE);
}
return (TRUE);
}
| 0 |
[
"CWE-703"
] |
krb5
|
a197e92349a4aa2141b5dff12e9dd44c2a2166e3
| 146,745,819,300,943,700,000,000,000,000,000,000,000 | 10 |
Fix kadm5/gssrpc XDR double free [CVE-2014-9421]
[MITKRB5-SA-2015-001] In auth_gssapi_unwrap_data(), do not free
partial deserialization results upon failure to deserialize. This
responsibility belongs to the callers, svctcp_getargs() and
svcudp_getargs(); doing it in the unwrap function results in freeing
the results twice.
In xdr_krb5_tl_data() and xdr_krb5_principal(), null out the pointers
we are freeing, as other XDR functions such as xdr_bytes() and
xdr_string().
ticket: 8056 (new)
target_version: 1.13.1
tags: pullup
|
timeout_connect(int sockfd, const struct sockaddr *serv_addr,
socklen_t addrlen, int *timeoutp)
{
int optval = 0;
socklen_t optlen = sizeof(optval);
/* No timeout: just do a blocking connect() */
if (timeoutp == NULL || *timeoutp <= 0)
return connect(sockfd, serv_addr, addrlen);
set_nonblock(sockfd);
for (;;) {
if (connect(sockfd, serv_addr, addrlen) == 0) {
/* Succeeded already? */
unset_nonblock(sockfd);
return 0;
} else if (errno == EINTR)
continue;
else if (errno != EINPROGRESS)
return -1;
break;
}
if (waitfd(sockfd, timeoutp, POLLIN | POLLOUT) == -1)
return -1;
/* Completed or failed */
if (getsockopt(sockfd, SOL_SOCKET, SO_ERROR, &optval, &optlen) == -1) {
debug("getsockopt: %s", strerror(errno));
return -1;
}
if (optval != 0) {
errno = optval;
return -1;
}
unset_nonblock(sockfd);
return 0;
}
| 0 |
[] |
openssh-portable
|
f3cbe43e28fe71427d41cfe3a17125b972710455
| 193,119,327,193,392,520,000,000,000,000,000,000,000 | 38 |
upstream: need initgroups() before setresgid(); reported by anton@,
ok deraadt@
OpenBSD-Commit-ID: 6aa003ee658b316960d94078f2a16edbc25087ce
|
_public_ PAM_EXTERN int pam_sm_open_session(
pam_handle_t *handle,
int flags,
int argc, const char **argv) {
_cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
_cleanup_(sd_bus_message_unrefp) sd_bus_message *m = NULL, *reply = NULL;
const char
*username, *id, *object_path, *runtime_path,
*service = NULL,
*tty = NULL, *display = NULL,
*remote_user = NULL, *remote_host = NULL,
*seat = NULL,
*type = NULL, *class = NULL,
*class_pam = NULL, *type_pam = NULL, *cvtnr = NULL, *desktop = NULL, *desktop_pam = NULL,
*memory_max = NULL, *tasks_max = NULL, *cpu_weight = NULL, *io_weight = NULL;
_cleanup_(sd_bus_flush_close_unrefp) sd_bus *bus = NULL;
int session_fd = -1, existing, r;
bool debug = false, remote;
struct passwd *pw;
uint32_t vtnr = 0;
uid_t original_uid;
assert(handle);
/* Make this a NOP on non-logind systems */
if (!logind_running())
return PAM_SUCCESS;
if (parse_argv(handle,
argc, argv,
&class_pam,
&type_pam,
&desktop_pam,
&debug) < 0)
return PAM_SESSION_ERR;
if (debug)
pam_syslog(handle, LOG_DEBUG, "pam-systemd initializing");
r = get_user_data(handle, &username, &pw);
if (r != PAM_SUCCESS) {
pam_syslog(handle, LOG_ERR, "Failed to get user data.");
return r;
}
/* Make sure we don't enter a loop by talking to
* systemd-logind when it is actually waiting for the
* background to finish start-up. If the service is
* "systemd-user" we simply set XDG_RUNTIME_DIR and
* leave. */
pam_get_item(handle, PAM_SERVICE, (const void**) &service);
if (streq_ptr(service, "systemd-user")) {
char rt[STRLEN("/run/user/") + DECIMAL_STR_MAX(uid_t)];
xsprintf(rt, "/run/user/"UID_FMT, pw->pw_uid);
if (validate_runtime_directory(handle, rt, pw->pw_uid)) {
r = pam_misc_setenv(handle, "XDG_RUNTIME_DIR", rt, 0);
if (r != PAM_SUCCESS) {
pam_syslog(handle, LOG_ERR, "Failed to set runtime dir.");
return r;
}
}
r = export_legacy_dbus_address(handle, pw->pw_uid, rt);
if (r != PAM_SUCCESS)
return r;
return PAM_SUCCESS;
}
/* Otherwise, we ask logind to create a session for us */
pam_get_item(handle, PAM_XDISPLAY, (const void**) &display);
pam_get_item(handle, PAM_TTY, (const void**) &tty);
pam_get_item(handle, PAM_RUSER, (const void**) &remote_user);
pam_get_item(handle, PAM_RHOST, (const void**) &remote_host);
seat = getenv_harder(handle, "XDG_SEAT", NULL);
cvtnr = getenv_harder(handle, "XDG_VTNR", NULL);
type = getenv_harder(handle, "XDG_SESSION_TYPE", type_pam);
class = getenv_harder(handle, "XDG_SESSION_CLASS", class_pam);
desktop = getenv_harder(handle, "XDG_SESSION_DESKTOP", desktop_pam);
tty = strempty(tty);
if (strchr(tty, ':')) {
/* A tty with a colon is usually an X11 display, placed there to show up in utmp. We rearrange things
* and don't pretend that an X display was a tty. */
if (isempty(display))
display = tty;
tty = NULL;
} else if (streq(tty, "cron")) {
/* cron is setting PAM_TTY to "cron" for some reason (the commit carries no information why, but
* probably because it wants to set it to something as pam_time/pam_access/… require PAM_TTY to be set
* (as they otherwise even try to update it!) — but cron doesn't actually allocate a TTY for its forked
* off processes.) */
type = "unspecified";
class = "background";
tty = NULL;
} else if (streq(tty, "ssh")) {
/* ssh has been setting PAM_TTY to "ssh" (for the same reason as cron does this, see above. For further
* details look for "PAM_TTY_KLUDGE" in the openssh sources). */
type ="tty";
class = "user";
tty = NULL; /* This one is particularly sad, as this means that ssh sessions — even though usually
* associated with a pty — won't be tracked by their tty in logind. This is because ssh
* does the PAM session registration early for new connections, and registers a pty only
* much later (this is because it doesn't know yet if it needs one at all, as whether to
* register a pty or not is negotiated much later in the protocol). */
} else
/* Chop off leading /dev prefix that some clients specify, but others do not. */
tty = skip_dev_prefix(tty);
/* If this fails vtnr will be 0, that's intended */
if (!isempty(cvtnr))
(void) safe_atou32(cvtnr, &vtnr);
if (!isempty(display) && !vtnr) {
if (isempty(seat))
(void) get_seat_from_display(display, &seat, &vtnr);
else if (streq(seat, "seat0"))
(void) get_seat_from_display(display, NULL, &vtnr);
}
if (seat && !streq(seat, "seat0") && vtnr != 0) {
if (debug)
pam_syslog(handle, LOG_DEBUG, "Ignoring vtnr %"PRIu32" for %s which is not seat0", vtnr, seat);
vtnr = 0;
}
if (isempty(type))
type = !isempty(display) ? "x11" :
!isempty(tty) ? "tty" : "unspecified";
if (isempty(class))
class = streq(type, "unspecified") ? "background" : "user";
remote = !isempty(remote_host) && !is_localhost(remote_host);
(void) pam_get_data(handle, "systemd.memory_max", (const void **)&memory_max);
(void) pam_get_data(handle, "systemd.tasks_max", (const void **)&tasks_max);
(void) pam_get_data(handle, "systemd.cpu_weight", (const void **)&cpu_weight);
(void) pam_get_data(handle, "systemd.io_weight", (const void **)&io_weight);
/* Talk to logind over the message bus */
r = sd_bus_open_system(&bus);
if (r < 0) {
pam_syslog(handle, LOG_ERR, "Failed to connect to system bus: %s", strerror(-r));
return PAM_SESSION_ERR;
}
if (debug) {
pam_syslog(handle, LOG_DEBUG, "Asking logind to create session: "
"uid="UID_FMT" pid="PID_FMT" service=%s type=%s class=%s desktop=%s seat=%s vtnr=%"PRIu32" tty=%s display=%s remote=%s remote_user=%s remote_host=%s",
pw->pw_uid, getpid_cached(),
strempty(service),
type, class, strempty(desktop),
strempty(seat), vtnr, strempty(tty), strempty(display),
yes_no(remote), strempty(remote_user), strempty(remote_host));
pam_syslog(handle, LOG_DEBUG, "Session limits: "
"memory_max=%s tasks_max=%s cpu_weight=%s io_weight=%s",
strna(memory_max), strna(tasks_max), strna(cpu_weight), strna(io_weight));
}
r = sd_bus_message_new_method_call(
bus,
&m,
"org.freedesktop.login1",
"/org/freedesktop/login1",
"org.freedesktop.login1.Manager",
"CreateSession");
if (r < 0) {
pam_syslog(handle, LOG_ERR, "Failed to create CreateSession method call: %s", strerror(-r));
return PAM_SESSION_ERR;
}
r = sd_bus_message_append(m, "uusssssussbss",
(uint32_t) pw->pw_uid,
0,
service,
type,
class,
desktop,
seat,
vtnr,
tty,
display,
remote,
remote_user,
remote_host);
if (r < 0) {
pam_syslog(handle, LOG_ERR, "Failed to append to bus message: %s", strerror(-r));
return PAM_SESSION_ERR;
}
r = sd_bus_message_open_container(m, 'a', "(sv)");
if (r < 0) {
pam_syslog(handle, LOG_ERR, "Failed to open message container: %s", strerror(-r));
return PAM_SYSTEM_ERR;
}
r = append_session_memory_max(handle, m, memory_max);
if (r < 0)
return PAM_SESSION_ERR;
r = append_session_tasks_max(handle, m, tasks_max);
if (r < 0)
return PAM_SESSION_ERR;
r = append_session_cg_weight(handle, m, cpu_weight, "CPUWeight");
if (r < 0)
return PAM_SESSION_ERR;
r = append_session_cg_weight(handle, m, io_weight, "IOWeight");
if (r < 0)
return PAM_SESSION_ERR;
r = sd_bus_message_close_container(m);
if (r < 0) {
pam_syslog(handle, LOG_ERR, "Failed to close message container: %s", strerror(-r));
return PAM_SYSTEM_ERR;
}
r = sd_bus_call(bus, m, 0, &error, &reply);
if (r < 0) {
if (sd_bus_error_has_name(&error, BUS_ERROR_SESSION_BUSY)) {
if (debug)
pam_syslog(handle, LOG_DEBUG, "Not creating session: %s", bus_error_message(&error, r));
return PAM_SUCCESS;
} else {
pam_syslog(handle, LOG_ERR, "Failed to create session: %s", bus_error_message(&error, r));
return PAM_SYSTEM_ERR;
}
}
r = sd_bus_message_read(reply,
"soshusub",
&id,
&object_path,
&runtime_path,
&session_fd,
&original_uid,
&seat,
&vtnr,
&existing);
if (r < 0) {
pam_syslog(handle, LOG_ERR, "Failed to parse message: %s", strerror(-r));
return PAM_SESSION_ERR;
}
if (debug)
pam_syslog(handle, LOG_DEBUG, "Reply from logind: "
"id=%s object_path=%s runtime_path=%s session_fd=%d seat=%s vtnr=%u original_uid=%u",
id, object_path, runtime_path, session_fd, seat, vtnr, original_uid);
r = update_environment(handle, "XDG_SESSION_ID", id);
if (r != PAM_SUCCESS)
return r;
if (original_uid == pw->pw_uid) {
/* Don't set $XDG_RUNTIME_DIR if the user we now
* authenticated for does not match the original user
* of the session. We do this in order not to result
* in privileged apps clobbering the runtime directory
* unnecessarily. */
if (validate_runtime_directory(handle, runtime_path, pw->pw_uid)) {
r = update_environment(handle, "XDG_RUNTIME_DIR", runtime_path);
if (r != PAM_SUCCESS)
return r;
}
r = export_legacy_dbus_address(handle, pw->pw_uid, runtime_path);
if (r != PAM_SUCCESS)
return r;
}
/* Most likely we got the session/type/class from environment variables, but might have gotten the data
* somewhere else (for example PAM module parameters). Let's now update the environment variables, so that this
* data is inherited into the session processes, and programs can rely on them to be initialized. */
r = update_environment(handle, "XDG_SESSION_TYPE", type);
if (r != PAM_SUCCESS)
return r;
r = update_environment(handle, "XDG_SESSION_CLASS", class);
if (r != PAM_SUCCESS)
return r;
r = update_environment(handle, "XDG_SESSION_DESKTOP", desktop);
if (r != PAM_SUCCESS)
return r;
r = update_environment(handle, "XDG_SEAT", seat);
if (r != PAM_SUCCESS)
return r;
if (vtnr > 0) {
char buf[DECIMAL_STR_MAX(vtnr)];
sprintf(buf, "%u", vtnr);
r = update_environment(handle, "XDG_VTNR", buf);
if (r != PAM_SUCCESS)
return r;
}
r = pam_set_data(handle, "systemd.existing", INT_TO_PTR(!!existing), NULL);
if (r != PAM_SUCCESS) {
pam_syslog(handle, LOG_ERR, "Failed to install existing flag.");
return r;
}
if (session_fd >= 0) {
session_fd = fcntl(session_fd, F_DUPFD_CLOEXEC, 3);
if (session_fd < 0) {
pam_syslog(handle, LOG_ERR, "Failed to dup session fd: %m");
return PAM_SESSION_ERR;
}
r = pam_set_data(handle, "systemd.session-fd", FD_TO_PTR(session_fd), NULL);
if (r != PAM_SUCCESS) {
pam_syslog(handle, LOG_ERR, "Failed to install session fd.");
safe_close(session_fd);
return r;
}
}
return PAM_SUCCESS;
}
| 0 |
[
"CWE-863"
] |
systemd
|
83d4ab55336ff8a0643c6aa627b31e351a24040a
| 197,237,057,821,222,860,000,000,000,000,000,000,000 | 335 |
pam-systemd: use secure_getenv() rather than getenv()
And explain why in a comment.
|
/* Accessor for the parsed patch header: reports whether the patch
 * claims that the file on the given side does not exist.
 * `which` indexes p_says_nonexistent[] — presumably 0 selects the old
 * (input) file and 1 the new (output) file; confirm against callers.
 * NOTE(review): the return type is not visible in this chunk; it is
 * declared on the line preceding this definition (GNU style). */
pch_says_nonexistent (bool which)
{
  return p_says_nonexistent[which];
}
| 0 |
[
"CWE-59"
] |
patch
|
44a987e02f04b9d81a0db4a611145cad1093a2d3
| 279,778,520,962,661,130,000,000,000,000,000,000,000 | 4 |
Add line number overflow checking
* bootstrap.conf: use intprops module.
* src/common.h: Define LINENUM_MIN and LINENUM_MAX macros.
* src/pch.c (another_hunk): Add line number overflow checking. Based on Robert
C. Seacord's INT32-C document for integer overflow checking and Tobias
Stoeckmann's "integer overflows and oob memory access" patch for FreeBSD.
|
// Collapse the document's /Pages tree into a single flat /Pages node
// whose /Kids array lists every page object directly, and populate the
// pageobj_to_pages_pos map.  Idempotent: returns immediately when the
// map has already been built.
// NOTE(review): the return type is not visible in this chunk
// (presumably void, declared on the preceding line).
QPDF::flattenPagesTree()
{
    // If not already done, flatten the /Pages structure and
    // initialize pageobj_to_pages_pos.
    if (! this->m->pageobj_to_pages_pos.empty())
    {
        return;
    }

    // Push inherited objects down to the /Page level. As a side
    // effect this->m->all_pages will also be generated.
    pushInheritedAttributesToPage(true, true);

    QPDFObjectHandle pages = getRoot().getKey("/Pages");

    size_t const len = this->m->all_pages.size();
    for (size_t pos = 0; pos < len; ++pos)
    {
        // populate pageobj_to_pages_pos and fix parent pointer
        insertPageobjToPage(this->m->all_pages.at(pos), toI(pos), true);
        // After flattening, every page hangs directly off the root
        // /Pages node.
        this->m->all_pages.at(pos).replaceKey("/Parent", pages);
    }
    pages.replaceKey("/Kids", QPDFObjectHandle::newArray(this->m->all_pages));
    // /Count has not changed
    if (pages.getKey("/Count").getUIntValue() != len)
    {
        throw std::logic_error("/Count is wrong after flattening pages tree");
    }
}
| 0 |
[
"CWE-787"
] |
qpdf
|
d71f05ca07eb5c7cfa4d6d23e5c1f2a800f52e8e
| 322,755,649,374,562,660,000,000,000,000,000,000,000 | 31 |
Fix sign and conversion warnings (major)
This makes all integer type conversions that have potential data loss
explicit with calls that do range checks and raise an exception. After
this commit, qpdf builds with no warnings when -Wsign-conversion
-Wconversion is used with gcc or clang or when -W3 -Wd4800 is used
with MSVC. This significantly reduces the likelihood of potential
crashes from bogus integer values.
There are some parts of the code that take int when they should take
size_t or an offset. Such places would make qpdf not support files
with more than 2^31 of something that usually wouldn't be so large. In
the event that such a file shows up and is valid, at least qpdf would
raise an error in the right spot so the issue could be legitimately
addressed rather than failing in some weird way because of a silent
overflow condition.
|
// Invocation of a BasicCommand for a single OpMsg request.
// The OperationContext* parameter is accepted but unused here.
// NOTE(review): only the address of `request` is stored, so the request
// object must outlive this Invocation — confirm the caller guarantees
// that lifetime.
Invocation(OperationContext*, const OpMsgRequest& request, BasicCommand* command)
    : CommandInvocation(command),
      _command(command),
      _request(&request),
      // Cache the target database name as an owned string up front.
      _dbName(_request->getDatabase().toString()) {}
| 0 |
[
"CWE-20"
] |
mongo
|
722f06f3217c029ef9c50062c8cc775966fd7ead
| 317,824,427,202,606,500,000,000,000,000,000,000,000 | 5 |
SERVER-38275 ban find explain with UUID
|
/* Build an EEwsCalendarTransitionsGroup from a <TransitionsGroup> SOAP
 * node.  Returns NULL when the node carries no "Id" property; otherwise
 * the returned group stores the id string and the parsed transition
 * lists (absolute-date, recurring-date and recurring-day).
 * NOTE(review): the return type is not visible in this chunk; it is
 * declared on the line preceding this definition. */
ews_get_transitions_group (ESoapParameter *node)
{
	EEwsCalendarTransitionsGroup *group;
	ESoapParameter *transition_param;
	gchar *group_id;

	/* A TransitionsGroup without an Id is not usable. */
	group_id = e_soap_parameter_get_property (node, "Id");
	if (group_id == NULL)
		return NULL;

	group = e_ews_calendar_transitions_group_new ();
	group->id = group_id;

	/* The single <Transition> child is optional. */
	transition_param = e_soap_parameter_get_first_child_by_name (node, "Transition");
	group->transition = (transition_param != NULL) ? ews_get_to (transition_param) : NULL;

	group->absolute_date_transitions = ews_get_absolute_date_transitions_list (node);
	group->recurring_date_transitions = ews_get_recurring_date_transitions_list (node);
	group->recurring_day_transitions = ews_get_recurring_day_transitions_list (node);

	return group;
}
| 0 |
[
"CWE-295"
] |
evolution-ews
|
915226eca9454b8b3e5adb6f2fff9698451778de
| 213,346,456,897,177,100,000,000,000,000,000,000,000 | 31 |
I#27 - SSL Certificates are not validated
This depends on https://gitlab.gnome.org/GNOME/evolution-data-server/commit/6672b8236139bd6ef41ecb915f4c72e2a052dba5 too.
Closes https://gitlab.gnome.org/GNOME/evolution-ews/issues/27
|
/* Return the remembered current directory for drive `index`
 * (0 == 'A', 1 == 'B', ...).  When no directory has been recorded in
 * dirTableW, fabricate the drive root ("X:\") in the shared member
 * buffer szLocalBufferW and return that instead.
 * NOTE(review): the fallback writes into a shared buffer, so the
 * returned pointer is only valid until the next call that reuses
 * szLocalBufferW — confirm callers copy the string promptly. */
inline const WCHAR *GetDirW(int index)
{
    WCHAR *ptr = dirTableW[index];
    if (!ptr) {
        /* simulate the existence of this drive */
        ptr = szLocalBufferW;
        ptr[0] = 'A' + index;
        ptr[1] = ':';
        ptr[2] = '\\';
        ptr[3] = 0;
    }
    return ptr;
};
| 0 |
[] |
perl5
|
52236464559c6e410a4587d3c6da9639e75f3ec1
| 293,365,112,092,533,800,000,000,000,000,000,000,000 | 13 |
avoid invalid memory access in MapPath[AW]
This issue was assigned CVE-2015-8608. [perl #126755]
|
/* Read a 16-bit value from an arbitrary (possibly unaligned) buffer
 * position and convert it to host byte order.
 * NOTE(review): DE_ENDIAN16 is assumed to consult the `endian`
 * parameter from the surrounding scope — confirm in its definition. */
static unsigned short de_get16(void *ptr, guint endian)
{
	unsigned short v;

	/* memcpy keeps the load safe on strict-alignment targets. */
	memcpy(&v, ptr, sizeof(v));
	return DE_ENDIAN16(v);
}
| 0 |
[
"CWE-787"
] |
gdk-pixbuf
|
c2a40a92fe3df4111ed9da51fe3368c079b86926
| 8,754,576,071,310,304,000,000,000,000,000,000,000 | 9 |
jpeg: Throw error when number of color components is unsupported
Explicitly check "3" or "4" output color components.
gdk-pixbuf assumed that the value of output_components to be either
3 or 4, but not an invalid value (9) or an unsupported value (1).
The way the buffer size was deduced was using a naive "== 4" check,
with a 1, 3 or 9 color component picture getting the same buffer size,
a size just sufficient for 3 color components, causing invalid writes
later when libjpeg-turbo was decoding the image.
CVE-2017-2862
Sent by from Marcin 'Icewall' Noga of Cisco Talos
https://bugzilla.gnome.org/show_bug.cgi?id=784866
|
/* GObject finalize handler for GSGrab: destroys the instance's
 * `invisible` widget and then chains up to the parent class.
 * NOTE(review): the return type is not visible in this chunk
 * (presumably static void, declared on the preceding line). */
gs_grab_finalize (GObject *object)
{
	GSGrab *grab;

	g_return_if_fail (object != NULL);
	g_return_if_fail (GS_IS_GRAB (object));

	grab = GS_GRAB (object);

	g_return_if_fail (grab->priv != NULL);

	/* Tear down the `invisible` widget owned by this instance
	 * (presumably the off-screen window used for input grabs —
	 * confirm against gs_grab_init). */
	gtk_widget_destroy (grab->priv->invisible);

	/* Chain up so the parent class can release its own state. */
	G_OBJECT_CLASS (gs_grab_parent_class)->finalize (object);
}
| 0 |
[] |
gnome-screensaver
|
f93a22c175090cf02e80bc3ee676b53f1251f685
| 290,926,850,137,339,900,000,000,000,000,000,000,000 | 15 |
Nullify grab window variables when windows are destroyed
If we don't do this then there is a time period where the
grab window variables contain dangling pointers which can
cause crashes.
Part of fix for
https://bugzilla.gnome.org/show_bug.cgi?id=609789
|
/* Parse the PBKDF2 key-derivation parameters out of the
 * keyDerivationFunc field of a decoded PBES2 structure.
 *
 * pbes2_asn: decoded PKIX1.pkcs-5-PBES2-params element.
 * der:       the raw DER blob the element was decoded from (needed to
 *            locate the parameters sub-range within it).
 * params:    output; receives the salt, iteration count and the
 *            optional key length (0 when absent).
 *
 * Returns 0 on success or a negative gnutls error code.  Only the
 * PBKDF2 OID with the default PRF is accepted.
 */
static int read_pbkdf2_params(ASN1_TYPE pbes2_asn,
			      const gnutls_datum_t * der,
			      struct pbkdf2_params *params)
{
	int params_start, params_end;
	int params_len, len, result;
	ASN1_TYPE pbkdf2_asn = ASN1_TYPE_EMPTY;
	char oid[64];

	/* Zero the whole output structure.  The previous code used
	 * sizeof(params) — the size of the pointer — which left most of
	 * the struct uninitialized. */
	memset(params, 0, sizeof(*params));

	/* Check the key derivation algorithm
	 */
	len = sizeof(oid);
	result =
	    asn1_read_value(pbes2_asn, "keyDerivationFunc.algorithm", oid,
			    &len);
	if (result != ASN1_SUCCESS) {
		gnutls_assert();
		return _gnutls_asn2err(result);
	}
	_gnutls_hard_log("keyDerivationFunc.algorithm: %s\n", oid);

	if (strcmp(oid, PBKDF2_OID) != 0) {
		gnutls_assert();
		_gnutls_x509_log
		    ("PKCS #8 key derivation OID '%s' is unsupported.\n", oid);
		/* `result` is ASN1_SUCCESS at this point, so mapping it
		 * (as the previous code did) would not yield a meaningful
		 * error code; report a DER error instead. */
		return _gnutls_asn2err(ASN1_DER_ERROR);
	}

	result =
	    asn1_der_decoding_startEnd(pbes2_asn, der->data, der->size,
				       "keyDerivationFunc.parameters",
				       &params_start, &params_end);
	if (result != ASN1_SUCCESS) {
		gnutls_assert();
		return _gnutls_asn2err(result);
	}
	params_len = params_end - params_start + 1;

	/* Now check the key derivation and the encryption
	 * functions.
	 */
	if ((result =
	     asn1_create_element(_gnutls_get_pkix(),
				 "PKIX1.pkcs-5-PBKDF2-params",
				 &pbkdf2_asn)) != ASN1_SUCCESS) {
		gnutls_assert();
		return _gnutls_asn2err(result);
	}

	result =
	    asn1_der_decoding(&pbkdf2_asn, &der->data[params_start],
			      params_len, NULL);
	if (result != ASN1_SUCCESS) {
		gnutls_assert();
		result = _gnutls_asn2err(result);
		goto cleanup;
	}

	/* read the salt */
	params->salt_size = sizeof(params->salt);
	result =
	    asn1_read_value(pbkdf2_asn, "salt.specified", params->salt,
			    &params->salt_size);
	if (result != ASN1_SUCCESS) {
		gnutls_assert();
		result = _gnutls_asn2err(result);
		goto cleanup;
	}
	_gnutls_hard_log("salt.specified.size: %d\n", params->salt_size);

	/* read the iteration count
	 */
	result =
	    _gnutls_x509_read_uint(pbkdf2_asn, "iterationCount",
				   &params->iter_count);
	if (result != ASN1_SUCCESS) {
		gnutls_assert();
		goto cleanup;
	}
	_gnutls_hard_log("iterationCount: %d\n", params->iter_count);

	/* read the keylength, if it is set.
	 */
	result =
	    _gnutls_x509_read_uint(pbkdf2_asn, "keyLength", &params->key_size);
	if (result < 0) {
		params->key_size = 0;
	}
	_gnutls_hard_log("keyLength: %d\n", params->key_size);

	/* We don't read the PRF. We only use the default.
	 */
	result = 0;

      cleanup:
	/* Release the decoder element on the success path as well — the
	 * previous code only freed it on error, leaking it otherwise. */
	asn1_delete_structure(&pbkdf2_asn);
	return result;
}
| 0 |
[] |
gnutls
|
112d537da5f3500f14316db26d18c37d678a5e0e
| 181,573,386,104,191,920,000,000,000,000,000,000,000 | 102 |
some changes for 64bit machines.
|
/* mmap() handler for the usbvision V4L2 device node: a locking wrapper
 * that serializes usbvision_mmap() against other v4l2 operations via
 * usbvision->v4l2_lock. */
static int usbvision_v4l2_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct usb_usbvision *usbvision = video_drvdata(file);
	int res;

	/* Interruptible lock: a pending signal aborts the wait and the
	 * caller sees -ERESTARTSYS instead of blocking forever. */
	if (mutex_lock_interruptible(&usbvision->v4l2_lock))
		return -ERESTARTSYS;
	res = usbvision_mmap(file, vma);
	mutex_unlock(&usbvision->v4l2_lock);
	return res;
}
| 0 |
[
"CWE-17"
] |
media_tree
|
fa52bd506f274b7619955917abfde355e3d19ffe
| 41,225,232,450,617,926,000,000,000,000,000,000,000 | 11 |
[media] usbvision: fix crash on detecting device with invalid configuration
The usbvision driver crashes when a specially crafted usb device with invalid
number of interfaces or endpoints is detected. This fix adds checks that the
device has proper configuration expected by the driver.
Reported-by: Ralf Spenneberg <[email protected]>
Signed-off-by: Vladis Dronov <[email protected]>
Signed-off-by: Mauro Carvalho Chehab <[email protected]>
|
Subsets and Splits
CWE 416 & 19
The query filters records related to specific CWEs (Common Weakness Enumerations), providing a basic overview of entries with these vulnerabilities but without deeper analysis.
CWE Frequency in Train Set
Counts the occurrences of each CWE (Common Weakness Enumeration) in the dataset, providing a basic distribution but limited insight.