func (string, 0-484k chars) | target (int64, 0-1) | cwe (list, 0-4 items) | project (string, 799 values) | commit_id (string, 40 chars) | hash (float64) | size (int64, 1-24k) | message (string, 0-13.3k chars)
---|---|---|---|---|---|---|---|
getCharacters(FileInfo *file, CharsString *characters) {
/* Get ruleChars string */
CharsString token;
if (!getToken(file, &token, "characters")) return 0;
return parseChars(file, characters, &token);
}
| 0 | ["CWE-787"] | liblouis | 2e4772befb2b1c37cb4b9d6572945115ee28630a | 191,167,099,419,753,550,000,000,000,000,000,000,000 | 6 |
Prevent invalid memory writes in compileRule
Thanks to Han Zheng for reporting it
Fixes #1214
|
stub_callback(isc_task_t *task, isc_event_t *event) {
const char me[] = "stub_callback";
dns_requestevent_t *revent = (dns_requestevent_t *)event;
dns_stub_t *stub = NULL;
dns_message_t *msg = NULL;
dns_zone_t *zone = NULL;
char master[ISC_SOCKADDR_FORMATSIZE];
char source[ISC_SOCKADDR_FORMATSIZE];
uint32_t nscnt, cnamecnt, refresh, retry, expire;
isc_result_t result;
isc_time_t now;
bool exiting = false;
isc_interval_t i;
unsigned int j, soacount;
stub = revent->ev_arg;
INSIST(DNS_STUB_VALID(stub));
UNUSED(task);
zone = stub->zone;
ENTER;
TIME_NOW(&now);
LOCK_ZONE(zone);
if (DNS_ZONE_FLAG(zone, DNS_ZONEFLG_EXITING)) {
zone_debuglog(zone, me, 1, "exiting");
exiting = true;
goto next_master;
}
isc_sockaddr_format(&zone->masteraddr, master, sizeof(master));
isc_sockaddr_format(&zone->sourceaddr, source, sizeof(source));
if (revent->result != ISC_R_SUCCESS) {
if (revent->result == ISC_R_TIMEDOUT &&
!DNS_ZONE_FLAG(zone, DNS_ZONEFLG_NOEDNS)) {
DNS_ZONE_SETFLAG(zone, DNS_ZONEFLG_NOEDNS);
dns_zone_log(zone, ISC_LOG_DEBUG(1),
"refreshing stub: timeout retrying "
" without EDNS master %s (source %s)",
master, source);
goto same_master;
}
dns_zonemgr_unreachableadd(zone->zmgr, &zone->masteraddr,
&zone->sourceaddr, &now);
dns_zone_log(zone, ISC_LOG_INFO,
"could not refresh stub from master %s"
" (source %s): %s", master, source,
dns_result_totext(revent->result));
goto next_master;
}
result = dns_message_create(zone->mctx, DNS_MESSAGE_INTENTPARSE, &msg);
if (result != ISC_R_SUCCESS)
goto next_master;
result = dns_request_getresponse(revent->request, msg, 0);
if (result != ISC_R_SUCCESS)
goto next_master;
/*
* Unexpected rcode.
*/
if (msg->rcode != dns_rcode_noerror) {
char rcode[128];
isc_buffer_t rb;
isc_buffer_init(&rb, rcode, sizeof(rcode));
(void)dns_rcode_totext(msg->rcode, &rb);
if (!DNS_ZONE_FLAG(zone, DNS_ZONEFLG_NOEDNS) &&
(msg->rcode == dns_rcode_servfail ||
msg->rcode == dns_rcode_notimp ||
msg->rcode == dns_rcode_formerr)) {
dns_zone_log(zone, ISC_LOG_DEBUG(1),
"refreshing stub: rcode (%.*s) retrying "
"without EDNS master %s (source %s)",
(int)rb.used, rcode, master, source);
DNS_ZONE_SETFLAG(zone, DNS_ZONEFLG_NOEDNS);
goto same_master;
}
dns_zone_log(zone, ISC_LOG_INFO,
"refreshing stub: "
"unexpected rcode (%.*s) from %s (source %s)",
(int)rb.used, rcode, master, source);
goto next_master;
}
/*
* We need complete messages.
*/
if ((msg->flags & DNS_MESSAGEFLAG_TC) != 0) {
if (dns_request_usedtcp(revent->request)) {
dns_zone_log(zone, ISC_LOG_INFO,
"refreshing stub: truncated TCP "
"response from master %s (source %s)",
master, source);
goto next_master;
}
DNS_ZONE_SETFLAG(zone, DNS_ZONEFLG_USEVC);
goto same_master;
}
/*
* If non-auth log and next master.
*/
if ((msg->flags & DNS_MESSAGEFLAG_AA) == 0) {
dns_zone_log(zone, ISC_LOG_INFO, "refreshing stub: "
"non-authoritative answer from "
"master %s (source %s)", master, source);
goto next_master;
}
/*
* Sanity checks.
*/
cnamecnt = message_count(msg, DNS_SECTION_ANSWER, dns_rdatatype_cname);
nscnt = message_count(msg, DNS_SECTION_ANSWER, dns_rdatatype_ns);
if (cnamecnt != 0) {
dns_zone_log(zone, ISC_LOG_INFO,
"refreshing stub: unexpected CNAME response "
"from master %s (source %s)", master, source);
goto next_master;
}
if (nscnt == 0) {
dns_zone_log(zone, ISC_LOG_INFO,
"refreshing stub: no NS records in response "
"from master %s (source %s)", master, source);
goto next_master;
}
/*
* Save answer.
*/
result = save_nsrrset(msg, &zone->origin, stub->db, stub->version);
if (result != ISC_R_SUCCESS) {
dns_zone_log(zone, ISC_LOG_INFO,
"refreshing stub: unable to save NS records "
"from master %s (source %s)", master, source);
goto next_master;
}
/*
* Tidy up.
*/
dns_db_closeversion(stub->db, &stub->version, true);
ZONEDB_LOCK(&zone->dblock, isc_rwlocktype_write);
if (zone->db == NULL)
zone_attachdb(zone, stub->db);
result = zone_get_from_db(zone, zone->db, NULL, &soacount, NULL,
&refresh, &retry, &expire, NULL, NULL);
if (result == ISC_R_SUCCESS && soacount > 0U) {
zone->refresh = RANGE(refresh, zone->minrefresh,
zone->maxrefresh);
zone->retry = RANGE(retry, zone->minretry, zone->maxretry);
zone->expire = RANGE(expire, zone->refresh + zone->retry,
DNS_MAX_EXPIRE);
DNS_ZONE_SETFLAG(zone, DNS_ZONEFLG_HAVETIMERS);
}
ZONEDB_UNLOCK(&zone->dblock, isc_rwlocktype_write);
dns_db_detach(&stub->db);
dns_message_destroy(&msg);
isc_event_free(&event);
dns_request_destroy(&zone->request);
DNS_ZONE_CLRFLAG(zone, DNS_ZONEFLG_REFRESH);
DNS_ZONE_SETFLAG(zone, DNS_ZONEFLG_LOADED);
DNS_ZONE_JITTER_ADD(&now, zone->refresh, &zone->refreshtime);
isc_interval_set(&i, zone->expire, 0);
DNS_ZONE_TIME_ADD(&now, zone->expire, &zone->expiretime);
if (zone->masterfile != NULL)
zone_needdump(zone, 0);
zone_settimer(zone, &now);
goto free_stub;
next_master:
if (stub->version != NULL)
dns_db_closeversion(stub->db, &stub->version, false);
if (stub->db != NULL)
dns_db_detach(&stub->db);
if (msg != NULL)
dns_message_destroy(&msg);
isc_event_free(&event);
dns_request_destroy(&zone->request);
/*
* Skip to next failed / untried master.
*/
do {
zone->curmaster++;
} while (zone->curmaster < zone->masterscnt &&
zone->mastersok[zone->curmaster]);
DNS_ZONE_CLRFLAG(zone, DNS_ZONEFLG_NOEDNS);
if (exiting || zone->curmaster >= zone->masterscnt) {
bool done = true;
if (!exiting &&
DNS_ZONE_OPTION(zone, DNS_ZONEOPT_USEALTXFRSRC) &&
!DNS_ZONE_FLAG(zone, DNS_ZONEFLG_USEALTXFRSRC)) {
/*
* Did we get a good answer from all the masters?
*/
for (j = 0; j < zone->masterscnt; j++)
if (zone->mastersok[j] == false) {
done = false;
break;
}
} else
done = true;
if (!done) {
zone->curmaster = 0;
/*
* Find the next failed master.
*/
while (zone->curmaster < zone->masterscnt &&
zone->mastersok[zone->curmaster])
zone->curmaster++;
DNS_ZONE_SETFLAG(zone, DNS_ZONEFLG_USEALTXFRSRC);
} else {
DNS_ZONE_CLRFLAG(zone, DNS_ZONEFLG_REFRESH);
zone_settimer(zone, &now);
goto free_stub;
}
}
queue_soa_query(zone);
goto free_stub;
same_master:
if (msg != NULL)
dns_message_destroy(&msg);
isc_event_free(&event);
dns_request_destroy(&zone->request);
ns_query(zone, NULL, stub);
UNLOCK_ZONE(zone);
goto done;
free_stub:
UNLOCK_ZONE(zone);
stub->magic = 0;
dns_zone_idetach(&stub->zone);
INSIST(stub->db == NULL);
INSIST(stub->version == NULL);
isc_mem_put(stub->mctx, stub, sizeof(*stub));
done:
INSIST(event == NULL);
return;
}
| 0 | ["CWE-327"] | bind9 | f09352d20a9d360e50683cd1d2fc52ccedcd77a0 | 169,655,409,138,470,560,000,000,000,000,000,000,000 | 257 |
Update keyfetch_done compute_tag check
If in keyfetch_done the compute_tag fails (because for example the
algorithm is not supported), don't crash, but instead ignore the
key.
|
static int dissect_CPMSetBindings(tvbuff_t *tvb, packet_info *pinfo, proto_tree *parent_tree, gboolean in, void *private_data)
{
gint offset = 16;
struct CPMSetBindingsIn request;
col_append_str(pinfo->cinfo, COL_INFO, "SetBindings");
if (in) {
struct mswsp_ct *ct = NULL;
struct message_data *data = NULL;
proto_item *ti;
proto_tree *tree, *pad_tree;
guint32 size, num, n;
gint64 column_size;
ti = proto_tree_add_item(parent_tree, hf_mswsp_msg, tvb, offset, -1, ENC_NA);
tree = proto_item_add_subtree(ti, ett_mswsp_msg);
proto_item_set_text(ti, "SetBindingsIn");
pad_tree = proto_tree_add_subtree(tree, tvb, offset, 0, ett_mswsp_pad, &ti, "Padding");
proto_tree_add_item(tree, hf_mswsp_msg_cpmsetbinding_hcursor, tvb, offset, 4, ENC_LITTLE_ENDIAN);
request.hcursor = tvb_get_letohl(tvb, offset);
offset += 4;
request.brow = tvb_get_letohl(tvb, offset);
proto_tree_add_item(tree, hf_mswsp_msg_cpmsetbinding_cbrow, tvb, offset, 4, ENC_LITTLE_ENDIAN);
offset += 4;
size = tvb_get_letohl(tvb, offset);
request.bbindingdesc = size;
proto_tree_add_item(tree, hf_mswsp_msg_cpmsetbinding_desc, tvb, offset, 4, ENC_LITTLE_ENDIAN);
offset += 4;
request.dummy = tvb_get_letohl(tvb, offset);
proto_tree_add_item(tree, hf_mswsp_msg_cpmsetbinding_dummy, tvb, offset, 4, ENC_LITTLE_ENDIAN);
offset += 4;
num = tvb_get_letohl(tvb, offset);
request.ccolumns = num;
ti = proto_tree_add_item(tree, hf_mswsp_msg_cpmsetbinding_ccolumns, tvb, offset, 4, ENC_LITTLE_ENDIAN);
offset += 4;
proto_tree_add_item(tree, hf_mswsp_msg_cpmsetbinding_acolumns, tvb, offset, size-4, ENC_NA);
/* Sanity check size value */
column_size = num*MIN_CTABLECOL_SIZE;
if (num > MAX_CTABLECOL_SIZE || column_size > tvb_reported_length_remaining(tvb, offset))
{
expert_add_info(pinfo, ti, &ei_mswsp_msg_cpmsetbinding_ccolumns);
return tvb_reported_length(tvb);
}
ct = get_create_converstation_data(pinfo);
request.acolumns = (struct CTableColumn*)wmem_alloc(wmem_file_scope(),
sizeof(struct CTableColumn) * num);
for (n=0; n<num; n++) {
offset = parse_padding(tvb, offset, 4, pad_tree, "padding_aColumns[%u]", n);
offset = parse_CTableColumn(tvb, pinfo, offset, tree, pad_tree, &request.acolumns[n],"aColumns[%u]", n);
}
data = find_or_create_message_data(ct, pinfo,0xD0,in, private_data);
if (data) {
data->content.bindingsin = request;
}
} else { /* server only returns status with header */
}
return tvb_reported_length(tvb);
}
| 0 | ["CWE-770"] | wireshark | b7a0650e061b5418ab4a8f72c6e4b00317aff623 | 129,332,429,742,541,740,000,000,000,000,000,000,000 | 71 |
MS-WSP: Don't allocate huge amounts of memory.
Add a couple of memory allocation sanity checks, one of which
fixes #17331.
|
slapi_pblock_get_task_warning(Slapi_PBlock *pb)
{
#ifdef PBLOCK_ANALYTICS
pblock_analytics_record(pb, SLAPI_TASK_WARNING);
#endif
if (pb->pb_task != NULL) {
return pb->pb_task->task_warning;
}
return 0;
}
| 0 | ["CWE-415"] | 389-ds-base | a3c298f8140d3e4fa1bd5a670f1bb965a21a9b7b | 287,354,676,523,406,870,000,000,000,000,000,000,000 | 10 |
Issue 5218 - double-free of the virtual attribute context in persistent search (#5219)
description:
A search is processed by a worker using a private pblock.
If the search is persistent, the worker spawns a thread
and duplicates its private pblock so that the spawned
thread can continue to process the persistent search.
The worker then ends the initial search, reinits (frees) its private pblock,
and returns to monitoring the wait_queue.
When the persistent search completes, it frees the duplicated
pblock.
The problem is that the private pblock and the duplicated pblock
refer to the same structure (pb_vattr_context).
That can lead to a double free.
Fix:
When cloning the pblock (slapi_pblock_clone), make sure
to transfer the references inside the original (private)
pblock to the target (cloned) one.
That includes the pb_vattr_context pointer.
Reviewed by: Mark Reynolds, James Chapman, Pierre Rogier (Thanks !)
Co-authored-by: Mark Reynolds <[email protected]>
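The ownership-transfer fix described above can be sketched generically. The struct and function names below are hypothetical, not the actual 389-ds-base code: when the context is cloned, the pointer is moved to the clone and cleared in the source, so only one owner ever frees it.
```c
#include <stdlib.h>

/* Hypothetical stand-ins for the pblock and its vattr context. */
struct vattr_ctx { int dummy; };

struct pblock {
    struct vattr_ctx *vattr;   /* owned pointer */
};

/* Clone that transfers ownership of vattr from src to the clone,
 * mirroring the fix: after cloning, only the clone still holds the
 * reference, so freeing both pblocks no longer double-frees it. */
static struct pblock *pblock_clone(struct pblock *src)
{
    struct pblock *dst = calloc(1, sizeof(*dst));
    if (dst == NULL)
        return NULL;
    dst->vattr = src->vattr;   /* move the reference ...        */
    src->vattr = NULL;         /* ... and drop it in the source */
    return dst;
}

static void pblock_free(struct pblock *pb)
{
    if (pb == NULL)
        return;
    free(pb->vattr);           /* safe: at most one pblock owns it */
    free(pb);
}
```
Under this convention, pblock_free() can be called on both the original and the clone, in any order, without touching the same context twice.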
|
int js_isregexp(js_State *J, int idx)
{
js_Value *v = stackidx(J, idx);
return v->type == JS_TOBJECT && v->u.object->type == JS_CREGEXP;
}
| 0 | ["CWE-476"] | mujs | 77ab465f1c394bb77f00966cd950650f3f53cb24 | 7,004,631,823,063,290,000,000,000,000,000,000,000 | 5 |
Fix 697401: Error when dropping extra arguments to lightweight functions.
|
static inline void encode_claim_delegate_cur(struct xdr_stream *xdr, const struct qstr *name, const nfs4_stateid *stateid)
{
__be32 *p;
p = reserve_space(xdr, 4+NFS4_STATEID_SIZE);
*p++ = cpu_to_be32(NFS4_OPEN_CLAIM_DELEGATE_CUR);
xdr_encode_opaque_fixed(p, stateid->data, NFS4_STATEID_SIZE);
encode_string(xdr, name->len, name->name);
}
| 0 | ["CWE-703", "CWE-189"] | linux | bf118a342f10dafe44b14451a1392c3254629a1f | 171,421,726,823,130,330,000,000,000,000,000,000,000 | 9 |
NFSv4: include bitmap in nfsv4 get acl data
The NFSv4 bitmap size is unbounded: a server can return an arbitrary
sized bitmap in an FATTR4_WORD0_ACL request. Replace the use of
nfs4_fattr_bitmap_maxsz as a guess at the maximum bitmask returned by a server
with the inclusion of the bitmap (xdr length plus bitmasks) and the acl data
xdr length in the (cached) acl page data. This is a general solution to commit
This is a general solution to commit e5012d1f "NFSv4.1: update
nfs4_fattr_bitmap_maxsz" and fixes hitting a BUG_ON in xdr_shrink_bufhead
when getting ACLs.
Fix a bug in decode_getacl that returned -EINVAL on ACLs > page when getxattr
was called with a NULL buffer, preventing ACL > PAGE_SIZE from being retrieved.
Cc: [email protected]
Signed-off-by: Andy Adamson <[email protected]>
Signed-off-by: Trond Myklebust <[email protected]>
|
static void dissect_ACKNACK(tvbuff_t *tvb, packet_info *pinfo, gint offset, guint8 flags,
const guint encoding, int octets_to_next_header, proto_tree *tree,
proto_item *item, endpoint_guid *guid) {
/* RTPS 1.0/1.1:
* 0...2...........7...............15.............23...............31
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | ACK |X|X|X|X|X|X|F|E| octetsToNextHeader |
* +---------------+---------------+---------------+---------------+
* | ObjectId readerObjectId |
* +---------------+---------------+---------------+---------------+
* | ObjectId writerObjectId |
* +---------------+---------------+---------------+---------------+
* | |
* + Bitmap bitmap +
* | |
* +---------------+---------------+---------------+---------------+
* | Counter count |
* +---------------+---------------+---------------+---------------+
*
* RTPS 1.2/2.0:
* 0...2...........7...............15.............23...............31
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | ACKNACK |X|X|X|X|X|X|F|E| octetsToNextHeader |
* +---------------+---------------+---------------+---------------+
* | EntityId readerEntityId |
* +---------------+---------------+---------------+---------------+
* | EntityId writerEntityId |
* +---------------+---------------+---------------+---------------+
* | |
* + SequenceNumberSet readerSNState +
* | |
* +---------------+---------------+---------------+---------------+
* | Counter count |
* +---------------+---------------+---------------+---------------+
*/
gint original_offset; /* Offset to the readerEntityId */
proto_item *octet_item;
guint32 wid;
proto_tree_add_bitmask_value(tree, tvb, offset + 1, hf_rtps_sm_flags, ett_rtps_flags, ACKNACK_FLAGS, flags);
octet_item = proto_tree_add_item(tree, hf_rtps_sm_octets_to_next_header, tvb, offset + 2, 2, encoding);
if (octets_to_next_header < 20) {
expert_add_info_format(pinfo, octet_item, &ei_rtps_sm_octets_to_next_header_error, "(Error: should be >= 20)");
return;
}
offset += 4;
original_offset = offset;
/* readerEntityId */
rtps_util_add_entity_id(tree, tvb, offset, hf_rtps_sm_rdentity_id, hf_rtps_sm_rdentity_id_key,
hf_rtps_sm_rdentity_id_kind, ett_rtps_rdentity, "readerEntityId", NULL);
offset += 4;
/* writerEntityId */
rtps_util_add_entity_id(tree, tvb, offset, hf_rtps_sm_wrentity_id, hf_rtps_sm_wrentity_id_key,
hf_rtps_sm_wrentity_id_kind, ett_rtps_wrentity, "writerEntityId", &wid);
offset += 4;
guid->entity_id = wid;
guid->fields_present |= GUID_HAS_ENTITY_ID;
rtps_util_topic_info_add_tree(tree, tvb, offset, guid);
/* Bitmap */
offset = rtps_util_add_bitmap(tree, tvb, offset, encoding, "readerSNState", TRUE);
/* RTPS 1.0 didn't have count: make sure we don't decode it wrong
* in this case
*/
if (offset + 4 == original_offset + octets_to_next_header) {
/* Count is present */
proto_tree_add_item(tree, hf_rtps_acknack_count, tvb, offset, 4, encoding);
} else if (offset < original_offset + octets_to_next_header) {
/* In this case there must be something wrong in the bitmap: there
* are some extra bytes that we don't know how to decode
*/
expert_add_info_format(pinfo, item, &ei_rtps_extra_bytes, "Don't know how to decode those extra bytes: %d", octets_to_next_header - offset);
} else if (offset > original_offset + octets_to_next_header) {
/* Decoding the bitmap went over the end of this submessage.
* Enter an item in the protocol tree that spans over the entire
* submessage.
*/
expert_add_info(pinfo, item, &ei_rtps_missing_bytes);
}
}
| 0 | ["CWE-401"] | wireshark | 33e63d19e5496c151bad69f65cdbc7cba2b4c211 | 244,772,503,192,367,460,000,000,000,000,000,000,000 | 86 |
RTPS: Fixup our coherent set map.
coherent_set_tracking.coherent_set_registry_map uses a struct as a key,
but the hash and comparison routines treat keys as a sequence of bytes.
Make sure every key byte is initialized. Fixes #16994.
Call wmem_strong_hash on our key in coherent_set_key_hash_by_key instead
of creating and leaking a GBytes struct.
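The underlying pitfall, hashing a struct key byte by byte while its padding bytes are indeterminate, can be shown with a minimal, generic C sketch. The key type and hash function are hypothetical, not Wireshark's actual coherent-set key or wmem_strong_hash.
```c
#include <stdint.h>
#include <string.h>

/* Hypothetical key: padding bytes typically follow 'kind'. */
struct sample_key {
    uint8_t  kind;
    uint32_t id;
    uint64_t seq;
};

/* FNV-1a over the raw bytes, analogous to a hash routine that treats
 * the key as an opaque sequence of bytes. */
static uint64_t hash_bytes(const void *p, size_t len)
{
    const uint8_t *b = p;
    uint64_t h = 1469598103934665603ULL;
    for (size_t i = 0; i < len; i++) {
        h ^= b[i];
        h *= 1099511628211ULL;
    }
    return h;
}

static uint64_t key_hash(uint8_t kind, uint32_t id, uint64_t seq)
{
    struct sample_key k;
    memset(&k, 0, sizeof(k));  /* the fix: every byte, padding included, is defined */
    k.kind = kind;
    k.id   = id;
    k.seq  = seq;
    return hash_bytes(&k, sizeof(k));
}
```
Without the memset, two keys built from identical field values can hash and compare as different simply because their padding bytes differ.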
|
TEST_F(RenameCollectionTest, LongIndexNameAllowedForTargetCollection) {
ASSERT_GREATER_THAN(_targetNssDifferentDb.size(), _sourceNss.size());
std::size_t longestIndexNameAllowedForSource =
NamespaceString::MaxNsLen - 2U /*strlen(".$")*/ - _sourceNss.size();
ASSERT_OK(_sourceNss.checkLengthForRename(longestIndexNameAllowedForSource));
ASSERT_EQUALS(ErrorCodes::InvalidLength,
_targetNssDifferentDb.checkLengthForRename(longestIndexNameAllowedForSource));
_createCollection(_opCtx.get(), _sourceNss);
const std::string indexName(longestIndexNameAllowedForSource, 'a');
_createIndexOnEmptyCollection(_opCtx.get(), _sourceNss, indexName);
ASSERT_OK(renameCollection(_opCtx.get(), _sourceNss, _targetNssDifferentDb, {}));
}
| 0 | ["CWE-20"] | mongo | 35c1b1f588f04926a958ad2fe4d9c59d79f81e8b | 133,301,610,355,801,220,000,000,000,000,000,000,000 | 13 |
SERVER-35636 renameCollectionForApplyOps checks for complete namespace
|
release_keyid_list (keyid_list_t k)
{
while (k)
{
keyid_list_t k2 = k->next;
xfree (k);
k = k2;
}
}
| 0 | ["CWE-310"] | gnupg | 4bde12206c5bf199dc6e12a74af8da4558ba41bf | 213,749,643,851,249,800,000,000,000,000,000,000,000 | 9 |
gpg: Distinguish between missing and cleared key flags.
* include/cipher.h (PUBKEY_USAGE_NONE): New.
* g10/getkey.c (parse_key_usage): Set new flag.
--
We do not want to use the default capabilities (derived from the
algorithm) if any key flags are given in a signature. Thus if key
flags are used in any way, the default key capabilities are never
used.
This allows creating a key with key flags set to all zero so that it can't
be used. This better reflects common sense.
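A rough sketch of the distinction the message draws: no key-flags subpacket means falling back to the algorithm defaults, while a present-but-zero subpacket means no capabilities at all. The representation and bit masks below are simplified and hypothetical, not GnuPG's actual parse_key_usage.
```c
#include <stdint.h>

#define USAGE_SIGN 0x01
#define USAGE_ENCR 0x02
#define USAGE_NONE 0x80   /* flags subpacket present, but every usable bit cleared */

/* keyflags: first octet of the key-flags subpacket, or NULL when the
 * signature carries no such subpacket (hypothetical representation). */
static unsigned int key_usage(const uint8_t *keyflags, unsigned int algo_default)
{
    unsigned int usage = 0;

    if (keyflags == NULL)
        return algo_default;        /* missing flags: use algorithm defaults */

    if (keyflags[0] & 0x03)         /* certify / sign bits (simplified) */
        usage |= USAGE_SIGN;
    if (keyflags[0] & 0x0c)         /* encrypt bits (simplified) */
        usage |= USAGE_ENCR;

    /* Flags were given but grant nothing: mark the key unusable instead
     * of silently falling back to the defaults. */
    return usage ? usage : USAGE_NONE;
}
```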
|
void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb,
struct net_device *in, struct net_device *out,
int (*okfn)(struct sk_buff *))
{
struct sk_buff *s, *s2;
unsigned int ret = 0;
for (s = NFCT_FRAG6_CB(skb)->orig; s;) {
nf_conntrack_put_reasm(s->nfct_reasm);
nf_conntrack_get_reasm(skb);
s->nfct_reasm = skb;
s2 = s->next;
s->next = NULL;
if (ret != -ECANCELED)
ret = NF_HOOK_THRESH(NFPROTO_IPV6, hooknum, s,
in, out, okfn,
NF_IP6_PRI_CONNTRACK_DEFRAG + 1);
else
kfree_skb(s);
s = s2;
}
nf_conntrack_put_reasm(skb);
}
| 0 | [] | linux | 3ef0eb0db4bf92c6d2510fe5c4dc51852746f206 | 161,904,198,095,965,900,000,000,000,000,000,000,000 | 26 |
net: frag, move LRU list maintenance outside of rwlock
Updating the fragmentation queues' LRU (Least-Recently-Used) list
required taking the hash writer lock. However, the LRU list isn't
tied to the hash at all, so we can use a separate lock for it.
Original-idea-by: Florian Westphal <[email protected]>
Signed-off-by: Jesper Dangaard Brouer <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
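As a rough, user-space illustration of the locking change (a generic pthreads sketch under assumed names, not the kernel's inet_frag code): the hash table keeps its own lock, and LRU bookkeeping takes a second, independent lock.
```c
#include <pthread.h>

/* Hypothetical fragment-queue bookkeeping with two independent locks. */
struct frag_state {
    pthread_rwlock_t hash_lock;  /* protects the hash table only */
    pthread_mutex_t  lru_lock;   /* protects the LRU list only   */
    /* ... hash buckets and the LRU list head would live here ... */
};

static void touch_lru(struct frag_state *s)
{
    /* Before: this required write-locking hash_lock.
     * After: only the dedicated LRU lock is taken. */
    pthread_mutex_lock(&s->lru_lock);
    /* move the queue to the tail of the LRU list here */
    pthread_mutex_unlock(&s->lru_lock);
}

static void hash_insert(struct frag_state *s)
{
    pthread_rwlock_wrlock(&s->hash_lock);
    /* link the queue into its hash bucket here */
    pthread_rwlock_unlock(&s->hash_lock);

    /* LRU maintenance happens outside the hash writer lock. */
    touch_lru(s);
}
```
The benefit is that frequent LRU updates no longer serialize against readers and writers of the hash table.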
|
static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
{
struct inode *inode = d_inode(dentry);
int err = 0;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct btrfs_trans_handle *trans;
if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
return -ENOTEMPTY;
if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID)
return -EPERM;
trans = __unlink_start_trans(dir);
if (IS_ERR(trans))
return PTR_ERR(trans);
if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
err = btrfs_unlink_subvol(trans, root, dir,
BTRFS_I(inode)->location.objectid,
dentry->d_name.name,
dentry->d_name.len);
goto out;
}
err = btrfs_orphan_add(trans, inode);
if (err)
goto out;
/* now the directory is empty */
err = btrfs_unlink_inode(trans, root, dir, d_inode(dentry),
dentry->d_name.name, dentry->d_name.len);
if (!err)
btrfs_i_size_write(inode, 0);
out:
btrfs_end_transaction(trans, root);
btrfs_btree_balance_dirty(root);
return err;
}
| 0 | ["CWE-200"] | linux | 0305cd5f7fca85dae392b9ba85b116896eb7c1c7 | 50,090,928,401,754,580,000,000,000,000,000,000,000 | 39 |
Btrfs: fix truncation of compressed and inlined extents
When truncating a file to a smaller size which consists of an inline
extent that is compressed, we did not discard (or made unusable) the
data between the new file size and the old file size, wasting metadata
space and allowing for the truncated data to be leaked and the data
corruption/loss mentioned below.
We were also not correctly decrementing the number of bytes used by the
inode; we were setting it to zero, giving a wrong report for callers of
the stat(2) syscall. The fsck tool also reported an error about a mismatch
between the nbytes of the file versus the real space used by the file.
Now because we weren't discarding the truncated region of the file, it
was possible for a caller of the clone ioctl to actually read the data
that was truncated, allowing for a security breach without requiring root
access to the system, using only standard filesystem operations. The
scenario is the following:
1) User A creates a file which consists of an inline and compressed
extent with a size of 2000 bytes - the file is not accessible to
any other users (no read, write or execution permission for anyone
else);
2) The user truncates the file to a size of 1000 bytes;
3) User A makes the file world readable;
4) User B creates a file consisting of an inline extent of 2000 bytes;
5) User B issues a clone operation from user A's file into its own
file (using a length argument of 0, clone the whole range);
6) User B now gets to see the 1000 bytes that user A truncated from
its file before it made its file world readable. User B also lost
the bytes in the range [1000, 2000[ bytes from its own file, but
that might be ok if his/her intention was reading stale data from
user A that was never supposed to be public.
Note that this contrasts with the case where we truncate a file from 2000
bytes to 1000 bytes and then truncate it back from 1000 to 2000 bytes. In
this case reading any byte from the range [1000, 2000[ will return a value
of 0x00, instead of the original data.
This problem exists since the clone ioctl was added and happens both with
and without my recent data loss and file corruption fixes for the clone
ioctl (patch "Btrfs: fix file corruption and data loss after cloning
inline extents").
So fix this by truncating the compressed inline extents as we do for the
non-compressed case, which involves decompressing, if the data isn't already
in the page cache, compressing the truncated version of the extent, writing
the compressed content into the inline extent and then truncating it.
The following test case for fstests reproduces the problem. In order for
the test to pass both this fix and my previous fix for the clone ioctl
that forbids cloning a smaller inline extent into a larger one,
which is titled "Btrfs: fix file corruption and data loss after cloning
inline extents", are needed. Without that other fix the test fails in a
different way that does not leak the truncated data, instead part of
destination file gets replaced with zeroes (because the destination file
has a larger inline extent than the source).
seq=`basename $0`
seqres=$RESULT_DIR/$seq
echo "QA output created by $seq"
tmp=/tmp/$$
status=1 # failure is the default!
trap "_cleanup; exit \$status" 0 1 2 3 15
_cleanup()
{
rm -f $tmp.*
}
# get standard environment, filters and checks
. ./common/rc
. ./common/filter
# real QA test starts here
_need_to_be_root
_supported_fs btrfs
_supported_os Linux
_require_scratch
_require_cloner
rm -f $seqres.full
_scratch_mkfs >>$seqres.full 2>&1
_scratch_mount "-o compress"
# Create our test files. File foo is going to be the source of a clone operation
# and consists of a single inline extent with an uncompressed size of 512 bytes,
# while file bar consists of a single inline extent with an uncompressed size of
# 256 bytes. For our test's purpose, it's important that file bar has an inline
# extent with a size smaller than foo's inline extent.
$XFS_IO_PROG -f -c "pwrite -S 0xa1 0 128" \
-c "pwrite -S 0x2a 128 384" \
$SCRATCH_MNT/foo | _filter_xfs_io
$XFS_IO_PROG -f -c "pwrite -S 0xbb 0 256" $SCRATCH_MNT/bar | _filter_xfs_io
# Now durably persist all metadata and data. We do this to make sure that we get
# on disk an inline extent with a size of 512 bytes for file foo.
sync
# Now truncate our file foo to a smaller size. Because it consists of a
# compressed and inline extent, btrfs did not shrink the inline extent to the
# new size (if the extent was not compressed, btrfs would shrink it to 128
# bytes), it only updates the inode's i_size to 128 bytes.
$XFS_IO_PROG -c "truncate 128" $SCRATCH_MNT/foo
# Now clone foo's inline extent into bar.
# This clone operation should fail with errno EOPNOTSUPP because the source
# file consists only of an inline extent and the file's size is smaller than
# the inline extent of the destination (128 bytes < 256 bytes). However the
# clone ioctl was not prepared to deal with a file that has a size smaller
# than the size of its inline extent (something that happens only for compressed
# inline extents), resulting in copying the full inline extent from the source
# file into the destination file.
#
# Note that btrfs' clone operation for inline extents consists of removing the
# inline extent from the destination inode and copy the inline extent from the
# source inode into the destination inode, meaning that if the destination
# inode's inline extent is larger (N bytes) than the source inode's inline
# extent (M bytes), some bytes (N - M bytes) will be lost from the destination
# file. Btrfs could copy the source inline extent's data into the destination's
# inline extent so that we would not lose any data, but that's currently not
# done due to the complexity that would be needed to deal with such cases
# (specially when one or both extents are compressed), returning EOPNOTSUPP, as
# it's normally not a very common case to clone very small files (only case
# where we get inline extents) and copying inline extents does not save any
# space (unlike for normal, non-inlined extents).
$CLONER_PROG -s 0 -d 0 -l 0 $SCRATCH_MNT/foo $SCRATCH_MNT/bar
# Now because the above clone operation used to succeed, and due to foo's inline
# extent not being shinked by the truncate operation, our file bar got the whole
# inline extent copied from foo, making us lose the last 128 bytes from bar
# which got replaced by the bytes in range [128, 256[ from foo before foo was
# truncated - in other words, data loss from bar and being able to read old and
# stale data from foo that should not be possible to read anymore through normal
# filesystem operations. Contrast with the case where we truncate a file from a
# size N to a smaller size M, truncate it back to size N and then read the range
# [M, N[, we should always get the value 0x00 for all the bytes in that range.
# We expected the clone operation to fail with errno EOPNOTSUPP and therefore
# not modify our file's bar data/metadata. So its content should be 256 bytes
# long with all bytes having the value 0xbb.
#
# Without the btrfs bug fix, the clone operation succeeded and resulted in
# leaking truncated data from foo, the bytes that belonged to its range
# [128, 256[, and losing data from bar in that same range. So reading the
# file gave us the following content:
#
# 0000000 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1
# *
# 0000200 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a
# *
# 0000400
echo "File bar's content after the clone operation:"
od -t x1 $SCRATCH_MNT/bar
# Also because the foo's inline extent was not shrunk by the truncate
# operation, btrfs' fsck, which is run by the fstests framework every time a
# test completes, failed reporting the following error:
#
# root 5 inode 257 errors 400, nbytes wrong
status=0
exit
Cc: [email protected]
Signed-off-by: Filipe Manana <[email protected]>
|
static int ffs_func_set_alt(struct usb_function *f,
unsigned interface, unsigned alt)
{
struct ffs_function *func = ffs_func_from_usb(f);
struct ffs_data *ffs = func->ffs;
int ret = 0, intf;
if (alt != (unsigned)-1) {
intf = ffs_func_revmap_intf(func, interface);
if (unlikely(intf < 0))
return intf;
}
if (ffs->func)
ffs_func_eps_disable(ffs->func);
if (ffs->state == FFS_DEACTIVATED) {
ffs->state = FFS_CLOSING;
INIT_WORK(&ffs->reset_work, ffs_reset_work);
schedule_work(&ffs->reset_work);
return -ENODEV;
}
if (ffs->state != FFS_ACTIVE)
return -ENODEV;
if (alt == (unsigned)-1) {
ffs->func = NULL;
ffs_event_add(ffs, FUNCTIONFS_DISABLE);
return 0;
}
ffs->func = func;
ret = ffs_func_eps_enable(func);
if (likely(ret >= 0))
ffs_event_add(ffs, FUNCTIONFS_ENABLE);
return ret;
}
| 0 | ["CWE-416", "CWE-362"] | linux | 38740a5b87d53ceb89eb2c970150f6e94e00373a | 37,721,575,909,873,340,000,000,000,000,000,000,000 | 38 |
usb: gadget: f_fs: Fix use-after-free
When using asynchronous read or write operations on the USB endpoints the
issuer of the IO request is notified by calling the ki_complete() callback
of the submitted kiocb when the URB has been completed.
Calling this ki_complete() callback will free kiocb. Make sure that the
structure is no longer accessed beyond that point, otherwise undefined
behaviour might occur.
Fixes: 2e4c7553cd6f ("usb: gadget: f_fs: add aio support")
Cc: <[email protected]> # v3.15+
Signed-off-by: Lars-Peter Clausen <[email protected]>
Signed-off-by: Felipe Balbi <[email protected]>
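The general rule behind the fix: once a completion callback that may free its argument has run, the argument must not be read again. A simplified sketch with hypothetical types (not the actual f_fs or kiocb code):
```c
/* Hypothetical request whose completion callback is allowed to free it. */
struct io_req {
    void (*complete)(struct io_req *req, long result);   /* like ki_complete() */
    long status;
};

static long finish_request(struct io_req *req)
{
    /* Copy out everything still needed *before* completing: the callback
     * may free req, so dereferencing it afterwards would be a
     * use-after-free. */
    long status = req->status;

    req->complete(req, status);

    /* From here on, only the saved copy is used; req must be treated as gone. */
    return status;
}
```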
|
parse_options(const char *data, struct parsed_mount_info *parsed_info)
{
char *value = NULL;
char *equals = NULL;
char *next_keyword = NULL;
char *out = parsed_info->options;
unsigned long *filesys_flags = &parsed_info->flags;
int out_len = 0;
int word_len;
int rc = 0;
int got_uid = 0;
int got_cruid = 0;
int got_gid = 0;
uid_t uid, cruid = 0;
gid_t gid;
char *ep;
struct passwd *pw;
struct group *gr;
/*
* max 32-bit uint in decimal is 4294967295 which is 10 chars wide
* +1 for NULL, and +1 for good measure
*/
char txtbuf[12];
/* make sure we're starting from beginning */
out[0] = '\0';
/* BB fixme check for separator override BB */
uid = getuid();
if (uid != 0)
got_uid = 1;
gid = getgid();
if (gid != 0)
got_gid = 1;
if (!data)
return EX_USAGE;
/*
* format is keyword,keyword2=value2,keyword3=value3...
* data = next keyword
* value = next value ie stuff after equal sign
*/
while (data && *data) {
next_keyword = strchr(data, ','); /* BB handle sep= */
/* temporarily null terminate end of keyword=value pair */
if (next_keyword)
*next_keyword++ = 0;
/* temporarily null terminate keyword if there's a value */
value = NULL;
if ((equals = strchr(data, '=')) != NULL) {
*equals = '\0';
value = equals + 1;
}
switch(parse_opt_token(data)) {
case OPT_USERS:
if (!value || !*value) {
*filesys_flags |= MS_USERS;
goto nocopy;
}
break;
case OPT_USER:
if (!value || !*value) {
if (data[4] == '\0') {
*filesys_flags |= MS_USER;
goto nocopy;
} else {
fprintf(stderr,
"username specified with no parameter\n");
return EX_USAGE;
}
} else {
/* domain/username%password */
const int max = MAX_DOMAIN_SIZE +
MAX_USERNAME_SIZE +
MOUNT_PASSWD_SIZE + 2;
if (strnlen(value, max + 1) >= max + 1) {
fprintf(stderr, "username too long\n");
return EX_USAGE;
}
rc = parse_username(value, parsed_info);
if (rc) {
fprintf(stderr,
"problem parsing username\n");
return rc;
}
goto nocopy;
}
case OPT_PASS:
if (parsed_info->got_password) {
fprintf(stderr,
"password specified twice, ignoring second\n");
goto nocopy;
}
if (!value || !*value) {
parsed_info->got_password = 1;
goto nocopy;
}
rc = set_password(parsed_info, value);
if (rc)
return rc;
goto nocopy;
case OPT_SEC:
if (value) {
if (!strncmp(value, "none", 4) ||
!strncmp(value, "krb5", 4))
parsed_info->got_password = 1;
}
break;
case OPT_IP:
if (!value || !*value) {
fprintf(stderr,
"target ip address argument missing\n");
} else if (strnlen(value, MAX_ADDRESS_LEN) <=
MAX_ADDRESS_LEN) {
strcpy(parsed_info->addrlist, value);
if (parsed_info->verboseflag)
fprintf(stderr,
"ip address %s override specified\n",
value);
goto nocopy;
} else {
fprintf(stderr, "ip address too long\n");
return EX_USAGE;
}
break;
/* unc || target || path */
case OPT_UNC:
if (!value || !*value) {
fprintf(stderr,
"invalid path to network resource\n");
return EX_USAGE;
}
rc = parse_unc(value, parsed_info);
if (rc)
return rc;
break;
/* dom || workgroup */
case OPT_DOM:
if (!value || !*value) {
fprintf(stderr, "CIFS: invalid domain name\n");
return EX_USAGE;
}
if (strnlen(value, sizeof(parsed_info->domain)) >=
sizeof(parsed_info->domain)) {
fprintf(stderr, "domain name too long\n");
return EX_USAGE;
}
strlcpy(parsed_info->domain, value,
sizeof(parsed_info->domain));
goto nocopy;
case OPT_CRED:
if (!value || !*value) {
fprintf(stderr,
"invalid credential file name specified\n");
return EX_USAGE;
}
rc = open_cred_file(value, parsed_info);
if (rc) {
fprintf(stderr,
"error %d (%s) opening credential file %s\n",
rc, strerror(rc), value);
return rc;
}
break;
case OPT_UID:
if (!value || !*value)
goto nocopy;
got_uid = 1;
errno = 0;
uid = strtoul(value, &ep, 10);
if (errno == 0)
goto nocopy;
pw = getpwnam(value);
if (pw == NULL) {
fprintf(stderr, "bad user name \"%s\"\n", value);
return EX_USAGE;
}
uid = pw->pw_uid;
goto nocopy;
case OPT_CRUID:
if (!value || !*value)
goto nocopy;
got_cruid = 1;
errno = 0;
cruid = strtoul(value, &ep, 10);
if (errno == 0)
goto nocopy;
pw = getpwnam(value);
if (pw == NULL) {
fprintf(stderr, "bad user name \"%s\"\n", value);
return EX_USAGE;
}
cruid = pw->pw_uid;
goto nocopy;
case OPT_GID:
if (!value || !*value)
goto nocopy;
got_gid = 1;
errno = 0;
gid = strtoul(value, &ep, 10);
if (errno == 0)
goto nocopy;
gr = getgrnam(value);
if (gr == NULL) {
fprintf(stderr, "bad group name \"%s\"\n", value);
return EX_USAGE;
}
gid = gr->gr_gid;
goto nocopy;
/* fmask fall through to file_mode */
case OPT_FMASK:
fprintf(stderr,
"WARNING: CIFS mount option 'fmask' is\
deprecated. Use 'file_mode' instead.\n");
data = "file_mode"; /* BB fix this */
case OPT_FILE_MODE:
if (!value || !*value) {
fprintf(stderr,
"Option '%s' requires a numerical argument\n",
data);
return EX_USAGE;
}
if (value[0] != '0')
fprintf(stderr,
"WARNING: '%s' not expressed in octal.\n",
data);
break;
/* dmask falls through to dir_mode */
case OPT_DMASK:
fprintf(stderr,
"WARNING: CIFS mount option 'dmask' is\
deprecated. Use 'dir_mode' instead.\n");
data = "dir_mode";
case OPT_DIR_MODE:
if (!value || !*value) {
fprintf(stderr,
"Option '%s' requires a numerical argument\n",
data);
return EX_USAGE;
}
if (value[0] != '0')
fprintf(stderr,
"WARNING: '%s' not expressed in octal.\n",
data);
break;
/* the following mount options should be
stripped out from what is passed into the kernel
since these options are best passed as the
mount flags rather than redundantly to the kernel
and could generate spurious warnings depending on the
level of the corresponding cifs vfs kernel code */
case OPT_NO_SUID:
*filesys_flags |= MS_NOSUID;
break;
case OPT_SUID:
*filesys_flags &= ~MS_NOSUID;
break;
case OPT_NO_DEV:
*filesys_flags |= MS_NODEV;
break;
/* nolock || nobrl */
case OPT_NO_LOCK:
*filesys_flags &= ~MS_MANDLOCK;
break;
case OPT_MAND:
*filesys_flags |= MS_MANDLOCK;
goto nocopy;
case OPT_NOMAND:
*filesys_flags &= ~MS_MANDLOCK;
goto nocopy;
case OPT_DEV:
*filesys_flags &= ~MS_NODEV;
break;
case OPT_NO_EXEC:
*filesys_flags |= MS_NOEXEC;
break;
case OPT_EXEC:
*filesys_flags &= ~MS_NOEXEC;
break;
case OPT_GUEST:
parsed_info->got_user = 1;
parsed_info->got_password = 1;
break;
case OPT_RO:
*filesys_flags |= MS_RDONLY;
goto nocopy;
case OPT_RW:
*filesys_flags &= ~MS_RDONLY;
goto nocopy;
case OPT_REMOUNT:
*filesys_flags |= MS_REMOUNT;
break;
case OPT_IGNORE:
goto nocopy;
}
/* check size before copying option to buffer */
word_len = strlen(data);
if (value)
word_len += 1 + strlen(value);
/* need 2 extra bytes for comma and null byte */
if (out_len + word_len + 2 > MAX_OPTIONS_LEN) {
fprintf(stderr, "Options string too long\n");
return EX_USAGE;
}
/* put back equals sign, if any */
if (equals)
*equals = '=';
/* go ahead and copy */
if (out_len)
strlcat(out, ",", MAX_OPTIONS_LEN);
strlcat(out, data, MAX_OPTIONS_LEN);
out_len = strlen(out);
nocopy:
data = next_keyword;
}
/* special-case the uid and gid */
if (got_uid) {
word_len = snprintf(txtbuf, sizeof(txtbuf), "%u", uid);
/* comma + "uid=" + terminating NULL == 6 */
if (out_len + word_len + 6 > MAX_OPTIONS_LEN) {
fprintf(stderr, "Options string too long\n");
return EX_USAGE;
}
if (out_len) {
strlcat(out, ",", MAX_OPTIONS_LEN);
out_len++;
}
snprintf(out + out_len, word_len + 5, "uid=%s", txtbuf);
out_len = strlen(out);
}
if (got_cruid) {
word_len = snprintf(txtbuf, sizeof(txtbuf), "%u", cruid);
/* comma + "cruid=" + terminating NULL == 6 */
if (out_len + word_len + 8 > MAX_OPTIONS_LEN) {
fprintf(stderr, "Options string too long\n");
return EX_USAGE;
}
if (out_len) {
strlcat(out, ",", MAX_OPTIONS_LEN);
out_len++;
}
snprintf(out + out_len, word_len + 7, "cruid=%s", txtbuf);
out_len = strlen(out);
}
if (got_gid) {
word_len = snprintf(txtbuf, sizeof(txtbuf), "%u", gid);
/* comma + "gid=" + terminating NULL == 6 */
if (out_len + word_len + 6 > MAX_OPTIONS_LEN) {
fprintf(stderr, "Options string too long\n");
return EX_USAGE;
}
if (out_len) {
strlcat(out, ",", MAX_OPTIONS_LEN);
out_len++;
}
snprintf(out + out_len, word_len + 5, "gid=%s", txtbuf);
}
return 0;
}
| 0 | ["CWE-20"] | cifs-utils | f6eae44a3d05b6515a59651e6bed8b6dde689aec | 221,107,615,698,170,440,000,000,000,000,000,000,000 | 400 |
mtab: handle ENOSPC/EFBIG condition properly when altering mtab
It's possible that when mount.cifs goes to append the mtab that there
won't be enough space to do so, and the mntent won't be appended to the
file in its entirety.
Add a my_endmntent routine that will fflush and then fsync the FILE if
that succeeds. If either fails then it will truncate the file back to
its provided size. It will then call endmntent unconditionally.
Have add_mtab call fstat on the opened mtab file in order to get the
size of the file before it has been appended. Assuming that that
succeeds, use my_endmntent to ensure that the file is not corrupted
before closing it. It's possible that we'll have a small race window
where the mtab is incorrect, but it should be quickly corrected.
This was reported some time ago as CVE-2011-1678:
http://openwall.com/lists/oss-security/2011/03/04/9
...and it seems to fix the reproducer that I was able to come up with.
Signed-off-by: Jeff Layton <[email protected]>
Reviewed-by: Suresh Jayaraman <[email protected]>
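A rough sketch of the routine the message describes, using only standard C and POSIX calls (fflush, fsync, ftruncate, endmntent); the name and exact error handling are illustrative, not the actual cifs-utils implementation.
```c
#include <mntent.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

/* Flush and fsync the mtab stream; if either step fails, truncate the
 * file back to the size it had before the append, then close the
 * stream with endmntent() unconditionally. Returns 0 on success. */
static int my_endmntent(FILE *pmntfile, off_t size_before_append)
{
    int rc = 0;

    if (fflush(pmntfile) != 0 || fsync(fileno(pmntfile)) != 0) {
        /* the appended entry may be incomplete: roll the file back */
        (void)ftruncate(fileno(pmntfile), size_before_append);
        rc = -1;
    }
    endmntent(pmntfile);   /* always closes the stream */
    return rc;
}
```
The caller would fstat() the open mtab before appending, as the message says, and pass that pre-append size in.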
|
prepare_missing(mrb_state *mrb, mrb_value recv, mrb_sym mid, struct RClass **clsp, uint32_t a, uint16_t *c, mrb_value blk, int super)
{
mrb_sym missing = MRB_SYM(method_missing);
mrb_callinfo *ci = mrb->c->ci;
uint16_t b = *c;
mrb_int n = b & 0xf;
mrb_int nk = (b>>4) & 0xf;
mrb_value *argv = &ci->stack[a+1];
mrb_value args;
mrb_method_t m;
/* pack positional arguments */
if (n == 15) args = argv[0];
else args = mrb_ary_new_from_values(mrb, n, argv);
if (mrb_func_basic_p(mrb, recv, missing, mrb_obj_missing)) {
method_missing:
if (super) mrb_no_method_error(mrb, mid, args, "no superclass method '%n'", mid);
else mrb_method_missing(mrb, mid, recv, args);
/* not reached */
}
if (mid != missing) {
*clsp = mrb_class(mrb, recv);
}
m = mrb_method_search_vm(mrb, clsp, missing);
if (MRB_METHOD_UNDEF_P(m)) goto method_missing; /* just in case */
mrb_stack_extend(mrb, a+4);
argv = &ci->stack[a+1]; /* maybe reallocated */
argv[0] = args;
if (nk == 0) {
argv[1] = blk;
}
else {
mrb_assert(nk == 15);
argv[1] = argv[n];
argv[2] = blk;
}
*c = 15 | (uint16_t)(nk<<4);
mrb_ary_unshift(mrb, args, mrb_symbol_value(mid));
return m;
}
| 0 | ["CWE-122", "CWE-787"] | mruby | 47068ae07a5fa3aa9a1879cdfe98a9ce0f339299 | 329,620,459,116,985,240,000,000,000,000,000,000,000 | 42 |
vm.c: packed arguments length may be zero for `send` method.
|
piv_process_history(sc_card_t *card)
{
piv_private_data_t * priv = PIV_DATA(card);
int r;
int i;
int enumtag;
u8 * rbuf = NULL;
size_t rbuflen = 0;
const u8 * body;
size_t bodylen;
const u8 * num;
size_t numlen;
const u8 * url = NULL;
size_t urllen;
u8 * ocfhfbuf = NULL;
unsigned int cla_out, tag_out;
size_t ocfhflen;
const u8 * seq;
const u8 * seqtag;
size_t seqlen;
const u8 * keyref;
size_t keyreflen;
const u8 * cert;
size_t certlen;
size_t certobjlen, i2;
u8 * certobj;
u8 * cp;
SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_VERBOSE);
r = piv_get_cached_data(card, PIV_OBJ_HISTORY, &rbuf, &rbuflen);
if (r == SC_ERROR_FILE_NOT_FOUND)
r = 0; /* OK if not found */
if (r <= 0) {
priv->obj_cache[PIV_OBJ_HISTORY].flags |= PIV_OBJ_CACHE_NOT_PRESENT;
goto err; /* no file, must be pre 800-73-3 card and not on card */
}
/* the object is now cached, see what we have */
if (rbuflen != 0) {
body = rbuf;
if ((r = sc_asn1_read_tag(&body, rbuflen, &cla_out, &tag_out, &bodylen)) != SC_SUCCESS) {
sc_log(card->ctx, "DER problem %d",r);
r = SC_ERROR_INVALID_ASN1_OBJECT;
goto err;
}
if ( cla_out+tag_out == 0x53 && body != NULL && bodylen != 0) {
numlen = 0;
num = sc_asn1_find_tag(card->ctx, body, bodylen, 0xC1, &numlen);
if (num) {
if (numlen != 1 || *num > PIV_OBJ_RETIRED_X509_20-PIV_OBJ_RETIRED_X509_1+1) {
r = SC_ERROR_INTERNAL; /* TODO some other error */
goto err;
}
priv->keysWithOnCardCerts = *num;
}
numlen = 0;
num = sc_asn1_find_tag(card->ctx, body, bodylen, 0xC2, &numlen);
if (num) {
if (numlen != 1 || *num > PIV_OBJ_RETIRED_X509_20-PIV_OBJ_RETIRED_X509_1+1) {
r = SC_ERROR_INTERNAL; /* TODO some other error */
goto err;
}
priv->keysWithOffCardCerts = *num;
}
url = sc_asn1_find_tag(card->ctx, body, bodylen, 0xF3, &urllen);
if (url) {
priv->offCardCertURL = calloc(1,urllen+1);
if (priv->offCardCertURL == NULL)
LOG_FUNC_RETURN(card->ctx, SC_ERROR_OUT_OF_MEMORY);
memcpy(priv->offCardCertURL, url, urllen);
}
}
else {
sc_log(card->ctx, "Problem with History object\n");
goto err;
}
}
sc_log(card->ctx, "History on=%d off=%d URL=%s",
priv->keysWithOnCardCerts, priv->keysWithOffCardCerts,
priv->offCardCertURL ? priv->offCardCertURL:"NONE");
/* now mark what objects are on the card */
for (i=0; i<priv->keysWithOnCardCerts; i++)
priv->obj_cache[PIV_OBJ_RETIRED_X509_1+i].flags &= ~PIV_OBJ_CACHE_NOT_PRESENT;
/*
* If user has gotten copy of the file from the offCardCertsURL,
* we will read in and add the certs to the cache as listed on
* the card. some of the certs may be on the card as well.
*
* Get file name from url. verify that the filename is valid
* The URL ends in a SHA1 string. We will use this as the filename
* in the directory used for the PKCS15 cache
*/
r = 0;
if (priv->offCardCertURL) {
char * fp;
char filename[PATH_MAX];
if (strncmp("http://", priv->offCardCertURL, 7)) {
r = SC_ERROR_INVALID_DATA;
goto err;
}
/* find the last / so we have the filename part */
fp = strrchr(priv->offCardCertURL + 7,'/');
if (fp == NULL) {
r = SC_ERROR_INVALID_DATA;
goto err;
}
fp++;
/* Use the same directory as used for other OpenSC cached items */
r = sc_get_cache_dir(card->ctx, filename, sizeof(filename) - strlen(fp) - 2);
if (r != SC_SUCCESS)
goto err;
#ifdef _WIN32
strcat(filename,"\\");
#else
strcat(filename,"/");
#endif
strcat(filename,fp);
r = piv_read_obj_from_file(card, filename,
&ocfhfbuf, &ocfhflen);
if (r == SC_ERROR_FILE_NOT_FOUND) {
r = 0;
goto err;
}
/*
* Its a seq of seq of a key ref and cert
*/
body = ocfhfbuf;
if (sc_asn1_read_tag(&body, ocfhflen, &cla_out,
&tag_out, &bodylen) != SC_SUCCESS
|| cla_out+tag_out != 0x30) {
sc_log(card->ctx, "DER problem");
r = SC_ERROR_INVALID_ASN1_OBJECT;
goto err;
}
seq = body;
while (bodylen > 0) {
seqtag = seq;
if (sc_asn1_read_tag(&seq, bodylen, &cla_out,
&tag_out, &seqlen) != SC_SUCCESS
|| cla_out+tag_out != 0x30) {
sc_log(card->ctx, "DER problem");
r = SC_ERROR_INVALID_ASN1_OBJECT;
goto err;
}
keyref = sc_asn1_find_tag(card->ctx, seq, seqlen, 0x04, &keyreflen);
if (!keyref || keyreflen != 1 ||
(*keyref < 0x82 || *keyref > 0x95)) {
sc_log(card->ctx, "DER problem");
r = SC_ERROR_INVALID_ASN1_OBJECT;
goto err;
}
cert = keyref + keyreflen;
certlen = seqlen - (cert - seq);
enumtag = PIV_OBJ_RETIRED_X509_1 + *keyref - 0x82;
/* now add the cert like another object */
i2 = put_tag_and_len(0x70,certlen, NULL)
+ put_tag_and_len(0x71, 1, NULL)
+ put_tag_and_len(0xFE, 0, NULL);
certobjlen = put_tag_and_len(0x53, i2, NULL);
certobj = malloc(certobjlen);
if (certobj == NULL) {
r = SC_ERROR_OUT_OF_MEMORY;
goto err;
}
cp = certobj;
put_tag_and_len(0x53, i2, &cp);
put_tag_and_len(0x70,certlen, &cp);
memcpy(cp, cert, certlen);
cp += certlen;
put_tag_and_len(0x71, 1,&cp);
*cp++ = 0x00;
put_tag_and_len(0xFE, 0, &cp);
priv->obj_cache[enumtag].obj_data = certobj;
priv->obj_cache[enumtag].obj_len = certobjlen;
priv->obj_cache[enumtag].flags |= PIV_OBJ_CACHE_VALID;
priv->obj_cache[enumtag].flags &= ~PIV_OBJ_CACHE_NOT_PRESENT;
r = piv_cache_internal_data(card, enumtag);
sc_log(card->ctx, "got internal r=%d",r);
certobj = NULL;
sc_log(card->ctx,
"Added from off card file #%d %p:%"SC_FORMAT_LEN_SIZE_T"u 0x%02X",
enumtag,
priv->obj_cache[enumtag].obj_data,
priv->obj_cache[enumtag].obj_len, *keyref);
bodylen -= (seqlen + seq - seqtag);
seq += seqlen;
}
}
err:
if (ocfhfbuf)
free(ocfhfbuf);
LOG_FUNC_RETURN(card->ctx, r);
}
| 0 | ["CWE-125"] | OpenSC | 8fe377e93b4b56060e5bbfb6f3142ceaeca744fa | 230,416,800,748,063,000,000,000,000,000,000,000,000 | 217 |
fixed out of bounds reads
Thanks to Eric Sesterhenn from X41 D-SEC GmbH
for reporting and suggesting security fixes.
|
int pci_piix3_xen_ide_unplug(DeviceState *dev)
{
PCIIDEState *pci_ide;
DriveInfo *di;
int i;
IDEDevice *idedev;
pci_ide = PCI_IDE(dev);
for (i = 0; i < 4; i++) {
di = drive_get_by_index(IF_IDE, i);
if (di != NULL && !di->media_cd) {
BlockBackend *blk = blk_by_legacy_dinfo(di);
DeviceState *ds = blk_get_attached_dev(blk);
if (ds) {
blk_detach_dev(blk, ds);
}
pci_ide->bus[di->bus].ifs[di->unit].blk = NULL;
if (!(i % 2)) {
idedev = pci_ide->bus[di->bus].master;
} else {
idedev = pci_ide->bus[di->bus].slave;
}
idedev->conf.blk = NULL;
blk_unref(blk);
}
}
qdev_reset_all(DEVICE(dev));
return 0;
}
| 0 | [] | qemu | 6cd387833d05e8ad31829d97e474dc420625aed9 | 50,331,044,897,901,220,000,000,000,000,000,000,000 | 30 |
Fix release_drive on unplugged devices (pci_piix3_xen_ide_unplug)
pci_piix3_xen_ide_unplug should completely unhook the unplugged
IDEDevice from the corresponding BlockBackend, otherwise the next call
to release_drive will try to detach the drive again.
Suggested-by: Kevin Wolf <[email protected]>
Signed-off-by: Stefano Stabellini <[email protected]>
|
static int vmload_interception(struct vcpu_svm *svm)
{
struct vmcb *nested_vmcb;
struct kvm_host_map map;
int ret;
if (nested_svm_check_permissions(svm))
return 1;
ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map);
if (ret) {
if (ret == -EINVAL)
kvm_inject_gp(&svm->vcpu, 0);
return 1;
}
nested_vmcb = map.hva;
ret = kvm_skip_emulated_instruction(&svm->vcpu);
nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
kvm_vcpu_unmap(&svm->vcpu, &map, true);
return ret;
}
| 0 | ["CWE-401"] | linux | d80b64ff297e40c2b6f7d7abc1b3eba70d22a068 | 327,826,894,884,546,540,000,000,000,000,000,000,000 | 25 |
KVM: SVM: Fix potential memory leak in svm_cpu_init()
When the kmalloc of memory for sd->sev_vmcbs fails, we forget to free the page
held by sd->save_area. Also get rid of the var r as '-ENOMEM' is actually
the only possible outcome here.
Reviewed-by: Liran Alon <[email protected]>
Reviewed-by: Vitaly Kuznetsov <[email protected]>
Signed-off-by: Miaohe Lin <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
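The leak is the classic partial-initialization problem; a generic sketch of the usual goto-unwind fix in C (plain malloc/free and hypothetical field names, not the kernel allocators):
```c
#include <stdlib.h>

struct cpu_data {
    void  *save_area;
    void **sev_vmcbs;
};

/* On any failure, everything allocated so far is released before
 * returning, so the earlier allocation (save_area) cannot leak when
 * the later one (sev_vmcbs) fails. */
static int cpu_data_init(struct cpu_data *sd, size_t nr_vmcbs)
{
    sd->save_area = malloc(4096);
    if (sd->save_area == NULL)
        return -1;

    sd->sev_vmcbs = calloc(nr_vmcbs, sizeof(*sd->sev_vmcbs));
    if (sd->sev_vmcbs == NULL)
        goto free_save_area;

    return 0;

free_save_area:
    free(sd->save_area);
    sd->save_area = NULL;
    return -1;
}
```
Each later failure path unwinds exactly the allocations that already succeeded.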
|
static void pcnet_csr_writew(PCNetState *s, uint32_t rap, uint32_t new_value)
{
uint16_t val = new_value;
#ifdef PCNET_DEBUG_CSR
printf("pcnet_csr_writew rap=%d val=0x%04x\n", rap, val);
#endif
switch (rap) {
case 0:
s->csr[0] &= ~(val & 0x7f00); /* Clear any interrupt flags */
s->csr[0] = (s->csr[0] & ~0x0040) | (val & 0x0048);
val = (val & 0x007f) | (s->csr[0] & 0x7f00);
/* IFF STOP, STRT and INIT are set, clear STRT and INIT */
if ((val&7) == 7)
val &= ~3;
if (!CSR_STOP(s) && (val & 4))
pcnet_stop(s);
if (!CSR_INIT(s) && (val & 1))
pcnet_init(s);
if (!CSR_STRT(s) && (val & 2))
pcnet_start(s);
if (CSR_TDMD(s))
pcnet_transmit(s);
return;
case 1:
case 2:
case 8:
case 9:
case 10:
case 11:
case 12:
case 13:
case 14:
case 15:
case 18: /* CRBAL */
case 19: /* CRBAU */
case 20: /* CXBAL */
case 21: /* CXBAU */
case 22: /* NRBAU */
case 23: /* NRBAU */
case 24:
case 25:
case 26:
case 27:
case 28:
case 29:
case 30:
case 31:
case 32:
case 33:
case 34:
case 35:
case 36:
case 37:
case 38:
case 39:
case 40: /* CRBC */
case 41:
case 42: /* CXBC */
case 43:
case 44:
case 45:
case 46: /* POLL */
case 47: /* POLLINT */
case 72:
case 74:
break;
case 76: /* RCVRL */
case 78: /* XMTRL */
val = (val > 0) ? val : 512;
break;
case 112:
if (CSR_STOP(s) || CSR_SPND(s))
break;
return;
case 3:
break;
case 4:
s->csr[4] &= ~(val & 0x026a);
val &= ~0x026a; val |= s->csr[4] & 0x026a;
break;
case 5:
s->csr[5] &= ~(val & 0x0a90);
val &= ~0x0a90; val |= s->csr[5] & 0x0a90;
break;
case 16:
pcnet_csr_writew(s,1,val);
return;
case 17:
pcnet_csr_writew(s,2,val);
return;
case 58:
pcnet_bcr_writew(s,BCR_SWS,val);
break;
default:
return;
}
s->csr[rap] = val;
}
| 0 | [] | qemu | 34e29ce754c02bb6b3bdd244fbb85033460feaff | 243,585,336,363,805,140,000,000,000,000,000,000,000 | 106 |
net: pcnet: check rx/tx descriptor ring length
The AMD PC-Net II emulator has a set of control and status (CSR)
registers. Of these, CSR76 and CSR78 hold receive and transmit
descriptor ring length respectively. This ring length could range
from 1 to 65535. Setting ring length to zero leads to an infinite
loop in pcnet_rdra_addr() or pcnet_transmit(). Add a check to avoid it.
Reported-by: Li Qiang <[email protected]>
Signed-off-by: Prasad J Pandit <[email protected]>
Signed-off-by: Jason Wang <[email protected]>
|
bool DataWriterImpl::deadline_timer_reschedule()
{
assert(qos_.deadline().period != c_TimeInfinite);
std::unique_lock<RecursiveTimedMutex> lock(writer_->getMutex());
steady_clock::time_point next_deadline_us;
if (!history_.get_next_deadline(timer_owner_, next_deadline_us))
{
logError(PUBLISHER, "Could not get the next deadline from the history");
return false;
}
auto interval_ms = duration_cast<milliseconds>(next_deadline_us - steady_clock::now());
deadline_timer_->update_interval_millisec(static_cast<double>(interval_ms.count()));
return true;
}
| 0 | ["CWE-284"] | Fast-DDS | d2aeab37eb4fad4376b68ea4dfbbf285a2926384 | 325,324,591,235,962,500,000,000,000,000,000,000,000 | 17 |
check remote permissions (#1387)
* Refs 5346. Blackbox test
Signed-off-by: Iker Luengo <[email protected]>
* Refs 5346. one-way string compare
Signed-off-by: Iker Luengo <[email protected]>
* Refs 5346. Do not add partition separator on last partition
Signed-off-by: Iker Luengo <[email protected]>
* Refs 5346. Uncrustify
Signed-off-by: Iker Luengo <[email protected]>
* Refs 5346. Uncrustify
Signed-off-by: Iker Luengo <[email protected]>
* Refs 3680. Access control unit testing
It only covers Partition and Topic permissions
Signed-off-by: Iker Luengo <[email protected]>
* Refs #3680. Fix partition check on Permissions plugin.
Signed-off-by: Iker Luengo <[email protected]>
* Refs 3680. Uncrustify
Signed-off-by: Iker Luengo <[email protected]>
* Refs 3680. Fix tests on mac
Signed-off-by: Iker Luengo <[email protected]>
* Refs 3680. Fix windows tests
Signed-off-by: Iker Luengo <[email protected]>
* Refs 3680. Avoid memory leak on test
Signed-off-by: Iker Luengo <[email protected]>
* Refs 3680. Proxy data mocks should not return temporary objects
Signed-off-by: Iker Luengo <[email protected]>
* refs 3680. uncrustify
Signed-off-by: Iker Luengo <[email protected]>
Co-authored-by: Miguel Company <[email protected]>
|
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
DelogoContext *s = inlink->dst->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
AVFrame *out;
int hsub0 = desc->log2_chroma_w;
int vsub0 = desc->log2_chroma_h;
int direct = 0;
int plane;
AVRational sar;
if (av_frame_is_writable(in)) {
direct = 1;
out = in;
} else {
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
av_frame_free(&in);
return AVERROR(ENOMEM);
}
av_frame_copy_props(out, in);
}
sar = in->sample_aspect_ratio;
/* Assume square pixels if SAR is unknown */
if (!sar.num)
sar.num = sar.den = 1;
for (plane = 0; plane < 4 && in->data[plane]; plane++) {
int hsub = plane == 1 || plane == 2 ? hsub0 : 0;
int vsub = plane == 1 || plane == 2 ? vsub0 : 0;
apply_delogo(out->data[plane], out->linesize[plane],
in ->data[plane], in ->linesize[plane],
FF_CEIL_RSHIFT(inlink->w, hsub),
FF_CEIL_RSHIFT(inlink->h, vsub),
sar, s->x>>hsub, s->y>>vsub,
/* Up and left borders were rounded down, inject lost bits
* into width and height to avoid error accumulation */
FF_CEIL_RSHIFT(s->w + (s->x & ((1<<hsub)-1)), hsub),
FF_CEIL_RSHIFT(s->h + (s->y & ((1<<vsub)-1)), vsub),
s->band>>FFMIN(hsub, vsub),
s->show, direct);
}
if (!direct)
av_frame_free(&in);
return ff_filter_frame(outlink, out);
}
| 1 | ["CWE-119", "CWE-787"] | FFmpeg | e43a0a232dbf6d3c161823c2e07c52e76227a1bc | 263,707,311,931,965,460,000,000,000,000,000,000,000 | 52 |
avfilter: fix plane validity checks
Fixes out of array accesses
Signed-off-by: Michael Niedermayer <[email protected]>
|
static l_noret undefgoto (LexState *ls, Labeldesc *gt) {
const char *msg;
if (eqstr(gt->name, luaS_newliteral(ls->L, "break"))) {
msg = "break outside loop at line %d";
msg = luaO_pushfstring(ls->L, msg, gt->line);
}
else {
msg = "no visible label '%s' for <goto> at line %d";
msg = luaO_pushfstring(ls->L, msg, getstr(gt->name), gt->line);
}
luaK_semerror(ls, msg);
}
| 0 | ["CWE-125"] | lua | 1f3c6f4534c6411313361697d98d1145a1f030fa | 271,943,950,654,015,050,000,000,000,000,000,000,000 | 12 |
Bug: Lua can generate wrong code when _ENV is <const>
|
nfqnl_rcv_nl_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct netlink_notify *n = ptr;
struct nfnl_queue_net *q = nfnl_queue_pernet(n->net);
if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) {
int i;
/* destroy all instances for this portid */
spin_lock(&q->instances_lock);
for (i = 0; i < INSTANCE_BUCKETS; i++) {
struct hlist_node *t2;
struct nfqnl_instance *inst;
struct hlist_head *head = &q->instance_table[i];
hlist_for_each_entry_safe(inst, t2, head, hlist) {
if (n->portid == inst->peer_portid)
__instance_destroy(inst);
}
}
spin_unlock(&q->instances_lock);
}
return NOTIFY_DONE;
}
| 0 | ["CWE-416"] | net | 36d5fe6a000790f56039afe26834265db0a3ad4c | 324,992,989,332,072,400,000,000,000,000,000,000,000 | 25 |
core, nfqueue, openvswitch: Orphan frags in skb_zerocopy and handle errors
skb_zerocopy can copy elements of the frags array between skbs, but it doesn't
orphan them. Also, it doesn't handle errors, so this patch takes care of that
as well, and modifies the callers accordingly. skb_tx_error() is also added to
the callers so they will signal the failed delivery towards the creator of the
skb.
Signed-off-by: Zoltan Kiss <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
static bool cgroupfs_mount_cgroup(void *hdata, const char *root, int type)
{
size_t bufsz = strlen(root) + sizeof("/sys/fs/cgroup");
char *path = NULL;
char **parts = NULL;
char *dirname = NULL;
char *abs_path = NULL;
char *abs_path2 = NULL;
struct cgfs_data *cgfs_d;
struct cgroup_process_info *info, *base_info;
int r, saved_errno = 0;
cgfs_d = hdata;
if (!cgfs_d)
return false;
base_info = cgfs_d->info;
/* If we get passed the _NOSPEC types, we default to _MIXED, since we don't
* have access to the lxc_conf object at this point. It really should be up
* to the caller to fix this, but this doesn't really hurt.
*/
if (type == LXC_AUTO_CGROUP_FULL_NOSPEC)
type = LXC_AUTO_CGROUP_FULL_MIXED;
else if (type == LXC_AUTO_CGROUP_NOSPEC)
type = LXC_AUTO_CGROUP_MIXED;
if (type < LXC_AUTO_CGROUP_RO || type > LXC_AUTO_CGROUP_FULL_MIXED) {
ERROR("could not mount cgroups into container: invalid type specified internally");
errno = EINVAL;
return false;
}
path = calloc(1, bufsz);
if (!path)
return false;
snprintf(path, bufsz, "%s/sys/fs/cgroup", root);
r = mount("cgroup_root", path, "tmpfs", MS_NOSUID|MS_NODEV|MS_NOEXEC|MS_RELATIME, "size=10240k,mode=755");
if (r < 0) {
SYSERROR("could not mount tmpfs to /sys/fs/cgroup in the container");
return false;
}
/* now mount all the hierarchies we care about */
for (info = base_info; info; info = info->next) {
size_t subsystem_count, i;
struct cgroup_mount_point *mp = info->designated_mount_point;
if (!mp)
mp = lxc_cgroup_find_mount_point(info->hierarchy, info->cgroup_path, true);
if (!mp) {
SYSERROR("could not find original mount point for cgroup hierarchy while trying to mount cgroup filesystem");
goto out_error;
}
subsystem_count = lxc_array_len((void **)info->hierarchy->subsystems);
parts = calloc(subsystem_count + 1, sizeof(char *));
if (!parts)
goto out_error;
for (i = 0; i < subsystem_count; i++) {
if (!strncmp(info->hierarchy->subsystems[i], "name=", 5))
parts[i] = info->hierarchy->subsystems[i] + 5;
else
parts[i] = info->hierarchy->subsystems[i];
}
dirname = lxc_string_join(",", (const char **)parts, false);
if (!dirname)
goto out_error;
/* create subsystem directory */
abs_path = lxc_append_paths(path, dirname);
if (!abs_path)
goto out_error;
r = mkdir_p(abs_path, 0755);
if (r < 0 && errno != EEXIST) {
SYSERROR("could not create cgroup subsystem directory /sys/fs/cgroup/%s", dirname);
goto out_error;
}
abs_path2 = lxc_append_paths(abs_path, info->cgroup_path);
if (!abs_path2)
goto out_error;
if (type == LXC_AUTO_CGROUP_FULL_RO || type == LXC_AUTO_CGROUP_FULL_RW || type == LXC_AUTO_CGROUP_FULL_MIXED) {
/* bind-mount the cgroup entire filesystem there */
if (strcmp(mp->mount_prefix, "/") != 0) {
/* FIXME: maybe we should just try to remount the entire hierarchy
* with a regular mount command? may that works? */
ERROR("could not automatically mount cgroup-full to /sys/fs/cgroup/%s: host has no mount point for this cgroup filesystem that has access to the root cgroup", dirname);
goto out_error;
}
r = mount(mp->mount_point, abs_path, "none", MS_BIND, 0);
if (r < 0) {
SYSERROR("error bind-mounting %s to %s", mp->mount_point, abs_path);
goto out_error;
}
/* main cgroup path should be read-only */
if (type == LXC_AUTO_CGROUP_FULL_RO || type == LXC_AUTO_CGROUP_FULL_MIXED) {
r = mount(NULL, abs_path, NULL, MS_REMOUNT|MS_BIND|MS_RDONLY, NULL);
if (r < 0) {
SYSERROR("error re-mounting %s readonly", abs_path);
goto out_error;
}
}
/* own cgroup should be read-write */
if (type == LXC_AUTO_CGROUP_FULL_MIXED) {
r = mount(abs_path2, abs_path2, NULL, MS_BIND, NULL);
if (r < 0) {
SYSERROR("error bind-mounting %s onto itself", abs_path2);
goto out_error;
}
r = mount(NULL, abs_path2, NULL, MS_REMOUNT|MS_BIND, NULL);
if (r < 0) {
SYSERROR("error re-mounting %s readwrite", abs_path2);
goto out_error;
}
}
} else {
/* create path for container's cgroup */
r = mkdir_p(abs_path2, 0755);
if (r < 0 && errno != EEXIST) {
SYSERROR("could not create cgroup directory /sys/fs/cgroup/%s%s", dirname, info->cgroup_path);
goto out_error;
}
/* for read-only and mixed cases, we have to bind-mount the tmpfs directory
* that points to the hierarchy itself (i.e. /sys/fs/cgroup/cpu etc.) onto
* itself and then bind-mount it read-only, since we keep the tmpfs itself
* read-write (see comment below)
*/
if (type == LXC_AUTO_CGROUP_MIXED || type == LXC_AUTO_CGROUP_RO) {
r = mount(abs_path, abs_path, NULL, MS_BIND, NULL);
if (r < 0) {
SYSERROR("error bind-mounting %s onto itself", abs_path);
goto out_error;
}
r = mount(NULL, abs_path, NULL, MS_REMOUNT|MS_BIND|MS_RDONLY, NULL);
if (r < 0) {
SYSERROR("error re-mounting %s readonly", abs_path);
goto out_error;
}
}
free(abs_path);
abs_path = NULL;
/* bind-mount container's cgroup to that directory */
abs_path = cgroup_to_absolute_path(mp, info->cgroup_path, NULL);
if (!abs_path)
goto out_error;
r = mount(abs_path, abs_path2, "none", MS_BIND, 0);
if (r < 0) {
SYSERROR("error bind-mounting %s to %s", abs_path, abs_path2);
goto out_error;
}
if (type == LXC_AUTO_CGROUP_RO) {
r = mount(NULL, abs_path2, NULL, MS_REMOUNT|MS_BIND|MS_RDONLY, NULL);
if (r < 0) {
SYSERROR("error re-mounting %s readonly", abs_path2);
goto out_error;
}
}
}
free(abs_path);
free(abs_path2);
abs_path = NULL;
abs_path2 = NULL;
/* add symlinks for every single subsystem */
if (subsystem_count > 1) {
for (i = 0; i < subsystem_count; i++) {
abs_path = lxc_append_paths(path, parts[i]);
if (!abs_path)
goto out_error;
r = symlink(dirname, abs_path);
if (r < 0)
WARN("could not create symlink %s -> %s in /sys/fs/cgroup of container", parts[i], dirname);
free(abs_path);
abs_path = NULL;
}
}
free(dirname);
free(parts);
dirname = NULL;
parts = NULL;
}
/* We used to remount the entire tmpfs readonly if any :ro or
* :mixed mode was specified. However, Ubuntu's mountall has the
* unfortunate behavior to block bootup if /sys/fs/cgroup is
* mounted read-only and cannot be remounted read-write.
* (mountall reads /lib/init/fstab and tries to (re-)mount all of
* these if they are not already mounted with the right options;
* it contains an entry for /sys/fs/cgroup. In case it can't do
* that, it prompts for the user to either manually fix it or
* boot anyway. But without user input, booting of the container
* hangs.)
*
* Instead of remounting the entire tmpfs readonly, we only
* remount the paths readonly that are part of the cgroup
* hierarchy.
*/
free(path);
return true;
out_error:
saved_errno = errno;
free(path);
free(dirname);
free(parts);
free(abs_path);
free(abs_path2);
errno = saved_errno;
return false;
}
| 1 |
[
"CWE-59",
"CWE-61"
] |
lxc
|
592fd47a6245508b79fe6ac819fe6d3b2c1289be
| 4,054,613,066,748,944,300,000,000,000,000,000,000 | 217 |
CVE-2015-1335: Protect container mounts against symlinks
When a container starts up, lxc sets up the container's inital fstree
by doing a bunch of mounting, guided by the container configuration
file. The container config is owned by the admin or user on the host,
so we do not try to guard against bad entries. However, since the
mount target is in the container, it's possible that the container admin
could divert the mount with symbolic links. This could bypass proper
container startup (i.e. confinement of a root-owned container by the
restrictive apparmor policy, by diverting the required write to
/proc/self/attr/current), or bypass the (path-based) apparmor policy
by diverting, say, /proc to /mnt in the container.
To prevent this,
1. do not allow mounts to paths containing symbolic links
2. do not allow bind mounts from relative paths containing symbolic
links.
Details:
Define safe_mount which ensures that the container has not inserted any
symbolic links into any mount targets for mounts to be done during
container setup.
The host's mount path may contain symbolic links. As it is under the
control of the administrator, that's ok. So safe_mount begins the check
for symbolic links after the rootfs->mount, by opening that directory.
It opens each directory along the path using openat() relative to the
parent directory using O_NOFOLLOW. When the target is reached, it
mounts onto /proc/self/fd/<targetfd>.
Use safe_mount() in mount_entry(), when mounting container proc,
and when needed. In particular, safe_mount() need not be used in
any case where:
1. the mount is done in the container's namespace
2. the mount is for the container's rootfs
3. the mount is relative to a tmpfs or proc/sysfs which we have
just safe_mount()ed ourselves
Since we were using proc/net as a temporary placeholder for /proc/sys/net
during container startup, and proc/net is a symbolic link, use proc/tty
instead.
Update the lxc.container.conf manpage with details about the new
restrictions.
Finally, add a testcase to test some symbolic link possibilities.
Reported-by: Roman Fiedler
Signed-off-by: Serge Hallyn <[email protected]>
Acked-by: Stéphane Graber <[email protected]>
|
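The safe_mount() scheme described in the message above can be illustrated with a small standalone sketch: resolve the target one component at a time with openat(O_NOFOLLOW) so a planted symlink makes the open fail, then perform the mount through /proc/self/fd/<fd>. This is only an approximation of the idea, not the lxc implementation; the function names here are invented.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Open "path" below dirfd one component at a time, refusing to follow
 * symlinks at every step.  Returns an fd for the final component, or -1. */
static int open_without_symlinks(int dirfd, char *path)
{
    int fd = dup(dirfd);
    char *saveptr = NULL;
    for (char *c = strtok_r(path, "/", &saveptr); c && fd >= 0;
         c = strtok_r(NULL, "/", &saveptr)) {
        int next = openat(fd, c, O_RDONLY | O_CLOEXEC | O_NOFOLLOW);
        close(fd);
        fd = next;              /* -1 (ELOOP) if the component is a symlink */
    }
    return fd;
}

int main(void)
{
    char target[] = "proc/tty";             /* path inside the container rootfs */
    int rootfd = open("/", O_RDONLY | O_CLOEXEC);
    int fd = open_without_symlinks(rootfd, target);
    if (fd >= 0) {
        char mnt[64];
        snprintf(mnt, sizeof(mnt), "/proc/self/fd/%d", fd);
        printf("mount onto %s instead of the raw path\n", mnt);
        close(fd);
    }
    close(rootfd);
    return 0;
}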
static void xrun(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
trace_xrun(substream);
if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE)
snd_pcm_gettime(runtime, (struct timespec *)&runtime->status->tstamp);
snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
if (xrun_debug(substream, XRUN_DEBUG_BASIC)) {
char name[16];
snd_pcm_debug_name(substream, name, sizeof(name));
pcm_warn(substream->pcm, "XRUN: %s\n", name);
dump_stack_on_xrun(substream);
}
}
| 0 |
[
"CWE-416",
"CWE-362"
] |
linux
|
3aa02cb664c5fb1042958c8d1aa8c35055a2ebc4
| 77,870,993,652,957,830,000,000,000,000,000,000,000 | 15 |
ALSA: pcm : Call kill_fasync() in stream lock
Currently kill_fasync() is called outside the stream lock in
snd_pcm_period_elapsed(). This is potentially racy, since the stream
may get released even while the irq handler is running. Although
snd_pcm_release_substream() calls snd_pcm_drop(), this doesn't
guarantee that the irq handler finishes, thus the kill_fasync() call
outside the stream spin lock may be invoked after the substream is
detached, as recently reported by KASAN.
As a quick workaround, move kill_fasync() call inside the stream
lock. The fasync is rarely used interface, so this shouldn't have a
big impact from the performance POV.
Ideally, we should implement some sync mechanism for the proper finish
of stream and irq handler. But this oneliner should suffice for most
cases, so far.
Reported-by: Baozeng Ding <[email protected]>
Signed-off-by: Takashi Iwai <[email protected]>
|
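As a toy model of the fix described above, the notification is delivered while holding the same lock that teardown takes, so the notifier can never run against state that release has already torn down. This sketch uses a pthread mutex purely for illustration; the kernel code uses the PCM stream lock and kill_fasync().

#include <pthread.h>
#include <stdio.h>

struct stream {
    pthread_mutex_t lock;
    void (*notify)(void);        /* stands in for the fasync notification */
};

static void xrun_event(struct stream *s)
{
    pthread_mutex_lock(&s->lock);
    if (s->notify)               /* checked and called under the lock */
        s->notify();
    pthread_mutex_unlock(&s->lock);
}

static void release_stream(struct stream *s)
{
    pthread_mutex_lock(&s->lock);
    s->notify = NULL;            /* after this, no notifier can fire */
    pthread_mutex_unlock(&s->lock);
}

static void beep(void) { puts("xrun notified"); }

int main(void)
{
    struct stream s;
    pthread_mutex_init(&s.lock, NULL);
    s.notify = beep;
    xrun_event(&s);
    release_stream(&s);
    xrun_event(&s);              /* safe: notifier already cleared */
    pthread_mutex_destroy(&s.lock);
    return 0;
}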
static int ssl23_get_server_hello(SSL *s)
{
char buf[8];
unsigned char *p;
int i;
int n;
n=ssl23_read_bytes(s,7);
if (n != 7) return(n);
p=s->packet;
memcpy(buf,p,n);
if ((p[0] & 0x80) && (p[2] == SSL2_MT_SERVER_HELLO) &&
(p[5] == 0x00) && (p[6] == 0x02))
{
#ifdef OPENSSL_NO_SSL2
SSLerr(SSL_F_SSL23_GET_SERVER_HELLO,SSL_R_UNSUPPORTED_PROTOCOL);
goto err;
#else
/* we are talking sslv2 */
/* we need to clean up the SSLv3 setup and put in the
* sslv2 stuff. */
int ch_len;
if (s->options & SSL_OP_NO_SSLv2)
{
SSLerr(SSL_F_SSL23_GET_SERVER_HELLO,SSL_R_UNSUPPORTED_PROTOCOL);
goto err;
}
if (s->s2 == NULL)
{
if (!ssl2_new(s))
goto err;
}
else
ssl2_clear(s);
if (s->options & SSL_OP_NETSCAPE_CHALLENGE_BUG)
ch_len=SSL2_CHALLENGE_LENGTH;
else
ch_len=SSL2_MAX_CHALLENGE_LENGTH;
/* write out sslv2 challenge */
i=(SSL3_RANDOM_SIZE < ch_len)
?SSL3_RANDOM_SIZE:ch_len;
s->s2->challenge_length=i;
memcpy(s->s2->challenge,
&(s->s3->client_random[SSL3_RANDOM_SIZE-i]),i);
if (s->s3 != NULL) ssl3_free(s);
if (!BUF_MEM_grow_clean(s->init_buf,
SSL2_MAX_RECORD_LENGTH_3_BYTE_HEADER))
{
SSLerr(SSL_F_SSL23_GET_SERVER_HELLO,ERR_R_BUF_LIB);
goto err;
}
s->state=SSL2_ST_GET_SERVER_HELLO_A;
if (!(s->client_version == SSL2_VERSION))
/* use special padding (SSL 3.0 draft/RFC 2246, App. E.2) */
s->s2->ssl2_rollback=1;
/* setup the 7 bytes we have read so we get them from
* the sslv2 buffer */
s->rstate=SSL_ST_READ_HEADER;
s->packet_length=n;
s->packet= &(s->s2->rbuf[0]);
memcpy(s->packet,buf,n);
s->s2->rbuf_left=n;
s->s2->rbuf_offs=0;
/* we have already written one */
s->s2->write_sequence=1;
s->method=SSLv2_client_method();
s->handshake_func=s->method->ssl_connect;
#endif
}
else if (p[1] == SSL3_VERSION_MAJOR &&
((p[2] == SSL3_VERSION_MINOR) ||
(p[2] == TLS1_VERSION_MINOR)) &&
((p[0] == SSL3_RT_HANDSHAKE && p[5] == SSL3_MT_SERVER_HELLO) ||
(p[0] == SSL3_RT_ALERT && p[3] == 0 && p[4] == 2)))
{
/* we have sslv3 or tls1 (server hello or alert) */
if ((p[2] == SSL3_VERSION_MINOR) &&
!(s->options & SSL_OP_NO_SSLv3))
{
#ifdef OPENSSL_FIPS
if(FIPS_mode())
{
SSLerr(SSL_F_SSL23_GET_SERVER_HELLO,
SSL_R_ONLY_TLS_ALLOWED_IN_FIPS_MODE);
goto err;
}
#endif
s->version=SSL3_VERSION;
s->method=SSLv3_client_method();
}
else if ((p[2] == TLS1_VERSION_MINOR) &&
!(s->options & SSL_OP_NO_TLSv1))
{
s->version=TLS1_VERSION;
s->method=TLSv1_client_method();
}
else
{
SSLerr(SSL_F_SSL23_GET_SERVER_HELLO,SSL_R_UNSUPPORTED_PROTOCOL);
goto err;
}
/* ensure that TLS_MAX_VERSION is up-to-date */
OPENSSL_assert(s->version <= TLS_MAX_VERSION);
if (p[0] == SSL3_RT_ALERT && p[5] != SSL3_AL_WARNING)
{
/* fatal alert */
void (*cb)(const SSL *ssl,int type,int val)=NULL;
int j;
if (s->info_callback != NULL)
cb=s->info_callback;
else if (s->ctx->info_callback != NULL)
cb=s->ctx->info_callback;
i=p[5];
if (cb != NULL)
{
j=(i<<8)|p[6];
cb(s,SSL_CB_READ_ALERT,j);
}
if (s->msg_callback)
s->msg_callback(0, s->version, SSL3_RT_ALERT, p+5, 2, s, s->msg_callback_arg);
s->rwstate=SSL_NOTHING;
SSLerr(SSL_F_SSL23_GET_SERVER_HELLO,SSL_AD_REASON_OFFSET+p[6]);
goto err;
}
if (!ssl_init_wbio_buffer(s,1)) goto err;
/* we are in this state */
s->state=SSL3_ST_CR_SRVR_HELLO_A;
/* put the 7 bytes we have read into the input buffer
* for SSLv3 */
s->rstate=SSL_ST_READ_HEADER;
s->packet_length=n;
if (s->s3->rbuf.buf == NULL)
if (!ssl3_setup_buffers(s))
goto err;
s->packet= &(s->s3->rbuf.buf[0]);
memcpy(s->packet,buf,n);
s->s3->rbuf.left=n;
s->s3->rbuf.offset=0;
s->handshake_func=s->method->ssl_connect;
}
else
{
SSLerr(SSL_F_SSL23_GET_SERVER_HELLO,SSL_R_UNKNOWN_PROTOCOL);
goto err;
}
s->init_num=0;
/* Since, if we are sending a ssl23 client hello, we are not
* reusing a session-id */
if (!ssl_get_new_session(s,0))
goto err;
return(SSL_connect(s));
err:
return(-1);
}
| 1 |
[
"CWE-310"
] |
openssl
|
cd332a07503bd9771595de87e768179f81715704
| 238,530,017,078,438,040,000,000,000,000,000,000,000 | 180 |
Fix no-ssl3 configuration option
CVE-2014-3568
Reviewed-by: Emilia Kasper <[email protected]>
Reviewed-by: Rich Salz <[email protected]>
|
smtp_quit (CamelSmtpTransport *transport,
CamelStreamBuffer *istream,
CamelStream *ostream,
GCancellable *cancellable,
GError **error)
{
/* we are going to reset the smtp server (just to be nice) */
gchar *cmdbuf, *respbuf = NULL;
cmdbuf = g_strdup ("QUIT\r\n");
d (fprintf (stderr, "[SMTP] sending: %s", cmdbuf));
if (camel_stream_write_string (ostream, cmdbuf, cancellable, error) == -1) {
g_free (cmdbuf);
g_prefix_error (error, _("QUIT command failed: "));
return FALSE;
}
g_free (cmdbuf);
do {
/* Check for "221" */
g_free (respbuf);
respbuf = camel_stream_buffer_read_line (istream, cancellable, error);
d (fprintf (stderr, "[SMTP] received: %s\n", respbuf ? respbuf : "(null)"));
if (respbuf == NULL) {
g_prefix_error (error, _("QUIT command failed: "));
transport->connected = FALSE;
return FALSE;
}
if (strncmp (respbuf, "221", 3) != 0) {
smtp_set_error (transport, istream, respbuf, cancellable, error);
g_prefix_error (error, _("QUIT command failed: "));
g_free (respbuf);
return FALSE;
}
} while (*(respbuf+3) == '-'); /* if we got "221-" then loop again */
g_free (respbuf);
return TRUE;
}
| 0 |
[
"CWE-74"
] |
evolution-data-server
|
ba82be72cfd427b5d72ff21f929b3a6d8529c4df
| 55,101,301,076,453,420,000,000,000,000,000,000,000 | 43 |
I#226 - CVE-2020-14928: Response Injection via STARTTLS in SMTP and POP3
Closes https://gitlab.gnome.org/GNOME/evolution-data-server/-/issues/226
|
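The response-injection issue referenced above hinges on bytes that a server (or an attacker in the middle) sends before TLS is negotiated but that are still sitting in the plaintext read buffer afterwards. A hedged, generic sketch of the usual mitigation, discarding any buffered plaintext once STARTTLS has been accepted (toy buffer type, not CamelStreamBuffer):

#include <stdio.h>
#include <string.h>

/* Toy input buffer standing in for the SMTP read stream. */
struct inbuf {
    char data[256];
    size_t len;
};

/* Once STARTTLS is accepted, anything still buffered from the plaintext
 * phase is untrusted and must not be parsed as a TLS-protected response. */
static void discard_buffered_plaintext(struct inbuf *in)
{
    if (in->len)
        fprintf(stderr, "dropping %zu pre-TLS bytes\n", in->len);
    in->len = 0;
}

int main(void)
{
    struct inbuf in = { "250 injected OK\r\n", 0 };
    in.len = strlen(in.data);
    /* ... "STARTTLS" sent and its "220" reply consumed ... */
    discard_buffered_plaintext(&in);   /* then hand the raw socket to TLS */
    return 0;
}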
hb_buffer_duplicate_out_buffer (HB_Buffer buffer)
{
if (!buffer->alt_string)
buffer->alt_string = malloc (buffer->allocated * sizeof (buffer->alt_string[0]));
buffer->out_string = buffer->alt_string;
memcpy (buffer->out_string, buffer->in_string, buffer->out_length * sizeof (buffer->out_string[0]));
buffer->separate_out = TRUE;
return HB_Err_Ok;
}
| 0 |
[] |
pango
|
336bb3201096bdd0494d29926dd44e8cca8bed26
| 158,877,504,577,036,580,000,000,000,000,000,000,000 | 11 |
[HB] Remove all references to the old code!
|
zbegintransparencymaskgroup(i_ctx_t *i_ctx_p)
{
os_ptr op = osp;
os_ptr dop = op - 4;
gs_transparency_mask_params_t params;
ref *pparam;
gs_rect bbox;
int code;
static const char *const subtype_names[] = {
GS_TRANSPARENCY_MASK_SUBTYPE_NAMES, 0
};
check_type(*dop, t_dictionary);
check_dict_read(*dop);
if (dict_find_string(dop, "Subtype", &pparam) <= 0)
return_error(gs_error_rangecheck);
if ((code = enum_param(imemory, pparam, subtype_names)) < 0)
return code;
gs_trans_mask_params_init(&params, code);
params.replacing = true;
if ((code = dict_floats_param(imemory, dop, "Background",
cs_num_components(gs_currentcolorspace(i_ctx_p->pgs)),
params.Background, NULL)) < 0)
return code;
else if (code > 0)
params.Background_components = code;
if ((code = dict_floats_param(imemory, dop, "GrayBackground",
1, &params.GrayBackground, NULL)) < 0)
return code;
if (dict_find_string(dop, "TransferFunction", &pparam) > 0) {
gs_function_t *pfn = ref_function(pparam);
if (pfn == 0 || pfn->params.m != 1 || pfn->params.n != 1)
return_error(gs_error_rangecheck);
params.TransferFunction = tf_using_function;
params.TransferFunction_data = pfn;
}
code = rect_param(&bbox, op);
if (code < 0)
return code;
check_type(op[-5], t_boolean);
/* Is the colorspace set for this mask ? */
if (op[-5].value.boolval) {
params.ColorSpace = gs_currentcolorspace(igs);
/* Lets make sure that it is not an ICC color space that came from
a PS CIE color space or a PS color space. These are 1-way color
spaces and cannot be used for group color spaces */
if (gs_color_space_is_PSCIE(params.ColorSpace))
params.ColorSpace = NULL;
else if (gs_color_space_is_ICC(params.ColorSpace) &&
params.ColorSpace->cmm_icc_profile_data != NULL &&
params.ColorSpace->cmm_icc_profile_data->profile_handle != NULL) {
if (gscms_is_input(params.ColorSpace->cmm_icc_profile_data->profile_handle,
params.ColorSpace->cmm_icc_profile_data->memory))
params.ColorSpace = NULL;
}
} else {
params.ColorSpace = NULL;
}
code = gs_begin_transparency_mask(igs, &params, &bbox, false);
if (code < 0)
return code;
pop(6);
return code;
}
| 0 |
[
"CWE-704"
] |
ghostpdl
|
548bb434e81dadcc9f71adf891a3ef5bea8e2b4e
| 172,687,784,961,006,200,000,000,000,000,000,000,000 | 67 |
PS interpreter - add some type checking
These were 'probably' safe anyway, since they mostly treat the objects
as integers without checking, which at least can't result in a crash.
Nevertheless, we ought to check.
The return from comparedictkeys could be wrong if one of the keys had
a value which was not an array: it could incorrectly decide the two
were in fact the same.
|
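The type-checking theme of the message above can be boiled down to a small sketch: a tagged value must have its tag verified before its payload is read as an integer, otherwise a non-integer object (say, an array) is silently misread. This uses an invented tagged struct, not Ghostscript's ref type.

#include <stdio.h>

typedef enum { T_INT, T_ARRAY } obj_type;
typedef struct { obj_type type; long ival; } obj;

/* Return the integer value only after checking the tag; callers that skip
 * this check would treat arbitrary objects as integers. */
static int obj_as_int(const obj *o, long *out)
{
    if (o->type != T_INT)
        return -1;               /* typecheck error */
    *out = o->ival;
    return 0;
}

int main(void)
{
    obj arr = { T_ARRAY, 0 };
    long v;
    if (obj_as_int(&arr, &v) < 0)
        puts("typecheck error");
    return 0;
}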
pixOctreeQuantizePixels(PIX *pixs,
CQCELL ***cqcaa,
l_int32 ditherflag)
{
l_uint8 *bufu8r, *bufu8g, *bufu8b;
l_int32 rval, gval, bval;
l_int32 octindex, index;
l_int32 val1, val2, val3, dif;
l_int32 w, h, wpls, wpld, i, j, success;
l_int32 rc, gc, bc;
l_int32 *buf1r, *buf1g, *buf1b, *buf2r, *buf2g, *buf2b;
l_uint32 *rtab, *gtab, *btab;
l_uint32 *datas, *datad, *lines, *lined;
PIX *pixd;
PROCNAME("pixOctreeQuantizePixels");
if (!pixs)
return (PIX *)ERROR_PTR("pixs not defined", procName, NULL);
if (pixGetDepth(pixs) != 32)
return (PIX *)ERROR_PTR("pixs must be 32 bpp", procName, NULL);
if (!cqcaa)
return (PIX *)ERROR_PTR("cqcaa not defined", procName, NULL);
/* Make output 8 bpp palette image */
pixGetDimensions(pixs, &w, &h, NULL);
datas = pixGetData(pixs);
wpls = pixGetWpl(pixs);
if ((pixd = pixCreate(w, h, 8)) == NULL)
return (PIX *)ERROR_PTR("pixd not made", procName, NULL);
pixCopyResolution(pixd, pixs);
pixCopyInputFormat(pixd, pixs);
datad = pixGetData(pixd);
wpld = pixGetWpl(pixd);
/* Make the canonical index tables */
rtab = gtab = btab = NULL;
makeRGBToIndexTables(CqNLevels, &rtab, &gtab, &btab);
/* Traverse tree from root, looking for lowest cube
* that is a leaf, and set dest pix to its
* colortable index value. The results are far
* better when dithering to get a more accurate
* average color. */
if (ditherflag == 0) { /* no dithering */
for (i = 0; i < h; i++) {
lines = datas + i * wpls;
lined = datad + i * wpld;
for (j = 0; j < w; j++) {
extractRGBValues(lines[j], &rval, &gval, &bval);
octindex = rtab[rval] | gtab[gval] | btab[bval];
octreeFindColorCell(octindex, cqcaa, &index, &rc, &gc, &bc);
SET_DATA_BYTE(lined, j, index);
}
}
} else { /* Dither */
success = TRUE;
bufu8r = bufu8g = bufu8b = NULL;
buf1r = buf1g = buf1b = buf2r = buf2g = buf2b = NULL;
bufu8r = (l_uint8 *)LEPT_CALLOC(w, sizeof(l_uint8));
bufu8g = (l_uint8 *)LEPT_CALLOC(w, sizeof(l_uint8));
bufu8b = (l_uint8 *)LEPT_CALLOC(w, sizeof(l_uint8));
buf1r = (l_int32 *)LEPT_CALLOC(w, sizeof(l_int32));
buf1g = (l_int32 *)LEPT_CALLOC(w, sizeof(l_int32));
buf1b = (l_int32 *)LEPT_CALLOC(w, sizeof(l_int32));
buf2r = (l_int32 *)LEPT_CALLOC(w, sizeof(l_int32));
buf2g = (l_int32 *)LEPT_CALLOC(w, sizeof(l_int32));
buf2b = (l_int32 *)LEPT_CALLOC(w, sizeof(l_int32));
if (!bufu8r || !bufu8g || !bufu8b || !buf1r || !buf1g ||
!buf1b || !buf2r || !buf2g || !buf2b) {
L_ERROR("buffer not made\n", procName);
success = FALSE;
goto buffer_cleanup;
}
/* Start by priming buf2; line 1 is above line 2 */
pixGetRGBLine(pixs, 0, bufu8r, bufu8g, bufu8b);
for (j = 0; j < w; j++) {
buf2r[j] = 64 * bufu8r[j];
buf2g[j] = 64 * bufu8g[j];
buf2b[j] = 64 * bufu8b[j];
}
for (i = 0; i < h - 1; i++) {
/* Swap data 2 --> 1, and read in new line 2 */
memcpy(buf1r, buf2r, 4 * w);
memcpy(buf1g, buf2g, 4 * w);
memcpy(buf1b, buf2b, 4 * w);
pixGetRGBLine(pixs, i + 1, bufu8r, bufu8g, bufu8b);
for (j = 0; j < w; j++) {
buf2r[j] = 64 * bufu8r[j];
buf2g[j] = 64 * bufu8g[j];
buf2b[j] = 64 * bufu8b[j];
}
/* Dither */
lined = datad + i * wpld;
for (j = 0; j < w - 1; j++) {
rval = buf1r[j] / 64;
gval = buf1g[j] / 64;
bval = buf1b[j] / 64;
octindex = rtab[rval] | gtab[gval] | btab[bval];
octreeFindColorCell(octindex, cqcaa, &index, &rc, &gc, &bc);
SET_DATA_BYTE(lined, j, index);
dif = buf1r[j] / 8 - 8 * rc;
if (dif != 0) {
val1 = buf1r[j + 1] + 3 * dif;
val2 = buf2r[j] + 3 * dif;
val3 = buf2r[j + 1] + 2 * dif;
if (dif > 0) {
buf1r[j + 1] = L_MIN(16383, val1);
buf2r[j] = L_MIN(16383, val2);
buf2r[j + 1] = L_MIN(16383, val3);
} else {
buf1r[j + 1] = L_MAX(0, val1);
buf2r[j] = L_MAX(0, val2);
buf2r[j + 1] = L_MAX(0, val3);
}
}
dif = buf1g[j] / 8 - 8 * gc;
if (dif != 0) {
val1 = buf1g[j + 1] + 3 * dif;
val2 = buf2g[j] + 3 * dif;
val3 = buf2g[j + 1] + 2 * dif;
if (dif > 0) {
buf1g[j + 1] = L_MIN(16383, val1);
buf2g[j] = L_MIN(16383, val2);
buf2g[j + 1] = L_MIN(16383, val3);
} else {
buf1g[j + 1] = L_MAX(0, val1);
buf2g[j] = L_MAX(0, val2);
buf2g[j + 1] = L_MAX(0, val3);
}
}
dif = buf1b[j] / 8 - 8 * bc;
if (dif != 0) {
val1 = buf1b[j + 1] + 3 * dif;
val2 = buf2b[j] + 3 * dif;
val3 = buf2b[j + 1] + 2 * dif;
if (dif > 0) {
buf1b[j + 1] = L_MIN(16383, val1);
buf2b[j] = L_MIN(16383, val2);
buf2b[j + 1] = L_MIN(16383, val3);
} else {
buf1b[j + 1] = L_MAX(0, val1);
buf2b[j] = L_MAX(0, val2);
buf2b[j + 1] = L_MAX(0, val3);
}
}
}
/* Get last pixel in row; no downward propagation */
rval = buf1r[w - 1] / 64;
gval = buf1g[w - 1] / 64;
bval = buf1b[w - 1] / 64;
octindex = rtab[rval] | gtab[gval] | btab[bval];
octreeFindColorCell(octindex, cqcaa, &index, &rc, &gc, &bc);
SET_DATA_BYTE(lined, w - 1, index);
}
/* Get last row of pixels; no leftward propagation */
lined = datad + (h - 1) * wpld;
for (j = 0; j < w; j++) {
rval = buf2r[j] / 64;
gval = buf2g[j] / 64;
bval = buf2b[j] / 64;
octindex = rtab[rval] | gtab[gval] | btab[bval];
octreeFindColorCell(octindex, cqcaa, &index, &rc, &gc, &bc);
SET_DATA_BYTE(lined, j, index);
}
buffer_cleanup:
LEPT_FREE(bufu8r);
LEPT_FREE(bufu8g);
LEPT_FREE(bufu8b);
LEPT_FREE(buf1r);
LEPT_FREE(buf1g);
LEPT_FREE(buf1b);
LEPT_FREE(buf2r);
LEPT_FREE(buf2g);
LEPT_FREE(buf2b);
if (!success) pixDestroy(&pixd);
}
LEPT_FREE(rtab);
LEPT_FREE(gtab);
LEPT_FREE(btab);
return pixd;
}
| 0 |
[
"CWE-125"
] |
leptonica
|
5ee24b398bb67666f6d173763eaaedd9c36fb1e5
| 64,048,389,961,671,970,000,000,000,000,000,000,000 | 192 |
Fixed issue 22140 in oss-fuzz: Heap-buffer-overflow
* color quantized pix must be 8 bpp before extra colors are added.
|
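The dithering loop in the sample above diffuses each pixel's quantization error to its neighbours with weights 3/8 (right), 3/8 (below) and 2/8 (below-right), working in a widened integer buffer. A single-channel toy version of that error diffusion, with a 1-bit quantizer and without the clamping the real code applies, might look like:

#include <stdio.h>

#define W 4
#define H 3

int main(void)
{
    int buf[H][W] = {
        { 100, 140, 180, 220 },
        {  90, 130, 170, 210 },
        {  80, 120, 160, 200 },
    };
    unsigned char out[H][W];

    for (int i = 0; i < H; i++) {
        for (int j = 0; j < W; j++) {
            int level = buf[i][j] < 128 ? 0 : 255;   /* 1-bit quantizer */
            int dif = buf[i][j] - level;             /* quantization error */
            out[i][j] = (unsigned char)level;
            /* spread the error: 3/8 right, 3/8 below, 2/8 below-right */
            if (j + 1 < W)              buf[i][j + 1]     += 3 * dif / 8;
            if (i + 1 < H)              buf[i + 1][j]     += 3 * dif / 8;
            if (i + 1 < H && j + 1 < W) buf[i + 1][j + 1] += 2 * dif / 8;
        }
    }
    for (int i = 0; i < H; i++, puts(""))
        for (int j = 0; j < W; j++)
            printf("%4d", out[i][j]);
    return 0;
}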
int ssl3_connect(SSL *s)
{
BUF_MEM *buf=NULL;
unsigned long Time=(unsigned long)time(NULL);
void (*cb)(const SSL *ssl,int type,int val)=NULL;
int ret= -1;
int new_state,state,skip=0;
RAND_add(&Time,sizeof(Time),0);
ERR_clear_error();
clear_sys_error();
if (s->info_callback != NULL)
cb=s->info_callback;
else if (s->ctx->info_callback != NULL)
cb=s->ctx->info_callback;
s->in_handshake++;
if (!SSL_in_init(s) || SSL_in_before(s)) SSL_clear(s);
for (;;)
{
state=s->state;
switch(s->state)
{
case SSL_ST_RENEGOTIATE:
s->new_session=1;
s->state=SSL_ST_CONNECT;
s->ctx->stats.sess_connect_renegotiate++;
/* break */
case SSL_ST_BEFORE:
case SSL_ST_CONNECT:
case SSL_ST_BEFORE|SSL_ST_CONNECT:
case SSL_ST_OK|SSL_ST_CONNECT:
s->server=0;
if (cb != NULL) cb(s,SSL_CB_HANDSHAKE_START,1);
if ((s->version & 0xff00 ) != 0x0300)
{
SSLerr(SSL_F_SSL3_CONNECT, ERR_R_INTERNAL_ERROR);
ret = -1;
goto end;
}
/* s->version=SSL3_VERSION; */
s->type=SSL_ST_CONNECT;
if (s->init_buf == NULL)
{
if ((buf=BUF_MEM_new()) == NULL)
{
ret= -1;
goto end;
}
if (!BUF_MEM_grow(buf,SSL3_RT_MAX_PLAIN_LENGTH))
{
ret= -1;
goto end;
}
s->init_buf=buf;
buf=NULL;
}
if (!ssl3_setup_buffers(s)) { ret= -1; goto end; }
/* setup buffing BIO */
if (!ssl_init_wbio_buffer(s,0)) { ret= -1; goto end; }
/* don't push the buffering BIO quite yet */
ssl3_init_finished_mac(s);
s->state=SSL3_ST_CW_CLNT_HELLO_A;
s->ctx->stats.sess_connect++;
s->init_num=0;
break;
case SSL3_ST_CW_CLNT_HELLO_A:
case SSL3_ST_CW_CLNT_HELLO_B:
s->shutdown=0;
ret=ssl3_client_hello(s);
if (ret <= 0) goto end;
s->state=SSL3_ST_CR_SRVR_HELLO_A;
s->init_num=0;
/* turn on buffering for the next lot of output */
if (s->bbio != s->wbio)
s->wbio=BIO_push(s->bbio,s->wbio);
break;
case SSL3_ST_CR_SRVR_HELLO_A:
case SSL3_ST_CR_SRVR_HELLO_B:
ret=ssl3_get_server_hello(s);
if (ret <= 0) goto end;
if (s->hit)
s->state=SSL3_ST_CR_FINISHED_A;
else
s->state=SSL3_ST_CR_CERT_A;
s->init_num=0;
break;
case SSL3_ST_CR_CERT_A:
case SSL3_ST_CR_CERT_B:
#ifndef OPENSSL_NO_TLSEXT
ret=ssl3_check_finished(s);
if (ret <= 0) goto end;
if (ret == 2)
{
s->hit = 1;
if (s->tlsext_ticket_expected)
s->state=SSL3_ST_CR_SESSION_TICKET_A;
else
s->state=SSL3_ST_CR_FINISHED_A;
s->init_num=0;
break;
}
#endif
/* Check if it is anon DH/ECDH */
/* or PSK */
if (!(s->s3->tmp.new_cipher->algorithm_auth & SSL_aNULL) &&
!(s->s3->tmp.new_cipher->algorithm_mkey & SSL_kPSK))
{
ret=ssl3_get_server_certificate(s);
if (ret <= 0) goto end;
#ifndef OPENSSL_NO_TLSEXT
if (s->tlsext_status_expected)
s->state=SSL3_ST_CR_CERT_STATUS_A;
else
s->state=SSL3_ST_CR_KEY_EXCH_A;
}
else
{
skip = 1;
s->state=SSL3_ST_CR_KEY_EXCH_A;
}
#else
}
else
skip=1;
s->state=SSL3_ST_CR_KEY_EXCH_A;
#endif
s->init_num=0;
break;
case SSL3_ST_CR_KEY_EXCH_A:
case SSL3_ST_CR_KEY_EXCH_B:
ret=ssl3_get_key_exchange(s);
if (ret <= 0) goto end;
s->state=SSL3_ST_CR_CERT_REQ_A;
s->init_num=0;
/* at this point we check that we have the
* required stuff from the server */
if (!ssl3_check_cert_and_algorithm(s))
{
ret= -1;
goto end;
}
break;
case SSL3_ST_CR_CERT_REQ_A:
case SSL3_ST_CR_CERT_REQ_B:
ret=ssl3_get_certificate_request(s);
if (ret <= 0) goto end;
s->state=SSL3_ST_CR_SRVR_DONE_A;
s->init_num=0;
break;
case SSL3_ST_CR_SRVR_DONE_A:
case SSL3_ST_CR_SRVR_DONE_B:
ret=ssl3_get_server_done(s);
if (ret <= 0) goto end;
if (s->s3->tmp.cert_req)
s->state=SSL3_ST_CW_CERT_A;
else
s->state=SSL3_ST_CW_KEY_EXCH_A;
s->init_num=0;
break;
case SSL3_ST_CW_CERT_A:
case SSL3_ST_CW_CERT_B:
case SSL3_ST_CW_CERT_C:
case SSL3_ST_CW_CERT_D:
ret=ssl3_send_client_certificate(s);
if (ret <= 0) goto end;
s->state=SSL3_ST_CW_KEY_EXCH_A;
s->init_num=0;
break;
case SSL3_ST_CW_KEY_EXCH_A:
case SSL3_ST_CW_KEY_EXCH_B:
ret=ssl3_send_client_key_exchange(s);
if (ret <= 0) goto end;
/* EAY EAY EAY need to check for DH fix cert
* sent back */
/* For TLS, cert_req is set to 2, so a cert chain
* of nothing is sent, but no verify packet is sent */
/* XXX: For now, we do not support client
* authentication in ECDH cipher suites with
* ECDH (rather than ECDSA) certificates.
* We need to skip the certificate verify
* message when client's ECDH public key is sent
* inside the client certificate.
*/
if (s->s3->tmp.cert_req == 1)
{
s->state=SSL3_ST_CW_CERT_VRFY_A;
}
else
{
s->state=SSL3_ST_CW_CHANGE_A;
s->s3->change_cipher_spec=0;
}
if (s->s3->flags & TLS1_FLAGS_SKIP_CERT_VERIFY)
{
s->state=SSL3_ST_CW_CHANGE_A;
s->s3->change_cipher_spec=0;
}
s->init_num=0;
break;
case SSL3_ST_CW_CERT_VRFY_A:
case SSL3_ST_CW_CERT_VRFY_B:
ret=ssl3_send_client_verify(s);
if (ret <= 0) goto end;
s->state=SSL3_ST_CW_CHANGE_A;
s->init_num=0;
s->s3->change_cipher_spec=0;
break;
case SSL3_ST_CW_CHANGE_A:
case SSL3_ST_CW_CHANGE_B:
ret=ssl3_send_change_cipher_spec(s,
SSL3_ST_CW_CHANGE_A,SSL3_ST_CW_CHANGE_B);
if (ret <= 0) goto end;
#if defined(OPENSSL_NO_TLSEXT) || defined(OPENSSL_NO_NPN)
s->state=SSL3_ST_CW_FINISHED_A;
#else
if (s->next_proto_negotiated)
s->state=SSL3_ST_CW_NEXT_PROTO_A;
else
s->state=SSL3_ST_CW_FINISHED_A;
#endif
s->init_num=0;
s->session->cipher=s->s3->tmp.new_cipher;
#ifdef OPENSSL_NO_COMP
s->session->compress_meth=0;
#else
if (s->s3->tmp.new_compression == NULL)
s->session->compress_meth=0;
else
s->session->compress_meth=
s->s3->tmp.new_compression->id;
#endif
if (!s->method->ssl3_enc->setup_key_block(s))
{
ret= -1;
goto end;
}
if (!s->method->ssl3_enc->change_cipher_state(s,
SSL3_CHANGE_CIPHER_CLIENT_WRITE))
{
ret= -1;
goto end;
}
break;
#if !defined(OPENSSL_NO_TLSEXT) && !defined(OPENSSL_NO_NPN)
case SSL3_ST_CW_NEXT_PROTO_A:
case SSL3_ST_CW_NEXT_PROTO_B:
ret=ssl3_send_next_proto(s);
if (ret <= 0) goto end;
s->state=SSL3_ST_CW_FINISHED_A;
break;
#endif
case SSL3_ST_CW_FINISHED_A:
case SSL3_ST_CW_FINISHED_B:
ret=ssl3_send_finished(s,
SSL3_ST_CW_FINISHED_A,SSL3_ST_CW_FINISHED_B,
s->method->ssl3_enc->client_finished_label,
s->method->ssl3_enc->client_finished_label_len);
if (ret <= 0) goto end;
s->state=SSL3_ST_CW_FLUSH;
/* clear flags */
s->s3->flags&= ~SSL3_FLAGS_POP_BUFFER;
if (s->hit)
{
s->s3->tmp.next_state=SSL_ST_OK;
if (s->s3->flags & SSL3_FLAGS_DELAY_CLIENT_FINISHED)
{
s->state=SSL_ST_OK;
s->s3->flags|=SSL3_FLAGS_POP_BUFFER;
s->s3->delay_buf_pop_ret=0;
}
}
else
{
#ifndef OPENSSL_NO_TLSEXT
/* Allow NewSessionTicket if ticket expected */
if (s->tlsext_ticket_expected)
s->s3->tmp.next_state=SSL3_ST_CR_SESSION_TICKET_A;
else
#endif
s->s3->tmp.next_state=SSL3_ST_CR_FINISHED_A;
}
s->init_num=0;
break;
#ifndef OPENSSL_NO_TLSEXT
case SSL3_ST_CR_SESSION_TICKET_A:
case SSL3_ST_CR_SESSION_TICKET_B:
ret=ssl3_get_new_session_ticket(s);
if (ret <= 0) goto end;
s->state=SSL3_ST_CR_FINISHED_A;
s->init_num=0;
break;
case SSL3_ST_CR_CERT_STATUS_A:
case SSL3_ST_CR_CERT_STATUS_B:
ret=ssl3_get_cert_status(s);
if (ret <= 0) goto end;
s->state=SSL3_ST_CR_KEY_EXCH_A;
s->init_num=0;
break;
#endif
case SSL3_ST_CR_FINISHED_A:
case SSL3_ST_CR_FINISHED_B:
ret=ssl3_get_finished(s,SSL3_ST_CR_FINISHED_A,
SSL3_ST_CR_FINISHED_B);
if (ret <= 0) goto end;
if (s->hit)
s->state=SSL3_ST_CW_CHANGE_A;
else
s->state=SSL_ST_OK;
s->init_num=0;
break;
case SSL3_ST_CW_FLUSH:
s->rwstate=SSL_WRITING;
if (BIO_flush(s->wbio) <= 0)
{
ret= -1;
goto end;
}
s->rwstate=SSL_NOTHING;
s->state=s->s3->tmp.next_state;
break;
case SSL_ST_OK:
/* clean a few things up */
ssl3_cleanup_key_block(s);
if (s->init_buf != NULL)
{
BUF_MEM_free(s->init_buf);
s->init_buf=NULL;
}
/* If we are not 'joining' the last two packets,
* remove the buffering now */
if (!(s->s3->flags & SSL3_FLAGS_POP_BUFFER))
ssl_free_wbio_buffer(s);
/* else do it later in ssl3_write */
s->init_num=0;
s->new_session=0;
ssl_update_cache(s,SSL_SESS_CACHE_CLIENT);
if (s->hit) s->ctx->stats.sess_hit++;
ret=1;
/* s->server=0; */
s->handshake_func=ssl3_connect;
s->ctx->stats.sess_connect_good++;
if (cb != NULL) cb(s,SSL_CB_HANDSHAKE_DONE,1);
goto end;
/* break; */
default:
SSLerr(SSL_F_SSL3_CONNECT,SSL_R_UNKNOWN_STATE);
ret= -1;
goto end;
/* break; */
}
/* did we do anything */
if (!s->s3->tmp.reuse_message && !skip)
{
if (s->debug)
{
if ((ret=BIO_flush(s->wbio)) <= 0)
goto end;
}
if ((cb != NULL) && (s->state != state))
{
new_state=s->state;
s->state=state;
cb(s,SSL_CB_CONNECT_LOOP,1);
s->state=new_state;
}
}
skip=0;
}
| 0 |
[] |
openssl
|
ee2ffc279417f15fef3b1073c7dc81a908991516
| 264,062,758,472,625,200,000,000,000,000,000,000,000 | 424 |
Add Next Protocol Negotiation.
|
MATCHER_P(HeaderMapEqualIgnoreOrder, expected, "") {
const bool equal = TestUtility::headerMapEqualIgnoreOrder(*arg, *expected);
if (!equal) {
*result_listener << "\n"
<< TestUtility::addLeftAndRightPadding("Expected header map:") << "\n"
<< *expected
<< TestUtility::addLeftAndRightPadding("is not equal to actual header map:")
<< "\n"
<< *arg << TestUtility::addLeftAndRightPadding("") // line full of padding
<< "\n";
}
return equal;
}
| 0 |
[] |
envoy
|
2c60632d41555ec8b3d9ef5246242be637a2db0f
| 69,149,684,119,834,780,000,000,000,000,000,000,000 | 13 |
http: header map security fixes for duplicate headers (#197)
Previously header matching did not match on all headers for
non-inline headers. This patch changes the default behavior to
always logically match on all headers. Multiple individual
headers will be logically concatenated with ',' similar to what
is done with inline headers. This makes the behavior effectively
consistent. This behavior can be temporary reverted by setting
the runtime value "envoy.reloadable_features.header_match_on_all_headers"
to "false".
Targeted fixes have been additionally performed on the following
extensions which make them consider all duplicate headers by default as
a comma concatenated list:
1) Any extension using CEL matching on headers.
2) The header to metadata filter.
3) The JWT filter.
4) The Lua filter.
Like primary header matching used in routing, RBAC, etc. this behavior
can be disabled by setting the runtime value
"envoy.reloadable_features.header_match_on_all_headers" to false.
Finally, the setCopy() header map API previously only set the first
header in the case of duplicate non-inline headers. setCopy() now
behaves similarly to the other set*() APIs and replaces all found
headers with a single value. This may have had security implications
in the extauth filter which uses this API. This behavior can be disabled
by setting the runtime value
"envoy.reloadable_features.http_set_copy_replace_all_headers" to false.
Fixes https://github.com/envoyproxy/envoy-setec/issues/188
Signed-off-by: Matt Klein <[email protected]>
|
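The behaviour change described above, matching against all values of a duplicated header, amounts to joining the duplicates with ',' before the match runs. A generic C sketch of that join (not Envoy's HeaderMap API):

#include <stdio.h>
#include <string.h>

/* Join duplicate header values with ',' so a matcher sees one logical
 * value, e.g. { "a", "b", "c" } becomes "a,b,c". */
static void join_values(const char *const *values, size_t n,
                        char *out, size_t outsz)
{
    out[0] = '\0';
    for (size_t i = 0; i < n; i++) {
        if (i)
            strncat(out, ",", outsz - strlen(out) - 1);
        strncat(out, values[i], outsz - strlen(out) - 1);
    }
}

int main(void)
{
    const char *dups[] = { "a", "b", "c" };   /* three x-custom headers */
    char joined[64];
    join_values(dups, 3, joined, sizeof(joined));
    printf("x-custom: %s\n", joined);          /* matcher runs on "a,b,c" */
    return 0;
}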
void generic_start_io_acct(struct request_queue *q, int rw,
unsigned long sectors, struct hd_struct *part)
{
int cpu = part_stat_lock();
part_round_stats(q, cpu, part);
part_stat_inc(cpu, part, ios[rw]);
part_stat_add(cpu, part, sectors[rw], sectors);
part_inc_in_flight(q, part, rw);
part_stat_unlock();
}
| 0 |
[
"CWE-772",
"CWE-787"
] |
linux
|
95d78c28b5a85bacbc29b8dba7c04babb9b0d467
| 199,097,510,273,221,000,000,000,000,000,000,000,000 | 12 |
fix unbalanced page refcounting in bio_map_user_iov
bio_map_user_iov and bio_unmap_user do unbalanced pages refcounting if
IO vector has small consecutive buffers belonging to the same page.
bio_add_pc_page merges them into one, but the page reference is never
dropped.
Cc: [email protected]
Signed-off-by: Vitaly Mayatskikh <[email protected]>
Signed-off-by: Al Viro <[email protected]>
|
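The refcounting bug described above comes down to an imbalance: a reference was taken for every user segment that was mapped, but only one reference per page was dropped once small consecutive segments had been merged. A toy sketch of the balanced pattern (plain counters, not the kernel's struct page API):

#include <stdio.h>

struct page { int refcount; };

static void get_page(struct page *p) { p->refcount++; }
static void put_page(struct page *p) { p->refcount--; }

int main(void)
{
    struct page pg = { 1 };
    int refs_taken = 0;

    /* Two small consecutive buffers land in the same page and get merged
     * into one segment, but a reference was taken for each of them. */
    get_page(&pg); refs_taken++;
    get_page(&pg); refs_taken++;

    /* Balanced teardown: drop exactly as many references as were taken. */
    while (refs_taken--)
        put_page(&pg);

    printf("refcount back to %d\n", pg.refcount);   /* prints 1 */
    return 0;
}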
_PUBLIC_ _PURE_ size_t count_chars(const char *s, char c)
{
size_t count = 0;
while (*s) {
if (*s == c) count++;
s ++;
}
return count;
}
| 0 |
[] |
samba
|
8eae8d28bce2c3f6a323d3dc48ed10c2e6bb1ba5
| 132,721,315,637,006,110,000,000,000,000,000,000 | 11 |
CVE-2013-4476: lib-util: add file_check_permissions()
Bug: https://bugzilla.samba.org/show_bug.cgi?id=10234
Signed-off-by: Björn Baumbach <[email protected]>
Reviewed-by: Stefan Metzmacher <[email protected]>
|
void pb_release(const pb_msgdesc_t *fields, void *dest_struct)
{
pb_field_iter_t iter;
if (!dest_struct)
return; /* Ignore NULL pointers, similar to free() */
if (!pb_field_iter_begin(&iter, fields, dest_struct))
return; /* Empty message type */
do
{
pb_release_single_field(&iter);
} while (pb_field_iter_next(&iter));
}
| 0 |
[
"CWE-763"
] |
nanopb
|
e2f0ccf939d9f82931d085acb6df8e9a182a4261
| 143,914,140,750,776,300,000,000,000,000,000,000,000 | 15 |
Fix invalid free() with oneof (#647)
Nanopb would call free() or realloc() on an invalid
(attacker controlled) pointer value when all the following
conditions are true:
- PB_ENABLE_MALLOC is defined at the compile time
- Message definition contains an oneof field, and the oneof
contains at least one pointer type field and at least one
non-pointer type field.
- Data being decoded first contains a non-pointer value for
the oneof field, and later contains an overwriting pointer
value.
Depending on message layout, the bug may not be exploitable in all
cases, but it is known to be exploitable at least with string and
bytes fields. Actual security impact will also depend on the heap
implementation used.
|
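The nanopb bug above is the classic hazard of a oneof/tagged union that mixes pointer and non-pointer members: if the decoder switches the active member without releasing the old one and zeroing the storage, a later release step can call free() on bytes that were never a pointer. A standalone sketch of the safe pattern (invented types, not nanopb's API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy oneof: an integer and a heap-allocated string share the same storage. */
typedef struct {
    int which;                   /* 1 = ival, 2 = sval */
    union { long ival; char *sval; } u;
} oneof_t;

static void oneof_set_int(oneof_t *o, long v)
{
    if (o->which == 2)
        free(o->u.sval);         /* release the old pointer member */
    memset(&o->u, 0, sizeof(o->u));
    o->which = 1;
    o->u.ival = v;
}

static void oneof_set_str(oneof_t *o, const char *s)
{
    if (o->which == 2)
        free(o->u.sval);
    memset(&o->u, 0, sizeof(o->u));   /* never reuse stale bytes as a pointer */
    o->which = 2;
    o->u.sval = strdup(s);
}

static void oneof_release(oneof_t *o)
{
    if (o->which == 2)
        free(o->u.sval);         /* only free when a pointer is really active */
    memset(o, 0, sizeof(*o));
}

int main(void)
{
    oneof_t o = { 0, { 0 } };
    oneof_set_int(&o, 42);       /* non-pointer value first ... */
    oneof_set_str(&o, "hi");     /* ... then the pointer member */
    oneof_release(&o);
    puts("released cleanly");
    return 0;
}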
inline unsigned long copy_transact_fpr_to_user(void __user *to,
struct task_struct *task)
{
return __copy_to_user(to, task->thread.transact_fp.fpr,
ELF_NFPREG * sizeof(double));
}
| 0 |
[
"CWE-20",
"CWE-284",
"CWE-369"
] |
linux
|
d2b9d2a5ad5ef04ff978c9923d19730cb05efd55
| 23,584,933,133,558,598,000,000,000,000,000,000,000 | 6 |
powerpc/tm: Block signal return setting invalid MSR state
Currently we allow both the MSR T and S bits to be set by userspace on
a signal return. Unfortunately this is a reserved configuration and
will cause a TM Bad Thing exception if attempted (via rfid).
This patch checks for this case in both the 32 and 64 bit signals
code. If both T and S are set, we mark the context as invalid.
Found using a syscall fuzzer.
Fixes: 2b0a576d15e0 ("powerpc: Add new transactional memory state to the signal context")
Cc: [email protected] # v3.9+
Signed-off-by: Michael Neuling <[email protected]>
Signed-off-by: Michael Ellerman <[email protected]>
|
static void nft_setelem_data_activate(const struct net *net,
const struct nft_set *set,
struct nft_set_elem *elem)
{
const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA))
nft_data_hold(nft_set_ext_data(ext), set->dtype);
if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF))
(*nft_set_ext_obj(ext))->use++;
}
| 0 |
[
"CWE-665"
] |
linux
|
ad9f151e560b016b6ad3280b48e42fa11e1a5440
| 243,139,332,733,340,870,000,000,000,000,000,000,000 | 11 |
netfilter: nf_tables: initialize set before expression setup
nft_set_elem_expr_alloc() needs an initialized set if expression sets on
the NFT_EXPR_GC flag. Move set fields initialization before expression
setup.
[4512935.019450] ==================================================================
[4512935.019456] BUG: KASAN: null-ptr-deref in nft_set_elem_expr_alloc+0x84/0xd0 [nf_tables]
[4512935.019487] Read of size 8 at addr 0000000000000070 by task nft/23532
[4512935.019494] CPU: 1 PID: 23532 Comm: nft Not tainted 5.12.0-rc4+ #48
[...]
[4512935.019502] Call Trace:
[4512935.019505] dump_stack+0x89/0xb4
[4512935.019512] ? nft_set_elem_expr_alloc+0x84/0xd0 [nf_tables]
[4512935.019536] ? nft_set_elem_expr_alloc+0x84/0xd0 [nf_tables]
[4512935.019560] kasan_report.cold.12+0x5f/0xd8
[4512935.019566] ? nft_set_elem_expr_alloc+0x84/0xd0 [nf_tables]
[4512935.019590] nft_set_elem_expr_alloc+0x84/0xd0 [nf_tables]
[4512935.019615] nf_tables_newset+0xc7f/0x1460 [nf_tables]
Reported-by: [email protected]
Fixes: 65038428b2c6 ("netfilter: nf_tables: allow to specify stateful expression in set definition")
Signed-off-by: Pablo Neira Ayuso <[email protected]>
|
static int load_bitmap_header(struct bitmap_index *index)
{
struct bitmap_disk_header *header = (void *)index->map;
if (index->map_size < sizeof(*header) + 20)
return error("Corrupted bitmap index (missing header data)");
if (memcmp(header->magic, BITMAP_IDX_SIGNATURE, sizeof(BITMAP_IDX_SIGNATURE)) != 0)
return error("Corrupted bitmap index file (wrong header)");
index->version = ntohs(header->version);
if (index->version != 1)
return error("Unsupported version for bitmap index file (%d)", index->version);
/* Parse known bitmap format options */
{
uint32_t flags = ntohs(header->options);
if ((flags & BITMAP_OPT_FULL_DAG) == 0)
return error("Unsupported options for bitmap index file "
"(Git requires BITMAP_OPT_FULL_DAG)");
if (flags & BITMAP_OPT_HASH_CACHE) {
unsigned char *end = index->map + index->map_size - 20;
index->hashes = ((uint32_t *)end) - index->pack->num_objects;
}
}
index->entry_count = ntohl(header->entry_count);
index->map_pos += sizeof(*header);
return 0;
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
git
|
de1e67d0703894cb6ea782e36abb63976ab07e60
| 167,061,696,973,829,030,000,000,000,000,000,000,000 | 32 |
list-objects: pass full pathname to callbacks
When we find a blob at "a/b/c", we currently pass this to
our show_object_fn callbacks as two components: "a/b/" and
"c". Callbacks which want the full value then call
path_name(), which concatenates the two. But this is an
inefficient interface; the path is a strbuf, and we could
simply append "c" to it temporarily, then roll back the
length, without creating a new copy.
So we could improve this by teaching the callsites of
path_name() this trick (and there are only 3). But we can
also notice that no callback actually cares about the
broken-down representation, and simply pass each callback
the full path "a/b/c" as a string. The callback code becomes
even simpler, then, as we do not have to worry about freeing
an allocated buffer, nor rolling back our modification to
the strbuf.
This is theoretically less efficient, as some callbacks
would not bother to format the final path component. But in
practice this is not measurable. Since we use the same
strbuf over and over, our work to grow it is amortized, and
we really only pay to memcpy a few bytes.
Signed-off-by: Jeff King <[email protected]>
Signed-off-by: Junio C Hamano <[email protected]>
|
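The append-then-rollback trick described in the message above can be sketched without git's strbuf: the directory prefix lives in one reusable buffer, each entry name is appended, the callback sees the full path, and the length is rolled back instead of allocating a new string per entry.

#include <stdio.h>
#include <string.h>

/* Visit each name under a fixed prefix, handing the callback the full path
 * "prefix/name" while reusing one buffer (plain char buffer, not strbuf). */
static void for_each_entry(char *path, size_t len, size_t cap,
                           const char *const *names, size_t n,
                           void (*cb)(const char *))
{
    for (size_t i = 0; i < n; i++) {
        size_t add = strlen(names[i]);
        if (len + add + 1 > cap)
            continue;                      /* skip entries that would not fit */
        memcpy(path + len, names[i], add + 1);
        cb(path);                          /* callback gets e.g. "a/b/c" */
        path[len] = '\0';                  /* roll back, no allocation */
    }
}

static void show(const char *p) { printf("visit %s\n", p); }

int main(void)
{
    char path[64] = "a/b/";
    const char *names[] = { "c", "d.txt" };
    for_each_entry(path, strlen(path), sizeof(path), names, 2, show);
    return 0;
}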
static int store_object(
enum object_type type,
struct strbuf *dat,
struct last_object *last,
struct object_id *oidout,
uintmax_t mark)
{
void *out, *delta;
struct object_entry *e;
unsigned char hdr[96];
struct object_id oid;
unsigned long hdrlen, deltalen;
git_SHA_CTX c;
git_zstream s;
hdrlen = xsnprintf((char *)hdr, sizeof(hdr), "%s %lu",
typename(type), (unsigned long)dat->len) + 1;
git_SHA1_Init(&c);
git_SHA1_Update(&c, hdr, hdrlen);
git_SHA1_Update(&c, dat->buf, dat->len);
git_SHA1_Final(oid.hash, &c);
if (oidout)
oidcpy(oidout, &oid);
e = insert_object(&oid);
if (mark)
insert_mark(mark, e);
if (e->idx.offset) {
duplicate_count_by_type[type]++;
return 1;
} else if (find_sha1_pack(oid.hash, packed_git)) {
e->type = type;
e->pack_id = MAX_PACK_ID;
e->idx.offset = 1; /* just not zero! */
duplicate_count_by_type[type]++;
return 1;
}
if (last && last->data.buf && last->depth < max_depth && dat->len > 20) {
delta_count_attempts_by_type[type]++;
delta = diff_delta(last->data.buf, last->data.len,
dat->buf, dat->len,
&deltalen, dat->len - 20);
} else
delta = NULL;
git_deflate_init(&s, pack_compression_level);
if (delta) {
s.next_in = delta;
s.avail_in = deltalen;
} else {
s.next_in = (void *)dat->buf;
s.avail_in = dat->len;
}
s.avail_out = git_deflate_bound(&s, s.avail_in);
s.next_out = out = xmalloc(s.avail_out);
while (git_deflate(&s, Z_FINISH) == Z_OK)
; /* nothing */
git_deflate_end(&s);
/* Determine if we should auto-checkpoint. */
if ((max_packsize && (pack_size + 60 + s.total_out) > max_packsize)
|| (pack_size + 60 + s.total_out) < pack_size) {
/* This new object needs to *not* have the current pack_id. */
e->pack_id = pack_id + 1;
cycle_packfile();
/* We cannot carry a delta into the new pack. */
if (delta) {
FREE_AND_NULL(delta);
git_deflate_init(&s, pack_compression_level);
s.next_in = (void *)dat->buf;
s.avail_in = dat->len;
s.avail_out = git_deflate_bound(&s, s.avail_in);
s.next_out = out = xrealloc(out, s.avail_out);
while (git_deflate(&s, Z_FINISH) == Z_OK)
; /* nothing */
git_deflate_end(&s);
}
}
e->type = type;
e->pack_id = pack_id;
e->idx.offset = pack_size;
object_count++;
object_count_by_type[type]++;
crc32_begin(pack_file);
if (delta) {
off_t ofs = e->idx.offset - last->offset;
unsigned pos = sizeof(hdr) - 1;
delta_count_by_type[type]++;
e->depth = last->depth + 1;
hdrlen = encode_in_pack_object_header(hdr, sizeof(hdr),
OBJ_OFS_DELTA, deltalen);
sha1write(pack_file, hdr, hdrlen);
pack_size += hdrlen;
hdr[pos] = ofs & 127;
while (ofs >>= 7)
hdr[--pos] = 128 | (--ofs & 127);
sha1write(pack_file, hdr + pos, sizeof(hdr) - pos);
pack_size += sizeof(hdr) - pos;
} else {
e->depth = 0;
hdrlen = encode_in_pack_object_header(hdr, sizeof(hdr),
type, dat->len);
sha1write(pack_file, hdr, hdrlen);
pack_size += hdrlen;
}
sha1write(pack_file, out, s.total_out);
pack_size += s.total_out;
e->idx.crc32 = crc32_end(pack_file);
free(out);
free(delta);
if (last) {
if (last->no_swap) {
last->data = *dat;
} else {
strbuf_swap(&last->data, dat);
}
last->offset = e->idx.offset;
last->depth = e->depth;
}
return 0;
}
| 0 |
[] |
git
|
68061e3470210703cb15594194718d35094afdc0
| 14,925,461,404,289,950,000,000,000,000,000,000,000 | 134 |
fast-import: disallow "feature export-marks" by default
The fast-import stream command "feature export-marks=<path>" lets the
stream write marks to an arbitrary path. This may be surprising if you
are running fast-import against an untrusted input (which otherwise
cannot do anything except update Git objects and refs).
Let's disallow the use of this feature by default, and provide a
command-line option to re-enable it (you can always just use the
command-line --export-marks as well, but the in-stream version provides
an easy way for exporters to control the process).
This is a backwards-incompatible change, since the default is flipping
to the new, safer behavior. However, since the main users of the
in-stream versions would be import/export-based remote helpers, and
since we trust remote helpers already (which are already running
arbitrary code), we'll pass the new option by default when reading a
remote helper's stream. This should minimize the impact.
Note that the implementation isn't totally simple, as we have to work
around the fact that fast-import doesn't parse its command-line options
until after it has read any "feature" lines from the stream. This is how
it lets command-line options override in-stream. But in our case, it's
important to parse the new --allow-unsafe-features first.
There are three options for resolving this:
1. Do a separate "early" pass over the options. This is easy for us to
do because there are no command-line options that allow the
"unstuck" form (so there's no chance of us mistaking an argument
for an option), though it does introduce a risk of incorrect
parsing later (e.g,. if we convert to parse-options).
2. Move the option parsing phase back to the start of the program, but
teach the stream-reading code never to override an existing value.
This is tricky, because stream "feature" lines override each other
(meaning we'd have to start tracking the source for every option).
3. Accept that we might parse a "feature export-marks" line that is
forbidden, as long we don't _act_ on it until after we've parsed
the command line options.
This would, in fact, work with the current code, but only because
the previous patch fixed the export-marks parser to avoid touching
the filesystem.
So while it works, it does carry risk of somebody getting it wrong
in the future in a rather subtle and unsafe way.
I've gone with option (1) here as simple, safe, and unlikely to cause
regressions.
This fixes CVE-2019-1348.
Signed-off-by: Jeff King <[email protected]>
|
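Option 1 from the message above, a separate early pass over the command line that only looks for the safety flag, can be sketched in a few lines (invented names; fast-import's real parsing is more involved):

#include <stdio.h>
#include <string.h>

static int allow_unsafe_features;

/* Early pass: scan argv for the one flag that must be known before any
 * stream "feature" lines are acted on. */
static void early_option_pass(int argc, char **argv)
{
    for (int i = 1; i < argc; i++)
        if (!strcmp(argv[i], "--allow-unsafe-features"))
            allow_unsafe_features = 1;
}

/* Called when a "feature export-marks=<path>" stream line is seen. */
static int feature_export_marks(const char *path)
{
    if (!allow_unsafe_features) {
        fprintf(stderr, "feature export-marks forbidden in stream\n");
        return -1;
    }
    printf("would export marks to %s\n", path);
    return 0;
}

int main(int argc, char **argv)
{
    early_option_pass(argc, argv);
    /* ... read the stream; a feature line eventually ends up here ... */
    return feature_export_marks("marks.out") ? 1 : 0;
}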
asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
{
ssize_t ret;
struct file *file;
ret = -EBADF;
file = fget(fd);
if (file) {
if (file->f_mode & FMODE_READ) {
struct address_space *mapping = file->f_mapping;
pgoff_t start = offset >> PAGE_CACHE_SHIFT;
pgoff_t end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
unsigned long len = end - start + 1;
ret = do_readahead(mapping, file, start, len);
}
fput(file);
}
return ret;
}
| 0 |
[
"CWE-193"
] |
linux-2.6
|
94ad374a0751f40d25e22e036c37f7263569d24c
| 169,461,602,936,137,000,000,000,000,000,000,000,000 | 19 |
Fix off-by-one error in iov_iter_advance()
The iov_iter_advance() function would look at the iov->iov_len entry
even though it might have iterated over the whole array, and iov was
pointing past the end. This would cause DEBUG_PAGEALLOC to trigger a
kernel page fault if the allocation was at the end of a page, and the
next page was unallocated.
The quick fix is to just change the order of the tests: check that there
is any iovec data left before we check the iov entry itself.
Thanks to Alexey Dobriyan for finding this case, and testing the fix.
Reported-and-tested-by: Alexey Dobriyan <[email protected]>
Cc: Nick Piggin <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: <[email protected]> [2.6.25.x, 2.6.26.x]
Signed-off-by: Linus Torvalds <[email protected]>
|
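The one-line fix described above is purely about test ordering: check that bytes remain before dereferencing the current iovec entry, so the loop never reads one element past the end of the array. A self-contained sketch of that ordering (simplified types, not the kernel's iov_iter; the caller must not advance past the total length):

#include <stdio.h>
#include <stddef.h>

struct iovec_s { char *base; size_t len; };

/* Advance the iterator by "bytes"; the bytes-left check comes first, so the
 * entry past the end of the array is never touched. */
static void advance(const struct iovec_s **iov, size_t *nr, size_t *skip,
                    size_t bytes)
{
    while (bytes) {
        size_t step = (*iov)->len - *skip;
        if (step > bytes)
            step = bytes;
        bytes -= step;
        *skip += step;
        if (*skip == (*iov)->len) {        /* finished this entry */
            (*iov)++; (*nr)--; *skip = 0;
        }
    }
}

int main(void)
{
    char a[4], b[4];
    struct iovec_s vec[] = { { a, sizeof(a) }, { b, sizeof(b) } };
    const struct iovec_s *iov = vec;
    size_t nr = 2, skip = 0;
    advance(&iov, &nr, &skip, 8);          /* consume everything ... */
    printf("entries left: %zu\n", nr);     /* ... without reading vec[2] */
    return 0;
}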
void run() {
intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
BSONObj specObject = BSON("" << spec());
BSONElement specElement = specObject.firstElement();
VariablesParseState vps = expCtx->variablesParseState;
intrusive_ptr<Expression> expression = Expression::parseOperand(expCtx, specElement, vps);
ASSERT_BSONOBJ_EQ(constify(spec()), expressionToBson(expression));
ASSERT_BSONOBJ_EQ(BSON("" << expectedResult()),
toBson(expression->evaluate(fromBson(BSON("a" << 1)))));
intrusive_ptr<Expression> optimized = expression->optimize();
ASSERT_BSONOBJ_EQ(BSON("" << expectedResult()),
toBson(optimized->evaluate(fromBson(BSON("a" << 1)))));
}
| 0 |
[
"CWE-835"
] |
mongo
|
0a076417d1d7fba3632b73349a1fd29a83e68816
| 49,993,419,959,389,910,000,000,000,000,000,000,000 | 13 |
SERVER-38070 fix infinite loop in agg expression
|
void ring_status_indication(struct s_smc *smc, u_long status)
{
PRINTK("ring_status_indication( ");
if (status & RS_RES15)
PRINTK("RS_RES15 ");
if (status & RS_HARDERROR)
PRINTK("RS_HARDERROR ");
if (status & RS_SOFTERROR)
PRINTK("RS_SOFTERROR ");
if (status & RS_BEACON)
PRINTK("RS_BEACON ");
if (status & RS_PATHTEST)
PRINTK("RS_PATHTEST ");
if (status & RS_SELFTEST)
PRINTK("RS_SELFTEST ");
if (status & RS_RES9)
PRINTK("RS_RES9 ");
if (status & RS_DISCONNECT)
PRINTK("RS_DISCONNECT ");
if (status & RS_RES7)
PRINTK("RS_RES7 ");
if (status & RS_DUPADDR)
PRINTK("RS_DUPADDR ");
if (status & RS_NORINGOP)
PRINTK("RS_NORINGOP ");
if (status & RS_VERSION)
PRINTK("RS_VERSION ");
if (status & RS_STUCKBYPASSS)
PRINTK("RS_STUCKBYPASSS ");
if (status & RS_EVENT)
PRINTK("RS_EVENT ");
if (status & RS_RINGOPCHANGE)
PRINTK("RS_RINGOPCHANGE ");
if (status & RS_RES0)
PRINTK("RS_RES0 ");
PRINTK("]\n");
} // ring_status_indication
| 0 |
[
"CWE-264"
] |
linux-2.6
|
c25b9abbc2c2c0da88e180c3933d6e773245815a
| 155,350,943,493,081,760,000,000,000,000,000,000,000 | 37 |
drivers/net/skfp: if !capable(CAP_NET_ADMIN): inverted logic
Fix inverted logic
Signed-off-by: Roel Kluin <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
static jpc_enc_cp_t *cp_create(char *optstr, jas_image_t *image)
{
jpc_enc_cp_t *cp;
jas_tvparser_t *tvp;
int ret;
int numilyrrates;
double *ilyrrates;
int i;
int tagid;
jpc_enc_tcp_t *tcp;
jpc_enc_tccp_t *tccp;
jpc_enc_ccp_t *ccp;
int cmptno;
uint_fast16_t rlvlno;
uint_fast16_t prcwidthexpn;
uint_fast16_t prcheightexpn;
bool enablemct;
uint_fast32_t jp2overhead;
uint_fast16_t lyrno;
uint_fast32_t hsteplcm;
uint_fast32_t vsteplcm;
bool mctvalid;
tvp = 0;
cp = 0;
ilyrrates = 0;
numilyrrates = 0;
if (!(cp = jas_malloc(sizeof(jpc_enc_cp_t)))) {
goto error;
}
prcwidthexpn = 15;
prcheightexpn = 15;
enablemct = true;
jp2overhead = 0;
cp->ccps = 0;
cp->debug = 0;
cp->imgareatlx = UINT_FAST32_MAX;
cp->imgareatly = UINT_FAST32_MAX;
cp->refgrdwidth = 0;
cp->refgrdheight = 0;
cp->tilegrdoffx = UINT_FAST32_MAX;
cp->tilegrdoffy = UINT_FAST32_MAX;
cp->tilewidth = 0;
cp->tileheight = 0;
cp->numcmpts = jas_image_numcmpts(image);
hsteplcm = 1;
vsteplcm = 1;
for (cmptno = 0; cmptno < jas_image_numcmpts(image); ++cmptno) {
if (jas_image_cmptbrx(image, cmptno) + jas_image_cmpthstep(image, cmptno) <=
jas_image_brx(image) || jas_image_cmptbry(image, cmptno) +
jas_image_cmptvstep(image, cmptno) <= jas_image_bry(image)) {
jas_eprintf("unsupported image type\n");
goto error;
}
/* Note: We ought to be calculating the LCMs here. Fix some day. */
hsteplcm *= jas_image_cmpthstep(image, cmptno);
vsteplcm *= jas_image_cmptvstep(image, cmptno);
}
if (!(cp->ccps = jas_alloc2(cp->numcmpts, sizeof(jpc_enc_ccp_t)))) {
goto error;
}
for (cmptno = 0, ccp = cp->ccps; cmptno < JAS_CAST(int, cp->numcmpts); ++cmptno,
++ccp) {
ccp->sampgrdstepx = jas_image_cmpthstep(image, cmptno);
ccp->sampgrdstepy = jas_image_cmptvstep(image, cmptno);
/* XXX - this isn't quite correct for more general image */
ccp->sampgrdsubstepx = 0;
ccp->sampgrdsubstepy = 0;
ccp->prec = jas_image_cmptprec(image, cmptno);
ccp->sgnd = jas_image_cmptsgnd(image, cmptno);
ccp->numstepsizes = 0;
memset(ccp->stepsizes, 0, sizeof(ccp->stepsizes));
}
cp->rawsize = jas_image_rawsize(image);
cp->totalsize = UINT_FAST32_MAX;
tcp = &cp->tcp;
tcp->csty = 0;
tcp->intmode = true;
tcp->prg = JPC_COD_LRCPPRG;
tcp->numlyrs = 1;
tcp->ilyrrates = 0;
tccp = &cp->tccp;
tccp->csty = 0;
tccp->maxrlvls = 6;
tccp->cblkwidthexpn = 6;
tccp->cblkheightexpn = 6;
tccp->cblksty = 0;
tccp->numgbits = 2;
if (!(tvp = jas_tvparser_create(optstr ? optstr : ""))) {
goto error;
}
while (!(ret = jas_tvparser_next(tvp))) {
switch (jas_taginfo_nonull(jas_taginfos_lookup(encopts,
jas_tvparser_gettag(tvp)))->id) {
case OPT_DEBUG:
cp->debug = atoi(jas_tvparser_getval(tvp));
break;
case OPT_IMGAREAOFFX:
cp->imgareatlx = atoi(jas_tvparser_getval(tvp));
break;
case OPT_IMGAREAOFFY:
cp->imgareatly = atoi(jas_tvparser_getval(tvp));
break;
case OPT_TILEGRDOFFX:
cp->tilegrdoffx = atoi(jas_tvparser_getval(tvp));
break;
case OPT_TILEGRDOFFY:
cp->tilegrdoffy = atoi(jas_tvparser_getval(tvp));
break;
case OPT_TILEWIDTH:
cp->tilewidth = atoi(jas_tvparser_getval(tvp));
break;
case OPT_TILEHEIGHT:
cp->tileheight = atoi(jas_tvparser_getval(tvp));
break;
case OPT_PRCWIDTH:
prcwidthexpn = jpc_floorlog2(atoi(jas_tvparser_getval(tvp)));
break;
case OPT_PRCHEIGHT:
prcheightexpn = jpc_floorlog2(atoi(jas_tvparser_getval(tvp)));
break;
case OPT_CBLKWIDTH:
tccp->cblkwidthexpn =
jpc_floorlog2(atoi(jas_tvparser_getval(tvp)));
break;
case OPT_CBLKHEIGHT:
tccp->cblkheightexpn =
jpc_floorlog2(atoi(jas_tvparser_getval(tvp)));
break;
case OPT_MODE:
if ((tagid = jas_taginfo_nonull(jas_taginfos_lookup(modetab,
jas_tvparser_getval(tvp)))->id) < 0) {
jas_eprintf("ignoring invalid mode %s\n",
jas_tvparser_getval(tvp));
} else {
tcp->intmode = (tagid == MODE_INT);
}
break;
case OPT_PRG:
if ((tagid = jas_taginfo_nonull(jas_taginfos_lookup(prgordtab,
jas_tvparser_getval(tvp)))->id) < 0) {
jas_eprintf("ignoring invalid progression order %s\n",
jas_tvparser_getval(tvp));
} else {
tcp->prg = tagid;
}
break;
case OPT_NOMCT:
enablemct = false;
break;
case OPT_MAXRLVLS:
tccp->maxrlvls = atoi(jas_tvparser_getval(tvp));
break;
case OPT_SOP:
cp->tcp.csty |= JPC_COD_SOP;
break;
case OPT_EPH:
cp->tcp.csty |= JPC_COD_EPH;
break;
case OPT_LAZY:
tccp->cblksty |= JPC_COX_LAZY;
break;
case OPT_TERMALL:
tccp->cblksty |= JPC_COX_TERMALL;
break;
case OPT_SEGSYM:
tccp->cblksty |= JPC_COX_SEGSYM;
break;
case OPT_VCAUSAL:
tccp->cblksty |= JPC_COX_VSC;
break;
case OPT_RESET:
tccp->cblksty |= JPC_COX_RESET;
break;
case OPT_PTERM:
tccp->cblksty |= JPC_COX_PTERM;
break;
case OPT_NUMGBITS:
cp->tccp.numgbits = atoi(jas_tvparser_getval(tvp));
break;
case OPT_RATE:
if (ratestrtosize(jas_tvparser_getval(tvp), cp->rawsize,
&cp->totalsize)) {
jas_eprintf("ignoring bad rate specifier %s\n",
jas_tvparser_getval(tvp));
}
break;
case OPT_ILYRRATES:
if (jpc_atoaf(jas_tvparser_getval(tvp), &numilyrrates,
&ilyrrates)) {
jas_eprintf("warning: invalid intermediate layer rates specifier ignored (%s)\n",
jas_tvparser_getval(tvp));
}
break;
case OPT_JP2OVERHEAD:
jp2overhead = atoi(jas_tvparser_getval(tvp));
break;
default:
jas_eprintf("warning: ignoring invalid option %s\n",
jas_tvparser_gettag(tvp));
break;
}
}
jas_tvparser_destroy(tvp);
tvp = 0;
if (cp->totalsize != UINT_FAST32_MAX) {
cp->totalsize = (cp->totalsize > jp2overhead) ?
(cp->totalsize - jp2overhead) : 0;
}
if (cp->imgareatlx == UINT_FAST32_MAX) {
cp->imgareatlx = 0;
} else {
if (hsteplcm != 1) {
jas_eprintf("warning: overriding imgareatlx value\n");
}
cp->imgareatlx *= hsteplcm;
}
if (cp->imgareatly == UINT_FAST32_MAX) {
cp->imgareatly = 0;
} else {
if (vsteplcm != 1) {
jas_eprintf("warning: overriding imgareatly value\n");
}
cp->imgareatly *= vsteplcm;
}
cp->refgrdwidth = cp->imgareatlx + jas_image_width(image);
cp->refgrdheight = cp->imgareatly + jas_image_height(image);
if (cp->tilegrdoffx == UINT_FAST32_MAX) {
cp->tilegrdoffx = cp->imgareatlx;
}
if (cp->tilegrdoffy == UINT_FAST32_MAX) {
cp->tilegrdoffy = cp->imgareatly;
}
if (!cp->tilewidth) {
cp->tilewidth = cp->refgrdwidth - cp->tilegrdoffx;
}
if (!cp->tileheight) {
cp->tileheight = cp->refgrdheight - cp->tilegrdoffy;
}
if (cp->numcmpts == 3) {
mctvalid = true;
for (cmptno = 0; cmptno < jas_image_numcmpts(image); ++cmptno) {
if (jas_image_cmptprec(image, cmptno) != jas_image_cmptprec(image, 0) ||
jas_image_cmptsgnd(image, cmptno) != jas_image_cmptsgnd(image, 0) ||
jas_image_cmptwidth(image, cmptno) != jas_image_cmptwidth(image, 0) ||
jas_image_cmptheight(image, cmptno) != jas_image_cmptheight(image, 0)) {
mctvalid = false;
}
}
} else {
mctvalid = false;
}
if (mctvalid && enablemct && jas_clrspc_fam(jas_image_clrspc(image)) != JAS_CLRSPC_FAM_RGB) {
jas_eprintf("warning: color space apparently not RGB\n");
}
if (mctvalid && enablemct && jas_clrspc_fam(jas_image_clrspc(image)) == JAS_CLRSPC_FAM_RGB) {
tcp->mctid = (tcp->intmode) ? (JPC_MCT_RCT) : (JPC_MCT_ICT);
} else {
tcp->mctid = JPC_MCT_NONE;
}
tccp->qmfbid = (tcp->intmode) ? (JPC_COX_RFT) : (JPC_COX_INS);
for (rlvlno = 0; rlvlno < tccp->maxrlvls; ++rlvlno) {
tccp->prcwidthexpns[rlvlno] = prcwidthexpn;
tccp->prcheightexpns[rlvlno] = prcheightexpn;
}
if (prcwidthexpn != 15 || prcheightexpn != 15) {
tccp->csty |= JPC_COX_PRT;
}
/* Ensure that the tile width and height is valid. */
if (!cp->tilewidth) {
jas_eprintf("invalid tile width %lu\n", (unsigned long)
cp->tilewidth);
goto error;
}
if (!cp->tileheight) {
jas_eprintf("invalid tile height %lu\n", (unsigned long)
cp->tileheight);
goto error;
}
/* Ensure that the tile grid offset is valid. */
if (cp->tilegrdoffx > cp->imgareatlx ||
cp->tilegrdoffy > cp->imgareatly ||
cp->tilegrdoffx + cp->tilewidth < cp->imgareatlx ||
cp->tilegrdoffy + cp->tileheight < cp->imgareatly) {
jas_eprintf("invalid tile grid offset (%lu, %lu)\n",
(unsigned long) cp->tilegrdoffx, (unsigned long)
cp->tilegrdoffy);
goto error;
}
cp->numhtiles = JPC_CEILDIV(cp->refgrdwidth - cp->tilegrdoffx,
cp->tilewidth);
cp->numvtiles = JPC_CEILDIV(cp->refgrdheight - cp->tilegrdoffy,
cp->tileheight);
cp->numtiles = cp->numhtiles * cp->numvtiles;
if (ilyrrates && numilyrrates > 0) {
tcp->numlyrs = numilyrrates + 1;
if (!(tcp->ilyrrates = jas_alloc2((tcp->numlyrs - 1),
sizeof(jpc_fix_t)))) {
goto error;
}
for (i = 0; i < JAS_CAST(int, tcp->numlyrs - 1); ++i) {
tcp->ilyrrates[i] = jpc_dbltofix(ilyrrates[i]);
}
}
/* Ensure that the integer mode is used in the case of lossless
coding. */
if (cp->totalsize == UINT_FAST32_MAX && (!cp->tcp.intmode)) {
jas_eprintf("cannot use real mode for lossless coding\n");
goto error;
}
/* Ensure that the precinct width is valid. */
if (prcwidthexpn > 15) {
jas_eprintf("invalid precinct width\n");
goto error;
}
/* Ensure that the precinct height is valid. */
if (prcheightexpn > 15) {
jas_eprintf("invalid precinct height\n");
goto error;
}
/* Ensure that the code block width is valid. */
if (cp->tccp.cblkwidthexpn < 2 || cp->tccp.cblkwidthexpn > 12) {
jas_eprintf("invalid code block width %d\n",
JPC_POW2(cp->tccp.cblkwidthexpn));
goto error;
}
/* Ensure that the code block height is valid. */
if (cp->tccp.cblkheightexpn < 2 || cp->tccp.cblkheightexpn > 12) {
jas_eprintf("invalid code block height %d\n",
JPC_POW2(cp->tccp.cblkheightexpn));
goto error;
}
/* Ensure that the code block size is not too large. */
if (cp->tccp.cblkwidthexpn + cp->tccp.cblkheightexpn > 12) {
jas_eprintf("code block size too large\n");
goto error;
}
/* Ensure that the number of layers is valid. */
if (cp->tcp.numlyrs > 16384) {
jas_eprintf("too many layers\n");
goto error;
}
/* There must be at least one resolution level. */
if (cp->tccp.maxrlvls < 1) {
jas_eprintf("must be at least one resolution level\n");
goto error;
}
/* Ensure that the number of guard bits is valid. */
if (cp->tccp.numgbits > 8) {
jas_eprintf("invalid number of guard bits\n");
goto error;
}
/* Ensure that the rate is within the legal range. */
if (cp->totalsize != UINT_FAST32_MAX && cp->totalsize > cp->rawsize) {
jas_eprintf("warning: specified rate is unreasonably large (%lu > %lu)\n", (unsigned long) cp->totalsize, (unsigned long) cp->rawsize);
}
/* Ensure that the intermediate layer rates are valid. */
if (tcp->numlyrs > 1) {
/* The intermediate layers rates must increase monotonically. */
for (lyrno = 0; lyrno + 2 < tcp->numlyrs; ++lyrno) {
if (tcp->ilyrrates[lyrno] >= tcp->ilyrrates[lyrno + 1]) {
jas_eprintf("intermediate layer rates must increase monotonically\n");
goto error;
}
}
/* The intermediate layer rates must be less than the overall rate. */
if (cp->totalsize != UINT_FAST32_MAX) {
for (lyrno = 0; lyrno < tcp->numlyrs - 1; ++lyrno) {
if (jpc_fixtodbl(tcp->ilyrrates[lyrno]) > ((double) cp->totalsize)
/ cp->rawsize) {
jas_eprintf("warning: intermediate layer rates must be less than overall rate\n");
goto error;
}
}
}
}
if (ilyrrates) {
jas_free(ilyrrates);
}
return cp;
error:
if (ilyrrates) {
jas_free(ilyrrates);
}
if (tvp) {
jas_tvparser_destroy(tvp);
}
if (cp) {
jpc_enc_cp_destroy(cp);
}
return 0;
}
| 0 |
[
"CWE-189"
] |
jasper
|
3c55b399c36ef46befcb21e4ebc4799367f89684
| 249,238,926,149,914,640,000,000,000,000,000,000,000 | 427 |
At many places in the code, jas_malloc or jas_recalloc was being
invoked with the size argument being computed in a manner that would not
allow integer overflow to be detected. Now, these places in the code
have been modified to use special-purpose memory allocation functions
(e.g., jas_alloc2, jas_alloc3, jas_realloc2) that check for overflow.
This should fix many security problems.
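As a hedged illustration of the overflow-checked allocators the message refers to (the helper names and exact behaviour below are an assumption, not the real jas_alloc2/jas_realloc2 code), a minimal sketch in C:

#include <stdint.h>
#include <stdlib.h>

/* Allocate nmemb elements of size bytes, failing cleanly if the
 * multiplication would overflow instead of wrapping to a small value. */
static void *checked_alloc2(size_t nmemb, size_t size)
{
    if (size != 0 && nmemb > SIZE_MAX / size)
        return NULL; /* nmemb * size would overflow */
    return malloc(nmemb * size);
}

/* Same check for realloc-style growth. */
static void *checked_realloc2(void *ptr, size_t nmemb, size_t size)
{
    if (size != 0 && nmemb > SIZE_MAX / size)
        return NULL;
    return realloc(ptr, nmemb * size);
}

A call site such as jas_alloc2((tcp->numlyrs - 1), sizeof(jpc_fix_t)) in the function above would then fail for absurdly large layer counts instead of silently allocating a truncated buffer.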
|
void CIRCNetwork::SetAltNick(const CString& s) {
if (m_pUser->GetAltNick().Equals(s)) {
m_sAltNick = "";
} else {
m_sAltNick = s;
}
}
| 0 |
[
"CWE-20"
] |
znc
|
64613bc8b6b4adf1e32231f9844d99cd512b8973
| 8,749,175,029,456,342,000,000,000,000,000,000,000 | 7 |
Don't crash if user specified invalid encoding.
This is CVE-2019-9917
|
static int parse_smacro_template(Token ***tpp, SMacro *tmpl)
{
int nparam = 0;
enum sparmflags flags;
struct smac_param *params = NULL;
bool err, done;
bool greedy = false;
Token **tn = *tpp;
Token *t = *tn;
Token *name;
/*
* DO NOT skip whitespace here, or we won't be able to distinguish:
*
* %define foo (a,b) ; no arguments, (a,b) is the expansion
* %define bar(a,b) ; two arguments, empty expansion
*
* This ambiguity was inherited from C.
*/
if (!tok_is(t, '('))
goto finish;
if (tmpl) {
Token *tx = t;
Token **txpp = &tx;
int sparam;
/* Count parameters first */
sparam = parse_smacro_template(&txpp, NULL);
if (!sparam)
goto finish; /* No parameters, we're done */
nasm_newn(params, sparam);
}
/* Skip leading paren */
tn = &t->next;
t = *tn;
name = NULL;
flags = 0;
err = done = false;
while (!done) {
if (!t || !t->type) {
if (name || flags)
nasm_nonfatal("`)' expected to terminate macro template");
else
nasm_nonfatal("parameter identifier expected");
break;
}
switch (t->type) {
case TOK_ID:
if (name)
goto bad;
name = t;
break;
case TOK_OTHER:
if (t->len != 1)
goto bad;
switch (t->text.a[0]) {
case '=':
flags |= SPARM_EVAL;
break;
case '&':
flags |= SPARM_STR;
break;
case '!':
flags |= SPARM_NOSTRIP;
break;
case '+':
flags |= SPARM_GREEDY;
greedy = true;
break;
case ',':
if (greedy)
nasm_nonfatal("greedy parameter must be last");
/* fall through */
case ')':
if (params) {
if (name)
steal_Token(¶ms[nparam].name, name);
params[nparam].flags = flags;
}
nparam++;
name = NULL;
flags = 0;
done = t->text.a[0] == ')';
break;
default:
goto bad;
}
break;
case TOK_WHITESPACE:
break;
default:
bad:
if (!err) {
nasm_nonfatal("garbage `%s' in macro parameter list", tok_text(t));
err = true;
}
break;
}
tn = &t->next;
t = *tn;
}
finish:
while (t && t->type == TOK_WHITESPACE) {
tn = &t->next;
t = t->next;
}
*tpp = tn;
if (tmpl) {
tmpl->nparam = nparam;
tmpl->greedy = greedy;
tmpl->params = params;
}
return nparam;
}
| 0 |
[] |
nasm
|
6299a3114ce0f3acd55d07de201a8ca2f0a83059
| 311,333,828,906,999,180,000,000,000,000,000,000,000 | 125 |
BR 3392708: fix NULL pointer reference for invalid %stacksize
After issuing an error message for a missing %stacksize argument, need
to quit rather than continuing to try to access the pointer.
Fold uses of tok_text() while we are at it.
Reported-by: Suhwan <[email protected]>
Signed-off-by: H. Peter Anvin (Intel) <[email protected]>
|
unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset)
{
if (offset >= NUM_GPRS)
return 0;
return regs->gprs[offset];
}
| 0 |
[
"CWE-264",
"CWE-269"
] |
linux
|
dab6cf55f81a6e16b8147aed9a843e1691dcd318
| 282,561,247,591,507,970,000,000,000,000,000,000,000 | 6 |
s390/ptrace: fix PSW mask check
The PSW mask check of the PTRACE_POKEUSR_AREA command is incorrect.
The PSW_MASK_USER define contains the PSW_MASK_ASC bits, the ptrace
interface accepts all combinations for the address-space-control
bits. To protect the kernel space the PSW mask check in ptrace needs
to reject the address-space-control bit combination for home space.
Fixes CVE-2014-3534
Cc: [email protected]
Signed-off-by: Martin Schwidefsky <[email protected]>
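A rough sketch of the kind of mask validation the fix describes; the bit values below are invented for illustration and do not match the real s390 PSW layout:

#include <stdbool.h>

/* Illustrative constants only -- the real definitions live in the
 * s390 architecture headers. */
#define DEMO_PSW_MASK_USER 0x0000ffffUL /* bits user space may control (made up) */
#define DEMO_PSW_MASK_ASC  0x0000c000UL /* address-space-control field (made up) */
#define DEMO_PSW_ASC_HOME  0x0000c000UL /* home-space selection (made up) */

/* Reject a user-supplied PSW mask if it touches bits outside the user
 * set or selects the kernel home address space. */
static bool demo_psw_mask_valid(unsigned long mask)
{
    if (mask & ~DEMO_PSW_MASK_USER)
        return false;
    if ((mask & DEMO_PSW_MASK_ASC) == DEMO_PSW_ASC_HOME)
        return false;
    return true;
}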
|
static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
{
const struct bpf_func_proto *fn = NULL;
struct bpf_reg_state *regs;
struct bpf_call_arg_meta meta;
bool changes_data;
int i, err;
/* find function prototype */
if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
verbose(env, "invalid func %s#%d\n", func_id_name(func_id),
func_id);
return -EINVAL;
}
if (env->ops->get_func_proto)
fn = env->ops->get_func_proto(func_id);
if (!fn) {
verbose(env, "unknown func %s#%d\n", func_id_name(func_id),
func_id);
return -EINVAL;
}
/* eBPF programs must be GPL compatible to use GPL-ed functions */
if (!env->prog->gpl_compatible && fn->gpl_only) {
verbose(env, "cannot call GPL only function from proprietary program\n");
return -EINVAL;
}
/* With LD_ABS/IND some JITs save/restore skb from r1. */
changes_data = bpf_helper_changes_pkt_data(fn->func);
if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) {
verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n",
func_id_name(func_id), func_id);
return -EINVAL;
}
memset(&meta, 0, sizeof(meta));
meta.pkt_access = fn->pkt_access;
/* We only support one arg being in raw mode at the moment, which
* is sufficient for the helper functions we have right now.
*/
err = check_raw_mode(fn);
if (err) {
verbose(env, "kernel subsystem misconfigured func %s#%d\n",
func_id_name(func_id), func_id);
return err;
}
/* check args */
err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &meta);
if (err)
return err;
err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta);
if (err)
return err;
err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta);
if (err)
return err;
err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &meta);
if (err)
return err;
err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &meta);
if (err)
return err;
/* Mark slots with STACK_MISC in case of raw mode, stack offset
* is inferred from register state.
*/
for (i = 0; i < meta.access_size; i++) {
err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B, BPF_WRITE, -1);
if (err)
return err;
}
regs = cur_regs(env);
/* reset caller saved regs */
for (i = 0; i < CALLER_SAVED_REGS; i++) {
mark_reg_not_init(env, regs, caller_saved[i]);
check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
}
/* update return register (already marked as written above) */
if (fn->ret_type == RET_INTEGER) {
/* sets type to SCALAR_VALUE */
mark_reg_unknown(env, regs, BPF_REG_0);
} else if (fn->ret_type == RET_VOID) {
regs[BPF_REG_0].type = NOT_INIT;
} else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) {
struct bpf_insn_aux_data *insn_aux;
regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
/* There is no offset yet applied, variable or fixed */
mark_reg_known_zero(env, regs, BPF_REG_0);
regs[BPF_REG_0].off = 0;
/* remember map_ptr, so that check_map_access()
* can check 'value_size' boundary of memory access
* to map element returned from bpf_map_lookup_elem()
*/
if (meta.map_ptr == NULL) {
verbose(env,
"kernel subsystem misconfigured verifier\n");
return -EINVAL;
}
regs[BPF_REG_0].map_ptr = meta.map_ptr;
regs[BPF_REG_0].id = ++env->id_gen;
insn_aux = &env->insn_aux_data[insn_idx];
if (!insn_aux->map_ptr)
insn_aux->map_ptr = meta.map_ptr;
else if (insn_aux->map_ptr != meta.map_ptr)
insn_aux->map_ptr = BPF_MAP_PTR_POISON;
} else {
verbose(env, "unknown return type %d of func %s#%d\n",
fn->ret_type, func_id_name(func_id), func_id);
return -EINVAL;
}
err = check_map_func_compatibility(env, meta.map_ptr, func_id);
if (err)
return err;
if (changes_data)
clear_all_pkt_pointers(env);
return 0;
}
| 0 |
[
"CWE-119",
"CWE-284"
] |
linux
|
468f6eafa6c44cb2c5d8aad35e12f06c240a812a
| 70,995,405,665,289,370,000,000,000,000,000,000,000 | 127 |
bpf: fix 32-bit ALU op verification
32-bit ALU ops operate on 32-bit values and have 32-bit outputs.
Adjust the verifier accordingly.
Fixes: f1174f77b50c ("bpf/verifier: rework value tracking")
Signed-off-by: Jann Horn <[email protected]>
Signed-off-by: Alexei Starovoitov <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
|
int main(int argc, char *argv[])
{
/* I18n */
setlocale(LC_ALL, "");
#if ENABLE_NLS
bindtextdomain(PACKAGE, LOCALEDIR);
textdomain(PACKAGE);
#endif
guint owner_id;
abrt_init(argv);
const char *program_usage_string = _(
"& [options]"
);
enum {
OPT_v = 1 << 0,
OPT_t = 1 << 1,
};
/* Keep enum above and order of options below in sync! */
struct options program_options[] = {
OPT__VERBOSE(&g_verbose),
OPT_INTEGER('t', NULL, &g_timeout_value, _("Exit after NUM seconds of inactivity")),
OPT_END()
};
/*unsigned opts =*/ parse_opts(argc, argv, program_options, program_usage_string);
export_abrt_envvars(0);
/* When dbus daemon starts us, it doesn't set PATH
* (I saw it set only DBUS_STARTER_ADDRESS and DBUS_STARTER_BUS_TYPE).
* In this case, set something sane:
*/
const char *env_path = getenv("PATH");
if (!env_path || !env_path[0])
putenv((char*)"PATH=/usr/sbin:/usr/bin:/sbin:/bin");
msg_prefix = "abrt-dbus"; /* for log(), error_msg() and such */
if (getuid() != 0)
error_msg_and_die(_("This program must be run as root."));
glib_init();
/* We are lazy here - we don't want to manually provide
* the introspection data structures - so we just build
* them from XML.
*/
introspection_data = g_dbus_node_info_new_for_xml(introspection_xml, NULL);
g_assert(introspection_data != NULL);
owner_id = g_bus_own_name(G_BUS_TYPE_SYSTEM,
ABRT_DBUS_NAME,
G_BUS_NAME_OWNER_FLAGS_NONE,
on_bus_acquired,
NULL,
on_name_lost,
NULL,
NULL);
/* initialize the g_settings_dump_location */
load_abrt_conf();
loop = g_main_loop_new(NULL, FALSE);
g_main_loop_run(loop);
log_notice("Cleaning up");
g_bus_unown_name(owner_id);
g_dbus_node_info_unref(introspection_data);
free_abrt_conf_data();
return 0;
}
| 0 |
[
"CWE-59"
] |
abrt
|
7417505e1d93cc95ec648b74e3c801bc67aacb9f
| 80,080,151,437,477,910,000,000,000,000,000,000,000 | 76 |
daemon, dbus: allow only root to create CCpp, Koops, vmcore and xorg
Florian Weimer <[email protected]>:
This prevents users from feeding things that are not actually
coredumps and excerpts from /proc to these analyzers.
For example, it should not be possible to trigger a rule with
“EVENT=post-create analyzer=CCpp” using NewProblem
Related: #1212861
Signed-off-by: Jakub Filak <[email protected]>
|
OPJ_BOOL opj_t2_encode_packets(opj_t2_t* p_t2,
OPJ_UINT32 p_tile_no,
opj_tcd_tile_t *p_tile,
OPJ_UINT32 p_maxlayers,
OPJ_BYTE *p_dest,
OPJ_UINT32 * p_data_written,
OPJ_UINT32 p_max_len,
opj_codestream_info_t *cstr_info,
OPJ_UINT32 p_tp_num,
OPJ_INT32 p_tp_pos,
OPJ_UINT32 p_pino,
J2K_T2_MODE p_t2_mode,
opj_event_mgr_t *p_manager)
{
OPJ_BYTE *l_current_data = p_dest;
OPJ_UINT32 l_nb_bytes = 0;
OPJ_UINT32 compno;
OPJ_UINT32 poc;
opj_pi_iterator_t *l_pi = 00;
opj_pi_iterator_t *l_current_pi = 00;
opj_image_t *l_image = p_t2->image;
opj_cp_t *l_cp = p_t2->cp;
opj_tcp_t *l_tcp = &l_cp->tcps[p_tile_no];
OPJ_UINT32 pocno = (l_cp->rsiz == OPJ_PROFILE_CINEMA_4K) ? 2 : 1;
OPJ_UINT32 l_max_comp = l_cp->m_specific_param.m_enc.m_max_comp_size > 0 ?
l_image->numcomps : 1;
OPJ_UINT32 l_nb_pocs = l_tcp->numpocs + 1;
l_pi = opj_pi_initialise_encode(l_image, l_cp, p_tile_no, p_t2_mode);
if (!l_pi) {
return OPJ_FALSE;
}
* p_data_written = 0;
if (p_t2_mode == THRESH_CALC) { /* Calculating threshold */
l_current_pi = l_pi;
for (compno = 0; compno < l_max_comp; ++compno) {
OPJ_UINT32 l_comp_len = 0;
l_current_pi = l_pi;
for (poc = 0; poc < pocno ; ++poc) {
OPJ_UINT32 l_tp_num = compno;
/* TODO MSD : check why this function cannot fail (cf. v1) */
opj_pi_create_encode(l_pi, l_cp, p_tile_no, poc, l_tp_num, p_tp_pos, p_t2_mode);
if (l_current_pi->poc.prg == OPJ_PROG_UNKNOWN) {
/* TODO ADE : add an error */
opj_pi_destroy(l_pi, l_nb_pocs);
return OPJ_FALSE;
}
while (opj_pi_next(l_current_pi)) {
if (l_current_pi->layno < p_maxlayers) {
l_nb_bytes = 0;
if (! opj_t2_encode_packet(p_tile_no, p_tile, l_tcp, l_current_pi,
l_current_data, &l_nb_bytes,
p_max_len, cstr_info,
p_t2_mode,
p_manager)) {
opj_pi_destroy(l_pi, l_nb_pocs);
return OPJ_FALSE;
}
l_comp_len += l_nb_bytes;
l_current_data += l_nb_bytes;
p_max_len -= l_nb_bytes;
* p_data_written += l_nb_bytes;
}
}
if (l_cp->m_specific_param.m_enc.m_max_comp_size) {
if (l_comp_len > l_cp->m_specific_param.m_enc.m_max_comp_size) {
opj_pi_destroy(l_pi, l_nb_pocs);
return OPJ_FALSE;
}
}
++l_current_pi;
}
}
} else { /* t2_mode == FINAL_PASS */
opj_pi_create_encode(l_pi, l_cp, p_tile_no, p_pino, p_tp_num, p_tp_pos,
p_t2_mode);
l_current_pi = &l_pi[p_pino];
if (l_current_pi->poc.prg == OPJ_PROG_UNKNOWN) {
/* TODO ADE : add an error */
opj_pi_destroy(l_pi, l_nb_pocs);
return OPJ_FALSE;
}
while (opj_pi_next(l_current_pi)) {
if (l_current_pi->layno < p_maxlayers) {
l_nb_bytes = 0;
if (! opj_t2_encode_packet(p_tile_no, p_tile, l_tcp, l_current_pi,
l_current_data, &l_nb_bytes, p_max_len,
cstr_info, p_t2_mode, p_manager)) {
opj_pi_destroy(l_pi, l_nb_pocs);
return OPJ_FALSE;
}
l_current_data += l_nb_bytes;
p_max_len -= l_nb_bytes;
* p_data_written += l_nb_bytes;
/* INDEX >> */
if (cstr_info) {
if (cstr_info->index_write) {
opj_tile_info_t *info_TL = &cstr_info->tile[p_tile_no];
opj_packet_info_t *info_PK = &info_TL->packet[cstr_info->packno];
if (!cstr_info->packno) {
info_PK->start_pos = info_TL->end_header + 1;
} else {
info_PK->start_pos = ((l_cp->m_specific_param.m_enc.m_tp_on | l_tcp->POC) &&
info_PK->start_pos) ? info_PK->start_pos : info_TL->packet[cstr_info->packno -
1].end_pos + 1;
}
info_PK->end_pos = info_PK->start_pos + l_nb_bytes - 1;
info_PK->end_ph_pos += info_PK->start_pos -
1; /* End of packet header which now only represents the distance
to start of packet is incremented by value of start of packet*/
}
cstr_info->packno++;
}
/* << INDEX */
++p_tile->packno;
}
}
}
opj_pi_destroy(l_pi, l_nb_pocs);
return OPJ_TRUE;
}
| 0 |
[
"CWE-416",
"CWE-787"
] |
openjpeg
|
c535531f03369623b9b833ef41952c62257b507e
| 118,397,352,435,155,420,000,000,000,000,000,000,000 | 140 |
opj_t2_encode_packet(): fix potential write heap buffer overflow (#992)
|
smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
void **request_buf, unsigned int *total_len)
{
int rc;
struct smb2_sync_hdr *shdr;
rc = smb2_reconnect(smb2_command, tcon);
if (rc)
return rc;
/* BB eventually switch this to SMB2 specific small buf size */
*request_buf = cifs_small_buf_get();
if (*request_buf == NULL) {
/* BB should we add a retry in here if not a writepage? */
return -ENOMEM;
}
shdr = (struct smb2_sync_hdr *)(*request_buf);
fill_small_buf(smb2_command, tcon, shdr, total_len);
if (tcon != NULL) {
#ifdef CONFIG_CIFS_STATS2
uint16_t com_code = le16_to_cpu(smb2_command);
cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_sent[com_code]);
#endif
cifs_stats_inc(&tcon->num_smbs_sent);
}
return rc;
}
| 0 |
[
"CWE-476"
] |
linux
|
cabfb3680f78981d26c078a26e5c748531257ebb
| 319,664,284,848,348,730,000,000,000,000,000,000,000 | 32 |
CIFS: Enable encryption during session setup phase
In order to allow encryption on SMB connection we need to exchange
a session key and generate encryption and decryption keys.
Signed-off-by: Pavel Shilovsky <[email protected]>
|
static void store_param_float(NET *net, MYSQL_BIND *param)
{
float value= *(float*) param->buffer;
float4store(net->write_pos, value);
net->write_pos+= 4;
}
| 0 |
[] |
mysql-server
|
3d8134d2c9b74bc8883ffe2ef59c168361223837
| 98,262,926,697,841,550,000,000,000,000,000,000,000 | 6 |
Bug#25988681: USE-AFTER-FREE IN MYSQL_STMT_CLOSE()
Description: If mysql_stmt_close() encountered an error,
it recorded the error in the prepared statement
but then freed the memory assigned to the prepared
statement. If mysql_stmt_error() was later used
to get the error information, this resulted in a
use-after-free.
In all cases where mysql_stmt_close() can
fail, error would have been set by
cli_advanced_command in MYSQL structure.
Solution: Don't copy error from MYSQL using set_stmt_errmsg.
There is no automated way to test the fix since
it is in mysql_stmt_close() which does not expect
any reply from server.
Reviewed-By: Georgi Kodinov <[email protected]>
Reviewed-By: Ramil Kalimullin <[email protected]>
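A stripped-down sketch of the use-after-free shape described above and of the fix's idea (the types and functions are made up; this is not the client library code):

#include <stdio.h>
#include <stdlib.h>

struct demo_stmt {
    char errmsg[128];
};

/* Buggy shape: the error is recorded inside the object that is about to
 * be freed, so a later mysql_stmt_error()-style read touches freed memory. */
static void demo_close_buggy(struct demo_stmt *s, const char *err)
{
    snprintf(s->errmsg, sizeof(s->errmsg), "%s", err);
    free(s);
    /* any subsequent read of s->errmsg is a use-after-free */
}

/* Shape of the fix's idea: don't copy the error into the object being
 * destroyed; report it through storage that outlives the statement. */
static void demo_close_fixed(struct demo_stmt *s, const char *err,
                             char *out, size_t outlen)
{
    snprintf(out, outlen, "%s", err);
    free(s);
}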
|
void Uint32Analysis::UnmarkUnsafePhis() {
// No phis were collected. Nothing to do.
if (phis_.length() == 0) return;
// Worklist used to transitively clear kUint32 from phis that
// are used as arguments to other phis.
ZoneList<HPhi*> worklist(phis_.length(), zone_);
// Phi can be used as a uint32 value if and only if
// all its operands are uint32 values and all its
// uses are uint32 safe.
// Iterate over collected phis and unmark those that
// are unsafe. When unmarking phi unmark its operands
// and add it to the worklist if it is a phi as well.
// Phis that are still marked as safe are shifted down
// so that all safe phis form a prefix of the phis_ array.
int phi_count = 0;
for (int i = 0; i < phis_.length(); i++) {
HPhi* phi = phis_[i];
if (CheckPhiOperands(phi) && Uint32UsesAreSafe(phi)) {
phis_[phi_count++] = phi;
} else {
UnmarkPhi(phi, &worklist);
}
}
// Now phis array contains only those phis that have safe
// non-phi uses. Start transitively clearing kUint32 flag
// from phi operands of discovered non-safe phis until
// only safe phis are left.
while (!worklist.is_empty()) {
while (!worklist.is_empty()) {
HPhi* phi = worklist.RemoveLast();
UnmarkPhi(phi, &worklist);
}
// Check if any operands to safe phis were unmarked
// turning a safe phi into unsafe. The same value
// can flow into several phis.
int new_phi_count = 0;
for (int i = 0; i < phi_count; i++) {
HPhi* phi = phis_[i];
if (CheckPhiOperands(phi)) {
phis_[new_phi_count++] = phi;
} else {
UnmarkPhi(phi, &worklist);
}
}
phi_count = new_phi_count;
}
}
| 0 |
[] |
node
|
fd80a31e0697d6317ce8c2d289575399f4e06d21
| 143,496,616,002,668,400,000,000,000,000,000,000,000 | 54 |
deps: backport 5f836c from v8 upstream
Original commit message:
Fix Hydrogen bounds check elimination
When combining bounds checks, they must all be moved before the first load/store
that they are guarding.
BUG=chromium:344186
LOG=y
[email protected]
Review URL: https://codereview.chromium.org/172093002
git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@19475 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
fix #8070
|
int MonConnection::handle_auth_bad_method(
uint32_t old_auth_method,
int result,
const std::vector<uint32_t>& allowed_methods,
const std::vector<uint32_t>& allowed_modes)
{
ldout(cct,10) << __func__ << " old_auth_method " << old_auth_method
<< " result " << cpp_strerror(result)
<< " allowed_methods " << allowed_methods << dendl;
std::vector<uint32_t> auth_supported;
auth_registry->get_supported_methods(con->get_peer_type(), &auth_supported);
auto p = std::find(auth_supported.begin(), auth_supported.end(),
old_auth_method);
assert(p != auth_supported.end());
p = std::find_first_of(std::next(p), auth_supported.end(),
allowed_methods.begin(), allowed_methods.end());
if (p == auth_supported.end()) {
lderr(cct) << __func__ << " server allowed_methods " << allowed_methods
<< " but i only support " << auth_supported << dendl;
return -EACCES;
}
auth_method = *p;
ldout(cct,10) << __func__ << " will try " << auth_method << " next" << dendl;
return 0;
}
| 0 |
[
"CWE-294"
] |
ceph
|
6c14c2fb5650426285428dfe6ca1597e5ea1d07d
| 26,865,330,765,328,590,000,000,000,000,000,000,000 | 25 |
mon/MonClient: bring back CEPHX_V2 authorizer challenges
Commit c58c5754dfd2 ("msg/async/ProtocolV1: use AuthServer and
AuthClient") introduced a backwards compatibility issue into msgr1.
To fix it, commit 321548010578 ("mon/MonClient: skip CEPHX_V2
challenge if client doesn't support it") set out to skip authorizer
challenges for peers that don't support CEPHX_V2. However, it
made it so that authorizer challenges are skipped for all peers in
both msgr1 and msgr2 cases, effectively disabling the protection
against replay attacks that was put in place in commit f80b848d3f83
("auth/cephx: add authorizer challenge", CVE-2018-1128).
This is because con->get_features() always returns 0 at that
point. In msgr1 case, the peer shares its features along with the
authorizer, but while they are available in connect_msg.features they
aren't assigned to con until ProtocolV1::open(). In msgr2 case, the
peer doesn't share its features until much later (in CLIENT_IDENT
frame, i.e. after the authentication phase). The result is that
!CEPHX_V2 branch is taken in all cases and replay attack protection
is lost.
Only clusters with cephx_service_require_version set to 2 on the
service daemons would not be silently downgraded. But, since the
default is 1 and there are no reports of looping on BADAUTHORIZER
faults, I'm pretty sure that no one has ever done that. Note that
cephx_require_version set to 2 would have no effect even though it
is supposed to be stronger than cephx_service_require_version
because MonClient::handle_auth_request() didn't check it.
To fix:
- for msgr1, check connect_msg.features (as was done before commit
c58c5754dfd2) and challenge if CEPHX_V2 is supported. Together
with two preceding patches that resurrect proper cephx_* option
handling in msgr1, this covers both "I want old clients to work"
and "I wish to require better authentication" use cases.
- for msgr2, don't check anything and always challenge. CEPHX_V2
predates msgr2, anyone speaking msgr2 must support it.
Signed-off-by: Ilya Dryomov <[email protected]>
(cherry picked from commit 4a82c72e3bdddcb625933e83af8b50a444b961f1)
|
static bool read_entry(RBuffer *b, ut64 addr, struct minidump_directory *entry) {
st64 o_addr = r_buf_seek (b, 0, R_BUF_CUR);
if (r_buf_seek (b, addr, R_BUF_SET) < 0) {
return false;
}
entry->stream_type = r_buf_read_le32 (b);
entry->location.data_size = r_buf_read_le32 (b);
entry->location.rva = r_buf_read_le32 (b);
r_buf_seek (b, o_addr, R_BUF_SET);
return true;
}
| 0 |
[
"CWE-400",
"CWE-703"
] |
radare2
|
27fe8031782d3a06c3998eaa94354867864f9f1b
| 24,233,691,285,435,035,000,000,000,000,000,000,000 | 11 |
Fix DoS in the minidump parser ##crash
* Reported by lazymio via huntr.dev
* Reproducer: mdmp-dos
|
static void perf_group_detach(struct perf_event *event)
{
struct perf_event *sibling, *tmp;
struct list_head *list = NULL;
/*
* We can have double detach due to exit/hot-unplug + close.
*/
if (!(event->attach_state & PERF_ATTACH_GROUP))
return;
event->attach_state &= ~PERF_ATTACH_GROUP;
/*
* If this is a sibling, remove it from its group.
*/
if (event->group_leader != event) {
list_del_init(&event->group_entry);
event->group_leader->nr_siblings--;
goto out;
}
if (!list_empty(&event->group_entry))
list = &event->group_entry;
/*
* If this was a group event with sibling events then
* upgrade the siblings to singleton events by adding them
* to whatever list we are on.
*/
list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
if (list)
list_move_tail(&sibling->group_entry, list);
sibling->group_leader = sibling;
/* Inherit group flags from the previous leader */
sibling->group_caps = event->group_caps;
WARN_ON_ONCE(sibling->ctx != event->ctx);
}
out:
perf_event__header_size(event->group_leader);
list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
perf_event__header_size(tmp);
}
| 0 |
[
"CWE-362",
"CWE-125"
] |
linux
|
321027c1fe77f892f4ea07846aeae08cefbbb290
| 204,567,984,886,157,930,000,000,000,000,000,000,000 | 47 |
perf/core: Fix concurrent sys_perf_event_open() vs. 'move_group' race
Di Shen reported a race between two concurrent sys_perf_event_open()
calls where both try and move the same pre-existing software group
into a hardware context.
The problem is exactly that described in commit:
f63a8daa5812 ("perf: Fix event->ctx locking")
... where, while we wait for a ctx->mutex acquisition, the event->ctx
relation can have changed under us.
That very same commit failed to recognise sys_perf_event_context() as an
external access vector to the events and thereby didn't apply the
established locking rules correctly.
So while one sys_perf_event_open() call is stuck waiting on
mutex_lock_double(), the other (which owns said locks) moves the group
about. So by the time the former sys_perf_event_open() acquires the
locks, the context we've acquired is stale (and possibly dead).
Apply the established locking rules as per perf_event_ctx_lock_nested()
to the mutex_lock_double() for the 'move_group' case. This obviously means
we need to validate state after we acquire the locks.
Reported-by: Di Shen (Keen Lab)
Tested-by: John Dias <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Alexander Shishkin <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
Cc: Jiri Olsa <[email protected]>
Cc: Kees Cook <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Min Chong <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Stephane Eranian <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Vince Weaver <[email protected]>
Fixes: f63a8daa5812 ("perf: Fix event->ctx locking")
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
|
static int asn1_d2i_read_bio(BIO *in, BUF_MEM **pb)
{
BUF_MEM *b;
unsigned char *p;
int i;
size_t want = HEADER_SIZE;
int eos = 0;
size_t off = 0;
size_t len = 0;
const unsigned char *q;
long slen;
int inf, tag, xclass;
b = BUF_MEM_new();
if (b == NULL) {
ASN1err(ASN1_F_ASN1_D2I_READ_BIO, ERR_R_MALLOC_FAILURE);
return -1;
}
ERR_clear_error();
for (;;) {
if (want >= (len - off)) {
want -= (len - off);
if (len + want < len || !BUF_MEM_grow_clean(b, len + want)) {
ASN1err(ASN1_F_ASN1_D2I_READ_BIO, ERR_R_MALLOC_FAILURE);
goto err;
}
i = BIO_read(in, &(b->data[len]), want);
if ((i < 0) && ((len - off) == 0)) {
ASN1err(ASN1_F_ASN1_D2I_READ_BIO, ASN1_R_NOT_ENOUGH_DATA);
goto err;
}
if (i > 0) {
if (len + i < len) {
ASN1err(ASN1_F_ASN1_D2I_READ_BIO, ASN1_R_TOO_LONG);
goto err;
}
len += i;
}
}
/* else data already loaded */
p = (unsigned char *)&(b->data[off]);
q = p;
inf = ASN1_get_object(&q, &slen, &tag, &xclass, len - off);
if (inf & 0x80) {
unsigned long e;
e = ERR_GET_REASON(ERR_peek_error());
if (e != ASN1_R_TOO_LONG)
goto err;
else
ERR_clear_error(); /* clear error */
}
i = q - p; /* header length */
off += i; /* end of data */
if (inf & 1) {
/* no data body so go round again */
eos++;
if (eos < 0) {
ASN1err(ASN1_F_ASN1_D2I_READ_BIO, ASN1_R_HEADER_TOO_LONG);
goto err;
}
want = HEADER_SIZE;
} else if (eos && (slen == 0) && (tag == V_ASN1_EOC)) {
/* eos value, so go back and read another header */
eos--;
if (eos <= 0)
break;
else
want = HEADER_SIZE;
} else {
/* suck in slen bytes of data */
want = slen;
if (want > (len - off)) {
want -= (len - off);
if (want > INT_MAX /* BIO_read takes an int length */ ||
len + want < len) {
ASN1err(ASN1_F_ASN1_D2I_READ_BIO, ASN1_R_TOO_LONG);
goto err;
}
if (!BUF_MEM_grow_clean(b, len + want)) {
ASN1err(ASN1_F_ASN1_D2I_READ_BIO, ERR_R_MALLOC_FAILURE);
goto err;
}
while (want > 0) {
i = BIO_read(in, &(b->data[len]), want);
if (i <= 0) {
ASN1err(ASN1_F_ASN1_D2I_READ_BIO,
ASN1_R_NOT_ENOUGH_DATA);
goto err;
}
/*
* This can't overflow because |len+want| didn't
* overflow.
*/
len += i;
want -= i;
}
}
if (off + slen < off) {
ASN1err(ASN1_F_ASN1_D2I_READ_BIO, ASN1_R_TOO_LONG);
goto err;
}
off += slen;
if (eos <= 0) {
break;
} else
want = HEADER_SIZE;
}
}
if (off > INT_MAX) {
ASN1err(ASN1_F_ASN1_D2I_READ_BIO, ASN1_R_TOO_LONG);
goto err;
}
*pb = b;
return off;
err:
BUF_MEM_free(b);
return -1;
}
| 1 |
[
"CWE-399"
] |
openssl
|
c62981390d6cf9e3d612c489b8b77c2913b25807
| 169,354,499,671,052,600,000,000,000,000,000,000,000 | 126 |
Harden ASN.1 BIO handling of large amounts of data.
If the ASN.1 BIO is presented with a large length field read it in
chunks of increasing size checking for EOF on each read. This prevents
small files allocating excessive amounts of data.
CVE-2016-2109
Thanks to Brian Carpenter for reporting this issue.
Reviewed-by: Viktor Dukhovni <[email protected]>
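A rough user-space sketch of the chunked-read idea, using plain stdio instead of OpenSSL BIOs (names and chunk sizes are illustrative):

#include <stdio.h>
#include <stdlib.h>

/* Read up to `want` bytes, growing the buffer in increasing chunks and
 * stopping as soon as the stream hits EOF, so a declared length is never
 * trusted up front. */
static unsigned char *read_bounded(FILE *in, size_t want, size_t *outlen)
{
    size_t len = 0, chunk = 4096;
    unsigned char *buf = NULL;

    *outlen = 0;
    if (want == 0)
        return malloc(1);           /* degenerate case: empty payload */

    while (len < want) {
        size_t step = (want - len < chunk) ? want - len : chunk;
        unsigned char *tmp = realloc(buf, len + step);
        if (!tmp) {
            free(buf);
            return NULL;
        }
        buf = tmp;
        size_t got = fread(buf + len, 1, step, in);
        if (got == 0) {             /* EOF before the promised length */
            free(buf);
            return NULL;
        }
        len += got;
        if (chunk < (1u << 20))
            chunk *= 2;             /* increasing chunk size */
    }
    *outlen = len;
    return buf;
}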
|
static void nfs_mark_client_ready(struct nfs_client *clp, int state)
{
clp->cl_cons_state = state;
wake_up_all(&nfs_client_active_wq);
}
| 0 |
[
"CWE-20"
] |
linux-2.6
|
54af3bb543c071769141387a42deaaab5074da55
| 141,576,999,425,405,620,000,000,000,000,000,000,000 | 5 |
NFS: Fix an Oops in encode_lookup()
It doesn't look as if the NFS file name limit is being initialised correctly
in the struct nfs_server. Make sure that we limit whatever is being set in
nfs_probe_fsinfo() and nfs_init_server().
Also ensure that readdirplus and nfs4_path_walk respect our file name
limits.
Signed-off-by: Trond Myklebust <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
void sendSpawnRequest(NegotiationDetails &details) {
TRACE_POINT();
try {
string data = "You have control 1.0\n"
"passenger_root: " + resourceLocator.getRoot() + "\n"
"passenger_version: " PASSENGER_VERSION "\n"
"ruby_libdir: " + resourceLocator.getRubyLibDir() + "\n"
"generation_dir: " + generation->getPath() + "\n"
"gupid: " + details.gupid + "\n"
"connect_password: " + details.connectPassword + "\n";
vector<string> args;
vector<string>::const_iterator it, end;
details.options->toVector(args, resourceLocator);
for (it = args.begin(); it != args.end(); it++) {
const string &key = *it;
it++;
const string &value = *it;
data.append(key + ": " + value + "\n");
}
writeExact(details.adminSocket, data, &details.timeout);
P_TRACE(2, "Spawn request for " << details.options->appRoot << ":\n" << data);
writeExact(details.adminSocket, "\n", &details.timeout);
} catch (const SystemException &e) {
if (e.code() == EPIPE) {
/* Ignore this. Process might have written an
* error response before reading the arguments,
* in which case we'll want to show that instead.
*/
} else {
throw;
}
}
}
| 0 |
[] |
passenger
|
8c6693e0818772c345c979840d28312c2edd4ba4
| 155,644,756,206,731,590,000,000,000,000,000,000,000 | 35 |
Security check socket filenames reported by spawned application processes.
|
static bool qxl_rom_monitors_config_changed(QXLRom *rom,
VDAgentMonitorsConfig *monitors_config,
unsigned int max_outputs)
{
int i;
unsigned int monitors_count;
monitors_count = MIN(monitors_config->num_of_monitors, max_outputs);
if (rom->client_monitors_config.count != monitors_count) {
return true;
}
for (i = 0 ; i < rom->client_monitors_config.count ; ++i) {
VDAgentMonConfig *monitor = &monitors_config->monitors[i];
QXLURect *rect = &rom->client_monitors_config.heads[i];
/* monitor->depth ignored */
if ((rect->left != monitor->x) ||
(rect->top != monitor->y) ||
(rect->right != monitor->x + monitor->width) ||
(rect->bottom != monitor->y + monitor->height)) {
return true;
}
}
return false;
}
| 0 |
[
"CWE-476"
] |
qemu
|
d52680fc932efb8a2f334cc6993e705ed1e31e99
| 22,906,566,878,984,673,000,000,000,000,000,000,000 | 27 |
qxl: check release info object
When releasing spice resources in release_resource() routine,
if release info object 'ext.info' is null, it leads to null
pointer dereference. Add check to avoid it.
Reported-by: Bugs SysSec <[email protected]>
Signed-off-by: Prasad J Pandit <[email protected]>
Message-id: [email protected]
Signed-off-by: Gerd Hoffmann <[email protected]>
|
*/
PHP_FUNCTION(date_time_set)
{
zval *object;
php_date_obj *dateobj;
long h, i, s = 0;
if (zend_parse_method_parameters(ZEND_NUM_ARGS() TSRMLS_CC, getThis(), "Oll|l", &object, date_ce_date, &h, &i, &s) == FAILURE) {
RETURN_FALSE;
}
dateobj = (php_date_obj *) zend_object_store_get_object(object TSRMLS_CC);
DATE_CHECK_INITIALIZED(dateobj->time, DateTime);
dateobj->time->h = h;
dateobj->time->i = i;
dateobj->time->s = s;
timelib_update_ts(dateobj->time, NULL);
RETURN_ZVAL(object, 1, 0);
| 0 |
[] |
php-src
|
7b1898183032eeabc64a086ff040af991cebcd93
| 194,043,686,905,926,830,000,000,000,000,000,000,000 | 18 |
Fix bug #68942 (Use after free vulnerability in unserialize() with DateTimeZone)
Conflicts:
ext/date/php_date.c
|
static void fn_show_mem(struct vc_data *vc)
{
show_mem(0, NULL);
}
| 0 |
[
"CWE-416"
] |
linux
|
6ca03f90527e499dd5e32d6522909e2ad390896b
| 13,451,478,099,276,530,000,000,000,000,000,000,000 | 4 |
vt: keyboard, simplify vt_kdgkbsent
Use 'strlen' of the string, add one for NUL terminator and simply do
'copy_to_user' instead of the explicit 'for' loop. This makes the
KDGKBSENT case more compact.
The only thing we need to take care about is NULL 'func_table[i]'. Use
an empty string in that case.
The original check for overflow could never trigger as the func_buf
strings are always shorter or equal to 'struct kbsentry's.
Cc: <[email protected]>
Signed-off-by: Jiri Slaby <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Greg Kroah-Hartman <[email protected]>
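A small user-space sketch of the simplification described above (copy_to_user is replaced by a plain memcpy and a NULL slot falls back to an empty string):

#include <string.h>

/* Copy a stored string plus its NUL terminator in one call, treating a
 * NULL entry as "". Returns the number of bytes copied, or 0 if the
 * destination is too small. */
static size_t demo_copy_entry(char *dst, size_t dstlen, const char *entry)
{
    const char *src = entry ? entry : "";
    size_t len = strlen(src) + 1;   /* include the NUL */

    if (len > dstlen)
        return 0;
    memcpy(dst, src, len);
    return len;
}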
|
rsock_init_unixsock(VALUE sock, VALUE path, int server)
{
struct sockaddr_un sockaddr;
socklen_t sockaddrlen;
int fd, status;
rb_io_t *fptr;
path = unixsock_path_value(path);
INIT_SOCKADDR_UN(&sockaddr, sizeof(struct sockaddr_un));
if (sizeof(sockaddr.sun_path) < (size_t)RSTRING_LEN(path)) {
rb_raise(rb_eArgError, "too long unix socket path (%ldbytes given but %dbytes max)",
RSTRING_LEN(path), (int)sizeof(sockaddr.sun_path));
}
memcpy(sockaddr.sun_path, RSTRING_PTR(path), RSTRING_LEN(path));
sockaddrlen = rsock_unix_sockaddr_len(path);
fd = rsock_socket(AF_UNIX, SOCK_STREAM, 0);
if (fd < 0) {
rsock_sys_fail_path("socket(2)", path);
}
if (server) {
status = bind(fd, (struct sockaddr*)&sockaddr, sockaddrlen);
}
else {
int prot;
struct unixsock_arg arg;
arg.sockaddr = &sockaddr;
arg.sockaddrlen = sockaddrlen;
arg.fd = fd;
status = (int)rb_protect(unixsock_connect_internal, (VALUE)&arg, &prot);
if (prot) {
close(fd);
rb_jump_tag(prot);
}
}
if (status < 0) {
int e = errno;
close(fd);
rsock_syserr_fail_path(e, "connect(2)", path);
}
if (server) {
if (listen(fd, SOMAXCONN) < 0) {
int e = errno;
close(fd);
rsock_syserr_fail_path(e, "listen(2)", path);
}
}
rsock_init_sock(sock, fd);
if (server) {
GetOpenFile(sock, fptr);
fptr->pathv = rb_str_new_frozen(path);
}
return sock;
}
| 0 |
[
"CWE-20"
] |
ruby
|
b78fa27ae0b717c5569878c106a67d5047e5fb88
| 88,744,700,706,388,880,000,000,000,000,000,000,000 | 60 |
unixsocket.c: abstract namespace
* ext/socket/unixsocket.c (unixsock_path_value): fix r62991 for
Linux abstract namespace.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@63000 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
|
At_Root_Block_Obj Parser::parse_at_root_block()
{
stack.push_back(Scope::AtRoot);
ParserState at_source_position = pstate;
Block_Obj body;
At_Root_Query_Obj expr;
Lookahead lookahead_result;
if (lex_css< exactly<'('> >()) {
expr = parse_at_root_query();
}
if (peek_css < exactly<'{'> >()) {
lex <optional_spaces>();
body = parse_block(true);
}
else if ((lookahead_result = lookahead_for_selector(position)).found) {
Ruleset_Obj r = parse_ruleset(lookahead_result);
body = SASS_MEMORY_NEW(Block, r->pstate(), 1, true);
body->append(r);
}
At_Root_Block_Obj at_root = SASS_MEMORY_NEW(At_Root_Block, at_source_position, body);
if (!expr.isNull()) at_root->expression(expr);
stack.pop_back();
return at_root;
}
| 0 |
[
"CWE-125"
] |
libsass
|
eb15533b07773c30dc03c9d742865604f47120ef
| 48,314,105,218,221,400,000,000,000,000,000,000,000 | 24 |
Fix memory leak in `parse_ie_keyword_arg`
`kwd_arg` would never get freed when there was a parse error in
`parse_ie_keyword_arg`.
Closes #2656
|
static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
const struct sockaddr *dst_addr)
{
if (!src_addr || !src_addr->sa_family) {
src_addr = (struct sockaddr *) &id->route.addr.src_addr;
src_addr->sa_family = dst_addr->sa_family;
if (IS_ENABLED(CONFIG_IPV6) &&
dst_addr->sa_family == AF_INET6) {
struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr;
struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr;
src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
id->route.addr.dev_addr.bound_dev_if = dst_addr6->sin6_scope_id;
} else if (dst_addr->sa_family == AF_IB) {
((struct sockaddr_ib *) src_addr)->sib_pkey =
((struct sockaddr_ib *) dst_addr)->sib_pkey;
}
}
return rdma_bind_addr(id, src_addr);
}
| 0 |
[
"CWE-416"
] |
linux
|
bc0bdc5afaa740d782fbf936aaeebd65e5c2921d
| 122,420,188,540,399,480,000,000,000,000,000,000,000 | 20 |
RDMA/cma: Do not change route.addr.src_addr.ss_family
If the state is not idle then rdma_bind_addr() will immediately fail and
no change to global state should happen.
For instance if the state is already RDMA_CM_LISTEN then this will corrupt
the src_addr and would cause the test in cma_cancel_operation():
if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev)
To view a mangled src_addr, eg with a IPv6 loopback address but an IPv4
family, failing the test.
This would manifest as this trace from syzkaller:
BUG: KASAN: use-after-free in __list_add_valid+0x93/0xa0 lib/list_debug.c:26
Read of size 8 at addr ffff8881546491e0 by task syz-executor.1/32204
CPU: 1 PID: 32204 Comm: syz-executor.1 Not tainted 5.12.0-rc8-syzkaller #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
Call Trace:
__dump_stack lib/dump_stack.c:79 [inline]
dump_stack+0x141/0x1d7 lib/dump_stack.c:120
print_address_description.constprop.0.cold+0x5b/0x2f8 mm/kasan/report.c:232
__kasan_report mm/kasan/report.c:399 [inline]
kasan_report.cold+0x7c/0xd8 mm/kasan/report.c:416
__list_add_valid+0x93/0xa0 lib/list_debug.c:26
__list_add include/linux/list.h:67 [inline]
list_add_tail include/linux/list.h:100 [inline]
cma_listen_on_all drivers/infiniband/core/cma.c:2557 [inline]
rdma_listen+0x787/0xe00 drivers/infiniband/core/cma.c:3751
ucma_listen+0x16a/0x210 drivers/infiniband/core/ucma.c:1102
ucma_write+0x259/0x350 drivers/infiniband/core/ucma.c:1732
vfs_write+0x28e/0xa30 fs/read_write.c:603
ksys_write+0x1ee/0x250 fs/read_write.c:658
do_syscall_64+0x2d/0x70 arch/x86/entry/common.c:46
entry_SYSCALL_64_after_hwframe+0x44/0xae
Which is indicating that an rdma_id_private was destroyed without doing
cma_cancel_listens().
Instead of trying to re-use the src_addr memory to indirectly create an
any address build one explicitly on the stack and bind to that as any
other normal flow would do.
Link: https://lore.kernel.org/r/[email protected]
Cc: [email protected]
Fixes: 732d41c545bb ("RDMA/cma: Make the locking for automatic state transition more clear")
Reported-by: [email protected]
Tested-by: Hao Sun <[email protected]>
Reviewed-by: Leon Romanovsky <[email protected]>
Signed-off-by: Jason Gunthorpe <[email protected]>
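A sketch of the pattern the fix describes -- building a wildcard source address of the right family in a local variable instead of mutating address state shared with other paths (plain sockets API, not the RDMA CM structures):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

/* Fill `out` with an "any" address of the requested family. The caller
 * can bind to this temporary without touching any shared object. */
static void demo_make_any_addr(int family, struct sockaddr_storage *out)
{
    memset(out, 0, sizeof(*out));
    if (family == AF_INET6) {
        struct sockaddr_in6 *a6 = (struct sockaddr_in6 *)out;
        a6->sin6_family = AF_INET6;
        a6->sin6_addr = in6addr_any;
    } else {
        struct sockaddr_in *a4 = (struct sockaddr_in *)out;
        a4->sin_family = AF_INET;
        a4->sin_addr.s_addr = htonl(INADDR_ANY);
    }
}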
|
uint32 max_char_length() const
{ return max_length / collation.collation->mbmaxlen; }
| 0 |
[] |
mysql-server
|
f7316aa0c9a3909fc7498e7b95d5d3af044a7e21
| 223,966,289,795,416,160,000,000,000,000,000,000,000 | 2 |
Bug#26361149 MYSQL SERVER CRASHES AT: COL IN(IFNULL(CONST,
COL), NAME_CONST('NAME', NULL))
Backport of Bug#19143243 fix.
NAME_CONST item can return NULL_ITEM type in case of incorrect arguments.
NULL_ITEM has special processing in Item_func_in function.
In Item_func_in::fix_length_and_dec an array of possible comparators is
created. Since NAME_CONST function has NULL_ITEM type, corresponding
array element is empty. Then NAME_CONST is wrapped to ITEM_CACHE.
ITEM_CACHE can not return proper type(NULL_ITEM) in Item_func_in::val_int(),
so the NULL_ITEM ends up being compared using an empty comparator.
The fix is to disable the caching of Item_name_const item.
|
int mod_wstunnel_frame_send(handler_ctx *hctx, mod_wstunnel_frame_type_t type,
const char *payload, size_t siz) {
#ifdef _MOD_WEBSOCKET_SPEC_RFC_6455_
if (hctx->hybivers >= 8) return send_rfc_6455(hctx, type, payload, siz);
#endif /* _MOD_WEBSOCKET_SPEC_RFC_6455_ */
#ifdef _MOD_WEBSOCKET_SPEC_IETF_00_
if (0 == hctx->hybivers) return send_ietf_00(hctx, type, payload, siz);
#endif /* _MOD_WEBSOCKET_SPEC_IETF_00_ */
return -1;
}
| 0 |
[
"CWE-476"
] |
lighttpd1.4
|
971773f1fae600074b46ef64f3ca1f76c227985f
| 134,390,396,819,532,600,000,000,000,000,000,000,000 | 10 |
[mod_wstunnel] fix crash with bad hybivers (fixes #3165)
(thx Michał Dardas)
x-ref:
"mod_wstunnel null pointer dereference"
https://redmine.lighttpd.net/issues/3165
|
TRIO_PUBLIC_STRING size_t trio_string_format_date_max TRIO_ARGS4((self, max, format, datetime),
trio_string_t* self, size_t max,
TRIO_CONST char* format,
TRIO_CONST struct tm* datetime)
{
assert(self);
return trio_format_date_max(self->content, max, format, datetime);
}
| 0 |
[
"CWE-190",
"CWE-125"
] |
FreeRDP
|
05cd9ea2290d23931f615c1b004d4b2e69074e27
| 334,487,102,935,755,520,000,000,000,000,000,000,000 | 9 |
Fixed TrioParse and trio_length limits.
CVE-2020-4030 thanks to @antonio-morales for finding this.
|
static void sdb_concat_by_path(Sdb *s, const char *path) {
Sdb *db = sdb_new (0, path, 0);
sdb_merge (s, db);
sdb_close (db);
sdb_free (db);
}
| 0 |
[
"CWE-78"
] |
radare2
|
5411543a310a470b1257fb93273cdd6e8dfcb3af
| 291,170,862,994,350,640,000,000,000,000,000,000,000 | 6 |
More fixes for the CVE-2019-14745
|
vhost_user_check_and_alloc_queue_pair(struct virtio_net *dev,
struct VhostUserMsg *msg)
{
uint32_t vring_idx;
switch (msg->request.master) {
case VHOST_USER_SET_VRING_KICK:
case VHOST_USER_SET_VRING_CALL:
case VHOST_USER_SET_VRING_ERR:
vring_idx = msg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
break;
case VHOST_USER_SET_VRING_NUM:
case VHOST_USER_SET_VRING_BASE:
case VHOST_USER_SET_VRING_ENABLE:
vring_idx = msg->payload.state.index;
break;
case VHOST_USER_SET_VRING_ADDR:
vring_idx = msg->payload.addr.index;
break;
default:
return 0;
}
if (vring_idx >= VHOST_MAX_VRING) {
VHOST_LOG_CONFIG(ERR,
"invalid vring index: %u\n", vring_idx);
return -1;
}
if (dev->virtqueue[vring_idx])
return 0;
return alloc_vring_queue(dev, vring_idx);
}
| 0 |
[
"CWE-190"
] |
dpdk
|
c78d94189dced04def987a17f16097fcb197a186
| 9,402,609,735,214,432,000,000,000,000,000,000,000 | 34 |
vhost: fix vring index check
vhost_user_check_and_alloc_queue_pair() is used to extract
a vring index from a payload. This function validates the
index and is called early on when performing message
handling. Most message handlers depend on it correctly
validating the vring index.
Depending on the message type the vring index is in
different parts of the payload. The function contains a
switch/case for each type and copies the index. This is
stored in a uint16. This index is then validated. Depending
on the message, the source index is an unsigned int. If
integer truncation occurs (uint->uint16) the top 16 bits
of the index are never validated.
When they are used later on (e.g. in
vhost_user_set_vring_num() or vhost_user_set_vring_addr())
it can lead to out of bound indexing. The out of bound
indexed data gets written to, and hence this can cause
memory corruption.
This patch fixes this vulnerability by declaring vring
index as an unsigned int in
vhost_user_check_and_alloc_queue_pair().
CVE-2020-10723
Fixes: 160cbc815b41 ("vhost: remove a hack on queue allocation")
Cc: [email protected]
Reported-by: Ilja Van Sprundel <[email protected]>
Signed-off-by: Maxime Coquelin <[email protected]>
Reviewed-by: Xiaolong Ye <[email protected]>
Reviewed-by: Ilja Van Sprundel <[email protected]>
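A compact illustration of the truncation hazard and of the fix's shape (DEMO_MAX_VRING stands in for VHOST_MAX_VRING; everything else is made up):

#include <stdint.h>
#include <stdio.h>

#define DEMO_MAX_VRING 256u

/* Buggy shape: copying the 32-bit index into a 16-bit variable before
 * validating it drops the top 16 bits, so 0x00010005 validates as 5. */
static int demo_check_truncated(uint32_t wire_idx)
{
    uint16_t idx = (uint16_t)wire_idx;   /* truncation happens here */
    return idx < DEMO_MAX_VRING;
}

/* Fixed shape: validate at full width before any narrowing. */
static int demo_check_full_width(uint32_t wire_idx)
{
    return wire_idx < DEMO_MAX_VRING;
}

int main(void)
{
    uint32_t evil = 0x00010005u;
    printf("truncated check passes: %d, full-width check passes: %d\n",
           demo_check_truncated(evil), demo_check_full_width(evil));
    return 0;
}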
|
gdk_pixbuf__jpeg_image_save_to_callback (GdkPixbufSaveFunc save_func,
gpointer user_data,
GdkPixbuf *pixbuf,
gchar **keys,
gchar **values,
GError **error)
{
return real_save_jpeg (pixbuf, keys, values, error,
TRUE, NULL, save_func, user_data);
}
| 0 |
[
"CWE-787"
] |
gdk-pixbuf
|
c2a40a92fe3df4111ed9da51fe3368c079b86926
| 188,367,302,394,330,900,000,000,000,000,000,000,000 | 10 |
jpeg: Throw error when number of color components is unsupported
Explicitly check "3" or "4" output color components.
gdk-pixbuf assumed that the value of output_components to be either
3 or 4, but not an invalid value (9) or an unsupported value (1).
The way the buffer size was deduced was using a naive "== 4" check,
with a 1, 3 or 9 color component picture getting the same buffer size,
a size just sufficient for 3 color components, causing invalid writes
later when libjpeg-turbo was decoding the image.
CVE-2017-2862
Sent by Marcin 'Icewall' Noga of Cisco Talos
https://bugzilla.gnome.org/show_bug.cgi?id=784866
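A sketch of the hardened sizing logic the message describes (names are illustrative; the real decoder does more than this):

#include <stdbool.h>
#include <stddef.h>

/* Accept only the component counts the pixbuf path can represent
 * (3 = RGB, 4 = RGBA) and derive the row size from the validated value,
 * instead of a bare "== 4" test that lets 1- or 9-component images
 * through with a 3-component buffer. */
static bool demo_row_size(int width, int out_components, size_t *rowstride)
{
    if (out_components != 3 && out_components != 4)
        return false;                /* unsupported colour layout */
    if (width <= 0)
        return false;
    *rowstride = (size_t)width * (size_t)out_components;
    return true;
}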
|
GetOutboundPinholeTimeout(struct upnphttp * h, const char * action, const char * ns)
{
int r;
static const char resp[] =
"<u:%sResponse "
"xmlns:u=\"%s\">"
"<OutboundPinholeTimeout>%d</OutboundPinholeTimeout>"
"</u:%sResponse>";
char body[512];
int bodylen;
struct NameValueParserData data;
char * int_ip, * int_port, * rem_host, * rem_port, * protocol;
int opt=0;
/*int proto=0;*/
unsigned short iport, rport;
if (GETFLAG(IPV6FCFWDISABLEDMASK))
{
SoapError(h, 702, "FirewallDisabled");
return;
}
ParseNameValue(h->req_buf + h->req_contentoff, h->req_contentlen, &data);
int_ip = GetValueFromNameValueList(&data, "InternalClient");
int_port = GetValueFromNameValueList(&data, "InternalPort");
rem_host = GetValueFromNameValueList(&data, "RemoteHost");
rem_port = GetValueFromNameValueList(&data, "RemotePort");
protocol = GetValueFromNameValueList(&data, "Protocol");
if (!int_port || !rem_port || !protocol)
{
ClearNameValueList(&data);
SoapError(h, 402, "Invalid Args");
return;
}
rport = (unsigned short)atoi(rem_port);
iport = (unsigned short)atoi(int_port);
/*proto = atoi(protocol);*/
syslog(LOG_INFO, "%s: retrieving timeout for outbound pinhole from [%s]:%hu to [%s]:%hu protocol %s", action, int_ip, iport,rem_host, rport, protocol);
/* TODO */
r = -1;/*upnp_check_outbound_pinhole(proto, &opt);*/
switch(r)
{
case 1: /* success */
bodylen = snprintf(body, sizeof(body), resp,
action, ns/*"urn:schemas-upnp-org:service:WANIPv6FirewallControl:1"*/,
opt, action);
BuildSendAndCloseSoapResp(h, body, bodylen);
break;
case -5: /* Protocol not supported */
SoapError(h, 705, "ProtocolNotSupported");
break;
default:
SoapError(h, 501, "ActionFailed");
}
ClearNameValueList(&data);
}
| 0 |
[
"CWE-476"
] |
miniupnp
|
86030db849260dd8fb2ed975b9890aef1b62b692
| 173,514,320,258,047,670,000,000,000,000,000,000,000 | 63 |
fix error from commit 13585f15c7f7dc28bbbba1661efb280d530d114c
|
libxlLoggerFree(libxlLogger *logger)
{
xentoollog_logger *xtl_logger = (xentoollog_logger*)logger;
if (logger->defaultLogFile)
VIR_FORCE_FCLOSE(logger->defaultLogFile);
g_clear_pointer(&logger->files, g_hash_table_unref);
virMutexDestroy(&logger->tableLock);
xtl_logger_destroy(xtl_logger);
}
| 0 |
[
"CWE-703",
"CWE-667"
] |
libvirt
|
a7a03324d86e111f81687b5315b8f296dde84340
| 173,547,476,830,991,320,000,000,000,000,000,000,000 | 9 |
libxl: Protect access to libxlLogger files hash table
The hash table of log file objects in libxlLogger is not protected against
concurrent access. It is possible for one thread to remove an entry while
another is updating it. Add a mutex to the libxlLogger object and lock it
when accessing the files hash table.
Signed-off-by: Jim Fehlig <[email protected]>
Reviewed-by: Daniel P. Berrangé <[email protected]>
Reviewed-by: Ján Tomko <[email protected]>
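A minimal sketch of the locking pattern described, using a pthread mutex and illustrative types rather than libvirt's virMutex:

#include <glib.h>
#include <pthread.h>
#include <stdio.h>

/* Every access to the shared hash table happens with the lock held, so
 * one thread cannot remove an entry while another is updating it. */
struct demo_logger {
    GHashTable *files;
    pthread_mutex_t lock;
};

static void demo_logger_insert(struct demo_logger *lg, const char *name, FILE *fp)
{
    pthread_mutex_lock(&lg->lock);
    g_hash_table_insert(lg->files, g_strdup(name), fp);
    pthread_mutex_unlock(&lg->lock);
}

static FILE *demo_logger_lookup(struct demo_logger *lg, const char *name)
{
    pthread_mutex_lock(&lg->lock);
    FILE *fp = g_hash_table_lookup(lg->files, name);
    pthread_mutex_unlock(&lg->lock);
    return fp;
}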
|
GF_Err udta_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
u32 i;
GF_UserDataMap *map;
GF_UserDataBox *ptr = (GF_UserDataBox *)s;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
i=0;
while ((map = (GF_UserDataMap *)gf_list_enum(ptr->recordList, &i))) {
//warning: here we are not passing the actual "parent" of the list
//but the UDTA box. The parent itself is not an box, we don't care about it
e = gf_isom_box_array_write(s, map->other_boxes, bs);
if (e) return e;
}
return GF_OK;
}
| 0 |
[
"CWE-125"
] |
gpac
|
bceb03fd2be95097a7b409ea59914f332fb6bc86
| 102,027,806,849,525,320,000,000,000,000,000,000,000 | 18 |
fixed 2 possible heap overflows (inc. #1088)
|
static const char *flat_escape_value_str(AVBPrint *dst, const char *src)
{
const char *p;
for (p = src; *p; p++) {
switch (*p) {
case '\n': av_bprintf(dst, "%s", "\\n"); break;
case '\r': av_bprintf(dst, "%s", "\\r"); break;
case '\\': av_bprintf(dst, "%s", "\\\\"); break;
case '"': av_bprintf(dst, "%s", "\\\""); break;
case '`': av_bprintf(dst, "%s", "\\`"); break;
case '$': av_bprintf(dst, "%s", "\\$"); break;
default: av_bprint_chars(dst, *p, 1); break;
}
}
return dst->str;
}
| 0 |
[
"CWE-476"
] |
FFmpeg
|
837cb4325b712ff1aab531bf41668933f61d75d2
| 262,465,962,891,708,000,000,000,000,000,000,000,000 | 17 |
ffprobe: Fix null pointer dereference with color primaries
Found-by: AD-lab of venustech
Signed-off-by: Michael Niedermayer <[email protected]>
|
nautilus_file_mark_desktop_file_executable (GFile *file,
GtkWindow *parent_window,
gboolean interactive,
NautilusOpCallback done_callback,
gpointer done_callback_data)
{
GTask *task;
MarkTrustedJob *job;
job = op_job_new (MarkTrustedJob, parent_window);
job->file = g_object_ref (file);
job->interactive = interactive;
job->done_callback = done_callback;
job->done_callback_data = done_callback_data;
task = g_task_new (NULL, NULL, mark_desktop_file_executable_task_done, job);
g_task_set_task_data (task, job, NULL);
g_task_run_in_thread (task, mark_desktop_file_executable_task_thread_func);
g_object_unref (task);
}
| 0 |
[
"CWE-20"
] |
nautilus
|
1630f53481f445ada0a455e9979236d31a8d3bb0
| 290,929,870,451,674,150,000,000,000,000,000,000,000 | 20 |
mime-actions: use file metadata for trusting desktop files
Currently we only trust desktop files that have the executable bit
set, and don't replace the displayed icon or the displayed name until
it's trusted, which prevents a malicious desktop file from running
arbitrary programs.
However, the executable permission is preserved if the desktop file
comes from a compressed file.
To prevent this, add a metadata::trusted metadata to the file once the
user acknowledges the file as trusted. Such metadata cannot be added
without access to the computer.
Also remove the SHEBANG "trusted" content we were putting inside the
desktop file, since it adds no security: it can ship with the file
itself.
https://bugzilla.gnome.org/show_bug.cgi?id=777991
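A hedged sketch of attaching such an attribute with GIO (the attribute name follows the message above; the stored value and the minimal error handling are illustrative):

#include <gio/gio.h>

/* Mark a file as trusted by writing a metadata attribute that only
 * someone with access to the local machine can set. */
static gboolean demo_mark_trusted(GFile *file, GError **error)
{
    return g_file_set_attribute_string(file,
                                       "metadata::trusted",
                                       "yes",
                                       G_FILE_QUERY_INFO_NONE,
                                       NULL /* cancellable */,
                                       error);
}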
|
long SSL_CTX_ctrl(SSL_CTX *ctx,int cmd,long larg,void *parg)
{
long l;
switch (cmd)
{
case SSL_CTRL_GET_READ_AHEAD:
return(ctx->read_ahead);
case SSL_CTRL_SET_READ_AHEAD:
l=ctx->read_ahead;
ctx->read_ahead=larg;
return(l);
case SSL_CTRL_SET_MSG_CALLBACK_ARG:
ctx->msg_callback_arg = parg;
return 1;
case SSL_CTRL_GET_MAX_CERT_LIST:
return(ctx->max_cert_list);
case SSL_CTRL_SET_MAX_CERT_LIST:
l=ctx->max_cert_list;
ctx->max_cert_list=larg;
return(l);
case SSL_CTRL_SET_SESS_CACHE_SIZE:
l=ctx->session_cache_size;
ctx->session_cache_size=larg;
return(l);
case SSL_CTRL_GET_SESS_CACHE_SIZE:
return(ctx->session_cache_size);
case SSL_CTRL_SET_SESS_CACHE_MODE:
l=ctx->session_cache_mode;
ctx->session_cache_mode=larg;
return(l);
case SSL_CTRL_GET_SESS_CACHE_MODE:
return(ctx->session_cache_mode);
case SSL_CTRL_SESS_NUMBER:
return(lh_SSL_SESSION_num_items(ctx->sessions));
case SSL_CTRL_SESS_CONNECT:
return(ctx->stats.sess_connect);
case SSL_CTRL_SESS_CONNECT_GOOD:
return(ctx->stats.sess_connect_good);
case SSL_CTRL_SESS_CONNECT_RENEGOTIATE:
return(ctx->stats.sess_connect_renegotiate);
case SSL_CTRL_SESS_ACCEPT:
return(ctx->stats.sess_accept);
case SSL_CTRL_SESS_ACCEPT_GOOD:
return(ctx->stats.sess_accept_good);
case SSL_CTRL_SESS_ACCEPT_RENEGOTIATE:
return(ctx->stats.sess_accept_renegotiate);
case SSL_CTRL_SESS_HIT:
return(ctx->stats.sess_hit);
case SSL_CTRL_SESS_CB_HIT:
return(ctx->stats.sess_cb_hit);
case SSL_CTRL_SESS_MISSES:
return(ctx->stats.sess_miss);
case SSL_CTRL_SESS_TIMEOUTS:
return(ctx->stats.sess_timeout);
case SSL_CTRL_SESS_CACHE_FULL:
return(ctx->stats.sess_cache_full);
case SSL_CTRL_OPTIONS:
return(ctx->options|=larg);
case SSL_CTRL_CLEAR_OPTIONS:
return(ctx->options&=~larg);
case SSL_CTRL_MODE:
return(ctx->mode|=larg);
case SSL_CTRL_CLEAR_MODE:
return(ctx->mode&=~larg);
case SSL_CTRL_SET_MAX_SEND_FRAGMENT:
if (larg < 512 || larg > SSL3_RT_MAX_PLAIN_LENGTH)
return 0;
ctx->max_send_fragment = larg;
return 1;
default:
return(ctx->method->ssl_ctx_ctrl(ctx,cmd,larg,parg));
}
}
| 0 |
[] |
openssl
|
ee2ffc279417f15fef3b1073c7dc81a908991516
| 29,539,267,807,896,103,000,000,000,000,000,000,000 | 78 |
Add Next Protocol Negotiation.
|
void GCompletionFieldSetCompletion(GGadget *g,GTextCompletionHandler completion) {
((GCompletionField *) g)->completion = completion;
((GTextField *) g)->accepts_tabs = ((GCompletionField *) g)->completion != NULL;
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
fontforge
|
626f751752875a0ddd74b9e217b6f4828713573c
| 162,299,619,126,191,950,000,000,000,000,000,000,000 | 4 |
Warn users before discarding their unsaved scripts (#3852)
* Warn users before discarding their unsaved scripts
This closes #3846.
|
static void hpack_dht_dump(FILE *out, const struct hpack_dht *dht)
{
unsigned int i;
unsigned int slot;
char name[4096], value[4096];
for (i = HPACK_SHT_SIZE; i < HPACK_SHT_SIZE + dht->used; i++) {
slot = (hpack_get_dte(dht, i - HPACK_SHT_SIZE + 1) - dht->dte);
fprintf(out, "idx=%d slot=%u name=<%s> value=<%s> addr=%u-%u\n",
i, slot,
istpad(name, hpack_idx_to_name(dht, i)).ptr,
istpad(value, hpack_idx_to_value(dht, i)).ptr,
dht->dte[slot].addr, dht->dte[slot].addr+dht->dte[slot].nlen+dht->dte[slot].vlen-1);
}
}
| 0 |
[
"CWE-787"
] |
haproxy
|
5dfc5d5cd0d2128d77253ead3acf03a421ab5b88
| 44,550,574,767,832,560,000,000,000,000,000,000,000 | 15 |
BUG/CRITICAL: hpack: never index a header into the headroom after wrapping
The HPACK header table is implemented as a wrapping list inside a contiguous
area. Header names and values are stored from right to left while indexes
are stored from left to right. When there's no more room to store a new one,
we wrap to the right again, or possibly defragment it if needed. The condition
to use the right part (called tailroom) or the left part (called headroom)
depends on the location of the last inserted header. After wrapping happens,
the code forces to stick to tailroom by pretending there's no more headroom,
so that the size fit test always fails. The problem is that nothing prevents
from storing a header with an empty name and empty value, resulting in a
total size of zero bytes, which satisfies the condition to use the headroom.
Doing this in a wrapped buffer results in changing the "front" header index
and causing miscalculations on the available size and the addresses of the
next headers. This may even allow to overwrite some parts of the index,
opening the possibility to perform arbitrary writes into a 32-bit relative
address space.
This patch fixes the issue by making sure the headroom is considered only
when the buffer does not wrap, instead of relying on the zero size. This
must be backported to all versions supporting H2, which is as far as 1.8.
Many thanks to Felix Wilhelm of Google Project Zero for responsibly
reporting this problem with a reproducer and a detailed analysis.
CVE-2020-11100 was assigned to this issue.
|
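To make the corrected condition from the commit message concrete, here is a minimal, self-contained sketch in C. It is not HAProxy's code; the structure and field names are placeholders chosen for illustration.

#include <stddef.h>

/* Hypothetical wrapping table: entries live in a contiguous buffer of
 * `size` bytes; `head` is the lowest used offset, and `wrapped` is set
 * once storage has wrapped around the end of the buffer. */
struct wrap_tbl {
    size_t head;
    size_t size;
    int    wrapped;
};

/* Corrected rule: the space before `head` (the "headroom") may only be
 * used while the buffer has NOT wrapped.  Testing "headroom == 0"
 * instead is unsafe, because a zero-length entry (empty name and empty
 * value) still passes that test after wrapping. */
static int fits_in_headroom(const struct wrap_tbl *t, size_t need)
{
    return !t->wrapped && need <= t->head;
}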
static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
struct ts_config *conf,
struct ts_state *state)
{
return skb_seq_read(offset, text, TS_SKB_CB(state));
}
| 0 |
[
"CWE-703",
"CWE-125"
] |
linux
|
8605330aac5a5785630aec8f64378a54891937cc
| 294,053,187,886,278,030,000,000,000,000,000,000,000 | 6 |
tcp: fix SCM_TIMESTAMPING_OPT_STATS for normal skbs
__sock_recv_timestamp can be called for both normal skbs (for
receive timestamps) and for skbs on the error queue (for transmit
timestamps).
Commit 1c885808e456
(tcp: SOF_TIMESTAMPING_OPT_STATS option for SO_TIMESTAMPING)
assumes any skb passed to __sock_recv_timestamp are from
the error queue, containing OPT_STATS in the content of the skb.
This results in accessing invalid memory or generating junk
data.
To fix this, set skb->pkt_type to PACKET_OUTGOING for packets
on the error queue. This is safe because on the receive path
on local sockets skb->pkt_type is never set to PACKET_OUTGOING.
With that, copy OPT_STATS from a packet, only if its pkt_type
is PACKET_OUTGOING.
Fixes: 1c885808e456 ("tcp: SOF_TIMESTAMPING_OPT_STATS option for SO_TIMESTAMPING")
Reported-by: JongHwan Kim <[email protected]>
Signed-off-by: Soheil Hassas Yeganeh <[email protected]>
Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: Willem de Bruijn <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
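A hedged illustration of the guard described above, reduced to plain C (the type and constant below are stand-ins, not the kernel's struct sk_buff): OPT_STATS is only read from packets explicitly marked as coming from the error queue.

#include <stddef.h>

enum pkt_type { PACKET_HOST = 0, PACKET_OUTGOING = 4 };  /* assumed values */

struct fake_skb {
    enum pkt_type pkt_type;
    const void   *opt_stats;   /* only meaningful for error-queue packets */
};

/* Receive-path packets never carry OPT_STATS, so refuse to interpret
 * their payload as such unless the packet was marked PACKET_OUTGOING. */
static const void *get_opt_stats(const struct fake_skb *skb)
{
    if (skb->pkt_type != PACKET_OUTGOING)
        return NULL;
    return skb->opt_stats;
}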
TiledInputFile::dataWindowForLevel (int l) const
{
return dataWindowForLevel (l, l);
}
| 0 |
[
"CWE-125"
] |
openexr
|
e79d2296496a50826a15c667bf92bdc5a05518b4
| 284,377,496,376,512,030,000,000,000,000,000,000,000 | 4 |
fix memory leaks and invalid memory accesses
Signed-off-by: Peter Hillman <[email protected]>
|
static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
{
int k, new_interface, blen, usg;
Sg_request *srp;
Sg_fd *fp;
const sg_io_hdr_t *hp;
const char * cp;
unsigned int ms;
k = 0;
list_for_each_entry(fp, &sdp->sfds, sfd_siblings) {
k++;
read_lock(&fp->rq_list_lock); /* irqs already disabled */
seq_printf(s, " FD(%d): timeout=%dms bufflen=%d "
"(res)sgat=%d low_dma=%d\n", k,
jiffies_to_msecs(fp->timeout),
fp->reserve.bufflen,
(int) fp->reserve.k_use_sg,
(int) sdp->device->host->unchecked_isa_dma);
seq_printf(s, " cmd_q=%d f_packid=%d k_orphan=%d closed=0\n",
(int) fp->cmd_q, (int) fp->force_packid,
(int) fp->keep_orphan);
list_for_each_entry(srp, &fp->rq_list, entry) {
hp = &srp->header;
new_interface = (hp->interface_id == '\0') ? 0 : 1;
if (srp->res_used) {
if (new_interface &&
(SG_FLAG_MMAP_IO & hp->flags))
cp = " mmap>> ";
else
cp = " rb>> ";
} else {
if (SG_INFO_DIRECT_IO_MASK & hp->info)
cp = " dio>> ";
else
cp = " ";
}
seq_puts(s, cp);
blen = srp->data.bufflen;
usg = srp->data.k_use_sg;
seq_puts(s, srp->done ?
((1 == srp->done) ? "rcv:" : "fin:")
: "act:");
seq_printf(s, " id=%d blen=%d",
srp->header.pack_id, blen);
if (srp->done)
seq_printf(s, " dur=%d", hp->duration);
else {
ms = jiffies_to_msecs(jiffies);
seq_printf(s, " t_o/elap=%d/%d",
(new_interface ? hp->timeout :
jiffies_to_msecs(fp->timeout)),
(ms > hp->duration ? ms - hp->duration : 0));
}
seq_printf(s, "ms sgat=%d op=0x%02x\n", usg,
(int) srp->data.cmd_opcode);
}
if (list_empty(&fp->rq_list))
seq_puts(s, " No requests active\n");
read_unlock(&fp->rq_list_lock);
}
}
| 0 |
[
"CWE-200"
] |
linux
|
3e0097499839e0fe3af380410eababe5a47c4cf9
| 46,553,055,406,437,100,000,000,000,000,000,000,000 | 62 |
scsi: sg: fixup infoleak when using SG_GET_REQUEST_TABLE
When calling SG_GET_REQUEST_TABLE ioctl only a half-filled table is
returned; the remaining part will then contain stale kernel memory
information. This patch zeroes out the entire table to avoid this
issue.
Signed-off-by: Hannes Reinecke <[email protected]>
Reviewed-by: Bart Van Assche <[email protected]>
Reviewed-by: Christoph Hellwig <[email protected]>
Reviewed-by: Eric Dumazet <[email protected]>
Signed-off-by: Martin K. Petersen <[email protected]>
|
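The zeroing fix follows a generic pattern worth spelling out: clear the whole output buffer before filling in only part of it, so the untouched tail cannot leak stale memory. A stand-alone sketch (not the sg driver code):

#include <stddef.h>
#include <string.h>

struct req_entry { int id; int len; };

/* Fill up to `max` entries from `src[0..n)`; the caller copies the whole
 * table out afterwards.  memset() first, so entries beyond `n` are zeroes
 * rather than whatever happened to be in the allocation. */
static void fill_request_table(struct req_entry *table, size_t max,
                               const struct req_entry *src, size_t n)
{
    memset(table, 0, max * sizeof(*table));   /* prevents the info leak */
    if (n > max)
        n = max;
    memcpy(table, src, n * sizeof(*table));
}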
static int check_trust(X509_STORE_CTX *ctx, int num_untrusted)
{
int i, ok = 0;
X509 *x = NULL;
X509 *mx;
struct dane_st *dane = (struct dane_st *)ctx->dane;
int num = sk_X509_num(ctx->chain);
int trust;
/*
* Check for a DANE issuer at depth 1 or greater, if it is a DANE-TA(2)
* match, we're done, otherwise we'll merely record the match depth.
*/
if (DANETLS_HAS_TA(dane) && num_untrusted > 0 && num_untrusted < num) {
switch (trust = check_dane_issuer(ctx, num_untrusted)) {
case X509_TRUST_TRUSTED:
case X509_TRUST_REJECTED:
return trust;
}
}
/*
* Check trusted certificates in chain at depth num_untrusted and up.
* Note, that depths 0..num_untrusted-1 may also contain trusted
* certificates, but the caller is expected to have already checked those,
* and wants to incrementally check just any added since.
*/
for (i = num_untrusted; i < num; i++) {
x = sk_X509_value(ctx->chain, i);
trust = X509_check_trust(x, ctx->param->trust, 0);
/* If explicitly trusted return trusted */
if (trust == X509_TRUST_TRUSTED)
goto trusted;
if (trust == X509_TRUST_REJECTED)
goto rejected;
}
/*
* If we are looking at a trusted certificate, and accept partial chains,
* the chain is PKIX trusted.
*/
if (num_untrusted < num) {
if (ctx->param->flags & X509_V_FLAG_PARTIAL_CHAIN)
goto trusted;
return X509_TRUST_UNTRUSTED;
}
if (num_untrusted == num && ctx->param->flags & X509_V_FLAG_PARTIAL_CHAIN) {
/*
* Last-resort call with no new trusted certificates, check the leaf
* for a direct trust store match.
*/
i = 0;
x = sk_X509_value(ctx->chain, i);
mx = lookup_cert_match(ctx, x);
if (!mx)
return X509_TRUST_UNTRUSTED;
/*
* Check explicit auxiliary trust/reject settings. If none are set,
* we'll accept X509_TRUST_UNTRUSTED when not self-signed.
*/
trust = X509_check_trust(mx, ctx->param->trust, 0);
if (trust == X509_TRUST_REJECTED) {
X509_free(mx);
goto rejected;
}
/* Replace leaf with trusted match */
(void) sk_X509_set(ctx->chain, 0, mx);
X509_free(x);
ctx->num_untrusted = 0;
goto trusted;
}
/*
* If no trusted certs in chain at all return untrusted and allow
* standard (no issuer cert) etc errors to be indicated.
*/
return X509_TRUST_UNTRUSTED;
rejected:
ctx->error_depth = i;
ctx->current_cert = x;
ctx->error = X509_V_ERR_CERT_REJECTED;
ok = ctx->verify_cb(0, ctx);
if (!ok)
return X509_TRUST_REJECTED;
return X509_TRUST_UNTRUSTED;
trusted:
if (!DANETLS_ENABLED(dane))
return X509_TRUST_TRUSTED;
if (dane->pdpth < 0)
dane->pdpth = num_untrusted;
/* With DANE, PKIX alone is not trusted until we have both */
if (dane->mdpth >= 0)
return X509_TRUST_TRUSTED;
return X509_TRUST_UNTRUSTED;
}
| 0 |
[] |
openssl
|
33cc5dde478ba5ad79f8fd4acd8737f0e60e236e
| 217,035,163,601,888,550,000,000,000,000,000,000,000 | 100 |
Compat self-signed trust with reject-only aux data
When auxiliary data contains only reject entries, continue to trust
self-signed objects just as when no auxiliary data is present.
This makes it possible to reject specific uses without changing
what's accepted (and thus overriding the underlying EKU).
Added new supported certs and doubled test count from 38 to 76.
Reviewed-by: Dr. Stephen Henson <[email protected]>
|
void pad_short_parameter(std::string& param, unsigned int max_len)
{
if (param.length() < max_len)
{
QTC::TC("qpdf", "QPDF_encryption pad short parameter");
param.append(max_len - param.length(), '\0');
}
}
| 1 |
[
"CWE-787"
] |
qpdf
|
d71f05ca07eb5c7cfa4d6d23e5c1f2a800f52e8e
| 138,848,666,017,848,980,000,000,000,000,000,000,000 | 8 |
Fix sign and conversion warnings (major)
This makes all integer type conversions that have potential data loss
explicit with calls that do range checks and raise an exception. After
this commit, qpdf builds with no warnings when -Wsign-conversion
-Wconversion is used with gcc or clang or when -W3 -Wd4800 is used
with MSVC. This significantly reduces the likelihood of potential
crashes from bogus integer values.
There are some parts of the code that take int when they should take
size_t or an offset. Such places would make qpdf not support files
with more than 2^31 of something that usually wouldn't be so large. In
the event that such a file shows up and is valid, at least qpdf would
raise an error in the right spot so the issue could be legitimately
addressed rather than failing in some weird way because of a silent
overflow condition.
|
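The commit message describes replacing silent narrowing conversions with calls that range-check and raise an error. The same idea in plain C (a hedged sketch, not qpdf's actual helper) looks like this:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Convert a size_t to int, failing loudly instead of silently
 * truncating or wrapping when the value does not fit. */
static int checked_size_to_int(size_t v)
{
    if (v > (size_t)INT_MAX) {
        fprintf(stderr, "integer conversion out of range: %zu\n", v);
        abort();
    }
    return (int)v;
}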
proto_register_usb_audio(void)
{
static hf_register_info hf[] = {
{ &hf_midi_cable_number,
{ "Cable Number", "usbaudio.midi.cable_number", FT_UINT8, BASE_HEX,
NULL, 0xF0, NULL, HFILL }},
{ &hf_midi_code_index,
{ "Code Index", "usbaudio.midi.code_index", FT_UINT8, BASE_HEX,
VALS(code_index_vals), 0x0F, NULL, HFILL }},
{ &hf_midi_event,
{ "MIDI Event", "usbaudio.midi.event", FT_UINT24, BASE_HEX,
NULL, 0, NULL, HFILL }},
{ &hf_ac_if_desc_subtype,
{ "Subtype", "usbaudio.ac_if_subtype", FT_UINT8, BASE_HEX|BASE_EXT_STRING,
&ac_subtype_vals_ext, 0x00, "bDescriptorSubtype", HFILL }},
{ &hf_ac_if_hdr_ver,
{ "Version", "usbaudio.ac_if_hdr.bcdADC",
FT_DOUBLE, BASE_NONE, NULL, 0, "bcdADC", HFILL }},
{ &hf_ac_if_hdr_total_len,
{ "Total length", "usbaudio.ac_if_hdr.wTotalLength",
FT_UINT16, BASE_DEC, NULL, 0x00, "wTotalLength", HFILL }},
{ &hf_ac_if_hdr_bInCollection,
{ "Total number of interfaces", "usbaudio.ac_if_hdr.bInCollection",
FT_UINT8, BASE_DEC, NULL, 0x00, "bInCollection", HFILL }},
{ &hf_ac_if_hdr_if_num,
{ "Interface number", "usbaudio.ac_if_hdr.baInterfaceNr",
FT_UINT8, BASE_DEC, NULL, 0x00, "baInterfaceNr", HFILL }},
{ &hf_ac_if_input_terminalid,
{ "Terminal ID", "usbaudio.ac_if_input.bTerminalID",
FT_UINT8, BASE_DEC, NULL, 0x00, "bTerminalID", HFILL }},
{ &hf_ac_if_input_terminaltype,
{ "Terminal Type", "usbaudio.ac_if_input.wTerminalType", FT_UINT16,
BASE_HEX|BASE_EXT_STRING, &terminal_types_vals_ext, 0x00, "wTerminalType", HFILL }},
{ &hf_ac_if_input_assocterminal,
{ "Assoc Terminal", "usbaudio.ac_if_input.bAssocTerminal",
FT_UINT8, BASE_DEC, NULL, 0x00, "bAssocTerminal", HFILL }},
{ &hf_ac_if_input_nrchannels,
{ "Number Channels", "usbaudio.ac_if_input.bNrChannels",
FT_UINT8, BASE_DEC, NULL, 0x00, "bNrChannels", HFILL }},
{ &hf_ac_if_input_channelconfig,
{ "Channel Config", "usbaudio.ac_if_input.wChannelConfig",
FT_UINT16, BASE_HEX, NULL, 0x00, "wChannelConfig", HFILL }},
{ &hf_ac_if_input_channelnames,
{ "Channel Names", "usbaudio.ac_if_input.iChannelNames",
FT_UINT8, BASE_DEC, NULL, 0x00, "iChannelNames", HFILL }},
{ &hf_ac_if_input_terminal,
{ "Terminal", "usbaudio.ac_if_input.iTerminal",
FT_UINT8, BASE_DEC, NULL, 0x00, "iTerminal", HFILL }},
{ &hf_ac_if_output_terminalid,
{ "Terminal ID", "usbaudio.ac_if_output.bTerminalID",
FT_UINT8, BASE_DEC, NULL, 0x00, "bTerminalID", HFILL }},
{ &hf_ac_if_output_terminaltype,
{ "Terminal Type", "usbaudio.ac_if_output.wTerminalType", FT_UINT16,
BASE_HEX|BASE_EXT_STRING, &terminal_types_vals_ext, 0x00, "wTerminalType", HFILL }},
{ &hf_ac_if_output_assocterminal,
{ "Assoc Terminal", "usbaudio.ac_if_output.bAssocTerminal",
FT_UINT8, BASE_DEC, NULL, 0x00, "bAssocTerminal", HFILL }},
{ &hf_ac_if_output_sourceid,
{ "Source ID", "usbaudio.ac_if_output.bSourceID",
FT_UINT8, BASE_DEC, NULL, 0x00, "bSourceID", HFILL }},
{ &hf_ac_if_output_terminal,
{ "Terminal", "usbaudio.ac_if_output.iTerminal",
FT_UINT8, BASE_DEC, NULL, 0x00, "iTerminal", HFILL }},
{ &hf_ac_if_fu_unitid,
{ "Unit ID", "usbaudio.ac_if_fu.bUnitID",
FT_UINT8, BASE_DEC, NULL, 0x00, "bUnitID", HFILL }},
{ &hf_ac_if_fu_sourceid,
{ "Source ID", "usbaudio.ac_if_fu.bSourceID",
FT_UINT8, BASE_DEC, NULL, 0x00, "bSourceID", HFILL }},
{ &hf_ac_if_fu_controlsize,
{ "Control Size", "usbaudio.ac_if_fu.bControlSize",
FT_UINT8, BASE_DEC, NULL, 0x00, "bControlSize", HFILL }},
{ &hf_ac_if_fu_controls,
{ "Controls", "usbaudio.ac_if_fu.bmaControls",
FT_BYTES, BASE_NONE, NULL, 0x00, "bmaControls", HFILL }},
{ &hf_ac_if_fu_control,
{ "Control", "usbaudio.ac_if_fu.bmaControl",
FT_UINT8, BASE_HEX, NULL, 0x00, "bmaControls", HFILL }},
{ &hf_ac_if_fu_controls_d0,
{ "Mute", "usbaudio.ac_if_fu.bmaControls.d0",
FT_BOOLEAN, 8, NULL, 0x01, NULL, HFILL }},
{ &hf_ac_if_fu_controls_d1,
{ "Volume", "usbaudio.ac_if_fu.bmaControls.d1",
FT_BOOLEAN, 8, NULL, 0x02, NULL, HFILL }},
{ &hf_ac_if_fu_controls_d2,
{ "Bass", "usbaudio.ac_if_fu.bmaControls.d2",
FT_BOOLEAN, 8, NULL, 0x04, NULL, HFILL }},
{ &hf_ac_if_fu_controls_d3,
{ "Mid", "usbaudio.ac_if_fu.bmaControls.d3",
FT_BOOLEAN, 8, NULL, 0x08, NULL, HFILL }},
{ &hf_ac_if_fu_controls_d4,
{ "Treble", "usbaudio.ac_if_fu.bmaControls.d4",
FT_BOOLEAN, 8, NULL, 0x10, NULL, HFILL }},
{ &hf_ac_if_fu_controls_d5,
{ "Graphic Equalizer", "usbaudio.ac_if_fu.bmaControls.d5",
FT_BOOLEAN, 8, NULL, 0x20, NULL, HFILL }},
{ &hf_ac_if_fu_controls_d6,
{ "Automatic Gain", "usbaudio.ac_if_fu.bmaControls.d6",
FT_BOOLEAN, 8, NULL, 0x40, NULL, HFILL }},
{ &hf_ac_if_fu_controls_d7,
{ "Delay", "usbaudio.ac_if_fu.bmaControls.d7",
FT_BOOLEAN, 8, NULL, 0x80, NULL, HFILL }},
{ &hf_ac_if_fu_controls_d8,
{ "Bass Boost", "usbaudio.ac_if_fu.bmaControls.d8",
FT_BOOLEAN, 8, NULL, 0x01, NULL, HFILL }},
{ &hf_ac_if_fu_controls_d9,
{ "Loudness", "usbaudio.ac_if_fu.bmaControls.d9",
FT_BOOLEAN, 8, NULL, 0x02, NULL, HFILL }},
{ &hf_ac_if_fu_controls_rsv,
{ "Reserved", "usbaudio.ac_if_fu.bmaControls.rsv",
FT_UINT8, BASE_HEX, NULL, 0xFC, "Must be zero", HFILL }},
{ &hf_ac_if_fu_ifeature,
{ "Feature", "usbaudio.ac_if_fu.iFeature",
FT_UINT8, BASE_DEC, NULL, 0x00, "iFeature", HFILL }},
{ &hf_as_if_desc_subtype,
{ "Subtype", "usbaudio.as_if_subtype", FT_UINT8, BASE_HEX|BASE_EXT_STRING,
&as_subtype_vals_ext, 0x00, "bDescriptorSubtype", HFILL }},
{ &hf_as_if_gen_term_id,
{ "Terminal ID", "usbaudio.as_if_gen.bTerminalLink",
FT_UINT8, BASE_DEC, NULL, 0x00, "bTerminalLink", HFILL }},
{ &hf_as_if_gen_delay,
{ "Interface delay in frames", "usbaudio.as_if_gen.bDelay",
FT_UINT8, BASE_DEC, NULL, 0x00, "bDelay", HFILL }},
{ &hf_as_if_gen_format,
{ "Format", "usbaudio.as_if_gen.wFormatTag",
FT_UINT16, BASE_HEX, NULL, 0x00, "wFormatTag", HFILL }},
{ &hf_as_if_ft_formattype,
{ "FormatType", "usbaudio.as_if_ft.bFormatType",
FT_UINT8, BASE_DEC, NULL, 0x00, "wFormatType", HFILL }},
{ &hf_as_if_ft_maxbitrate,
{ "Max Bit Rate", "usbaudio.as_if_ft.wMaxBitRate",
FT_UINT16, BASE_DEC, NULL, 0x00, "wMaxBitRate", HFILL }},
{ &hf_as_if_ft_nrchannels,
{ "Number Channels", "usbaudio.as_if_ft.bNrChannels",
FT_UINT8, BASE_DEC, NULL, 0x00, "bNrChannels", HFILL }},
{ &hf_as_if_ft_subframesize,
{ "Subframe Size", "usbaudio.as_if_ft.bSubframeSize",
FT_UINT8, BASE_DEC, NULL, 0x00, "bSubframeSize", HFILL }},
{ &hf_as_if_ft_bitresolution,
{ "Bit Resolution", "usbaudio.as_if_ft.bBitResolution",
FT_UINT8, BASE_DEC, NULL, 0x00, "bBitResolution", HFILL }},
{ &hf_as_if_ft_samplesperframe,
{ "Samples Per Frame", "usbaudio.as_if_ft.wSamplesPerFrame",
FT_UINT16, BASE_DEC, NULL, 0x00, "wSamplesPerFrame", HFILL }},
{ &hf_as_if_ft_samfreqtype,
{ "Samples Frequence Type", "usbaudio.as_if_ft.bSamFreqType",
FT_UINT8, BASE_DEC, NULL, 0x00, "bSamFreqType", HFILL }},
{ &hf_as_if_ft_lowersamfreq,
{ "Lower Samples Frequence", "usbaudio.as_if_ft.tLowerSamFreq",
FT_UINT24, BASE_DEC, NULL, 0x00, "tLowerSamFreq", HFILL }},
{ &hf_as_if_ft_uppersamfreq,
{ "Upper Samples Frequence", "usbaudio.as_if_ft.tUpperSamFreq",
FT_UINT24, BASE_DEC, NULL, 0x00, "tUpperSamFreq", HFILL }},
{ &hf_as_if_ft_samfreq,
{ "Samples Frequence", "usbaudio.as_if_ft.tSamFreq",
FT_UINT24, BASE_DEC, NULL, 0x00, "tSamFreq", HFILL }},
{ &hf_as_ep_desc_subtype,
{ "Subtype", "usbaudio.as_ep_subtype", FT_UINT8,
BASE_HEX, NULL, 0x00, "bDescriptorSubtype", HFILL }},
{ &hf_sysex_msg_fragments,
{ "Message fragments", "usbaudio.sysex.fragments",
FT_NONE, BASE_NONE, NULL, 0x00, NULL, HFILL }},
{ &hf_sysex_msg_fragment,
{ "Message fragment", "usbaudio.sysex.fragment",
FT_FRAMENUM, BASE_NONE, NULL, 0x00, NULL, HFILL }},
{ &hf_sysex_msg_fragment_overlap,
{ "Message fragment overlap", "usbaudio.sysex.fragment.overlap",
FT_BOOLEAN, 0, NULL, 0x00, NULL, HFILL }},
{ &hf_sysex_msg_fragment_overlap_conflicts,
{ "Message fragment overlapping with conflicting data",
"usbaudio.sysex.fragment.overlap.conflicts",
FT_BOOLEAN, 0, NULL, 0x00, NULL, HFILL }},
{ &hf_sysex_msg_fragment_multiple_tails,
{ "Message has multiple tail fragments",
"usbaudio.sysex.fragment.multiple_tails",
FT_BOOLEAN, 0, NULL, 0x00, NULL, HFILL }},
{ &hf_sysex_msg_fragment_too_long_fragment,
{ "Message fragment too long", "usbaudio.sysex.fragment.too_long_fragment",
FT_BOOLEAN, 0, NULL, 0x00, NULL, HFILL }},
{ &hf_sysex_msg_fragment_error,
{ "Message defragmentation error", "usbaudio.sysex.fragment.error",
FT_FRAMENUM, BASE_NONE, NULL, 0x00, NULL, HFILL }},
{ &hf_sysex_msg_fragment_count,
{ "Message fragment count", "usbaudio.sysex.fragment.count",
FT_UINT32, BASE_DEC, NULL, 0x00, NULL, HFILL }},
{ &hf_sysex_msg_reassembled_in,
{ "Reassembled in", "usbaudio.sysex.reassembled.in",
FT_FRAMENUM, BASE_NONE, NULL, 0x00, NULL, HFILL }},
{ &hf_sysex_msg_reassembled_length,
{ "Reassembled length", "usbaudio.sysex.reassembled.length",
FT_UINT32, BASE_DEC, NULL, 0x00, NULL, HFILL }},
{ &hf_sysex_msg_reassembled_data,
{ "Reassembled data", "usbaudio.sysex.reassembled.data",
FT_BYTES, BASE_NONE, NULL, 0x00, NULL, HFILL }}
};
static gint *usb_audio_subtrees[] = {
&ett_usb_audio,
&ett_usb_audio_desc,
&ett_sysex_msg_fragment,
&ett_sysex_msg_fragments,
&ett_ac_if_fu_controls,
&ett_ac_if_fu_controls0,
&ett_ac_if_fu_controls1
};
static ei_register_info ei[] = {
{ &ei_usb_audio_undecoded, { "usbaudio.undecoded", PI_UNDECODED, PI_WARN, "Not dissected yet (report to wireshark.org)", EXPFILL }},
};
expert_module_t *expert_usb_audio;
proto_usb_audio = proto_register_protocol("USB Audio", "USBAUDIO", "usbaudio");
proto_register_field_array(proto_usb_audio, hf, array_length(hf));
proto_register_subtree_array(usb_audio_subtrees, array_length(usb_audio_subtrees));
expert_usb_audio = expert_register_protocol(proto_usb_audio);
expert_register_field_array(expert_usb_audio, ei, array_length(ei));
register_init_routine(&midi_data_reassemble_init);
register_cleanup_routine(&midi_data_reassemble_cleanup);
register_dissector("usbaudio", dissect_usb_audio_bulk, proto_usb_audio);
}
| 0 |
[
"CWE-476"
] |
wireshark
|
2cb5985bf47bdc8bea78d28483ed224abdd33dc6
| 198,755,189,563,088,550,000,000,000,000,000,000,000 | 224 |
Make class "type" for USB conversations.
USB dissectors can't assume that only their own class type has been attached to the conversation. Add an explicit check that the expected class type matches the dissector, and stop dissection if it doesn't.
Bug: 12356
Change-Id: Ib23973a4ebd0fbb51952ffc118daf95e3389a209
Reviewed-on: https://code.wireshark.org/review/15212
Petri-Dish: Michael Mann <[email protected]>
Reviewed-by: Martin Kaiser <[email protected]>
Petri-Dish: Martin Kaiser <[email protected]>
Tested-by: Petri Dish Buildbot <[email protected]>
Reviewed-by: Michael Mann <[email protected]>
|
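A hedged sketch of the guard the commit describes; the structure, field, and constant below are assumptions for illustration, not Wireshark's exact API. The point is simply that a class dissector verifies the conversation data really belongs to its class before interpreting it.

#include <stdbool.h>
#include <stddef.h>

#define IF_CLASS_AUDIO 0x01          /* assumed constant */

struct usb_conv_data {               /* hypothetical per-conversation data */
    int   interface_class;
    void *class_data;                /* only meaningful for that class */
};

static bool dissect_usb_audio_checked(const struct usb_conv_data *conv)
{
    /* Never assume class_data was stored by this dissector. */
    if (conv == NULL || conv->interface_class != IF_CLASS_AUDIO)
        return false;                /* data belongs to another class: stop */
    /* ... safe to treat conv->class_data as audio state here ... */
    return true;
}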
BSONObj spec() {
return BSON("$and" << BSON_ARRAY(1 << BSON("$and" << BSON_ARRAY(1)) << "$a"
<< "$b"));
}
| 0 |
[
"CWE-835"
] |
mongo
|
0a076417d1d7fba3632b73349a1fd29a83e68816
| 11,040,504,985,571,892,000,000,000,000,000,000,000 | 4 |
SERVER-38070 fix infinite loop in agg expression
|
Status Peek(std::size_t index, Tuple* tuple) {
std::unique_lock<std::mutex> lock(mu_);
// Wait if the requested index is not available
non_empty_cond_var_.wait(
lock, [index, this]() { return index < this->buf_.size(); });
// Place tensors in the output tuple
for (const auto& tensor : buf_[index]) {
tuple->push_back(tensor);
}
return Status::OK();
}
| 0 |
[
"CWE-20",
"CWE-703"
] |
tensorflow
|
cebe3c45d76357d201c65bdbbf0dbe6e8a63bbdb
| 257,686,118,535,212,000,000,000,000,000,000,000,000 | 14 |
Fix tf.raw_ops.StagePeek vulnerability with invalid `index`.
Check that input is actually a scalar before treating it as such.
PiperOrigin-RevId: 445524908
|
psutil_disk_io_counters(PyObject *self, PyObject *args) {
kstat_ctl_t *kc;
kstat_t *ksp;
kstat_io_t kio;
PyObject *py_retdict = PyDict_New();
PyObject *py_disk_info = NULL;
if (py_retdict == NULL)
return NULL;
kc = kstat_open();
if (kc == NULL) {
PyErr_SetFromErrno(PyExc_OSError);;
goto error;
}
ksp = kc->kc_chain;
while (ksp != NULL) {
if (ksp->ks_type == KSTAT_TYPE_IO) {
if (strcmp(ksp->ks_class, "disk") == 0) {
if (kstat_read(kc, ksp, &kio) == -1) {
kstat_close(kc);
return PyErr_SetFromErrno(PyExc_OSError);;
}
py_disk_info = Py_BuildValue(
"(IIKKLL)",
kio.reads,
kio.writes,
kio.nread,
kio.nwritten,
kio.rtime / 1000 / 1000, // from nano to milli secs
kio.wtime / 1000 / 1000 // from nano to milli secs
);
if (!py_disk_info)
goto error;
if (PyDict_SetItemString(py_retdict, ksp->ks_name,
py_disk_info))
goto error;
Py_CLEAR(py_disk_info);
}
}
ksp = ksp->ks_next;
}
kstat_close(kc);
return py_retdict;
error:
Py_XDECREF(py_disk_info);
Py_DECREF(py_retdict);
if (kc != NULL)
kstat_close(kc);
return NULL;
}
| 0 |
[
"CWE-415"
] |
psutil
|
7d512c8e4442a896d56505be3e78f1156f443465
| 240,673,103,024,276,720,000,000,000,000,000,000,000 | 52 |
Use Py_CLEAR instead of Py_DECREF to also set the variable to NULL (#1616)
These files contain loops that convert system data into python objects
and during the process they create objects and decrement their
refcounts after they have been added to the resulting list.
However, in case of errors during the creation of those python objects,
the refcount of previously allocated objects is dropped again with
Py_XDECREF, which should be a no-op when the parameter is NULL. Even
so, in most of these loops the variables pointing to the objects are
never set to NULL, even after Py_DECREF is called at the end of the loop
iteration. This means, after the first iteration, if an error occurs
those python objects will get their refcount dropped two times,
resulting in a possible double-free.
|
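A minimal stand-alone example of the loop pattern the commit message describes, using the CPython C API (simplified, not psutil's actual loop): Py_CLEAR both drops the reference and sets the variable to NULL, so a later error path cannot decref the same object twice.

#include <Python.h>

static PyObject *build_list(const long *vals, Py_ssize_t n)
{
    PyObject *list = PyList_New(0);
    PyObject *item = NULL;
    Py_ssize_t i;

    if (list == NULL)
        return NULL;
    for (i = 0; i < n; i++) {
        item = PyLong_FromLong(vals[i]);
        if (item == NULL)
            goto error;
        if (PyList_Append(list, item) != 0)
            goto error;
        Py_CLEAR(item);    /* drops the reference AND sets item = NULL */
    }
    return list;

error:
    Py_XDECREF(item);      /* safe: NULL unless the item is still owned */
    Py_DECREF(list);
    return NULL;
}

With a plain Py_DECREF in place of Py_CLEAR, a failure on a later iteration would reach the error path with `item` still pointing at an already-released object, producing exactly the double-free described above.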
static int jpc_dec_tileinit(jpc_dec_t *dec, jpc_dec_tile_t *tile)
{
jpc_dec_tcomp_t *tcomp;
int compno;
int rlvlno;
jpc_dec_rlvl_t *rlvl;
jpc_dec_band_t *band;
jpc_dec_prc_t *prc;
int bndno;
jpc_tsfb_band_t *bnd;
int bandno;
jpc_dec_ccp_t *ccp;
int prccnt;
jpc_dec_cblk_t *cblk;
int cblkcnt;
uint_fast32_t tlprcxstart;
uint_fast32_t tlprcystart;
uint_fast32_t brprcxend;
uint_fast32_t brprcyend;
uint_fast32_t tlcbgxstart;
uint_fast32_t tlcbgystart;
uint_fast32_t brcbgxend;
uint_fast32_t brcbgyend;
uint_fast32_t cbgxstart;
uint_fast32_t cbgystart;
uint_fast32_t cbgxend;
uint_fast32_t cbgyend;
uint_fast32_t tlcblkxstart;
uint_fast32_t tlcblkystart;
uint_fast32_t brcblkxend;
uint_fast32_t brcblkyend;
uint_fast32_t cblkxstart;
uint_fast32_t cblkystart;
uint_fast32_t cblkxend;
uint_fast32_t cblkyend;
uint_fast32_t tmpxstart;
uint_fast32_t tmpystart;
uint_fast32_t tmpxend;
uint_fast32_t tmpyend;
jpc_dec_cp_t *cp;
jpc_tsfb_band_t bnds[64];
jpc_pchg_t *pchg;
int pchgno;
jpc_dec_cmpt_t *cmpt;
cp = tile->cp;
tile->realmode = 0;
if (cp->mctid == JPC_MCT_ICT) {
tile->realmode = 1;
}
for (compno = 0, tcomp = tile->tcomps, cmpt = dec->cmpts; compno <
dec->numcomps; ++compno, ++tcomp, ++cmpt) {
ccp = &tile->cp->ccps[compno];
if (ccp->qmfbid == JPC_COX_INS) {
tile->realmode = 1;
}
tcomp->numrlvls = ccp->numrlvls;
if (!(tcomp->rlvls = jas_alloc2(tcomp->numrlvls,
sizeof(jpc_dec_rlvl_t)))) {
return -1;
}
if (!(tcomp->data = jas_seq2d_create(JPC_CEILDIV(tile->xstart,
cmpt->hstep), JPC_CEILDIV(tile->ystart, cmpt->vstep),
JPC_CEILDIV(tile->xend, cmpt->hstep), JPC_CEILDIV(tile->yend,
cmpt->vstep)))) {
return -1;
}
if (!(tcomp->tsfb = jpc_cod_gettsfb(ccp->qmfbid,
tcomp->numrlvls - 1))) {
return -1;
}
{
jpc_tsfb_getbands(tcomp->tsfb, jas_seq2d_xstart(tcomp->data),
jas_seq2d_ystart(tcomp->data), jas_seq2d_xend(tcomp->data),
jas_seq2d_yend(tcomp->data), bnds);
}
for (rlvlno = 0, rlvl = tcomp->rlvls; rlvlno < tcomp->numrlvls;
++rlvlno, ++rlvl) {
rlvl->bands = 0;
rlvl->xstart = JPC_CEILDIVPOW2(tcomp->xstart,
tcomp->numrlvls - 1 - rlvlno);
rlvl->ystart = JPC_CEILDIVPOW2(tcomp->ystart,
tcomp->numrlvls - 1 - rlvlno);
rlvl->xend = JPC_CEILDIVPOW2(tcomp->xend,
tcomp->numrlvls - 1 - rlvlno);
rlvl->yend = JPC_CEILDIVPOW2(tcomp->yend,
tcomp->numrlvls - 1 - rlvlno);
rlvl->prcwidthexpn = ccp->prcwidthexpns[rlvlno];
rlvl->prcheightexpn = ccp->prcheightexpns[rlvlno];
tlprcxstart = JPC_FLOORDIVPOW2(rlvl->xstart,
rlvl->prcwidthexpn) << rlvl->prcwidthexpn;
tlprcystart = JPC_FLOORDIVPOW2(rlvl->ystart,
rlvl->prcheightexpn) << rlvl->prcheightexpn;
brprcxend = JPC_CEILDIVPOW2(rlvl->xend,
rlvl->prcwidthexpn) << rlvl->prcwidthexpn;
brprcyend = JPC_CEILDIVPOW2(rlvl->yend,
rlvl->prcheightexpn) << rlvl->prcheightexpn;
rlvl->numhprcs = (brprcxend - tlprcxstart) >>
rlvl->prcwidthexpn;
rlvl->numvprcs = (brprcyend - tlprcystart) >>
rlvl->prcheightexpn;
rlvl->numprcs = rlvl->numhprcs * rlvl->numvprcs;
if (rlvl->xstart >= rlvl->xend || rlvl->ystart >= rlvl->yend) {
rlvl->bands = 0;
rlvl->numprcs = 0;
rlvl->numhprcs = 0;
rlvl->numvprcs = 0;
continue;
}
if (!rlvlno) {
tlcbgxstart = tlprcxstart;
tlcbgystart = tlprcystart;
brcbgxend = brprcxend;
brcbgyend = brprcyend;
rlvl->cbgwidthexpn = rlvl->prcwidthexpn;
rlvl->cbgheightexpn = rlvl->prcheightexpn;
} else {
tlcbgxstart = JPC_CEILDIVPOW2(tlprcxstart, 1);
tlcbgystart = JPC_CEILDIVPOW2(tlprcystart, 1);
brcbgxend = JPC_CEILDIVPOW2(brprcxend, 1);
brcbgyend = JPC_CEILDIVPOW2(brprcyend, 1);
rlvl->cbgwidthexpn = rlvl->prcwidthexpn - 1;
rlvl->cbgheightexpn = rlvl->prcheightexpn - 1;
}
rlvl->cblkwidthexpn = JAS_MIN(ccp->cblkwidthexpn,
rlvl->cbgwidthexpn);
rlvl->cblkheightexpn = JAS_MIN(ccp->cblkheightexpn,
rlvl->cbgheightexpn);
rlvl->numbands = (!rlvlno) ? 1 : 3;
if (!(rlvl->bands = jas_alloc2(rlvl->numbands,
sizeof(jpc_dec_band_t)))) {
return -1;
}
for (bandno = 0, band = rlvl->bands;
bandno < rlvl->numbands; ++bandno, ++band) {
bndno = (!rlvlno) ? 0 : (3 * (rlvlno - 1) +
bandno + 1);
bnd = &bnds[bndno];
band->orient = bnd->orient;
band->stepsize = ccp->stepsizes[bndno];
band->analgain = JPC_NOMINALGAIN(ccp->qmfbid,
tcomp->numrlvls - 1, rlvlno, band->orient);
band->absstepsize = jpc_calcabsstepsize(band->stepsize,
cmpt->prec + band->analgain);
band->numbps = ccp->numguardbits +
JPC_QCX_GETEXPN(band->stepsize) - 1;
band->roishift = (ccp->roishift + band->numbps >= JPC_PREC) ?
(JPC_PREC - 1 - band->numbps) : ccp->roishift;
band->data = 0;
band->prcs = 0;
if (bnd->xstart == bnd->xend || bnd->ystart == bnd->yend) {
continue;
}
if (!(band->data = jas_seq2d_create(0, 0, 0, 0))) {
return -1;
}
jas_seq2d_bindsub(band->data, tcomp->data, bnd->locxstart,
bnd->locystart, bnd->locxend, bnd->locyend);
jas_seq2d_setshift(band->data, bnd->xstart, bnd->ystart);
assert(rlvl->numprcs);
if (!(band->prcs = jas_alloc2(rlvl->numprcs,
sizeof(jpc_dec_prc_t)))) {
return -1;
}
/************************************************/
cbgxstart = tlcbgxstart;
cbgystart = tlcbgystart;
for (prccnt = rlvl->numprcs, prc = band->prcs;
prccnt > 0; --prccnt, ++prc) {
cbgxend = cbgxstart + (1 << rlvl->cbgwidthexpn);
cbgyend = cbgystart + (1 << rlvl->cbgheightexpn);
prc->xstart = JAS_MAX(cbgxstart, JAS_CAST(uint_fast32_t,
jas_seq2d_xstart(band->data)));
prc->ystart = JAS_MAX(cbgystart, JAS_CAST(uint_fast32_t,
jas_seq2d_ystart(band->data)));
prc->xend = JAS_MIN(cbgxend, JAS_CAST(uint_fast32_t,
jas_seq2d_xend(band->data)));
prc->yend = JAS_MIN(cbgyend, JAS_CAST(uint_fast32_t,
jas_seq2d_yend(band->data)));
if (prc->xend > prc->xstart && prc->yend > prc->ystart) {
tlcblkxstart = JPC_FLOORDIVPOW2(prc->xstart,
rlvl->cblkwidthexpn) << rlvl->cblkwidthexpn;
tlcblkystart = JPC_FLOORDIVPOW2(prc->ystart,
rlvl->cblkheightexpn) << rlvl->cblkheightexpn;
brcblkxend = JPC_CEILDIVPOW2(prc->xend,
rlvl->cblkwidthexpn) << rlvl->cblkwidthexpn;
brcblkyend = JPC_CEILDIVPOW2(prc->yend,
rlvl->cblkheightexpn) << rlvl->cblkheightexpn;
prc->numhcblks = (brcblkxend - tlcblkxstart) >>
rlvl->cblkwidthexpn;
prc->numvcblks = (brcblkyend - tlcblkystart) >>
rlvl->cblkheightexpn;
prc->numcblks = prc->numhcblks * prc->numvcblks;
assert(prc->numcblks > 0);
if (!(prc->incltagtree = jpc_tagtree_create(
prc->numhcblks, prc->numvcblks))) {
return -1;
}
if (!(prc->numimsbstagtree = jpc_tagtree_create(
prc->numhcblks, prc->numvcblks))) {
return -1;
}
if (!(prc->cblks = jas_alloc2(prc->numcblks,
sizeof(jpc_dec_cblk_t)))) {
return -1;
}
cblkxstart = cbgxstart;
cblkystart = cbgystart;
for (cblkcnt = prc->numcblks, cblk = prc->cblks; cblkcnt > 0;) {
cblkxend = cblkxstart + (1 << rlvl->cblkwidthexpn);
cblkyend = cblkystart + (1 << rlvl->cblkheightexpn);
tmpxstart = JAS_MAX(cblkxstart, prc->xstart);
tmpystart = JAS_MAX(cblkystart, prc->ystart);
tmpxend = JAS_MIN(cblkxend, prc->xend);
tmpyend = JAS_MIN(cblkyend, prc->yend);
if (tmpxend > tmpxstart && tmpyend > tmpystart) {
cblk->firstpassno = -1;
cblk->mqdec = 0;
cblk->nulldec = 0;
cblk->flags = 0;
cblk->numpasses = 0;
cblk->segs.head = 0;
cblk->segs.tail = 0;
cblk->curseg = 0;
cblk->numimsbs = 0;
cblk->numlenbits = 3;
cblk->flags = 0;
if (!(cblk->data = jas_seq2d_create(0, 0, 0, 0))) {
return -1;
}
jas_seq2d_bindsub(cblk->data, band->data,
tmpxstart, tmpystart, tmpxend, tmpyend);
++cblk;
--cblkcnt;
}
cblkxstart += 1 << rlvl->cblkwidthexpn;
if (cblkxstart >= cbgxend) {
cblkxstart = cbgxstart;
cblkystart += 1 << rlvl->cblkheightexpn;
}
}
} else {
prc->cblks = 0;
prc->incltagtree = 0;
prc->numimsbstagtree = 0;
}
cbgxstart += 1 << rlvl->cbgwidthexpn;
if (cbgxstart >= brcbgxend) {
cbgxstart = tlcbgxstart;
cbgystart += 1 << rlvl->cbgheightexpn;
}
}
/********************************************/
}
}
}
if (!(tile->pi = jpc_dec_pi_create(dec, tile))) {
return -1;
}
for (pchgno = 0; pchgno < jpc_pchglist_numpchgs(tile->cp->pchglist);
++pchgno) {
pchg = jpc_pchg_copy(jpc_pchglist_get(tile->cp->pchglist, pchgno));
assert(pchg);
jpc_pi_addpchg(tile->pi, pchg);
}
jpc_pi_init(tile->pi);
return 0;
}
| 0 |
[
"CWE-476"
] |
jasper
|
69a1439a5381e42b06ec6a06ed2675eb793babee
| 228,370,315,124,015,570,000,000,000,000,000,000,000 | 282 |
The member (pi) in tiles was not properly initialized.
This is now corrected.
Also, each tile is now only cleaned up once.
|
static int _TS_RESP_verify_token(TS_VERIFY_CTX *ctx,
PKCS7 *token, TS_TST_INFO *tst_info)
{
X509 *signer = NULL;
GENERAL_NAME *tsa_name = TS_TST_INFO_get_tsa(tst_info);
X509_ALGOR *md_alg = NULL;
unsigned char *imprint = NULL;
unsigned imprint_len = 0;
int ret = 0;
/* Verify the signature. */
if ((ctx->flags & TS_VFY_SIGNATURE)
&& !TS_RESP_verify_signature(token, ctx->certs, ctx->store,
&signer))
goto err;
/* Check version number of response. */
if ((ctx->flags & TS_VFY_VERSION)
&& TS_TST_INFO_get_version(tst_info) != 1)
{
TSerr(TS_F_TS_VERIFY, TS_R_UNSUPPORTED_VERSION);
goto err;
}
/* Check policies. */
if ((ctx->flags & TS_VFY_POLICY)
&& !TS_check_policy(ctx->policy, tst_info))
goto err;
/* Check message imprints. */
if ((ctx->flags & TS_VFY_IMPRINT)
&& !TS_check_imprints(ctx->md_alg, ctx->imprint, ctx->imprint_len,
tst_info))
goto err;
/* Compute and check message imprints. */
if ((ctx->flags & TS_VFY_DATA)
&& (!TS_compute_imprint(ctx->data, tst_info,
&md_alg, &imprint, &imprint_len)
|| !TS_check_imprints(md_alg, imprint, imprint_len, tst_info)))
goto err;
/* Check nonces. */
if ((ctx->flags & TS_VFY_NONCE)
&& !TS_check_nonces(ctx->nonce, tst_info))
goto err;
/* Check whether TSA name and signer certificate match. */
if ((ctx->flags & TS_VFY_SIGNER)
&& tsa_name && !TS_check_signer_name(tsa_name, signer))
{
TSerr(TS_F_TS_RESP_VERIFY_TOKEN, TS_R_TSA_NAME_MISMATCH);
goto err;
}
/* Check whether the TSA is the expected one. */
if ((ctx->flags & TS_VFY_TSA_NAME)
&& !TS_check_signer_name(ctx->tsa_name, signer))
{
TSerr(TS_F_TS_RESP_VERIFY_TOKEN, TS_R_TSA_UNTRUSTED);
goto err;
}
ret = 1;
err:
X509_free(signer);
X509_ALGOR_free(md_alg);
OPENSSL_free(imprint);
return ret;
}
| 0 |
[] |
openssl
|
c7235be6e36c4bef84594aa3b2f0561db84b63d8
| 156,797,182,484,585,460,000,000,000,000,000,000,000 | 70 |
RFC 3161 compliant time stamp request creation, response generation
and response verification.
Submitted by: Zoltan Glozik <[email protected]>
Reviewed by: Ulf Moeller
|
bool Magick::Image::setColorMetric(const Image &reference_)
{
bool
status;
Image
ref=reference_;
GetPPException;
modifyImage();
status=static_cast<bool>(SetImageColorMetric(image(),ref.constImage(),
exceptionInfo));
ThrowImageException;
return(status);
}
| 0 |
[
"CWE-416"
] |
ImageMagick
|
8c35502217c1879cb8257c617007282eee3fe1cc
| 60,949,699,104,621,780,000,000,000,000,000,000,000 | 15 |
Added missing return to avoid use after free.
|
GF_Err gf_isom_rtp_packet_set_flags(GF_ISOFile *the_file, u32 trackNumber,
u8 PackingBit,
u8 eXtensionBit,
u8 MarkerBit,
u8 disposable_packet,
u8 IsRepeatedPacket)
{
GF_TrackBox *trak;
GF_HintSampleEntryBox *entry;
GF_RTPPacket *pck;
u32 dataRefIndex, ind;
GF_Err e;
trak = gf_isom_get_track_from_file(the_file, trackNumber);
if (!trak || !CheckHintFormat(trak, GF_ISOM_HINT_RTP)) return GF_BAD_PARAM;
e = Media_GetSampleDesc(trak->Media, trak->Media->information->sampleTable->currentEntryIndex, (GF_SampleEntryBox **) &entry, &dataRefIndex);
if (e) return e;
if (!entry->hint_sample) return GF_BAD_PARAM;
ind = gf_list_count(entry->hint_sample->packetTable);
if (!ind) return GF_BAD_PARAM;
pck = (GF_RTPPacket *)gf_list_get(entry->hint_sample->packetTable, ind-1);
pck->P_bit = PackingBit ? 1 : 0;
pck->X_bit = eXtensionBit ? 1 : 0;
pck->M_bit = MarkerBit ? 1 : 0;
pck->B_bit = disposable_packet ? 1 : 0;
pck->R_bit = IsRepeatedPacket ? 1 : 0;
return GF_OK;
}
| 0 |
[
"CWE-787"
] |
gpac
|
86c1566f040b2b84c72afcb6cbd444c5aff56cfe
| 293,453,141,137,838,700,000,000,000,000,000,000,000 | 31 |
fixed #1894
|
add_account_key_values (GoaOAuthProvider *provider,
GVariantBuilder *builder)
{
g_variant_builder_add (builder, "{ss}", "MailEnabled", "true");
g_variant_builder_add (builder, "{ss}", "CalendarEnabled", "true");
g_variant_builder_add (builder, "{ss}", "ContactsEnabled", "true");
g_variant_builder_add (builder, "{ss}", "ChatEnabled", "true");
g_variant_builder_add (builder, "{ss}", "DocumentsEnabled", "true");
}
| 0 |
[
"CWE-310"
] |
gnome-online-accounts
|
ecad8142e9ac519b9fc74b96dcb5531052bbffe1
| 224,004,053,586,748,480,000,000,000,000,000,000,000 | 9 |
Guard against invalid SSL certificates
None of the branded providers (e.g., Google, Facebook and Windows Live)
should ever have an invalid certificate. So set "ssl-strict" on the
SoupSession object being used by GoaWebView.
Providers like ownCloud and Exchange might have to deal with
certificates that are not up to the mark, e.g. self-signed
certificates. For those, show a warning when the account is being
created, and only proceed if the user decides to ignore it. In any
case, save the status of the certificate that was used to create the
account. So an account created with a valid certificate will never
work with an invalid one, and one created with an invalid certificate
will not throw any further warnings.
Fixes: CVE-2013-0240
|
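For reference, enabling strict certificate checking on a libsoup 2.x session is a one-line property set; the snippet below is a hedged sketch, not the GNOME Online Accounts code itself.

#include <libsoup/soup.h>

/* Reject connections whose TLS certificate fails validation.
 * ("ssl-strict" is a SoupSession property in libsoup 2.x.) */
static void make_session_strict(SoupSession *session)
{
    g_object_set(session, "ssl-strict", TRUE, NULL);
}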
static int TIFFWriteDirectoryTagFloatArray(TIFF* tif, uint32* ndir, TIFFDirEntry* dir, uint16 tag, uint32 count, float* value)
{
if (dir==NULL)
{
(*ndir)++;
return(1);
}
return(TIFFWriteDirectoryTagCheckedFloatArray(tif,ndir,dir,tag,count,value));
}
| 0 |
[
"CWE-617"
] |
libtiff
|
de144fd228e4be8aa484c3caf3d814b6fa88c6d9
| 288,925,666,584,003,500,000,000,000,000,000,000,000 | 9 |
TIFFWriteDirectorySec: avoid assertion. Fixes http://bugzilla.maptools.org/show_bug.cgi?id=2795. CVE-2018-10963
|
ves_icall_Type_MakeGenericType (MonoReflectionType *type, MonoArray *type_array)
{
MonoClass *class;
MonoType *geninst, **types;
int i, count;
MONO_ARCH_SAVE_REGS;
count = mono_array_length (type_array);
types = g_new0 (MonoType *, count);
for (i = 0; i < count; i++) {
MonoReflectionType *t = mono_array_get (type_array, gpointer, i);
types [i] = t->type;
}
geninst = mono_reflection_bind_generic_parameters (type, count, types);
g_free (types);
if (!geninst)
return NULL;
class = mono_class_from_mono_type (geninst);
/*we might inflate to the GTD*/
if (class->generic_class && !mono_verifier_class_is_valid_generic_instantiation (class))
mono_raise_exception (mono_get_exception_argument ("method", "Invalid generic arguments"));
return mono_type_get_object (mono_object_domain (type), geninst);
}
| 0 |
[
"CWE-264"
] |
mono
|
035c8587c0d8d307e45f1b7171a0d337bb451f1e
| 180,531,560,871,319,800,000,000,000,000,000,000,000 | 29 |
Allow only primitive types/enums in RuntimeHelpers.InitializeArray ().
|
prepare_repo_download_targets(LrHandle *handle,
LrYumRepo *repo,
LrYumRepoMd *repomd,
LrMetadataTarget *mdtarget,
GSList **targets,
GSList **cbdata_list,
GError **err)
{
char *destdir; /* Destination dir */
destdir = handle->destdir;
assert(destdir);
assert(strlen(destdir));
assert(!err || *err == NULL);
if(handle->cachedir) {
lr_yum_switch_to_zchunk(handle, repomd);
repo->use_zchunk = TRUE;
} else {
g_debug("%s: Cache directory not set, disabling zchunk", __func__);
repo->use_zchunk = FALSE;
}
for (GSList *elem = repomd->records; elem; elem = g_slist_next(elem)) {
int fd;
char *path;
LrDownloadTarget *target;
LrYumRepoMdRecord *record = elem->data;
CbData *cbdata = NULL;
void *user_cbdata = NULL;
LrEndCb endcb = NULL;
if (mdtarget != NULL) {
user_cbdata = mdtarget->cbdata;
endcb = mdtarget->endcb;
}
assert(record);
if (!lr_yum_repomd_record_enabled(handle, record->type, repomd->records))
continue;
char *location_href = record->location_href;
gboolean is_zchunk = FALSE;
#ifdef WITH_ZCHUNK
if (handle->cachedir && record->header_checksum)
is_zchunk = TRUE;
#endif /* WITH_ZCHUNK */
GSList *checksums = NULL;
if (is_zchunk) {
#ifdef WITH_ZCHUNK
if(!prepare_repo_download_zck_target(handle, record, &path, &fd,
&checksums, targets, err))
return FALSE;
#endif /* WITH_ZCHUNK */
} else {
if(!prepare_repo_download_std_target(handle, record, &path, &fd,
&checksums, targets, err))
return FALSE;
}
if (handle->user_cb || handle->hmfcb) {
cbdata = cbdata_new(handle->user_data,
user_cbdata,
handle->user_cb,
handle->hmfcb,
record->type);
*cbdata_list = g_slist_append(*cbdata_list, cbdata);
}
target = lr_downloadtarget_new(handle,
location_href,
record->location_base,
fd,
NULL,
checksums,
0,
0,
NULL,
cbdata,
endcb,
NULL,
NULL,
0,
0,
NULL,
FALSE,
is_zchunk);
if(is_zchunk) {
#ifdef WITH_ZCHUNK
target->expectedsize = record->size_header;
target->zck_header_size = record->size_header;
#endif /* WITH_ZCHUNK */
}
if (mdtarget != NULL)
mdtarget->repomd_records_to_download++;
*targets = g_slist_append(*targets, target);
/* Because path may already exists in repo (while update) */
lr_yum_repo_update(repo, record->type, path);
lr_free(path);
}
return TRUE;
}
| 1 |
[
"CWE-22"
] |
librepo
|
7daea2a2429a54dad68b1de9b37a5f65c5cf2600
| 130,004,621,016,105,400,000,000,000,000,000,000,000 | 108 |
Validate path read from repomd.xml (RhBug:1868639)
= changelog =
msg: Validate path read from repomd.xml
type: security
resolves: https://bugzilla.redhat.com/show_bug.cgi?id=1868639
|
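The changelog entry above is terse; the underlying issue is path traversal, where a crafted repomd.xml can point at absolute or ".."-style locations outside the destination directory. A generic, hedged validation sketch (not librepo's actual check):

#include <stdbool.h>
#include <string.h>

/* Accept only relative paths that cannot escape the destination
 * directory: no absolute paths and no ".." components. */
static bool is_safe_repo_path(const char *href)
{
    const char *p = href;

    if (href == NULL || href[0] == '\0' || href[0] == '/')
        return false;
    while (*p) {
        size_t seg = strcspn(p, "/");
        if (seg == 2 && p[0] == '.' && p[1] == '.')
            return false;            /* ".." component */
        p += seg;
        while (*p == '/')
            p++;                     /* skip separator(s) */
    }
    return true;
}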
PosibErr<void> Config::commit_all(Vector<int> * phs, const char * codeset)
{
committed_ = true;
others_ = first_;
first_ = 0;
insert_point_ = &first_;
Conv to_utf8;
if (codeset)
RET_ON_ERR(to_utf8.setup(*this, codeset, "utf-8", NormTo));
while (others_) {
*insert_point_ = others_;
others_ = others_->next;
(*insert_point_)->next = 0;
RET_ON_ERR_SET(commit(*insert_point_, codeset ? &to_utf8 : 0), int, place_holder);
if (phs && place_holder != -1 && (phs->empty() || phs->back() != place_holder))
phs->push_back(place_holder);
insert_point_ = &((*insert_point_)->next);
}
return no_err;
}
| 0 |
[
"CWE-125"
] |
aspell
|
80fa26c74279fced8d778351cff19d1d8f44fe4e
| 147,936,931,143,084,280,000,000,000,000,000,000,000 | 20 |
Fix various bugs found by OSS-Fuzz.
|
void perf_event_disable(struct perf_event *event)
{
struct perf_event_context *ctx;
ctx = perf_event_ctx_lock(event);
_perf_event_disable(event);
perf_event_ctx_unlock(event, ctx);
}
| 0 |
[
"CWE-284",
"CWE-264"
] |
linux
|
f63a8daa5812afef4f06c962351687e1ff9ccb2b
| 76,376,496,925,004,570,000,000,000,000,000,000,000 | 8 |
perf: Fix event->ctx locking
There have been a few reported issues wrt. the lack of locking around
changing event->ctx. This patch tries to address those.
It avoids the whole rwsem thing; and while it appears to work, please
give it some thought in review.
What I did fail at is sensible runtime checks on the use of
event->ctx, the RCU use makes it very hard.
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Paul E. McKenney <[email protected]>
Cc: Jiri Olsa <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
Cc: Linus Torvalds <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
|
read_rotation(const char *arg)
{
int i;
if (!strcmp(arg, "auto"))
{
return -1;
}
i = fz_atoi(arg);
i = i % 360;
if (i % 90 != 0)
{
fprintf(stderr, "Ignoring invalid rotation\n");
i = 0;
}
return i;
}
| 0 |
[
"CWE-369",
"CWE-22"
] |
mupdf
|
22c47acbd52949421f8c7cb46ea1556827d0fcbf
| 138,582,412,399,876,540,000,000,000,000,000,000,000 | 20 |
Bug 704834: Fix division by zero for zero width pages in muraster.
|
struct crypto_instance *crypto_init(
const unsigned char *private_key,
unsigned int private_key_len,
const char *crypto_cipher_type,
const char *crypto_hash_type,
void (*log_printf_func) (
int level,
int subsys,
const char *function,
const char *file,
int line,
const char *format,
...)__attribute__((format(printf, 6, 7))),
int log_level_security,
int log_level_notice,
int log_level_error,
int log_subsys_id)
{
struct crypto_instance *instance;
instance = malloc(sizeof(*instance));
if (instance == NULL) {
return (NULL);
}
memset(instance, 0, sizeof(struct crypto_instance));
memcpy(instance->private_key, private_key, private_key_len);
instance->private_key_len = private_key_len;
instance->crypto_cipher_type = string_to_crypto_cipher_type(crypto_cipher_type);
instance->crypto_hash_type = string_to_crypto_hash_type(crypto_hash_type);
instance->crypto_header_size = crypto_sec_header_size(crypto_cipher_type, crypto_hash_type);
instance->log_printf_func = log_printf_func;
instance->log_level_security = log_level_security;
instance->log_level_notice = log_level_notice;
instance->log_level_error = log_level_error;
instance->log_subsys_id = log_subsys_id;
if (init_nss(instance, crypto_cipher_type, crypto_hash_type) < 0) {
free(instance);
return(NULL);
}
return (instance);
}
| 0 |
[
"CWE-190"
] |
corosync
|
fc1d5418533c1faf21616b282c2559bed7d361c4
| 197,387,553,211,104,670,000,000,000,000,000,000,000 | 46 |
totemcrypto: Check length of the packet
The packet has to be longer than crypto_config_header plus hash_len;
otherwise unallocated memory is passed into the calculate_nss_hash
function, which may result in a crash.
Signed-off-by: Jan Friesse <[email protected]>
Reviewed-by: Raphael Sanchez Prudencio <[email protected]>
Reviewed-by: Christine Caulfield <[email protected]>
|
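A hedged sketch of the kind of length check the commit describes (the header structure below is a placeholder, not corosync's real layout): verify that the received datagram is at least as long as the fixed header plus the digest before hashing anything behind them.

#include <stdbool.h>
#include <stddef.h>

struct crypto_config_header {   /* placeholder for the real header layout */
    unsigned char salt[16];
};

/* Reject datagrams too short to contain the config header plus the
 * message digest; otherwise hashing would read past the buffer. */
static bool packet_len_ok(size_t msg_len, size_t hash_len)
{
    return msg_len > sizeof(struct crypto_config_header) + hash_len;
}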
spnego_gss_init_sec_context(
OM_uint32 *minor_status,
gss_cred_id_t claimant_cred_handle,
gss_ctx_id_t *context_handle,
gss_name_t target_name,
gss_OID mech_type,
OM_uint32 req_flags,
OM_uint32 time_req,
gss_channel_bindings_t input_chan_bindings,
gss_buffer_t input_token,
gss_OID *actual_mech,
gss_buffer_t output_token,
OM_uint32 *ret_flags,
OM_uint32 *time_rec)
{
send_token_flag send_token = NO_TOKEN_SEND;
OM_uint32 tmpmin, ret, negState;
gss_buffer_t mechtok_in, mechListMIC_in, mechListMIC_out;
gss_buffer_desc mechtok_out = GSS_C_EMPTY_BUFFER;
spnego_gss_cred_id_t spcred = NULL;
spnego_gss_ctx_id_t spnego_ctx = NULL;
dsyslog("Entering init_sec_context\n");
mechtok_in = mechListMIC_out = mechListMIC_in = GSS_C_NO_BUFFER;
negState = REJECT;
/*
* This function works in three steps:
*
* 1. Perform mechanism negotiation.
* 2. Invoke the negotiated or optimistic mech's gss_init_sec_context
* function and examine the results.
* 3. Process or generate MICs if necessary.
*
* The three steps share responsibility for determining when the
* exchange is complete. If the selected mech completed in a previous
* call and no MIC exchange is expected, then step 1 will decide. If
* the selected mech completes in this call and no MIC exchange is
* expected, then step 2 will decide. If a MIC exchange is expected,
* then step 3 will decide. If an error occurs in any step, the
* exchange will be aborted, possibly with an error token.
*
* negState determines the state of the negotiation, and is
* communicated to the acceptor if a continuing token is sent.
* send_token is used to indicate what type of token, if any, should be
* generated.
*/
/* Validate arguments. */
if (minor_status != NULL)
*minor_status = 0;
if (output_token != GSS_C_NO_BUFFER) {
output_token->length = 0;
output_token->value = NULL;
}
if (minor_status == NULL ||
output_token == GSS_C_NO_BUFFER ||
context_handle == NULL)
return GSS_S_CALL_INACCESSIBLE_WRITE;
if (actual_mech != NULL)
*actual_mech = GSS_C_NO_OID;
/* Step 1: perform mechanism negotiation. */
spcred = (spnego_gss_cred_id_t)claimant_cred_handle;
if (*context_handle == GSS_C_NO_CONTEXT) {
ret = init_ctx_new(minor_status, spcred,
context_handle, &send_token);
if (ret != GSS_S_CONTINUE_NEEDED) {
goto cleanup;
}
} else {
ret = init_ctx_cont(minor_status, context_handle,
input_token, &mechtok_in,
&mechListMIC_in, &negState, &send_token);
if (HARD_ERROR(ret)) {
goto cleanup;
}
}
/* Step 2: invoke the selected or optimistic mechanism's
* gss_init_sec_context function, if it didn't complete previously. */
spnego_ctx = (spnego_gss_ctx_id_t)*context_handle;
if (!spnego_ctx->mech_complete) {
ret = init_ctx_call_init(
minor_status, spnego_ctx, spcred,
target_name, req_flags,
time_req, mechtok_in,
actual_mech, &mechtok_out,
ret_flags, time_rec,
&negState, &send_token);
/* Give the mechanism a chance to force a mechlistMIC. */
if (!HARD_ERROR(ret) && mech_requires_mechlistMIC(spnego_ctx))
spnego_ctx->mic_reqd = 1;
}
/* Step 3: process or generate the MIC, if the negotiated mech is
* complete and supports MICs. */
if (!HARD_ERROR(ret) && spnego_ctx->mech_complete &&
(spnego_ctx->ctx_flags & GSS_C_INTEG_FLAG)) {
ret = handle_mic(minor_status,
mechListMIC_in,
(mechtok_out.length != 0),
spnego_ctx, &mechListMIC_out,
&negState, &send_token);
}
cleanup:
if (send_token == INIT_TOKEN_SEND) {
if (make_spnego_tokenInit_msg(spnego_ctx,
0,
mechListMIC_out,
req_flags,
&mechtok_out, send_token,
output_token) < 0) {
ret = GSS_S_FAILURE;
}
} else if (send_token != NO_TOKEN_SEND) {
if (make_spnego_tokenTarg_msg(negState, GSS_C_NO_OID,
&mechtok_out, mechListMIC_out,
send_token,
output_token) < 0) {
ret = GSS_S_FAILURE;
}
}
gss_release_buffer(&tmpmin, &mechtok_out);
if (ret == GSS_S_COMPLETE) {
spnego_ctx->opened = 1;
if (actual_mech != NULL)
*actual_mech = spnego_ctx->actual_mech;
if (ret_flags != NULL)
*ret_flags = spnego_ctx->ctx_flags;
} else if (ret != GSS_S_CONTINUE_NEEDED) {
if (spnego_ctx != NULL) {
gss_delete_sec_context(&tmpmin,
&spnego_ctx->ctx_handle,
GSS_C_NO_BUFFER);
release_spnego_ctx(&spnego_ctx);
}
*context_handle = GSS_C_NO_CONTEXT;
}
if (mechtok_in != GSS_C_NO_BUFFER) {
gss_release_buffer(&tmpmin, mechtok_in);
free(mechtok_in);
}
if (mechListMIC_in != GSS_C_NO_BUFFER) {
gss_release_buffer(&tmpmin, mechListMIC_in);
free(mechListMIC_in);
}
if (mechListMIC_out != GSS_C_NO_BUFFER) {
gss_release_buffer(&tmpmin, mechListMIC_out);
free(mechListMIC_out);
}
return ret;
} /* init_sec_context */
| 0 |
[
"CWE-18",
"CWE-763"
] |
krb5
|
b51b33f2bc5d1497ddf5bd107f791c101695000d
| 181,637,875,626,594,540,000,000,000,000,000,000,000 | 157 |
Fix SPNEGO context aliasing bugs [CVE-2015-2695]
The SPNEGO mechanism currently replaces its context handle with the
mechanism context handle upon establishment, under the assumption that
most GSS functions are only called after context establishment. This
assumption is incorrect, and can lead to aliasing violations for some
programs. Maintain the SPNEGO context structure after context
establishment and refer to it in all GSS methods. Add initiate and
opened flags to the SPNEGO context structure for use in
gss_inquire_context() prior to context establishment.
CVE-2015-2695:
In MIT krb5 1.5 and later, applications which call
gss_inquire_context() on a partially-established SPNEGO context can
cause the GSS-API library to read from a pointer using the wrong type,
generally causing a process crash. This bug may go unnoticed, because
the most common SPNEGO authentication scenario establishes the context
after just one call to gss_accept_sec_context(). Java server
applications using the native JGSS provider are vulnerable to this
bug. A carefully crafted SPNEGO packet might allow the
gss_inquire_context() call to succeed with attacker-determined
results, but applications should not make access control decisions
based on gss_inquire_context() results prior to context establishment.
CVSSv2 Vector: AV:N/AC:M/Au:N/C:N/I:N/A:C/E:POC/RL:OF/RC:C
[[email protected]: several bugfixes, style changes, and edge-case
behavior changes; commit message and CVE description]
ticket: 8244
target_version: 1.14
tags: pullup
|
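To illustrate the aliasing problem in a hedged, simplified form (the structures below are stand-ins, not krb5's): if the negotiation layer replaces its own handle with the inner mechanism's handle on completion, any later call that casts the handle back to the outer type reads memory through the wrong layout. Keeping a stable wrapper with explicit state flags avoids that.

#include <stdbool.h>
#include <stddef.h>

struct inner_ctx;                    /* mechanism-specific, opaque */

/* The wrapper context stays valid for the whole lifetime of the
 * exchange; it records the delegated mechanism handle and its own
 * state instead of being replaced by the inner handle. */
struct nego_ctx {
    struct inner_ctx *mech_ctx;
    bool initiate;                   /* initiator or acceptor side */
    bool opened;                     /* set only when fully established */
};

/* Safe inquiry: callers always hand in a struct nego_ctx, so no cast to
 * the wrong type can occur, even before establishment completes. */
static bool nego_ctx_is_open(const struct nego_ctx *ctx)
{
    return ctx != NULL && ctx->opened;
}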
static void list_md_fn(const EVP_MD *m, const char *from, const char *to, void *arg )
{
size_t len, n;
const char *name, *cp, **seen;
struct hstate *hstate = arg;
EVP_MD_CTX ctx;
u_int digest_len;
u_char digest[EVP_MAX_MD_SIZE];
if (!m)
return; /* Ignore aliases */
name = EVP_MD_name(m);
/* Lowercase names aren't accepted by keytype_from_text in ssl_init.c */
for( cp = name; *cp; cp++ ) {
if( islower(*cp) )
return;
}
len = (cp - name) + 1;
/* There are duplicates. Discard if name has been seen. */
for (seen = hstate->seen; *seen; seen++)
if (!strcmp(*seen, name))
return;
n = (seen - hstate->seen) + 2;
hstate->seen = erealloc(hstate->seen, n * sizeof(*seen));
hstate->seen[n-2] = name;
hstate->seen[n-1] = NULL;
/* Discard MACs that NTP won't accept.
* Keep this consistent with keytype_from_text() in ssl_init.c.
*/
EVP_DigestInit(&ctx, EVP_get_digestbyname(name));
EVP_DigestFinal(&ctx, digest, &digest_len);
if (digest_len > (MAX_MAC_LEN - sizeof(keyid_t)))
return;
if (hstate->list != NULL)
len += strlen(hstate->list);
len += (hstate->idx >= K_PER_LINE)? strlen(K_NL_PFX_STR): strlen(K_DELIM_STR);
if (hstate->list == NULL) {
hstate->list = (char *)emalloc(len);
hstate->list[0] = '\0';
} else
hstate->list = (char *)erealloc(hstate->list, len);
sprintf(hstate->list + strlen(hstate->list), "%s%s",
((hstate->idx >= K_PER_LINE)? K_NL_PFX_STR : K_DELIM_STR),
name);
if (hstate->idx >= K_PER_LINE)
hstate->idx = 1;
else
hstate->idx++;
}
| 0 |
[
"CWE-20"
] |
ntp
|
07a5b8141e354a998a52994c3c9cd547927e56ce
| 274,680,736,833,404,500,000,000,000,000,000,000,000 | 59 |
[TALOS-CAN-0063] avoid buffer overrun in ntpq
|