| func (string, 0-484k chars) | target (int64, 0-1) | cwe (sequence, 0-4 entries) | project (string, 799 classes) | commit_id (string, 40 chars) | hash (float64) | size (int64, 1-24k) | message (string, 0-13.3k chars) |
|---|---|---|---|---|---|---|---|
static void encode_lookup(struct xdr_stream *xdr, const struct qstr *name, struct compound_hdr *hdr)
{
int len = name->len;
__be32 *p;
p = reserve_space(xdr, 8 + len);
*p++ = cpu_to_be32(OP_LOOKUP);
xdr_encode_opaque(p, name->name, len);
hdr->nops++;
hdr->replen += decode_lookup_maxsz;
} | 0 | [
"CWE-703",
"CWE-189"
] | linux | bf118a342f10dafe44b14451a1392c3254629a1f | 324,902,156,237,312,300,000,000,000,000,000,000,000 | 11 | NFSv4: include bitmap in nfsv4 get acl data
The NFSv4 bitmap size is unbounded: a server can return an arbitrarily
sized bitmap in an FATTR4_WORD0_ACL request. Instead of using
nfs4_fattr_bitmap_maxsz as a guess at the maximum bitmask a server may return,
include the bitmap (xdr length plus bitmasks) and the acl data
xdr length in the (cached) acl page data.
This is a general solution to commit e5012d1f "NFSv4.1: update
nfs4_fattr_bitmap_maxsz" and fixes hitting a BUG_ON in xdr_shrink_bufhead
when getting ACLs.
Fix a bug in decode_getacl that returned -EINVAL on ACLs > page when getxattr
was called with a NULL buffer, preventing ACL > PAGE_SIZE from being retrieved.
Cc: [email protected]
Signed-off-by: Andy Adamson <[email protected]>
Signed-off-by: Trond Myklebust <[email protected]> |
int Field_timestamp::store(longlong nr, bool unsigned_val)
{
MYSQL_TIME l_time;
int error;
ErrConvInteger str(nr, unsigned_val);
THD *thd= get_thd();
longlong tmp= number_to_datetime(nr, 0, &l_time, sql_mode_for_timestamp(thd),
&error);
return store_TIME_with_warning(thd, &l_time, &str, error, tmp != -1);
} | 0 | [
"CWE-416",
"CWE-703"
] | server | 08c7ab404f69d9c4ca6ca7a9cf7eec74c804f917 | 66,958,921,065,998,980,000,000,000,000,000,000,000 | 11 | MDEV-24176 Server crashes after insert in the table with virtual
column generated using date_format() and if()
vcol_info->expr is allocated on expr_arena at parsing stage. Since
expr item is allocated on expr_arena all its containee items must be
allocated on expr_arena too. Otherwise fix_session_expr() will
encounter prematurely freed item.
When table is reopened from cache vcol_info contains stale
expression. We refresh expression via TABLE::vcol_fix_exprs() but
first we must prepare a proper context (Vcol_expr_context) which meets
some requirements:
1. As noted above expr update must be done on expr_arena as there may
be new items created. It was a bug in fix_session_expr_for_read() and
was just not reproduced because of no second refix. Now refix is done
for more cases so it does reproduce. Tests affected: vcol.binlog
2. Also name resolution context must be narrowed to the single table.
Tested by: vcol.update main.default vcol.vcol_syntax gcol.gcol_bugfixes
3. sql_mode must be clean and not fail expr update.
sql_mode such as MODE_NO_BACKSLASH_ESCAPES, MODE_NO_ZERO_IN_DATE, etc
must not affect vcol expression update. If the table was created
successfully any further evaluation must not fail. Tests affected:
main.func_like
Reviewed by: Sergei Golubchik <[email protected]> |
void Config::set_extra(const KeyInfo * begin,
const KeyInfo * end)
{
extra_begin = begin;
extra_end = end;
} | 0 | [
"CWE-125"
] | aspell | 80fa26c74279fced8d778351cff19d1d8f44fe4e | 303,112,278,095,457,930,000,000,000,000,000,000,000 | 6 | Fix various bugs found by OSS-Fuzz. |
innodb_bk_thread(
/*=============*/
void* arg)
{
ENGINE_HANDLE* handle;
struct innodb_engine* innodb_eng;
innodb_conn_data_t* conn_data;
void* thd = NULL;
bk_thd_exited = false;
handle = (ENGINE_HANDLE*) (arg);
innodb_eng = innodb_handle(handle);
if (innodb_eng->enable_binlog) {
/* This thread will commit the transactions
on behalf of the other threads. It will "pretend"
to be each connection thread while doing it. */
thd = handler_create_thd(true);
}
conn_data = UT_LIST_GET_FIRST(innodb_eng->conn_data);
while(!memcached_shutdown) {
innodb_conn_data_t* next_conn_data;
uint64_t time;
uint64_t trx_start = 0;
uint64_t processed_count = 0;
if (handler_check_global_read_lock_active()) {
release_mdl_lock = true;
} else {
release_mdl_lock = false;
}
/* Do the cleanup every innodb_eng->bk_commit_interval
seconds. We also check if the plugin is being shutdown
every second */
for (uint i = 0; i < innodb_eng->bk_commit_interval; i++) {
sleep(1);
/* If memcached is being shutdown, break */
if (memcached_shutdown) {
break;
}
}
time = mci_get_time();
if (UT_LIST_GET_LEN(innodb_eng->conn_data) == 0) {
continue;
}
if (!conn_data) {
conn_data = UT_LIST_GET_FIRST(innodb_eng->conn_data);
}
if (conn_data) {
next_conn_data = UT_LIST_GET_NEXT(conn_list, conn_data);
} else {
next_conn_data = NULL;
}
/* Set the clean_stale_conn to prevent force clean in
innodb_conn_clean. */
LOCK_CONN_IF_NOT_LOCKED(false, innodb_eng);
innodb_eng->clean_stale_conn = true;
UNLOCK_CONN_IF_NOT_LOCKED(false, innodb_eng);
while (conn_data) {
if (release_mdl_lock && !conn_data->is_stale) {
int err;
if(conn_data->is_waiting_for_mdl) {
goto next_item;
}
err = LOCK_CURRENT_CONN_TRYLOCK(conn_data);
if (err != 0) {
goto next_item;
}
/* We have got the lock here */
} else {
LOCK_CURRENT_CONN_IF_NOT_LOCKED(false, conn_data);
}
if (conn_data->is_stale) {
UNLOCK_CURRENT_CONN_IF_NOT_LOCKED(
false, conn_data);
LOCK_CONN_IF_NOT_LOCKED(false, innodb_eng);
UT_LIST_REMOVE(conn_list, innodb_eng->conn_data,
conn_data);
UNLOCK_CONN_IF_NOT_LOCKED(false, innodb_eng);
innodb_conn_clean_data(conn_data, false, true);
goto next_item;
}
if (release_mdl_lock) {
if (conn_data->thd) {
handler_thd_attach(conn_data->thd, NULL);
}
if (conn_data->in_use) {
UNLOCK_CURRENT_CONN_IF_NOT_LOCKED(false, conn_data);
goto next_item;
}
innodb_reset_conn(conn_data, true, true,
innodb_eng->enable_binlog);
if(conn_data->mysql_tbl) {
handler_unlock_table(conn_data->thd,
conn_data->mysql_tbl,
HDL_READ);
conn_data->mysql_tbl = NULL;
}
/*Close the data cursor */
if (conn_data->crsr) {
innodb_cb_cursor_close(conn_data->crsr);
conn_data->crsr = NULL;
}
if(conn_data->crsr_trx != NULL) {
ib_cb_trx_release(conn_data->crsr_trx);
conn_data->crsr_trx = NULL;
}
UNLOCK_CURRENT_CONN_IF_NOT_LOCKED(false, conn_data);
goto next_item;
}
if (conn_data->crsr_trx) {
trx_start = ib_cb_trx_get_start_time(
conn_data->crsr_trx);
}
/* Check the trx, if it is qualified for
reset and commit */
if ((conn_data->n_writes_since_commit > 0
|| conn_data->n_reads_since_commit > 0)
&& trx_start
&& (time - trx_start > CONN_IDLE_TIME_TO_BK_COMMIT)
&& !conn_data->in_use) {
/* binlog is running, make the thread
attach to conn_data->thd for binlog
committing */
if (thd) {
handler_thd_attach(
conn_data->thd, NULL);
}
innodb_reset_conn(conn_data, true, true,
innodb_eng->enable_binlog);
processed_count++;
}
UNLOCK_CURRENT_CONN_IF_NOT_LOCKED(false, conn_data);
next_item:
conn_data = next_conn_data;
/* Process BK_MAX_PROCESS_COMMIT (5) trx at a time */
if (!release_mdl_lock &&
processed_count > BK_MAX_PROCESS_COMMIT) {
break;
}
if (conn_data) {
next_conn_data = UT_LIST_GET_NEXT(
conn_list, conn_data);
}
}
/* Set the clean_stale_conn back. */
LOCK_CONN_IF_NOT_LOCKED(false, innodb_eng);
innodb_eng->clean_stale_conn = false;
UNLOCK_CONN_IF_NOT_LOCKED(false, innodb_eng);
}
bk_thd_exited = true;
/* Change to its original state before close the MySQL THD */
if (thd) {
handler_thd_attach(thd, NULL);
handler_close_thd(thd);
}
pthread_detach(pthread_self());
pthread_exit(NULL);
return((void*) 0);
} | 1 | [] | mysql-server | 659514dc83299a7d8c7defeb543be4339fbe1ee1 | 272,194,019,071,320,220,000,000,000,000,000,000,000 | 192 | Bug #25147515 SET DAEMON_MEMCACHED_R_BATCH_SIZE GREATER THAN 1 MAKE MYSQLD CRASHED
PROBLEM
-------
User starts a "get" the the connection with SET DAEMON_MEMCACHED_R_BATCH_SIZE= 5
and with binlog on. Since "get" is a read transaction this connection is not
allocated any conn_data->thd (which is used for bin log commit).The connection
is kept open. Innodb background thread tries to commit transactions which have
exceed CONN_IDLE_TIME_TO_BK_COMMIT and whose read batch size > 0, when it finds
this connection it tries to attach conn_data->thd to bin log thread.Since
conn_data->thd is NULL it crashes.
FIX
---
Check conn_data->thd value before attaching it to binlog thread. |
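A minimal sketch of the guard described in the FIX section above, reusing the variable names from the function shown (`thd`, `conn_data`); this paraphrases the commit message, not the verbatim patch:

```c
/* In the background-commit path, the vulnerable code tested the
 * background thread's own `thd` and then attached `conn_data->thd`,
 * which is NULL for read-only ("get") connections. Checking
 * conn_data->thd itself avoids the NULL-pointer crash. */
if (thd && conn_data->thd) {
	handler_thd_attach(conn_data->thd, NULL);
}
```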
HttpTransact::does_client_request_permit_cached_response(const OverridableHttpConfigParams *p, CacheControlResult *c,
HTTPHdr *h, char *via_string)
{
////////////////////////////////////////////////////////////////////////
// If aren't ignoring client's cache directives, meet client's wishes //
////////////////////////////////////////////////////////////////////////
if (!c->ignore_client_no_cache) {
if (h->is_cache_control_set(HTTP_VALUE_NO_CACHE))
return (false);
if (h->is_pragma_no_cache_set()) {
// if we are going to send out an ims anyway,
// no need to flag this as a no-cache.
if (!p->cache_ims_on_client_no_cache) {
via_string[VIA_CLIENT_REQUEST] = VIA_CLIENT_NO_CACHE;
}
return (false);
}
}
return (true);
} | 0 | [
"CWE-119"
] | trafficserver | 8b5f0345dade6b2822d9b52c8ad12e63011a5c12 | 334,540,092,172,715,360,000,000,000,000,000,000,000 | 22 | Fix the internal buffer sizing. Thanks to Sudheer for helping isolating this bug |
static int rotate_back_and_peek(gnutls_session_t session,
uint8_t key[TICKET_MASTER_KEY_SIZE])
{
int64_t t;
gnutls_datum_t secret;
/* Get the previous TOTP */
t = totp_previous(session);
if (t < 0)
return gnutls_assert_val(t);
secret.data = session->key.initial_stek;
secret.size = TICKET_MASTER_KEY_SIZE;
if (totp_sha3(session, t, &secret, key) < 0)
return gnutls_assert_val(GNUTLS_E_REQUESTED_DATA_NOT_AVAILABLE);
return 0;
} | 0 | [] | gnutls | c2646aeee94e71cb15c90a3147cf3b5b0ca158ca | 159,266,163,694,695,350,000,000,000,000,000,000,000 | 19 | stek: differentiate initial state from valid time window of TOTP
There was confusion in the TOTP implementation in stek.c. When the
mechanism is initialized for the first time, it records the timestamp
but doesn't initialize the key. This removes the timestamp recording
from the initialization phase, so the key is properly set later.
Signed-off-by: Daiki Ueno <[email protected]> |
static int l2cap_le_connect_req(struct l2cap_conn *conn,
struct l2cap_cmd_hdr *cmd, u16 cmd_len,
u8 *data)
{
struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
struct l2cap_le_conn_rsp rsp;
struct l2cap_chan *chan, *pchan;
u16 dcid, scid, credits, mtu, mps;
__le16 psm;
u8 result;
if (cmd_len != sizeof(*req))
return -EPROTO;
scid = __le16_to_cpu(req->scid);
mtu = __le16_to_cpu(req->mtu);
mps = __le16_to_cpu(req->mps);
psm = req->psm;
dcid = 0;
credits = 0;
if (mtu < 23 || mps < 23)
return -EPROTO;
BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
scid, mtu, mps);
/* Check if we have socket listening on psm */
pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
&conn->hcon->dst, LE_LINK);
if (!pchan) {
result = L2CAP_CR_LE_BAD_PSM;
chan = NULL;
goto response;
}
mutex_lock(&conn->chan_lock);
l2cap_chan_lock(pchan);
if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
SMP_ALLOW_STK)) {
result = L2CAP_CR_LE_AUTHENTICATION;
chan = NULL;
goto response_unlock;
}
/* Check for valid dynamic CID range */
if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
result = L2CAP_CR_LE_INVALID_SCID;
chan = NULL;
goto response_unlock;
}
/* Check if we already have channel with that dcid */
if (__l2cap_get_chan_by_dcid(conn, scid)) {
result = L2CAP_CR_LE_SCID_IN_USE;
chan = NULL;
goto response_unlock;
}
chan = pchan->ops->new_connection(pchan);
if (!chan) {
result = L2CAP_CR_LE_NO_MEM;
goto response_unlock;
}
bacpy(&chan->src, &conn->hcon->src);
bacpy(&chan->dst, &conn->hcon->dst);
chan->src_type = bdaddr_src_type(conn->hcon);
chan->dst_type = bdaddr_dst_type(conn->hcon);
chan->psm = psm;
chan->dcid = scid;
chan->omtu = mtu;
chan->remote_mps = mps;
chan->tx_credits = __le16_to_cpu(req->credits);
__l2cap_chan_add(conn, chan);
l2cap_le_flowctl_init(chan);
dcid = chan->scid;
credits = chan->rx_credits;
__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
chan->ident = cmd->ident;
if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
l2cap_state_change(chan, BT_CONNECT2);
/* The following result value is actually not defined
* for LE CoC but we use it to let the function know
* that it should bail out after doing its cleanup
* instead of sending a response.
*/
result = L2CAP_CR_PEND;
chan->ops->defer(chan);
} else {
l2cap_chan_ready(chan);
result = L2CAP_CR_LE_SUCCESS;
}
response_unlock:
l2cap_chan_unlock(pchan);
mutex_unlock(&conn->chan_lock);
l2cap_chan_put(pchan);
if (result == L2CAP_CR_PEND)
return 0;
response:
if (chan) {
rsp.mtu = cpu_to_le16(chan->imtu);
rsp.mps = cpu_to_le16(chan->mps);
} else {
rsp.mtu = 0;
rsp.mps = 0;
}
rsp.dcid = cpu_to_le16(dcid);
rsp.credits = cpu_to_le16(credits);
rsp.result = cpu_to_le16(result);
l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
return 0;
} | 0 | [
"CWE-125"
] | linux | 7c9cbd0b5e38a1672fcd137894ace3b042dfbf69 | 323,132,480,461,060,300,000,000,000,000,000,000,000 | 126 | Bluetooth: Verify that l2cap_get_conf_opt provides large enough buffer
The function l2cap_get_conf_opt will return L2CAP_CONF_OPT_SIZE + opt->len
as length value. The opt->len however is in control over the remote user
and can be used by an attacker to gain access beyond the bounds of the
actual packet.
To prevent any potential leak of heap memory, it is enough to check that
the resulting len calculation after calling l2cap_get_conf_opt is not
below zero. A well formed packet will always return >= 0 here and will
end with the length value being zero after the last option has been
parsed. In case of malformed packets messing with the opt->len field the
length value will become negative. If that is the case, then just abort
and ignore the option.
In case an attacker uses a too short opt->len value, then garbage will
be parsed, but that is protected by the unknown option handling and also
the option parameter size checks.
Signed-off-by: Marcel Holtmann <[email protected]>
Reviewed-by: Greg Kroah-Hartman <[email protected]>
Signed-off-by: Johan Hedberg <[email protected]> |
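A short fragment sketching the check the commit message describes, using the variable names from the kernel's config-option parsing loop (`len`, `type`, `olen`, `val`); a hedged outline, not the full patch:

```c
/* opt->len is remote-controlled, so the per-option length returned by
 * l2cap_get_conf_opt() can drive the remaining length negative. A well
 * formed packet always leaves len >= 0; bail out otherwise. */
while (len >= L2CAP_CONF_OPT_SIZE) {
	len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
	if (len < 0)
		break;	/* malformed option: would read past the packet */
	/* ... handle the parsed option ... */
}
```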
static int unittest_i2c_mux_remove(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct device_node *np = client->dev.of_node;
struct i2c_mux_core *muxc = i2c_get_clientdata(client);
dev_dbg(dev, "%s for node @%pOF\n", __func__, np);
i2c_mux_del_adapters(muxc);
return 0;
} | 0 | [
"CWE-401"
] | linux | e13de8fe0d6a51341671bbe384826d527afe8d44 | 136,846,749,090,127,740,000,000,000,000,000,000,000 | 10 | of: unittest: fix memory leak in unittest_data_add
In unittest_data_add, a copy buffer is created via kmemdup. This buffer
is leaked if of_fdt_unflatten_tree fails. The release for the
unittest_data buffer is added.
Fixes: b951f9dc7f25 ("Enabling OF selftest to run without machine's devicetree")
Signed-off-by: Navid Emamdoost <[email protected]>
Reviewed-by: Frank Rowand <[email protected]>
Signed-off-by: Rob Herring <[email protected]> |
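A sketch of the leak fix described above, with names taken from the commit message (`unittest_data`, `of_fdt_unflatten_tree`); the error-path details are assumptions:

```c
/* The kmemdup() copy was leaked when unflattening failed; the fix frees
 * it on that error path. */
unittest_data = kmemdup(__dtb_testcases_begin, size, GFP_KERNEL);
if (!unittest_data)
	return -ENOMEM;

of_fdt_unflatten_tree((unsigned long *)unittest_data, NULL,
		      &unittest_data_node);
if (!unittest_data_node) {
	pr_warn("%s: No tree to attach; not running tests\n", __func__);
	kfree(unittest_data);	/* previously missing: plugs the leak */
	return -ENODATA;
}
```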
CImg<T>& draw_object3d(const float x0, const float y0, const float z0,
const CImg<tp>& vertices, const CImgList<tf>& primitives,
const CImgList<tc>& colors,
const unsigned int render_type=4,
const bool is_double_sided=false, const float focale=700,
const float lightx=0, const float lighty=0, const float lightz=-5e8,
const float specular_lightness=0.2f, const float specular_shininess=0.1f,
const float g_opacity=1) {
return draw_object3d(x0,y0,z0,vertices,primitives,colors,CImg<floatT>::const_empty(),
render_type,is_double_sided,focale,lightx,lighty,lightz,
specular_lightness,specular_shininess,g_opacity,CImg<floatT>::empty());
} | 0 | [
"CWE-770"
] | cimg | 619cb58dd90b4e03ac68286c70ed98acbefd1c90 | 314,655,161,277,373,170,000,000,000,000,000,000,000 | 12 | CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size. |
char *ssh_path_expand_tilde(const char *d) {
char *h = NULL, *r;
const char *p;
size_t ld;
size_t lh = 0;
if (d[0] != '~') {
return strdup(d);
}
d++;
/* handle ~user/path */
p = strchr(d, '/');
if (p != NULL && p > d) {
#ifdef _WIN32
return strdup(d);
#else
struct passwd *pw;
size_t s = p - d;
char u[128];
if (s >= sizeof(u)) {
return NULL;
}
memcpy(u, d, s);
u[s] = '\0';
pw = getpwnam(u);
if (pw == NULL) {
return NULL;
}
ld = strlen(p);
h = strdup(pw->pw_dir);
#endif
} else {
ld = strlen(d);
p = (char *) d;
h = ssh_get_user_home_dir();
}
if (h == NULL) {
return NULL;
}
lh = strlen(h);
r = malloc(ld + lh + 1);
if (r == NULL) {
SAFE_FREE(h);
return NULL;
}
if (lh > 0) {
memcpy(r, h, lh);
}
SAFE_FREE(h);
memcpy(r + lh, p, ld + 1);
return r;
} | 0 | [] | libssh | 2ba1dea5493fb2f5a5be2dd263ce46ccb5f8ec76 | 250,678,756,820,063,430,000,000,000,000,000,000,000 | 57 | CVE-2019-14889: misc: Add function to quote file names
The added function quotes file name strings to be used in a shell.
The characters '\'' and '!' are treated as special cases.
Fixes T181
Signed-off-by: Anderson Toshiyuki Sasaki <[email protected]>
Reviewed-by: Andreas Schneider <[email protected]>
(cherry picked from commit c4ad1aba9860e02fe03ef3f58a047964e9e765fc) |
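A simplified, standalone illustration of single-quoting a file name for a shell, in the spirit of the quoting function the commit describes; this is not libssh's implementation, and the helper name is made up:

```c
#include <stdlib.h>
#include <string.h>

/* Wrap s in single quotes; a literal ' cannot appear inside single
 * quotes, so it is emitted as the idiom '\'' (close, escaped quote,
 * reopen). Worst case every character expands to 4 bytes. */
static char *quote_for_shell(const char *s)
{
    char *out = malloc(4 * strlen(s) + 3);
    char *p = out;

    if (out == NULL)
        return NULL;
    *p++ = '\'';
    for (; *s != '\0'; s++) {
        if (*s == '\'') {
            memcpy(p, "'\\''", 4);
            p += 4;
        } else {
            *p++ = *s;
        }
    }
    *p++ = '\'';
    *p = '\0';
    return out;
}
```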
DEFUN (no_bgp_redistribute_ipv4,
no_bgp_redistribute_ipv4_cmd,
"no redistribute (connected|kernel|ospf|rip|static)",
NO_STR
"Redistribute information from another routing protocol\n"
"Connected\n"
"Kernel routes\n"
"Open Shurtest Path First (OSPF)\n"
"Routing Information Protocol (RIP)\n"
"Static routes\n")
{
int type;
type = bgp_str2route_type (AFI_IP, argv[0]);
if (! type)
{
vty_out (vty, "%% Invalid route type%s", VTY_NEWLINE);
return CMD_WARNING;
}
return bgp_redistribute_unset (vty->index, AFI_IP, type);
} | 0 | [
"CWE-125"
] | frr | 6d58272b4cf96f0daa846210dd2104877900f921 | 65,232,499,457,871,090,000,000,000,000,000,000,000 | 22 | [bgpd] cleanup, compact and consolidate capability parsing code
2007-07-26 Paul Jakma <[email protected]>
* (general) Clean up and compact capability parsing slightly.
Consolidate validation of length and logging of generic TLV, and
memcpy of capability data, thus removing such from cap specific
code (not always present or correct).
* bgp_open.h: Add structures for the generic capability TLV header
and for the data formats of the various specific capabilities we
support. Hence remove the badly named, or else misdefined, struct
capability.
* bgp_open.c: (bgp_capability_vty_out) Use struct capability_mp_data.
Do the length checks *before* memcpy()'ing based on that length
(stored capability - should have been validated anyway on input,
but..).
(bgp_afi_safi_valid_indices) new function to validate (afi,safi)
which is about to be used as index into arrays, consolidates
several instances of same, at least one of which appeared to be
incomplete..
(bgp_capability_mp) Much condensed.
(bgp_capability_orf_entry) New, process one ORF entry
(bgp_capability_orf) Condensed. Fixed to process all ORF entries.
(bgp_capability_restart) Condensed, and fixed to use a
cap-specific type, rather than abusing capability_mp.
(struct message capcode_str) added to aid generic logging.
(size_t cap_minsizes[]) added to aid generic validation of
capability length field.
(bgp_capability_parse) Generic logging and validation of TLV
consolidated here. Code compacted as much as possible.
* bgp_packet.c: (bgp_open_receive) Capability parsers now use
streams, so no more need here to manually fudge the input stream
getp.
(bgp_capability_msg_parse) use struct capability_mp_data. Validate
lengths /before/ memcpy. Use bgp_afi_safi_valid_indices.
(bgp_capability_receive) Exported for use by test harness.
* bgp_vty.c: (bgp_show_summary) fix conversion warning
(bgp_show_peer) ditto
* bgp_debug.h: Fix storage 'extern' after type 'const'.
* lib/log.c: (mes_lookup) warning about code not being in
same-number array slot should be debug, not warning. E.g. BGP
has several discontigious number spaces, allocating from
different parts of a space is not uncommon (e.g. IANA
assigned versus vendor-assigned code points in some number
space). |
QPDF::replaceObject(int objid, int generation, QPDFObjectHandle oh)
{
if (oh.isIndirect())
{
QTC::TC("qpdf", "QPDF replaceObject called with indirect object");
throw std::logic_error(
"QPDF::replaceObject called with indirect object handle");
}
// Force new object to appear in the cache
resolve(objid, generation);
// Replace the object in the object cache
QPDFObjGen og(objid, generation);
this->m->obj_cache[og] =
ObjCache(QPDFObjectHandle::ObjAccessor::getObject(oh), -1, -1);
} | 0 | [
"CWE-125"
] | qpdf | 1868a10f8b06631362618bfc85ca8646da4b4b71 | 199,581,839,878,838,640,000,000,000,000,000,000,000 | 17 | Replace all atoi calls with QUtil::string_to_int
The latter catches underflow/overflow. |
int x509_check_cert_time(X509_STORE_CTX *ctx, X509 *x, int quiet)
{
time_t *ptime;
int i;
if (ctx->param->flags & X509_V_FLAG_USE_CHECK_TIME)
ptime = &ctx->param->check_time;
else if (ctx->param->flags & X509_V_FLAG_NO_CHECK_TIME)
return 1;
else
ptime = NULL;
i = X509_cmp_time(X509_get_notBefore(x), ptime);
if (i == 0) {
if (quiet)
return 0;
ctx->error = X509_V_ERR_ERROR_IN_CERT_NOT_BEFORE_FIELD;
ctx->current_cert = x;
if (!ctx->verify_cb(0, ctx))
return 0;
}
if (i > 0) {
if (quiet)
return 0;
ctx->error = X509_V_ERR_CERT_NOT_YET_VALID;
ctx->current_cert = x;
if (!ctx->verify_cb(0, ctx))
return 0;
}
i = X509_cmp_time(X509_get_notAfter(x), ptime);
if (i == 0) {
if (quiet)
return 0;
ctx->error = X509_V_ERR_ERROR_IN_CERT_NOT_AFTER_FIELD;
ctx->current_cert = x;
if (!ctx->verify_cb(0, ctx))
return 0;
}
if (i < 0) {
if (quiet)
return 0;
ctx->error = X509_V_ERR_CERT_HAS_EXPIRED;
ctx->current_cert = x;
if (!ctx->verify_cb(0, ctx))
return 0;
}
return 1;
} | 0 | [] | openssl | 33cc5dde478ba5ad79f8fd4acd8737f0e60e236e | 168,679,551,140,375,210,000,000,000,000,000,000,000 | 52 | Compat self-signed trust with reject-only aux data
When auxiliary data contains only reject entries, continue to trust
self-signed objects just as when no auxiliary data is present.
This makes it possible to reject specific uses without changing
what's accepted (and thus overriding the underlying EKU).
Added new supported certs and doubled test count from 38 to 76.
Reviewed-by: Dr. Stephen Henson <[email protected]> |
void psi_cgroup_free(struct cgroup *cgroup)
{
if (static_branch_likely(&psi_disabled))
return;
cancel_delayed_work_sync(&cgroup->psi.avgs_work);
free_percpu(cgroup->psi.pcpu);
/* All triggers must be removed by now */
WARN_ONCE(cgroup->psi.poll_states, "psi: trigger leak\n");
} | 0 | [
"CWE-787"
] | linux | 6fcca0fa48118e6d63733eb4644c6cd880c15b8f | 292,623,963,634,900,930,000,000,000,000,000,000,000 | 10 | sched/psi: Fix OOB write when writing 0 bytes to PSI files
Issuing write() with count parameter set to 0 on any file under
/proc/pressure/ will cause an OOB write because of the access to
buf[buf_size-1] when NUL-termination is performed. Fix this by checking
that buf_size is non-zero.
Signed-off-by: Suren Baghdasaryan <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Link: https://lkml.kernel.org/r/[email protected] |
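A fragment sketching the fix named in the commit message: reject zero-byte writes before the NUL-termination at `buf[buf_size - 1]`, which would otherwise index `buf[-1]`. The surrounding signature follows the kernel's PSI write handler; treat it as an annotated outline rather than the exact patch:

```c
static ssize_t psi_write(struct file *file, const char __user *user_buf,
			 size_t nbytes, enum psi_res res)
{
	char buf[32];
	size_t buf_size = min(nbytes, sizeof(buf));

	if (!buf_size)
		return -EINVAL;		/* the added check: no 0-byte writes */

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size - 1] = '\0';	/* safe only because buf_size >= 1 */
	/* ... parse the trigger description ... */
	return nbytes;
}
```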
mainloop_del_fd(mainloop_io_t *client)
{
if(client != NULL) {
crm_trace("Removing client %s[%p] %d", client->name, client, mainloop_gio_refcount(client));
if (client->source) {
/* Results in mainloop_gio_destroy() being called just
* before the source is removed from mainloop
*/
g_source_remove(client->source);
}
}
} | 0 | [
"CWE-399"
] | pacemaker | 564f7cc2a51dcd2f28ab12a13394f31be5aa3c93 | 161,959,735,745,349,630,000,000,000,000,000,000,000 | 12 | High: core: Internal tls api improvements for reuse with future LRMD tls backend. |
static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
pgoff_t pgoff;
struct address_space *mapping = inode->i_mapping;
struct hstate *h = hstate_inode(inode);
BUG_ON(offset & ~huge_page_mask(h));
pgoff = offset >> PAGE_SHIFT;
i_size_write(inode, offset);
i_mmap_lock_write(mapping);
if (!RB_EMPTY_ROOT(&mapping->i_mmap))
hugetlb_vmtruncate_list(&mapping->i_mmap, pgoff);
i_mmap_unlock_write(mapping);
truncate_hugepages(inode, offset);
return 0;
} | 1 | [] | linux | 1bfad99ab42569807d0ca1698449cae5e8c0334a | 165,161,072,599,374,800,000,000,000,000,000,000,000 | 17 | hugetlbfs: hugetlb_vmtruncate_list() needs to take a range to delete
fallocate hole punch will want to unmap a specific range of pages.
Modify the existing hugetlb_vmtruncate_list() routine to take a
start/end range. If end is 0, this indicates all pages after start
should be unmapped. This is the same as the existing truncate
functionality. Modify existing callers to add 0 as end of range.
Since the routine will be used in hole punch as well as truncate
operations, it is more appropriately renamed to hugetlb_vmdelete_list().
Signed-off-by: Mike Kravetz <[email protected]>
Reviewed-by: Naoya Horiguchi <[email protected]>
Acked-by: Hillf Danton <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Davidlohr Bueso <[email protected]>
Cc: Aneesh Kumar <[email protected]>
Cc: Christoph Hellwig <[email protected]>
Cc: Michal Hocko <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
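A sketch of the interface change the commit message describes: the truncate helper becomes a range-delete helper, with `end == 0` preserving the old "everything after start" behavior. The prototype and call sites are paraphrased, not copied:

```c
/* Renamed from hugetlb_vmtruncate_list(); end == 0 means "unmap all
 * pages at or after start", matching the old truncate semantics. */
static void hugetlb_vmdelete_list(struct rb_root *root,
				  pgoff_t start, pgoff_t end);

/* Existing truncate caller: unmap everything past pgoff. */
hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0);

/* Future hole-punch caller: unmap only the punched range. */
hugetlb_vmdelete_list(&mapping->i_mmap, hole_start, hole_end);
```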
isdn_net_ciscohdlck_receive(isdn_net_local *lp, struct sk_buff *skb)
{
unsigned char *p;
u8 addr;
u8 ctrl;
u16 type;
if (skb->len < 4)
goto out_free;
p = skb->data;
addr = *(u8 *)(p + 0);
ctrl = *(u8 *)(p + 1);
type = be16_to_cpup((__be16 *)(p + 2));
p += 4;
skb_pull(skb, 4);
if (addr != CISCO_ADDR_UNICAST && addr != CISCO_ADDR_BROADCAST) {
printk(KERN_WARNING "%s: Unknown Cisco addr 0x%02x\n",
lp->netdev->dev->name, addr);
goto out_free;
}
if (ctrl != CISCO_CTRL) {
printk(KERN_WARNING "%s: Unknown Cisco ctrl 0x%02x\n",
lp->netdev->dev->name, ctrl);
goto out_free;
}
switch (type) {
case CISCO_TYPE_SLARP:
isdn_net_ciscohdlck_slarp_in(lp, skb);
goto out_free;
case CISCO_TYPE_CDP:
if (lp->cisco_debserint)
printk(KERN_DEBUG "%s: Received CDP packet. use "
"\"no cdp enable\" on cisco.\n",
lp->netdev->dev->name);
goto out_free;
default:
/* no special cisco protocol */
skb->protocol = htons(type);
netif_rx(skb);
return;
}
out_free:
kfree_skb(skb);
} | 0 | [
"CWE-119"
] | linux | 9f5af546e6acc30f075828cb58c7f09665033967 | 59,364,246,014,727,845,000,000,000,000,000,000,000 | 48 | isdn/i4l: fix buffer overflow
This fixes a potential buffer overflow in isdn_net.c caused by an
unbounded strcpy.
[ ISDN seems to be effectively unmaintained, and the I4L driver in
particular is long deprecated, but in case somebody uses this..
- Linus ]
Signed-off-by: Jiten Thakkar <[email protected]>
Signed-off-by: Annie Cherkaev <[email protected]>
Cc: Karsten Keil <[email protected]>
Cc: Kees Cook <[email protected]>
Cc: [email protected]
Signed-off-by: Linus Torvalds <[email protected]> |
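A hypothetical one-line illustration of the class of fix named above (an unbounded `strcpy` replaced by a size-bounded copy); the field names are invented, since the commit message does not quote the patched line:

```c
/* Bound the copy to the destination's size instead of trusting the
 * attacker-influenced source length. */
strlcpy(cfg->drvid, drvid, sizeof(cfg->drvid));	/* was: strcpy(...) */
```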
flatpak_dir_update_deploy_ref (FlatpakDir *self,
const char *ref,
const char *checksum,
GError **error)
{
g_autofree char *deploy_ref = g_strconcat ("deploy/", ref, NULL);
if (!ostree_repo_set_ref_immediate (self->repo, NULL, deploy_ref, checksum, NULL, error))
return FALSE;
return TRUE;
} | 0 | [
"CWE-74"
] | flatpak | fb473cad801c6b61706353256cab32330557374a | 48,384,305,355,342,770,000,000,000,000,000,000,000 | 12 | dir: Pass environment via bwrap --setenv when running apply_extra
This means we can systematically pass the environment variables
through bwrap(1), even if it is setuid and thus is filtering out
security-sensitive environment variables. bwrap ends up being
run with an empty environment instead.
As with the previous commit, this regressed while fixing CVE-2021-21261.
Fixes: 6d1773d2 "run: Convert all environment variables into bwrap arguments"
Signed-off-by: Simon McVittie <[email protected]> |
static void ept_save_pdptrs(struct kvm_vcpu *vcpu)
{
if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
vcpu->arch.pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
vcpu->arch.pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
vcpu->arch.pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
vcpu->arch.pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
}
__set_bit(VCPU_EXREG_PDPTR,
(unsigned long *)&vcpu->arch.regs_avail);
__set_bit(VCPU_EXREG_PDPTR,
(unsigned long *)&vcpu->arch.regs_dirty);
} | 0 | [
"CWE-400"
] | linux-2.6 | 9581d442b9058d3699b4be568b6e5eae38a41493 | 317,248,408,000,010,200,000,000,000,000,000,000,000 | 14 | KVM: Fix fs/gs reload oops with invalid ldt
kvm reloads the host's fs and gs blindly, however the underlying segment
descriptors may be invalid due to the user modifying the ldt after loading
them.
Fix by using the safe accessors (loadsegment() and load_gs_index()) instead
of home grown unsafe versions.
This is CVE-2010-3698.
KVM-Stable-Tag.
Signed-off-by: Avi Kivity <[email protected]>
Signed-off-by: Marcelo Tosatti <[email protected]> |
static int uwsgi_run_command_do(char *command, char *arg) {
char *argv[4];
#ifdef __linux__
if (prctl(PR_SET_PDEATHSIG, SIGKILL, 0, 0, 0)) {
uwsgi_error("prctl()");
}
#endif
if (command == NULL) {
argv[0] = uwsgi_binsh();
argv[1] = "-c";
argv[2] = arg;
argv[3] = NULL;
execvp(argv[0], argv);
}
else {
argv[0] = command;
argv[1] = arg;
argv[2] = NULL;
execvp(command, argv);
}
uwsgi_error("execvp()");
//never here
exit(1);
} | 0 | [
"CWE-119",
"CWE-703",
"CWE-787"
] | uwsgi | cb4636f7c0af2e97a4eef7a3cdcbd85a71247bfe | 214,923,487,132,173,300,000,000,000,000,000,000,000 | 29 | improve uwsgi_expand_path() to sanitize input, avoiding stack corruption and potential security issue |
static int send_header(void *data, const char *key, const char *val)
{
char *header_line = NULL;
hdr_ptr *hdr = (hdr_ptr*)data;
header_line = apr_pstrcat(hdr->bb->p, key, ": ", val, CRLF, NULL);
ap_xlate_proto_to_ascii(header_line, strlen(header_line));
ap_fputs(hdr->f, hdr->bb, header_line);
return 1;
} | 0 | [] | httpd | ecebcc035ccd8d0e2984fe41420d9e944f456b3c | 13,887,485,691,350,570,000,000,000,000,000,000,000 | 10 | Merged r1734009,r1734231,r1734281,r1838055,r1838079,r1840229,r1876664,r1876674,r1876784,r1879078,r1881620,r1887311,r1888871 from trunk:
*) core: Split ap_create_request() from ap_read_request(). [Graham Leggett]
*) core, h2: common ap_parse_request_line() and ap_check_request_header()
code. [Yann Ylavic]
*) core: Add StrictHostCheck to allow unconfigured hostnames to be
rejected. [Eric Covener]
git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1890245 13f79535-47bb-0310-9956-ffa450edef68 |
void do_eval(DYNAMIC_STRING *query_eval, const char *query,
const char *query_end, my_bool pass_through_escape_chars)
{
const char *p;
register char c, next_c;
register int escaped = 0;
VAR *v;
DBUG_ENTER("do_eval");
for (p= query; (c= *p) && p < query_end; ++p)
{
switch(c) {
case '$':
if (escaped)
{
escaped= 0;
dynstr_append_mem(query_eval, p, 1);
}
else
{
if (!(v= var_get(p, &p, 0, 0)))
{
report_or_die( "Bad variable in eval");
return;
}
dynstr_append_mem(query_eval, v->str_val, v->str_val_len);
}
break;
case '\\':
next_c= *(p+1);
if (escaped)
{
escaped= 0;
dynstr_append_mem(query_eval, p, 1);
}
else if (next_c == '\\' || next_c == '$' || next_c == '"')
{
/* Set escaped only if next char is \, " or $ */
escaped= 1;
if (pass_through_escape_chars)
{
/* The escape char should be added to the output string. */
dynstr_append_mem(query_eval, p, 1);
}
}
else
dynstr_append_mem(query_eval, p, 1);
break;
default:
escaped= 0;
dynstr_append_mem(query_eval, p, 1);
break;
}
}
#ifdef __WIN__
fix_win_paths(query_eval->str, query_eval->length);
#endif
DBUG_VOID_RETURN;
} | 0 | [] | server | 01b39b7b0730102b88d8ea43ec719a75e9316a1e | 168,563,886,555,526,170,000,000,000,000,000,000,000 | 60 | mysqltest: don't eat new lines in --exec
pass them through as is |
e_ews_connection_get_server_time_zones (EEwsConnection *cnc,
gint pri,
GSList *msdn_locations,
GCancellable *cancellable,
GAsyncReadyCallback callback,
gpointer user_data)
{
ESoapMessage *msg;
GSimpleAsyncResult *simple;
EwsAsyncData *async_data;
GSList *l;
g_return_if_fail (cnc != NULL);
g_return_if_fail (cnc->priv != NULL);
simple = g_simple_async_result_new (
G_OBJECT (cnc), callback, user_data, e_ews_connection_get_server_time_zones);
async_data = g_new0 (EwsAsyncData, 1);
g_simple_async_result_set_op_res_gpointer (simple, async_data, (GDestroyNotify) async_data_free);
/*
* EWS server version earlier than 2010 doesn't have support to "GetServerTimeZones".
* So, if the API is called with an older Exchange's version, let's just fail silently.
*/
if (!e_ews_connection_satisfies_server_version (cnc, E_EWS_EXCHANGE_2010_SP1)) {
g_simple_async_result_complete_in_idle (simple);
g_object_unref (simple);
return;
}
msg = e_ews_message_new_with_header (
cnc->priv->settings,
cnc->priv->uri,
cnc->priv->impersonate_user,
"GetServerTimeZones",
"ReturnFullTimeZoneData",
"true",
cnc->priv->version,
E_EWS_EXCHANGE_2010,
FALSE,
TRUE);
e_soap_message_start_element (msg, "Ids", "messages", NULL);
for (l = msdn_locations; l != NULL; l = l->next)
e_ews_message_write_string_parameter_with_attribute (msg, "Id", NULL, l->data, NULL, NULL);
e_soap_message_end_element (msg); /* Ids */
e_ews_message_write_footer (msg); /* Complete the footer and print the request */
e_ews_connection_queue_request (cnc, msg, get_server_time_zones_response_cb, pri, cancellable, simple);
g_object_unref (simple);
} | 0 | [
"CWE-295"
] | evolution-ews | 915226eca9454b8b3e5adb6f2fff9698451778de | 255,808,168,137,551,280,000,000,000,000,000,000,000 | 53 | I#27 - SSL Certificates are not validated
This depends on https://gitlab.gnome.org/GNOME/evolution-data-server/commit/6672b8236139bd6ef41ecb915f4c72e2a052dba5 too.
Closes https://gitlab.gnome.org/GNOME/evolution-ews/issues/27 |
void kvm_release_page_clean(struct page *page)
{
WARN_ON(is_error_page(page));
kvm_release_pfn_clean(page_to_pfn(page));
} | 0 | [
"CWE-399"
] | linux | 12d6e7538e2d418c08f082b1b44ffa5fb7270ed8 | 103,314,822,072,213,550,000,000,000,000,000,000,000 | 6 | KVM: perform an invalid memslot step for gpa base change
PPC must flush all translations before the new memory slot
is visible.
Signed-off-by: Marcelo Tosatti <[email protected]>
Signed-off-by: Avi Kivity <[email protected]> |
int regulator_is_enabled(struct regulator *regulator)
{
int ret;
if (regulator->always_on)
return 1;
mutex_lock(®ulator->rdev->mutex);
ret = _regulator_is_enabled(regulator->rdev);
mutex_unlock(®ulator->rdev->mutex);
return ret;
} | 0 | [
"CWE-416"
] | linux | 60a2362f769cf549dc466134efe71c8bf9fbaaba | 304,030,315,094,051,700,000,000,000,000,000,000,000 | 13 | regulator: core: Fix regualtor_ena_gpio_free not to access pin after freeing
After freeing pin from regulator_ena_gpio_free, loop can access
the pin. So this patch fixes not to access pin after freeing.
Signed-off-by: Seung-Woo Kim <[email protected]>
Signed-off-by: Mark Brown <[email protected]> |
fr_window_finalize (GObject *object)
{
FrWindow *window = FR_WINDOW (object);
fr_window_free_open_files (window);
if (window->archive != NULL) {
g_object_unref (window->archive);
window->archive = NULL;
}
if (window->priv != NULL) {
fr_window_free_private_data (window);
g_free (window->priv);
window->priv = NULL;
}
G_OBJECT_CLASS (fr_window_parent_class)->finalize (object);
} | 0 | [
"CWE-22"
] | file-roller | b147281293a8307808475e102a14857055f81631 | 129,344,107,296,238,740,000,000,000,000,000,000,000 | 19 | libarchive: sanitize filenames before extracting |
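A standalone check in the spirit of "sanitize filenames before extracting" (CWE-22); this is not file-roller's code, and the function name is made up. It rejects absolute paths and any `..` component so an archive entry cannot escape the extraction directory:

```c
#include <stdbool.h>
#include <string.h>

static bool extraction_path_is_safe(const char *name)
{
    const char *p;

    if (name == NULL || name[0] == '/')
        return false;                   /* absolute path */
    for (p = name; *p != '\0'; ) {
        const char *slash = strchr(p, '/');
        size_t len = slash ? (size_t) (slash - p) : strlen(p);

        if (len == 2 && p[0] == '.' && p[1] == '.')
            return false;               /* ".." path component */
        p = slash ? slash + 1 : p + len;
    }
    return true;
}
```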
void expectProtocol(const std::string& proto) {
expectHandshakeSuccess();
EXPECT_NE(client->nextProtoLength, 0);
EXPECT_EQ(client->nextProtoLength, server->nextProtoLength);
EXPECT_EQ(
memcmp(client->nextProto, server->nextProto, server->nextProtoLength),
0);
string selected((const char*)client->nextProto, client->nextProtoLength);
EXPECT_EQ(proto, selected);
} | 0 | [
"CWE-125"
] | folly | c321eb588909646c15aefde035fd3133ba32cdee | 31,247,787,187,301,673,000,000,000,000,000,000,000 | 10 | Handle close_notify as standard writeErr in AsyncSSLSocket.
Summary: Fixes CVE-2019-11934
Reviewed By: mingtaoy
Differential Revision: D18020613
fbshipit-source-id: db82bb250e53f0d225f1280bd67bc74abd417836 |
static struct dcb_app_type *dcb_app_lookup(const struct dcb_app *app,
int ifindex, int prio)
{
struct dcb_app_type *itr;
list_for_each_entry(itr, &dcb_app_list, list) {
if (itr->app.selector == app->selector &&
itr->app.protocol == app->protocol &&
itr->ifindex == ifindex &&
(!prio || itr->app.priority == prio))
return itr;
}
return NULL;
} | 0 | [
"CWE-399"
] | linux-2.6 | 29cd8ae0e1a39e239a3a7b67da1986add1199fc0 | 177,264,307,409,429,720,000,000,000,000,000,000,000 | 15 | dcbnl: fix various netlink info leaks
The dcb netlink interface leaks stack memory in various places:
* perm_addr[] buffer is only filled at max with 12 of the 32 bytes but
copied completely,
* no in-kernel driver fills all fields of an IEEE 802.1Qaz subcommand,
so we're leaking up to 58 bytes for ieee_ets structs, up to 136 bytes
for ieee_pfc structs, etc.,
* the same is true for CEE -- no in-kernel driver fills the whole
struct,
Prevent all of the above stack info leaks by properly initializing the
buffers/structures involved.
Signed-off-by: Mathias Krause <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
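A fragment sketching the infoleak fix described above: zero each reply structure before the driver fills it, so fields no in-kernel driver sets cannot leak stack bytes to userspace over netlink. Names follow the commit message; the exact attribute plumbing is elided:

```c
struct ieee_ets ets;

memset(&ets, 0, sizeof(ets));	/* the added initialization */
err = ops->ieee_getets(netdev, &ets);
if (!err)
	err = nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets);
```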
execute_arith_for_command (arith_for_command)
ARITH_FOR_COM *arith_for_command;
{
intmax_t expresult;
int expok, body_status, arith_lineno, save_lineno;
body_status = EXECUTION_SUCCESS;
loop_level++;
save_lineno = line_number;
if (arith_for_command->flags & CMD_IGNORE_RETURN)
arith_for_command->action->flags |= CMD_IGNORE_RETURN;
this_command_name = "(("; /* )) for expression error messages */
/* save the starting line number of the command so we can reset
line_number before executing each expression -- for $LINENO
and the DEBUG trap. */
line_number = arith_lineno = arith_for_command->line;
if (variable_context && interactive_shell)
{
line_number -= function_line_number;
if (line_number < 0)
line_number = 0;
}
/* Evaluate the initialization expression. */
expresult = eval_arith_for_expr (arith_for_command->init, &expok);
if (expok == 0)
{
line_number = save_lineno;
return (EXECUTION_FAILURE);
}
while (1)
{
/* Evaluate the test expression. */
line_number = arith_lineno;
expresult = eval_arith_for_expr (arith_for_command->test, &expok);
line_number = save_lineno;
if (expok == 0)
{
body_status = EXECUTION_FAILURE;
break;
}
REAP ();
if (expresult == 0)
break;
/* Execute the body of the arithmetic for command. */
QUIT;
body_status = execute_command (arith_for_command->action);
QUIT;
/* Handle any `break' or `continue' commands executed by the body. */
if (breaking)
{
breaking--;
break;
}
if (continuing)
{
continuing--;
if (continuing)
break;
}
/* Evaluate the step expression. */
line_number = arith_lineno;
expresult = eval_arith_for_expr (arith_for_command->step, &expok);
line_number = save_lineno;
if (expok == 0)
{
body_status = EXECUTION_FAILURE;
break;
}
}
loop_level--;
line_number = save_lineno;
return (body_status);
} | 0 | [] | bash | 955543877583837c85470f7fb8a97b7aa8d45e6c | 156,221,452,491,772,000,000,000,000,000,000,000,000 | 86 | bash-4.4-rc2 release |
MOBI_RET mobi_markup_to_utf8(MOBIPart *part) {
if (part == NULL) {
return MOBI_INIT_FAILED;
}
unsigned char *text = part->data;
size_t length = part->size;
/* extreme case in which each input character is converted
to 3-byte utf-8 sequence */
size_t out_length = 3 * length + 1;
char *out_text = malloc(out_length);
if (out_text == NULL) {
debug_print("%s", "Memory allocation failed\n");
return MOBI_MALLOC_FAILED;
}
MOBI_RET ret = mobi_cp1252_to_utf8(out_text, (const char *) text, &out_length, length);
free(text);
if (ret != MOBI_SUCCESS || out_length == 0) {
debug_print("%s", "conversion from cp1252 to utf8 failed\n");
free(out_text);
part->data = NULL;
return MOBI_DATA_CORRUPT;
}
text = malloc(out_length);
if (text == NULL) {
debug_print("%s", "Memory allocation failed\n");
free(out_text);
part->data = NULL;
return MOBI_MALLOC_FAILED;
}
memcpy(text, out_text, out_length);
free(out_text);
part->data = text;
part->size = out_length;
return MOBI_SUCCESS;
} | 0 | [
"CWE-703",
"CWE-125"
] | libmobi | fb1ab50e448ddbed746fd27ae07469bc506d838b | 312,076,602,796,706,050,000,000,000,000,000,000,000 | 35 | Fix array boundary check when parsing inflections which could result in buffer over-read with corrupt input |
xmlSchemaVContentModelCallback(xmlSchemaValidCtxtPtr vctxt ATTRIBUTE_UNUSED,
const xmlChar * name ATTRIBUTE_UNUSED,
xmlSchemaElementPtr item,
xmlSchemaNodeInfoPtr inode)
{
inode->decl = item;
#ifdef DEBUG_CONTENT
{
xmlChar *str = NULL;
if (item->type == XML_SCHEMA_TYPE_ELEMENT) {
xmlGenericError(xmlGenericErrorContext,
"AUTOMATON callback for '%s' [declaration]\n",
xmlSchemaFormatQName(&str,
inode->localName, inode->nsName));
} else {
xmlGenericError(xmlGenericErrorContext,
"AUTOMATON callback for '%s' [wildcard]\n",
xmlSchemaFormatQName(&str,
inode->localName, inode->nsName));
}
FREE_AND_NULL(str)
}
#endif
} | 0 | [
"CWE-134"
] | libxml2 | 4472c3a5a5b516aaf59b89be602fbce52756c3e9 | 186,103,971,002,407,300,000,000,000,000,000,000,000 | 26 | Fix some format string warnings with possible format string vulnerability
For https://bugzilla.gnome.org/show_bug.cgi?id=761029
Decorate every method in libxml2 with the appropriate
LIBXML_ATTR_FORMAT(fmt,args) macro and add some cleanups
following the reports. |
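A sketch of the decoration the commit describes: `LIBXML_ATTR_FORMAT(fmt, args)` expands to `__attribute__((format(printf, fmt, args)))` on GCC/Clang, so the compiler can flag mismatched or attacker-influenced format strings at call sites. The function below is illustrative; positions 3 and 4 refer to the format-string parameter and the first variadic argument:

```c
static void
xmlSchemaErrMsg(void *ctxt, int error, const char *msg, ...)
    LIBXML_ATTR_FORMAT(3, 4);
```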
static inline struct sk_buff *tcp_write_queue_next(const struct sock *sk,
const struct sk_buff *skb)
{
return skb_queue_next(&sk->sk_write_queue, skb);
} | 0 | [
"CWE-416",
"CWE-269"
] | linux | bb1fceca22492109be12640d49f5ea5a544c6bb4 | 321,063,761,507,164,970,000,000,000,000,000,000,000 | 5 | tcp: fix use after free in tcp_xmit_retransmit_queue()
When tcp_sendmsg() allocates a fresh and empty skb, it puts it at the
tail of the write queue using tcp_add_write_queue_tail()
Then it attempts to copy user data into this fresh skb.
If the copy fails, we undo the work and remove the fresh skb.
Unfortunately, this undo lacks the change done to tp->highest_sack and
we can leave a dangling pointer (to a freed skb)
Later, tcp_xmit_retransmit_queue() can dereference this pointer and
access freed memory. For regular kernels where memory is not unmapped,
this might cause SACK bugs because tcp_highest_sack_seq() is buggy,
returning garbage instead of tp->snd_nxt, but with various debug
features like CONFIG_DEBUG_PAGEALLOC, this can crash the kernel.
This bug was found by Marco Grassi thanks to syzkaller.
Fixes: 6859d49475d4 ("[TCP]: Abstract tp->highest_sack accessing & point to next skb")
Reported-by: Marco Grassi <[email protected]>
Signed-off-by: Eric Dumazet <[email protected]>
Cc: Ilpo Järvinen <[email protected]>
Cc: Yuchung Cheng <[email protected]>
Cc: Neal Cardwell <[email protected]>
Acked-by: Neal Cardwell <[email protected]>
Reviewed-by: Cong Wang <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
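A fragment along the lines of the fix the commit message implies: when an skb is unlinked from the write queue (for example after a failed copy in tcp_sendmsg()), also clear `tp->highest_sack` if it points at that skb, so no dangling pointer survives the free. A hedged sketch, not the verbatim patch:

```c
static inline void tcp_check_send_head(struct sock *sk,
				       struct sk_buff *skb_unlinked)
{
	if (sk->sk_send_head == skb_unlinked)
		sk->sk_send_head = NULL;
	if (tcp_sk(sk)->highest_sack == skb_unlinked)
		tcp_sk(sk)->highest_sack = NULL;	/* the added undo */
}
```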
ex_colorscheme(exarg_T *eap)
{
if (*eap->arg == NUL)
{
#ifdef FEAT_EVAL
char_u *expr = vim_strsave((char_u *)"g:colors_name");
char_u *p = NULL;
if (expr != NULL)
{
++emsg_off;
p = eval_to_string(expr, FALSE);
--emsg_off;
vim_free(expr);
}
if (p != NULL)
{
msg((char *)p);
vim_free(p);
}
else
msg("default");
#else
msg(_("unknown"));
#endif
}
else if (load_colors(eap->arg) == FAIL)
semsg(_(e_cannot_find_color_scheme_str), eap->arg);
#ifdef FEAT_VTP
else if (has_vtp_working())
{
// background color change requires clear + redraw
update_screen(UPD_CLEAR);
redrawcmd();
}
#endif
} | 0 | [
"CWE-416"
] | vim | 35d21c6830fc2d68aca838424a0e786821c5891c | 339,503,140,787,238,000,000,000,000,000,000,000,000 | 38 | patch 9.0.0360: crash when invalid line number on :for is ignored
Problem: Crash when invalid line number on :for is ignored.
Solution: Do not check breakpoint for non-existing line. |
void Filter::onSoftPerTryTimeout(UpstreamRequest& upstream_request) {
// Track this as a timeout for outlier detection purposes even though we didn't
// cancel the request yet and might get a 2xx later.
updateOutlierDetection(Upstream::Outlier::Result::LOCAL_ORIGIN_TIMEOUT, upstream_request,
absl::optional<uint64_t>(enumToInt(timeout_response_code_)));
upstream_request.outlier_detection_timeout_recorded_ = true;
if (!downstream_response_started_ && retry_state_) {
RetryStatus retry_status =
retry_state_->shouldHedgeRetryPerTryTimeout([this]() -> void { doRetry(); });
if (retry_status == RetryStatus::Yes && setupRetry()) {
setupRetry();
// Don't increment upstream_host->stats().rq_error_ here, we'll do that
// later if 1) we hit global timeout or 2) we get bad response headers
// back.
upstream_request.retried_ = true;
// TODO: cluster stat for hedge attempted.
} else if (retry_status == RetryStatus::NoOverflow) {
callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::UpstreamOverflow);
} else if (retry_status == RetryStatus::NoRetryLimitExceeded) {
callbacks_->streamInfo().setResponseFlag(
StreamInfo::ResponseFlag::UpstreamRetryLimitExceeded);
}
}
} | 0 | [
"CWE-400",
"CWE-703"
] | envoy | afc39bea36fd436e54262f150c009e8d72db5014 | 158,767,188,278,968,470,000,000,000,000,000,000,000 | 27 | Track byteSize of HeaderMap internally.
Introduces a cached byte size updated internally in HeaderMap. The value
is stored as an optional, and is cleared whenever a non-const pointer or
reference to a HeaderEntry is accessed. The cached value can be set with
refreshByteSize() which performs an iteration over the HeaderMap to sum
the size of each key and value in the HeaderMap.
Signed-off-by: Asra Ali <[email protected]> |
static int translate_table(struct xt_table_info *newinfo, void *entry0,
const struct arpt_replace *repl)
{
struct arpt_entry *iter;
unsigned int i;
int ret = 0;
newinfo->size = repl->size;
newinfo->number = repl->num_entries;
/* Init all hooks to impossible value. */
for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
newinfo->hook_entry[i] = 0xFFFFFFFF;
newinfo->underflow[i] = 0xFFFFFFFF;
}
duprintf("translate_table: size %u\n", newinfo->size);
i = 0;
/* Walk through entries, checking offsets. */
xt_entry_foreach(iter, entry0, newinfo->size) {
ret = check_entry_size_and_hooks(iter, newinfo, entry0,
entry0 + repl->size,
repl->hook_entry,
repl->underflow,
repl->valid_hooks);
if (ret != 0)
break;
++i;
if (strcmp(arpt_get_target(iter)->u.user.name,
XT_ERROR_TARGET) == 0)
++newinfo->stacksize;
}
duprintf("translate_table: ARPT_ENTRY_ITERATE gives %d\n", ret);
if (ret != 0)
return ret;
if (i != repl->num_entries) {
duprintf("translate_table: %u not %u entries\n",
i, repl->num_entries);
return -EINVAL;
}
/* Check hooks all assigned */
for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
/* Only hooks which are valid */
if (!(repl->valid_hooks & (1 << i)))
continue;
if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
duprintf("Invalid hook entry %u %u\n",
i, repl->hook_entry[i]);
return -EINVAL;
}
if (newinfo->underflow[i] == 0xFFFFFFFF) {
duprintf("Invalid underflow %u %u\n",
i, repl->underflow[i]);
return -EINVAL;
}
}
if (!mark_source_chains(newinfo, repl->valid_hooks, entry0)) {
duprintf("Looping hook\n");
return -ELOOP;
}
/* Finally, each sanity check must pass */
i = 0;
xt_entry_foreach(iter, entry0, newinfo->size) {
ret = find_check_entry(iter, repl->name, repl->size);
if (ret != 0)
break;
++i;
}
if (ret != 0) {
xt_entry_foreach(iter, entry0, newinfo->size) {
if (i-- == 0)
break;
cleanup_entry(iter);
}
return ret;
}
/* And one copy for every other CPU */
for_each_possible_cpu(i) {
if (newinfo->entries[i] && newinfo->entries[i] != entry0)
memcpy(newinfo->entries[i], entry0, newinfo->size);
}
return ret;
} | 0 | [
"CWE-200"
] | linux-2.6 | 42eab94fff18cb1091d3501cd284d6bd6cc9c143 | 212,230,573,846,915,370,000,000,000,000,000,000,000 | 91 | netfilter: arp_tables: fix infoleak to userspace
Structures ipt_replace, compat_ipt_replace, and xt_get_revision are
copied from userspace. Fields of these structs that are
zero-terminated strings are not checked. When they are used as argument
to a format string containing "%s" in request_module(), some sensitive
information is leaked to userspace via argument of spawned modprobe
process.
The first bug was introduced before the git epoch; the second is
introduced by 6b7d31fc (v2.6.15-rc1); the third is introduced by
6b7d31fc (v2.6.15-rc1). To trigger the bug one should have
CAP_NET_ADMIN.
Signed-off-by: Vasiliy Kulikov <[email protected]>
Signed-off-by: Patrick McHardy <[email protected]> |
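A fragment sketching the infoleak fix described above: force NUL termination of the userspace-supplied name before it reaches `request_module("%s", ...)`, so the format expansion cannot read past the string into adjacent stack memory. The shape follows the commit message:

```c
struct xt_get_revision rev;

if (copy_from_user(&rev, user, sizeof(rev)) != 0)
	return -EFAULT;
rev.name[sizeof(rev.name) - 1] = '\0';	/* the added termination */
```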
addr_policy_permits_address(uint32_t addr, uint16_t port,
smartlist_t *policy)
{
tor_addr_t a;
tor_addr_from_ipv4h(&a, addr);
return addr_policy_permits_tor_addr(&a, port, policy);
} | 0 | [
"CWE-119"
] | tor | 43414eb98821d3b5c6c65181d7545ce938f82c8e | 300,652,288,575,773,350,000,000,000,000,000,000,000 | 7 | Fix bounds-checking in policy_summarize
Found by piebeer. |
TEST_P(RBACIntegrationTest, HeaderMatchConditionDuplicateHeaderMatch) {
config_helper_.addFilter(fmt::format(RBAC_CONFIG_HEADER_MATCH_CONDITION, "yyy,zzz"));
initialize();
codec_client_ = makeHttpConnection(lookupPort("http"));
auto response = codec_client_->makeRequestWithBody(
Http::TestRequestHeaderMapImpl{
{":method", "POST"},
{":path", "/path"},
{":scheme", "http"},
{":authority", "host"},
{"xxx", "yyy"},
{"xxx", "zzz"},
},
1024);
waitForNextUpstreamRequest();
upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, true);
response->waitForEndStream();
ASSERT_TRUE(response->complete());
EXPECT_EQ("200", response->headers().getStatusValue());
} | 0 | [] | envoy | 2c60632d41555ec8b3d9ef5246242be637a2db0f | 327,253,914,487,143,650,000,000,000,000,000,000,000 | 23 | http: header map security fixes for duplicate headers (#197)
Previously header matching did not match on all headers for
non-inline headers. This patch changes the default behavior to
always logically match on all headers. Multiple individual
headers will be logically concatenated with ',' similar to what
is done with inline headers. This makes the behavior effectively
consistent. This behavior can be temporary reverted by setting
the runtime value "envoy.reloadable_features.header_match_on_all_headers"
to "false".
Targeted fixes have been additionally performed on the following
extensions which make them consider all duplicate headers by default as
a comma concatenated list:
1) Any extension using CEL matching on headers.
2) The header to metadata filter.
3) The JWT filter.
4) The Lua filter.
Like primary header matching used in routing, RBAC, etc. this behavior
can be disabled by setting the runtime value
"envoy.reloadable_features.header_match_on_all_headers" to false.
Finally, the setCopy() header map API previously only set the first
header in the case of duplicate non-inline headers. setCopy() now
behaves similiarly to the other set*() APIs and replaces all found
headers with a single value. This may have had security implications
in the extauth filter which uses this API. This behavior can be disabled
by setting the runtime value
"envoy.reloadable_features.http_set_copy_replace_all_headers" to false.
Fixes https://github.com/envoyproxy/envoy-setec/issues/188
Signed-off-by: Matt Klein <[email protected]> |
void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
{
unsigned long nulls1, nulls2;
nulls1 = offsetof(struct sock, __sk_common.skc_node.next);
nulls2 = offsetof(struct sock, __sk_common.skc_portaddr_node.next);
if (nulls1 > nulls2)
swap(nulls1, nulls2);
if (nulls1 != 0)
memset((char *)sk, 0, nulls1);
memset((char *)sk + nulls1 + sizeof(void *), 0,
nulls2 - nulls1 - sizeof(void *));
memset((char *)sk + nulls2 + sizeof(void *), 0,
size - nulls2 - sizeof(void *));
} | 0 | [
"CWE-119",
"CWE-787"
] | linux | 82981930125abfd39d7c8378a9cfdf5e1be2002b | 276,644,800,603,913,700,000,000,000,000,000,000,000 | 16 | net: cleanups in sock_setsockopt()
Use min_t()/max_t() macros, reformat two comments, use !!test_bit() to
match !!sock_flag()
Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
static int auto_connect(struct socket *sock, struct tipc_msg *msg)
{
struct tipc_sock *tsock = tipc_sk(sock->sk);
struct tipc_port *p_ptr;
tsock->peer_name.ref = msg_origport(msg);
tsock->peer_name.node = msg_orignode(msg);
p_ptr = tipc_port_deref(tsock->p->ref);
if (!p_ptr)
return -EINVAL;
__tipc_connect(tsock->p->ref, p_ptr, &tsock->peer_name);
if (msg_importance(msg) > TIPC_CRITICAL_IMPORTANCE)
return -EINVAL;
msg_set_importance(&p_ptr->phdr, (u32)msg_importance(msg));
sock->state = SS_CONNECTED;
return 0;
} | 0 | [
"CWE-20",
"CWE-269"
] | linux | f3d3342602f8bcbf37d7c46641cb9bca7618eb1c | 57,802,553,408,532,610,000,000,000,000,000,000,000 | 19 | net: rework recvmsg handler msg_name and msg_namelen logic
This patch now always passes msg->msg_namelen as 0. recvmsg handlers must
set msg_namelen to the proper size <= sizeof(struct sockaddr_storage)
to return msg_name to the user.
This prevents numerous uninitialized memory leaks we had in the
recvmsg handlers and makes it harder for new code to accidentally leak
uninitialized memory.
Optimize for the case recvfrom is called with NULL as address. We don't
need to copy the address at all, so set it to NULL before invoking the
recvmsg handler. We can do so, because all the recvmsg handlers must
cope with the case a plain read() is called on them. read() also sets
msg_name to NULL.
Also document these changes in include/linux/net.h as suggested by David
Miller.
Changes since RFC:
Set msg->msg_name = NULL if user specified a NULL in msg_name but had a
non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't
affect sendto as it would bail out earlier while trying to copy-in the
address. It also more naturally reflects the logic by the callers of
verify_iovec.
With this change in place I could remove "
if (!uaddr || msg_sys->msg_namelen == 0)
msg->msg_name = NULL
".
This change does not alter the user visible error logic as we ignore
msg_namelen as long as msg_name is NULL.
Also remove two unnecessary curly brackets in ___sys_recvmsg and change
comments to netdev style.
Cc: David Miller <[email protected]>
Suggested-by: Eric Dumazet <[email protected]>
Signed-off-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
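A fragment sketching the handler contract the commit message establishes: `msg_namelen` now starts at 0, and a recvmsg handler sets it (to at most `sizeof(struct sockaddr_storage)`) only after fully initializing `msg_name`. The address type here is illustrative:

```c
if (m->msg_name != NULL) {
	struct sockaddr_tipc *addr = m->msg_name;

	memset(addr, 0, sizeof(*addr));	/* no uninitialized padding leaks */
	/* ... fill the address fields from the received message ... */
	m->msg_namelen = sizeof(*addr);	/* set only when actually filled */
}
```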
void ndp_msgra_flag_managed_set(struct ndp_msgra *msgra, bool flag_managed)
{
if (flag_managed)
msgra->ra->nd_ra_flags_reserved |= ND_RA_FLAG_MANAGED;
else
msgra->ra->nd_ra_flags_reserved &= ~ND_RA_FLAG_MANAGED;
} | 0 | [
"CWE-284"
] | libndp | a4892df306e0532487f1634ba6d4c6d4bb381c7f | 5,243,920,876,024,055,000,000,000,000,000,000,000 | 7 | libndp: validate the IPv6 hop limit
None of the NDP messages should ever come from a non-local network; as
stated in RFC4861's 6.1.1 (RS), 6.1.2 (RA), 7.1.1 (NS), 7.1.2 (NA),
and 8.1. (redirect):
- The IP Hop Limit field has a value of 255, i.e., the packet
could not possibly have been forwarded by a router.
This fixes CVE-2016-3698.
Reported by: Julien BERNARD <[email protected]>
Signed-off-by: Lubomir Rintel <[email protected]>
Signed-off-by: Jiri Pirko <[email protected]> |
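A fragment sketching the validation the commit adds, with the hop-limit variable assumed to come from the socket's IPV6_HOPLIMIT ancillary data: RFC 4861 requires every ND message to arrive with a hop limit of exactly 255, proving it was never forwarded by a router:

```c
if (hoplimit < 255)
	return 0;	/* silently drop: spoofed or forwarded ND packet */
```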
static UINT cliprdr_server_receive_format_list(CliprdrServerContext* context, wStream* s,
const CLIPRDR_HEADER* header)
{
CLIPRDR_FORMAT_LIST formatList;
UINT error = CHANNEL_RC_OK;
formatList.msgType = CB_FORMAT_LIST;
formatList.msgFlags = header->msgFlags;
formatList.dataLen = header->dataLen;
if ((error = cliprdr_read_format_list(s, &formatList, context->useLongFormatNames)))
goto out;
WLog_DBG(TAG, "ClientFormatList: numFormats: %" PRIu32 "", formatList.numFormats);
IFCALLRET(context->ClientFormatList, error, context, &formatList);
if (error)
WLog_ERR(TAG, "ClientFormatList failed with error %" PRIu32 "!", error);
out:
cliprdr_free_format_list(&formatList);
return error;
} | 0 | [] | FreeRDP | 8e1a1b407565eb0a48923c796f5b1f69167b3c48 | 318,686,276,242,293,220,000,000,000,000,000,000,000 | 23 | Fixed cliprdr_server_receive_capabilities
Thanks to hac425 CVE-2020-11017, CVE-2020-11018 |
static inline bool should_force_cow_break(struct vm_area_struct *vma, unsigned int flags)
{
return is_cow_mapping(vma->vm_flags) && (flags & (FOLL_GET | FOLL_PIN));
} | 0 | [
"CWE-362"
] | linux | 17839856fd588f4ab6b789f482ed3ffd7c403e1f | 337,592,755,885,211,860,000,000,000,000,000,000,000 | 4 | gup: document and work around "COW can break either way" issue
Doing a "get_user_pages()" on a copy-on-write page for reading can be
ambiguous: the page can be COW'ed at any time afterwards, and the
direction of a COW event isn't defined.
Yes, whoever writes to it will generally do the COW, but if the thread
that did the get_user_pages() unmapped the page before the write (and
that could happen due to memory pressure in addition to any outright
action), the writer could also just take over the old page instead.
End result: the get_user_pages() call might result in a page pointer
that is no longer associated with the original VM, and is associated
with - and controlled by - another VM having taken it over instead.
So when doing a get_user_pages() on a COW mapping, the only really safe
thing to do would be to break the COW when getting the page, even when
only getting it for reading.
At the same time, some users simply don't even care.
For example, the perf code wants to look up the page not because it
cares about the page, but because the code simply wants to look up the
physical address of the access for informational purposes, and doesn't
really care about races when a page might be unmapped and remapped
elsewhere.
This adds logic to force a COW event by setting FOLL_WRITE on any
copy-on-write mapping when FOLL_GET (or FOLL_PIN) is used to get a page
pointer as a result.
The current semantics end up being:
- __get_user_pages_fast(): no change. If you don't ask for a write,
you won't break COW. You'd better know what you're doing.
- get_user_pages_fast(): the fast-case "look it up in the page tables
without anything getting mmap_sem" now refuses to follow a read-only
page, since it might need COW breaking. Which happens in the slow
path - the fast path doesn't know if the memory might be COW or not.
- get_user_pages() (including the slow-path fallback for gup_fast()):
for a COW mapping, turn on FOLL_WRITE for FOLL_GET/FOLL_PIN, with
very similar semantics to FOLL_FORCE.
If it turns out that we want finer granularity (ie "only break COW when
it might actually matter" - things like the zero page are special and
don't need to be broken) we might need to push these semantics deeper
into the lookup fault path. So if people care enough, it's possible
that we might end up adding a new internal FOLL_BREAK_COW flag to go
with the internal FOLL_COW flag we already have for tracking "I had a
COW".
Alternatively, if it turns out that different callers might want to
explicitly control the forced COW break behavior, we might even want to
make such a flag visible to the users of get_user_pages() instead of
using the above default semantics.
But for now, this is mostly commentary on the issue (this commit message
being a lot bigger than the patch, and that patch in turn is almost all
comments), with that minimal "enable COW breaking early" logic using the
existing FOLL_WRITE behavior.
[ It might be worth noting that we've always had this ambiguity, and it
could arguably be seen as a user-space issue.
You only get private COW mappings that could break either way in
situations where user space is doing cooperative things (ie fork()
before an execve() etc), but it _is_ surprising and very subtle, and
fork() is supposed to give you independent address spaces.
So let's treat this as a kernel issue and make the semantics of
get_user_pages() easier to understand. Note that obviously a true
shared mapping will still get a page that can change under us, so this
does _not_ mean that get_user_pages() somehow returns any "stable"
page ]
Reported-by: Jann Horn <[email protected]>
Tested-by: Christoph Hellwig <[email protected]>
Acked-by: Oleg Nesterov <[email protected]>
Acked-by: Kirill Shutemov <[email protected]>
Acked-by: Jan Kara <[email protected]>
Cc: Andrea Arcangeli <[email protected]>
Cc: Matthew Wilcox <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
size_t reserve, size_t len,
size_t linear, int noblock,
int *err)
{
struct sk_buff *skb;
/* Under a page? Don't bother with paged skb. */
if (prepad + len < PAGE_SIZE || !linear)
linear = len;
skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
err, 0);
if (!skb)
return NULL;
skb_reserve(skb, reserve);
skb_put(skb, linear);
skb->data_len = len - linear;
skb->len += len - linear;
return skb;
} | 0 | [
"CWE-416",
"CWE-362"
] | linux | 84ac7260236a49c79eede91617700174c2c19b0c | 325,900,145,650,344,400,000,000,000,000,000,000,000 | 23 | packet: fix race condition in packet_set_ring
When packet_set_ring creates a ring buffer it will initialize a
struct timer_list if the packet version is TPACKET_V3. This value
can then be raced by a different thread calling setsockopt to
set the version to TPACKET_V1 before packet_set_ring has finished.
This leads to a use-after-free on a function pointer in the
struct timer_list when the socket is closed as the previously
initialized timer will not be deleted.
The bug is fixed by taking lock_sock(sk) in packet_setsockopt when
changing the packet version while also taking the lock at the start
of packet_set_ring.
Fixes: f6fb8f100b80 ("af-packet: TPACKET_V3 flexible buffer implementation.")
Signed-off-by: Philip Pettersson <[email protected]>
Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
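A schematic of the fix: the version-changing setsockopt path and packet_set_ring() serialize on the same socket lock, so the packet version can no longer flip while the V3 ring (and its retire timer) is being set up. A pthread mutex stands in for lock_sock(); the structure and error value are invented for illustration.

#include <pthread.h>

/* Illustrative stand-in for the af_packet socket state. */
struct pkt_sock {
	pthread_mutex_t lock;   /* plays the role of lock_sock(sk) */
	int tp_version;         /* TPACKET_V1 .. TPACKET_V3 */
	int ring_active;        /* ring buffer already attached? */
};

/* Version changes take the same lock as ring setup, so neither
 * side can observe the other half-done. */
static int set_version_locked(struct pkt_sock *sk, int version)
{
	int err = 0;

	pthread_mutex_lock(&sk->lock);
	if (sk->ring_active)
		err = -1;       /* -EBUSY in the real code */
	else
		sk->tp_version = version;
	pthread_mutex_unlock(&sk->lock);
	return err;
}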
TEST(HeaderMapImplTest, DoubleCookieAdd) {
HeaderMapImpl headers;
const std::string foo("foo");
const std::string bar("bar");
const LowerCaseString& set_cookie = Http::Headers::get().SetCookie;
headers.addReference(set_cookie, foo);
headers.addReference(set_cookie, bar);
EXPECT_EQ(2UL, headers.size());
EXPECT_EQ(headers.byteSize().value(), countBytesForTest(headers));
std::vector<absl::string_view> out;
Http::HeaderUtility::getAllOfHeader(headers, "set-cookie", out);
ASSERT_EQ(out.size(), 2);
ASSERT_EQ(out[0], "foo");
ASSERT_EQ(out[1], "bar");
} | 0 | [
"CWE-400",
"CWE-703"
] | envoy | afc39bea36fd436e54262f150c009e8d72db5014 | 274,110,047,356,077,930,000,000,000,000,000,000,000 | 16 | Track byteSize of HeaderMap internally.
Introduces a cached byte size updated internally in HeaderMap. The value
is stored as an optional, and is cleared whenever a non-const pointer or
reference to a HeaderEntry is accessed. The cached value can be set with
refreshByteSize() which performs an iteration over the HeaderMap to sum
the size of each key and value in the HeaderMap.
Signed-off-by: Asra Ali <[email protected]> |
UnicodeString &UnicodeString::setToUTF8(StringPiece utf8) {
unBogus();
int32_t length = utf8.length();
int32_t capacity;
// The UTF-16 string will be at most as long as the UTF-8 string.
if(length <= US_STACKBUF_SIZE) {
capacity = US_STACKBUF_SIZE;
} else {
capacity = length + 1; // +1 for the terminating NUL.
}
UChar *utf16 = getBuffer(capacity);
int32_t length16;
UErrorCode errorCode = U_ZERO_ERROR;
u_strFromUTF8WithSub(utf16, getCapacity(), &length16,
utf8.data(), length,
0xfffd, // Substitution character.
NULL, // Don't care about number of substitutions.
&errorCode);
releaseBuffer(length16);
if(U_FAILURE(errorCode)) {
setToBogus();
}
return *this;
} | 0 | [
"CWE-190",
"CWE-787"
] | icu | b7d08bc04a4296982fcef8b6b8a354a9e4e7afca | 138,053,458,511,023,630,000,000,000,000,000,000,000 | 24 | ICU-20958 Prevent SEGV_MAPERR in append
See #971 |
static uint32_t fdctrl_read_statusA(FDCtrl *fdctrl)
{
uint32_t retval = fdctrl->sra;
FLOPPY_DPRINTF("status register A: 0x%02x\n", retval);
return retval;
} | 0 | [
"CWE-119"
] | qemu | e907746266721f305d67bc0718795fedee2e824c | 268,749,071,143,948,000,000,000,000,000,000,000,000 | 8 | fdc: force the fifo access to be in bounds of the allocated buffer
During processing of certain commands such as FD_CMD_READ_ID and
FD_CMD_DRIVE_SPECIFICATION_COMMAND the fifo memory access could
get out of bounds leading to memory corruption with values coming
from the guest.
Fix this by making sure that the index is always bounded by the
allocated memory.
This is CVE-2015-3456.
Signed-off-by: Petr Matousek <[email protected]>
Reviewed-by: John Snow <[email protected]>
Signed-off-by: John Snow <[email protected]> |
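The fix's approach - force every FIFO index into the bounds of the sector buffer - can be sketched as a modulo clamp at the single access point. The struct below is a stand-in for QEMU's FDCtrl, not the real type.

#include <stdint.h>

#define FD_SECTOR_LEN 512

struct fd_ctrl_sketch {
	uint8_t fifo[FD_SECTOR_LEN];
	uint32_t data_pos;      /* may be driven by the guest */
};

/* All FIFO reads/writes go through an index reduced into
 * [0, FD_SECTOR_LEN), so a corrupted data_pos cannot index
 * outside the allocation. */
static uint8_t fifo_read(struct fd_ctrl_sketch *c)
{
	uint32_t pos = c->data_pos++ % FD_SECTOR_LEN;
	return c->fifo[pos];
}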
jas_matrix_t *jas_matrix_create(jas_matind_t numrows, jas_matind_t numcols)
{
jas_matrix_t *matrix;
jas_matind_t i;
size_t size;
matrix = 0;
if (numrows < 0 || numcols < 0) {
return NULL;
}
// matrix->datasize_ = numrows * numcols;
if (!jas_safe_size_mul(numrows, numcols, &size)) {
return NULL;
}
if (!(matrix = jas_malloc(sizeof(jas_matrix_t)))) {
return NULL;
}
matrix->flags_ = 0;
matrix->numrows_ = numrows;
matrix->numcols_ = numcols;
matrix->rows_ = 0;
matrix->maxrows_ = numrows;
matrix->data_ = 0;
matrix->datasize_ = size;
if (matrix->maxrows_ > 0) {
if (!(matrix->rows_ = jas_alloc2(matrix->maxrows_,
sizeof(jas_seqent_t *)))) {
goto error;
}
}
if (matrix->datasize_ > 0) {
if (!(matrix->data_ = jas_alloc2(matrix->datasize_,
sizeof(jas_seqent_t)))) {
goto error;
}
memset(matrix->data_, 0,
matrix->datasize_ * sizeof(jas_seqent_t));
}
for (i = 0; i < numrows; ++i) {
matrix->rows_[i] = &matrix->data_[i * matrix->numcols_];
}
matrix->xstart_ = 0;
matrix->ystart_ = 0;
matrix->xend_ = matrix->numcols_;
matrix->yend_ = matrix->numrows_;
return matrix;
error:
jas_matrix_destroy(matrix);
return 0;
} | 0 | [
"CWE-787"
] | jasper | e2f2e5f4022baef2386eec25c57b63debfe4cb20 | 303,460,166,954,997,720,000,000,000,000,000,000,000 | 60 | jas_seq: check bounds in jas_seq2d_bindsub()
Fixes CVE-2017-5503, CVE-2017-5504, CVE-2017-5505,
Closes https://github.com/jasper-maint/jasper/issues/3
Closes https://github.com/jasper-maint/jasper/issues/4
Closes https://github.com/jasper-maint/jasper/issues/5
Closes https://github.com/mdadams/jasper/issues/88
Closes https://github.com/mdadams/jasper/issues/89
Closes https://github.com/mdadams/jasper/issues/90 |
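The referenced fix lands in jas_seq2d_bindsub(), not in the creation routine shown above; per the title, its essence is a bounds check on the requested subregion before binding. A minimal sketch of such a check, with fields only loosely shaped like jas_matrix_t:

#include <stdbool.h>

/* Illustrative region type; the real code works on jas_matrix_t. */
struct seq_region {
	long xstart, ystart, xend, yend;
};

/* A submatrix bind is only legal if the requested window is
 * non-empty and lies entirely inside the parent sequence. */
static bool subregion_in_bounds(const struct seq_region *parent,
                                const struct seq_region *sub)
{
	return sub->xstart <= sub->xend &&
	       sub->ystart <= sub->yend &&
	       sub->xstart >= parent->xstart &&
	       sub->ystart >= parent->ystart &&
	       sub->xend <= parent->xend &&
	       sub->yend <= parent->yend;
}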
alloc_does_fail(size_t size)
{
if (alloc_fail_countdown == 0)
{
if (--alloc_fail_repeat <= 0)
alloc_fail_id = 0;
do_outofmem_msg(size);
return TRUE;
}
--alloc_fail_countdown;
return FALSE;
} | 0 | [
"CWE-416"
] | vim | 9f1a39a5d1cd7989ada2d1cb32f97d84360e050f | 15,057,239,232,293,927,000,000,000,000,000,000,000 | 12 | patch 8.2.4040: keeping track of allocated lines is too complicated
Problem: Keeping track of allocated lines in user functions is too
complicated.
Solution: Instead of freeing individual lines keep them all until the end. |
static bool decode_flag_request(void *mem_ctx, DATA_BLOB in, void *_out)
{
if (in.length != 0) {
return false;
}
return true;
} | 0 | [
"CWE-399"
] | samba | 530d50a1abdcdf4d1775652d4c456c1274d83d8d | 182,364,574,253,734,440,000,000,000,000,000,000,000 | 8 | CVE-2015-7540: s4: libcli: ldap message - Ensure all asn1_XX returns are checked.
BUG: https://bugzilla.samba.org/show_bug.cgi?id=9187
Signed-off-by: Jeremy Allison <[email protected]>
Reviewed-by: Ronnie Sahlberg <[email protected]>
Autobuild-User(master): Jeremy Allison <[email protected]>
Autobuild-Date(master): Fri Sep 26 03:15:00 CEST 2014 on sn-devel-104
(cherry picked from commit 69a7e3cfdc8dbba9c8dcfdfae82d2894c7247e15) |
RZ_API RZ_OWN char *rz_bin_dex_version(RZ_NONNULL RzBinDex *dex) {
rz_return_val_if_fail(dex, NULL);
// https://cs.android.com/android/platform/superproject/+/master:dalvik/dx/src/com/android/dex/DexFormat.java;l=55;bpv=1;bpt=0
// https://developer.android.com/studio/releases/platforms
if (!strncmp((char *)dex->version, "009", 3)) {
return strdup("Android M3 release (Nov-Dec 2007)");
} else if (!strncmp((char *)dex->version, "013", 3)) {
return strdup("Android M5 release (Feb-Mar 2008)");
} else if (!strncmp((char *)dex->version, "035", 3)) {
return strdup("Android 3.2 (API level 13 and earlier)");
} else if (!strncmp((char *)dex->version, "037", 3)) {
return strdup("Android 7 (API level 24 and earlier)");
} else if (!strncmp((char *)dex->version, "038", 3)) {
return strdup("Android 8 (API level 26 and earlier)");
} else if (!strncmp((char *)dex->version, "039", 3)) {
return strdup("Android 9 (API level 28 and earlier)");
} else if (!strncmp((char *)dex->version, "040", 3)) {
return strdup("Android 10+ (Aug 2019)");
}
return NULL;
} | 0 | [
"CWE-787"
] | rizin | 1524f85211445e41506f98180f8f69f7bf115406 | 89,284,723,258,432,330,000,000,000,000,000,000,000 | 21 | fix #2969 - oob write (1 byte) in dex.c |
_gnutls_copy_comp_methods (gnutls_session_t session,
opaque * ret_data, size_t ret_data_size)
{
int ret, i;
uint8_t *compression_methods, comp_num;
int datalen, pos;
ret = _gnutls_supported_compression_methods (session, &compression_methods);
if (ret < 0)
{
gnutls_assert ();
return ret;
}
comp_num = ret;
datalen = pos = 0;
datalen += comp_num + 1;
if ((size_t) datalen > ret_data_size)
{
gnutls_assert ();
return GNUTLS_E_INTERNAL_ERROR;
}
ret_data[pos++] = comp_num; /* put the number of compression methods */
for (i = 0; i < comp_num; i++)
{
ret_data[pos++] = compression_methods[i];
}
gnutls_free (compression_methods);
return datalen;
} | 0 | [
"CWE-189"
] | gnutls | bc8102405fda11ea00ca3b42acc4f4bce9d6e97b | 201,535,816,437,954,050,000,000,000,000,000,000,000 | 36 | Fix GNUTLS-SA-2008-1 security vulnerabilities.
See http://www.gnu.org/software/gnutls/security.html for updates. |
static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
struct hci_cp_write_ssp_mode *sent;
BT_DBG("%s status 0x%2.2x", hdev->name, status);
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
if (!sent)
return;
hci_dev_lock(hdev);
if (!status) {
if (sent->mode)
hdev->features[1][0] |= LMP_HOST_SSP;
else
hdev->features[1][0] &= ~LMP_HOST_SSP;
}
if (hci_dev_test_flag(hdev, HCI_MGMT))
mgmt_ssp_enable_complete(hdev, sent->mode, status);
else if (!status) {
if (sent->mode)
hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
else
hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
}
hci_dev_unlock(hdev);
} | 0 | [
"CWE-290"
] | linux | 3ca44c16b0dcc764b641ee4ac226909f5c421aa3 | 278,427,985,181,823,640,000,000,000,000,000,000,000 | 31 | Bluetooth: Consolidate encryption handling in hci_encrypt_cfm
This makes hci_encrypt_cfm call hci_connect_cfm in case the connection
state is BT_CONFIG, so callers don't have to check the state.
Signed-off-by: Luiz Augusto von Dentz <[email protected]>
Signed-off-by: Marcel Holtmann <[email protected]> |
char *fzofft(__G__ val, pre, post)
__GDEF
zoff_t val;
ZCONST char *pre;
ZCONST char *post;
{
/* Storage cylinder. (now in globals.h) */
/*static char fzofft_buf[FZOFFT_NUM][FZOFFT_LEN];*/
/*static int fzofft_index = 0;*/
/* Temporary format string storage. */
char fmt[16];
/* Assemble the format string. */
fmt[0] = '%';
fmt[1] = '\0'; /* Start after initial "%". */
if (pre == FZOFFT_HEX_WID) /* Special hex width. */
{
strcat(fmt, FZOFFT_HEX_WID_VALUE);
}
else if (pre == FZOFFT_HEX_DOT_WID) /* Special hex ".width". */
{
strcat(fmt, ".");
strcat(fmt, FZOFFT_HEX_WID_VALUE);
}
else if (pre != NULL) /* Caller's prefix (width). */
{
strcat(fmt, pre);
}
strcat(fmt, FZOFFT_FMT); /* Long or long-long or whatever. */
if (post == NULL)
strcat(fmt, "d"); /* Default radix = decimal. */
else
strcat(fmt, post); /* Caller's radix. */
/* Advance the cylinder. */
G.fzofft_index = (G.fzofft_index + 1) % FZOFFT_NUM;
/* Write into the current chamber. */
sprintf(G.fzofft_buf[G.fzofft_index], fmt, val);
/* Return a pointer to this chamber. */
return G.fzofft_buf[G.fzofft_index];
} | 0 | [
"CWE-400"
] | unzip | 41beb477c5744bc396fa1162ee0c14218ec12213 | 174,358,862,235,245,660,000,000,000,000,000,000,000 | 46 | Fix bug in undefer_input() that misplaced the input state. |
static int hugetlbfs_write_begin(struct file *file,
struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
{
return -EINVAL;
} | 0 | [] | linux | 1bfad99ab42569807d0ca1698449cae5e8c0334a | 284,361,145,588,413,270,000,000,000,000,000,000,000 | 7 | hugetlbfs: hugetlb_vmtruncate_list() needs to take a range to delete
fallocate hole punch will want to unmap a specific range of pages.
Modify the existing hugetlb_vmtruncate_list() routine to take a
start/end range. If end is 0, this indicates all pages after start
should be unmapped. This is the same as the existing truncate
functionality. Modify existing callers to add 0 as end of range.
Since the routine will be used in hole punch as well as truncate
operations, it is more appropriately renamed to hugetlb_vmdelete_list().
Signed-off-by: Mike Kravetz <[email protected]>
Reviewed-by: Naoya Horiguchi <[email protected]>
Acked-by: Hillf Danton <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Davidlohr Bueso <[email protected]>
Cc: Aneesh Kumar <[email protected]>
Cc: Christoph Hellwig <[email protected]>
Cc: Michal Hocko <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
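The range convention the message introduces - end == 0 meaning "unmap everything from start onward" - reduces to a small predicate. This sketch uses simplified types and stands apart from the real VMA-walking code.

/* Does page index idx fall inside the delete range [start, end),
 * where end == 0 means "no upper bound"? Types simplified. */
typedef unsigned long long hpg_off_t;

static int in_vmdelete_range(hpg_off_t idx, hpg_off_t start, hpg_off_t end)
{
	return idx >= start && (end == 0 || idx < end);
}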
static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
{
struct sock *sk = tport->sk;
u32 res;
/*
* Process message if socket is unlocked; otherwise add to backlog queue
*
* This code is based on sk_receive_skb(), but must be distinct from it
* since a TIPC-specific filter/reject mechanism is utilized
*/
bh_lock_sock(sk);
if (!sock_owned_by_user(sk)) {
res = filter_rcv(sk, buf);
} else {
if (sk_add_backlog(sk, buf, rcvbuf_limit(sk, buf)))
res = TIPC_ERR_OVERLOAD;
else
res = TIPC_OK;
}
bh_unlock_sock(sk);
return res;
} | 0 | [
"CWE-20",
"CWE-269"
] | linux | f3d3342602f8bcbf37d7c46641cb9bca7618eb1c | 200,791,656,522,547,500,000,000,000,000,000,000,000 | 24 | net: rework recvmsg handler msg_name and msg_namelen logic
This patch now always passes msg->msg_namelen as 0. recvmsg handlers must
set msg_namelen to the proper size <= sizeof(struct sockaddr_storage)
to return msg_name to the user.
This prevents numerous uninitialized memory leaks we had in the
recvmsg handlers and makes it harder for new code to accidentally leak
uninitialized memory.
Optimize for the case recvfrom is called with NULL as address. We don't
need to copy the address at all, so set it to NULL before invoking the
recvmsg handler. We can do so, because all the recvmsg handlers must
cope with the case a plain read() is called on them. read() also sets
msg_name to NULL.
Also document these changes in include/linux/net.h as suggested by David
Miller.
Changes since RFC:
Set msg->msg_name = NULL if user specified a NULL in msg_name but had a
non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't
affect sendto as it would bail out earlier while trying to copy-in the
address. It also more naturally reflects the logic by the callers of
verify_iovec.
With this change in place I could remove "
if (!uaddr || msg_sys->msg_namelen == 0)
msg->msg_name = NULL
".
This change does not alter the user visible error logic as we ignore
msg_namelen as long as msg_name is NULL.
Also remove two unnecessary curly brackets in ___sys_recvmsg and change
comments to netdev style.
Cc: David Miller <[email protected]>
Suggested-by: Eric Dumazet <[email protected]>
Signed-off-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
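The contract this message spells out - msg_namelen always passed in as 0, and a handler setting it only when it actually fills msg_name - can be shown in miniature. The msghdr and sockaddr types below are reduced stand-ins, not the kernel's.

#include <string.h>

struct mini_sockaddr { unsigned short family; char data[14]; };
struct mini_msghdr {
	void *msg_name;     /* NULL when the caller passed no buffer */
	int   msg_namelen;  /* always 0 on entry under the new rule */
};

/* A recvmsg handler under the new convention: either both fields
 * are filled, or neither is touched, so no uninitialized bytes can
 * leak back to user space. */
static int demo_recvmsg(struct mini_msghdr *msg,
                        const struct mini_sockaddr *peer)
{
	if (msg->msg_name != NULL) {
		memcpy(msg->msg_name, peer, sizeof(*peer));
		msg->msg_namelen = sizeof(*peer);
	}
	return 0;
}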
static int exif_process_user_comment(image_info_type *ImageInfo, char **pszInfoPtr, char **pszEncoding, char *szValuePtr, int ByteCount)
{
int a;
char *decode;
size_t len;
*pszEncoding = NULL;
/* Copy the comment */
if (ByteCount>=8) {
const zend_encoding *from, *to;
if (!memcmp(szValuePtr, "UNICODE\0", 8)) {
*pszEncoding = estrdup((const char*)szValuePtr);
szValuePtr = szValuePtr+8;
ByteCount -= 8;
/* First try to detect BOM: ZERO WIDTH NOBREAK SPACE (FEFF 16)
* since we have no encoding support for the BOM yet we skip that.
*/
if (!memcmp(szValuePtr, "\xFE\xFF", 2)) {
decode = "UCS-2BE";
szValuePtr = szValuePtr+2;
ByteCount -= 2;
} else if (!memcmp(szValuePtr, "\xFF\xFE", 2)) {
decode = "UCS-2LE";
szValuePtr = szValuePtr+2;
ByteCount -= 2;
} else if (ImageInfo->motorola_intel) {
decode = ImageInfo->decode_unicode_be;
} else {
decode = ImageInfo->decode_unicode_le;
}
to = zend_multibyte_fetch_encoding(ImageInfo->encode_unicode);
from = zend_multibyte_fetch_encoding(decode);
/* XXX this will fail again if encoding_converter returns on error something different than SIZE_MAX */
if (!to || !from || zend_multibyte_encoding_converter(
(unsigned char**)pszInfoPtr,
&len,
(unsigned char*)szValuePtr,
ByteCount,
to,
from) == (size_t)-1) {
len = exif_process_string_raw(pszInfoPtr, szValuePtr, ByteCount);
}
return len;
} else if (!memcmp(szValuePtr, "ASCII\0\0\0", 8)) {
*pszEncoding = estrdup((const char*)szValuePtr);
szValuePtr = szValuePtr+8;
ByteCount -= 8;
} else if (!memcmp(szValuePtr, "JIS\0\0\0\0\0", 8)) {
		/* JIS should be translated to MB or we leave it to the user - leave it to the user */
*pszEncoding = estrdup((const char*)szValuePtr);
szValuePtr = szValuePtr+8;
ByteCount -= 8;
/* XXX this will fail again if encoding_converter returns on error something different than SIZE_MAX */
to = zend_multibyte_fetch_encoding(ImageInfo->encode_jis);
from = zend_multibyte_fetch_encoding(ImageInfo->motorola_intel ? ImageInfo->decode_jis_be : ImageInfo->decode_jis_le);
if (!to || !from || zend_multibyte_encoding_converter(
(unsigned char**)pszInfoPtr,
&len,
(unsigned char*)szValuePtr,
ByteCount,
to,
from) == (size_t)-1) {
len = exif_process_string_raw(pszInfoPtr, szValuePtr, ByteCount);
}
return len;
} else if (!memcmp(szValuePtr, "\0\0\0\0\0\0\0\0", 8)) {
/* 8 NULL means undefined and should be ASCII... */
*pszEncoding = estrdup("UNDEFINED");
szValuePtr = szValuePtr+8;
ByteCount -= 8;
}
}
/* Olympus has this padded with trailing spaces. Remove these first. */
if (ByteCount>0) {
for (a=ByteCount-1;a && szValuePtr[a]==' ';a--) {
(szValuePtr)[a] = '\0';
}
}
/* normal text without encoding */
exif_process_string(pszInfoPtr, szValuePtr, ByteCount);
return strlen(*pszInfoPtr);
} | 1 | [
"CWE-125"
] | php-src | e648fa4699e8d072db6db34fcc09826e8127fab8 | 5,427,184,732,579,812,000,000,000,000,000,000,000 | 84 | Fix bug #78256 (heap-buffer-overflow on exif_process_user_comment)
(cherry picked from commit aeb6d13185a2ea4f1496ede2697469faed98ce05) |
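Given the CWE-125 label, a plausible culprit in the function above is the 2-byte BOM probe that runs after the 8-byte "UNICODE\0" tag has been consumed: when ByteCount was exactly 8, the probe reads past the end of the value. The sketch below shows the kind of remaining-length guard such a fix adds; it illustrates the pattern and is not the verbatim upstream patch.

#include <string.h>

/* Pick a UCS-2 decoder from an optional BOM, never reading past
 * the remaining bytes. 'remaining' is what is left after the
 * charset tag; the fallback mirrors the motorola_intel choice. */
static const char *pick_ucs2_decoder(const char *p, int remaining,
                                     int default_big_endian)
{
	if (remaining >= 2 && !memcmp(p, "\xFE\xFF", 2))
		return "UCS-2BE";
	if (remaining >= 2 && !memcmp(p, "\xFF\xFE", 2))
		return "UCS-2LE";
	return default_big_endian ? "UCS-2BE" : "UCS-2LE";
}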
void Save() {
url_.Save();
for (int i = 0; i < num_fields_; i++) {
fields_[i].Save();
}
for (int i = 0; i < num_values_; i++) {
values_[i].Save();
}
} | 0 | [] | node | 7b3fb22290c3b6acb497ca85cf2f1648d75c8154 | 274,947,491,642,862,660,000,000,000,000,000,000,000 | 11 | typo in node_http_parser |
static int check_map_func_compatibility(struct bpf_verifier_env *env,
struct bpf_map *map, int func_id)
{
if (!map)
return 0;
/* We need a two way check, first is from map perspective ... */
switch (map->map_type) {
case BPF_MAP_TYPE_PROG_ARRAY:
if (func_id != BPF_FUNC_tail_call)
goto error;
break;
case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
if (func_id != BPF_FUNC_perf_event_read &&
func_id != BPF_FUNC_perf_event_output &&
func_id != BPF_FUNC_skb_output &&
func_id != BPF_FUNC_perf_event_read_value &&
func_id != BPF_FUNC_xdp_output)
goto error;
break;
case BPF_MAP_TYPE_RINGBUF:
if (func_id != BPF_FUNC_ringbuf_output &&
func_id != BPF_FUNC_ringbuf_reserve &&
func_id != BPF_FUNC_ringbuf_query)
goto error;
break;
case BPF_MAP_TYPE_STACK_TRACE:
if (func_id != BPF_FUNC_get_stackid)
goto error;
break;
case BPF_MAP_TYPE_CGROUP_ARRAY:
if (func_id != BPF_FUNC_skb_under_cgroup &&
func_id != BPF_FUNC_current_task_under_cgroup)
goto error;
break;
case BPF_MAP_TYPE_CGROUP_STORAGE:
case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
if (func_id != BPF_FUNC_get_local_storage)
goto error;
break;
case BPF_MAP_TYPE_DEVMAP:
case BPF_MAP_TYPE_DEVMAP_HASH:
if (func_id != BPF_FUNC_redirect_map &&
func_id != BPF_FUNC_map_lookup_elem)
goto error;
break;
/* Restrict bpf side of cpumap and xskmap, open when use-cases
* appear.
*/
case BPF_MAP_TYPE_CPUMAP:
if (func_id != BPF_FUNC_redirect_map)
goto error;
break;
case BPF_MAP_TYPE_XSKMAP:
if (func_id != BPF_FUNC_redirect_map &&
func_id != BPF_FUNC_map_lookup_elem)
goto error;
break;
case BPF_MAP_TYPE_ARRAY_OF_MAPS:
case BPF_MAP_TYPE_HASH_OF_MAPS:
if (func_id != BPF_FUNC_map_lookup_elem)
goto error;
break;
case BPF_MAP_TYPE_SOCKMAP:
if (func_id != BPF_FUNC_sk_redirect_map &&
func_id != BPF_FUNC_sock_map_update &&
func_id != BPF_FUNC_map_delete_elem &&
func_id != BPF_FUNC_msg_redirect_map &&
func_id != BPF_FUNC_sk_select_reuseport &&
func_id != BPF_FUNC_map_lookup_elem &&
!may_update_sockmap(env, func_id))
goto error;
break;
case BPF_MAP_TYPE_SOCKHASH:
if (func_id != BPF_FUNC_sk_redirect_hash &&
func_id != BPF_FUNC_sock_hash_update &&
func_id != BPF_FUNC_map_delete_elem &&
func_id != BPF_FUNC_msg_redirect_hash &&
func_id != BPF_FUNC_sk_select_reuseport &&
func_id != BPF_FUNC_map_lookup_elem &&
!may_update_sockmap(env, func_id))
goto error;
break;
case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
if (func_id != BPF_FUNC_sk_select_reuseport)
goto error;
break;
case BPF_MAP_TYPE_QUEUE:
case BPF_MAP_TYPE_STACK:
if (func_id != BPF_FUNC_map_peek_elem &&
func_id != BPF_FUNC_map_pop_elem &&
func_id != BPF_FUNC_map_push_elem)
goto error;
break;
case BPF_MAP_TYPE_SK_STORAGE:
if (func_id != BPF_FUNC_sk_storage_get &&
func_id != BPF_FUNC_sk_storage_delete)
goto error;
break;
case BPF_MAP_TYPE_INODE_STORAGE:
if (func_id != BPF_FUNC_inode_storage_get &&
func_id != BPF_FUNC_inode_storage_delete)
goto error;
break;
case BPF_MAP_TYPE_TASK_STORAGE:
if (func_id != BPF_FUNC_task_storage_get &&
func_id != BPF_FUNC_task_storage_delete)
goto error;
break;
default:
break;
}
/* ... and second from the function itself. */
switch (func_id) {
case BPF_FUNC_tail_call:
if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
goto error;
if (env->subprog_cnt > 1 && !allow_tail_call_in_subprogs(env)) {
verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n");
return -EINVAL;
}
break;
case BPF_FUNC_perf_event_read:
case BPF_FUNC_perf_event_output:
case BPF_FUNC_perf_event_read_value:
case BPF_FUNC_skb_output:
case BPF_FUNC_xdp_output:
if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
goto error;
break;
case BPF_FUNC_ringbuf_output:
case BPF_FUNC_ringbuf_reserve:
case BPF_FUNC_ringbuf_query:
if (map->map_type != BPF_MAP_TYPE_RINGBUF)
goto error;
break;
case BPF_FUNC_get_stackid:
if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
goto error;
break;
case BPF_FUNC_current_task_under_cgroup:
case BPF_FUNC_skb_under_cgroup:
if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY)
goto error;
break;
case BPF_FUNC_redirect_map:
if (map->map_type != BPF_MAP_TYPE_DEVMAP &&
map->map_type != BPF_MAP_TYPE_DEVMAP_HASH &&
map->map_type != BPF_MAP_TYPE_CPUMAP &&
map->map_type != BPF_MAP_TYPE_XSKMAP)
goto error;
break;
case BPF_FUNC_sk_redirect_map:
case BPF_FUNC_msg_redirect_map:
case BPF_FUNC_sock_map_update:
if (map->map_type != BPF_MAP_TYPE_SOCKMAP)
goto error;
break;
case BPF_FUNC_sk_redirect_hash:
case BPF_FUNC_msg_redirect_hash:
case BPF_FUNC_sock_hash_update:
if (map->map_type != BPF_MAP_TYPE_SOCKHASH)
goto error;
break;
case BPF_FUNC_get_local_storage:
if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
map->map_type != BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
goto error;
break;
case BPF_FUNC_sk_select_reuseport:
if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY &&
map->map_type != BPF_MAP_TYPE_SOCKMAP &&
map->map_type != BPF_MAP_TYPE_SOCKHASH)
goto error;
break;
case BPF_FUNC_map_peek_elem:
case BPF_FUNC_map_pop_elem:
case BPF_FUNC_map_push_elem:
if (map->map_type != BPF_MAP_TYPE_QUEUE &&
map->map_type != BPF_MAP_TYPE_STACK)
goto error;
break;
case BPF_FUNC_sk_storage_get:
case BPF_FUNC_sk_storage_delete:
if (map->map_type != BPF_MAP_TYPE_SK_STORAGE)
goto error;
break;
case BPF_FUNC_inode_storage_get:
case BPF_FUNC_inode_storage_delete:
if (map->map_type != BPF_MAP_TYPE_INODE_STORAGE)
goto error;
break;
case BPF_FUNC_task_storage_get:
case BPF_FUNC_task_storage_delete:
if (map->map_type != BPF_MAP_TYPE_TASK_STORAGE)
goto error;
break;
default:
break;
}
return 0;
error:
verbose(env, "cannot pass map_type %d into func %s#%d\n",
map->map_type, func_id_name(func_id), func_id);
return -EINVAL;
} | 0 | [
"CWE-843"
] | bpf | 5b029a32cfe4600f5e10e36b41778506b90fd4de | 106,478,003,913,559,310,000,000,000,000,000,000,000 | 208 | bpf: Fix ringbuf helper function compatibility
Commit 457f44363a88 ("bpf: Implement BPF ring buffer and verifier support
for it") extended check_map_func_compatibility() by enforcing map -> helper
function match, but not helper -> map type match.
Due to this all of the bpf_ringbuf_*() helper functions could be used with
a wrong map type such as array or hash map, leading to invalid access due
to type confusion.
Also, both BPF_FUNC_ringbuf_{submit,discard} have ARG_PTR_TO_ALLOC_MEM as
argument and not a BPF map. Therefore, their check_map_func_compatibility()
presence is incorrect since it's only for map type checking.
Fixes: 457f44363a88 ("bpf: Implement BPF ring buffer and verifier support for it")
Reported-by: Ryota Shiga (Flatt Security)
Signed-off-by: Daniel Borkmann <[email protected]>
Acked-by: Alexei Starovoitov <[email protected]> |
GF_Err Media_GetSampleDesc(GF_MediaBox *mdia, u32 SampleDescIndex, GF_SampleEntryBox **out_entry, u32 *dataRefIndex)
{
GF_SampleDescriptionBox *stsd;
GF_SampleEntryBox *entry = NULL;
if (!mdia) return GF_ISOM_INVALID_FILE;
stsd = mdia->information->sampleTable->SampleDescription;
if (!stsd) return GF_ISOM_INVALID_FILE;
if (!SampleDescIndex || (SampleDescIndex > gf_list_count(stsd->child_boxes)) ) return GF_BAD_PARAM;
entry = (GF_SampleEntryBox*)gf_list_get(stsd->child_boxes, SampleDescIndex - 1);
if (!entry) return GF_ISOM_INVALID_FILE;
if (out_entry) *out_entry = entry;
if (dataRefIndex) *dataRefIndex = entry->dataReferenceIndex;
return GF_OK;
} | 0 | [
"CWE-787"
] | gpac | 328def7d3b93847d64ecb6e9e0399684e57c3eca | 252,065,104,214,927,670,000,000,000,000,000,000,000 | 18 | fixed #1766 (fuzz) |
static void __set_monitor_timer(struct l2cap_chan *chan)
{
__clear_retrans_timer(chan);
if (chan->monitor_timeout) {
l2cap_set_timer(chan, &chan->monitor_timer,
msecs_to_jiffies(chan->monitor_timeout));
}
} | 0 | [
"CWE-787"
] | linux | e860d2c904d1a9f38a24eb44c9f34b8f915a6ea3 | 317,572,808,817,689,980,000,000,000,000,000,000,000 | 8 | Bluetooth: Properly check L2CAP config option output buffer length
Validate the output buffer length for L2CAP config requests and responses
to avoid overflowing the stack buffer used for building the option blocks.
Cc: [email protected]
Signed-off-by: Ben Seri <[email protected]>
Signed-off-by: Marcel Holtmann <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
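The shape of the fix - every option writer checks the remaining room in the destination before appending - can be sketched as below. The TLV layout (type, length, value) matches L2CAP config options in spirit, but the function name and signature are illustrative, not the exact l2cap_add_conf_opt().

#include <stddef.h>
#include <string.h>

/* Append one type/len/value option, refusing instead of
 * overflowing when the caller's buffer is too small. */
static int add_conf_opt(unsigned char *buf, size_t *used, size_t cap,
                        unsigned char type, unsigned char len,
                        const void *val)
{
	size_t need = 2 + (size_t)len;  /* type byte + len byte + value */

	if (*used > cap || cap - *used < need)
		return -1;              /* would overflow: bail out */
	buf[(*used)++] = type;
	buf[(*used)++] = len;
	memcpy(buf + *used, val, len);
	*used += len;
	return 0;
}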
static int get_filter(void __user *arg, struct sock_filter **p)
{
struct sock_fprog uprog;
struct sock_filter *code = NULL;
int len;
if (copy_from_user(&uprog, arg, sizeof(uprog)))
return -EFAULT;
if (!uprog.len) {
*p = NULL;
return 0;
}
len = uprog.len * sizeof(struct sock_filter);
code = memdup_user(uprog.filter, len);
if (IS_ERR(code))
return PTR_ERR(code);
*p = code;
return uprog.len;
} | 0 | [] | linux | 4ab42d78e37a294ac7bc56901d563c642e03c4ae | 143,235,667,244,246,850,000,000,000,000,000,000,000 | 22 | ppp, slip: Validate VJ compression slot parameters completely
Currently slhc_init() treats out-of-range values of rslots and tslots
as equivalent to 0, except that if tslots is too large it will
dereference a null pointer (CVE-2015-7799).
Add a range-check at the top of the function and make it return an
ERR_PTR() on error instead of NULL. Change the callers accordingly.
Compile-tested only.
Reported-by: 郭永刚 <[email protected]>
References: http://article.gmane.org/gmane.comp.security.oss.general/17908
Signed-off-by: Ben Hutchings <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
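The "range-check at the top of the function" can be sketched on its own; a negative errno stands in for the kernel's ERR_PTR() return, and the bound is illustrative (slot ids fit in a byte, so anything above 255 is certainly invalid).

#include <errno.h>

#define SLHC_MAX_SLOTS 255 /* illustrative upper bound */

/* Validate both slot counts before any allocation or indexing;
 * a too-large tslots can then never reach the table setup code. */
static int slhc_check_slots(int rslots, int tslots)
{
	if (rslots < 0 || rslots > SLHC_MAX_SLOTS)
		return -EINVAL;
	if (tslots < 0 || tslots > SLHC_MAX_SLOTS)
		return -EINVAL;
	return 0;
}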
xmlNewTextChild(xmlNodePtr parent, xmlNsPtr ns,
const xmlChar *name, const xmlChar *content) {
xmlNodePtr cur, prev;
if (parent == NULL) {
#ifdef DEBUG_TREE
xmlGenericError(xmlGenericErrorContext,
"xmlNewTextChild : parent == NULL\n");
#endif
return(NULL);
}
if (name == NULL) {
#ifdef DEBUG_TREE
xmlGenericError(xmlGenericErrorContext,
"xmlNewTextChild : name == NULL\n");
#endif
return(NULL);
}
/*
* Allocate a new node
*/
if (parent->type == XML_ELEMENT_NODE) {
if (ns == NULL)
cur = xmlNewDocRawNode(parent->doc, parent->ns, name, content);
else
cur = xmlNewDocRawNode(parent->doc, ns, name, content);
} else if ((parent->type == XML_DOCUMENT_NODE) ||
(parent->type == XML_HTML_DOCUMENT_NODE)) {
if (ns == NULL)
cur = xmlNewDocRawNode((xmlDocPtr) parent, NULL, name, content);
else
cur = xmlNewDocRawNode((xmlDocPtr) parent, ns, name, content);
} else if (parent->type == XML_DOCUMENT_FRAG_NODE) {
cur = xmlNewDocRawNode( parent->doc, ns, name, content);
} else {
return(NULL);
}
if (cur == NULL) return(NULL);
/*
* add the new element at the end of the children list.
*/
cur->type = XML_ELEMENT_NODE;
cur->parent = parent;
cur->doc = parent->doc;
if (parent->children == NULL) {
parent->children = cur;
parent->last = cur;
} else {
prev = parent->last;
prev->next = cur;
cur->prev = prev;
parent->last = cur;
}
return(cur);
} | 0 | [
"CWE-20"
] | libxml2 | bdd66182ef53fe1f7209ab6535fda56366bd7ac9 | 321,981,744,034,253,570,000,000,000,000,000,000,000 | 59 | Avoid building recursive entities
For https://bugzilla.gnome.org/show_bug.cgi?id=762100
When we detect a recursive entity we should really not
build the associated data. Moreover, if someone bypasses
libxml2's fatal errors and still tries to serialize a broken
entity, make sure we don't risk getting into a recursion.
* parser.c: xmlParserEntityCheck() don't build if an entity loop
was found, and remove the associated text content
* tree.c: xmlStringGetNodeList() avoid a potential recursion |
void AuthorizationSession::_buildAuthenticatedRolesVector() {
_authenticatedRoleNames.clear();
for (UserSet::iterator it = _authenticatedUsers.begin(); it != _authenticatedUsers.end();
++it) {
RoleNameIterator roles = (*it)->getIndirectRoles();
while (roles.more()) {
RoleName roleName = roles.next();
_authenticatedRoleNames.push_back(RoleName(roleName.getRole(), roleName.getDB()));
}
}
} | 0 | [
"CWE-613"
] | mongo | db19e7ce84cfd702a4ba9983ee2ea5019f470f82 | 226,064,766,518,831,160,000,000,000,000,000,000,000 | 11 | SERVER-38984 Validate unique User ID on UserCache hit
(cherry picked from commit e55d6e2292e5dbe2f97153251d8193d1cc89f5d7) |
static int ocfs2_dir_open(struct inode *inode, struct file *file)
{
return ocfs2_init_file_private(inode, file);
} | 0 | [
"CWE-401"
] | linux | 28f5a8a7c033cbf3e32277f4cc9c6afd74f05300 | 42,134,323,930,541,974,000,000,000,000,000,000,000 | 4 | ocfs2: should wait dio before inode lock in ocfs2_setattr()
we should wait for dio requests to finish before taking the inode lock in
ocfs2_setattr(), otherwise the following deadlock will happen:
process 1 (truncate file 'A'):
  ocfs2_setattr
    ocfs2_inode_lock_tracker
      ocfs2_inode_lock_full
    inode_dio_wait
      __inode_dio_wait
        --> waiting for all dio requests to finish

process 3 (receiving the bast messages):
  dlm_proxy_ast_handler
    dlm_do_local_bast
      ocfs2_blocking_ast
        ocfs2_generic_handle_bast
          set OCFS2_LOCK_BLOCKED flag

process 2 (end_io of writing file 'A'):
  dio_end_io
    dio_bio_end_aio
      dio_complete
        ocfs2_dio_end_io
          ocfs2_dio_end_io_write
            ocfs2_inode_lock
              __ocfs2_cluster_lock
                ocfs2_wait_for_mask
                  --> waiting for the OCFS2_LOCK_BLOCKED flag to be
                      cleared, that is, waiting for 'process 1' to
                      unlock the inode lock
        inode_dio_end
          --> here the i_dio_count would be decremented, but this is
              never reached, so a deadlock happens.
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Alex Chen <[email protected]>
Reviewed-by: Jun Piao <[email protected]>
Reviewed-by: Joseph Qi <[email protected]>
Acked-by: Changwei Ge <[email protected]>
Cc: Mark Fasheh <[email protected]>
Cc: Joel Becker <[email protected]>
Cc: Junxiao Bi <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
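The fix reduces to an ordering change inside ocfs2_setattr(); a schematic with stand-in helpers (they model inode_dio_wait() and ocfs2_inode_lock()/unlock, nothing more):

/* Illustrative stand-ins only. */
struct demo_inode { int unused; };
static void wait_for_dio(struct demo_inode *i)       { (void)i; }
static void take_cluster_lock(struct demo_inode *i)  { (void)i; }
static void drop_cluster_lock(struct demo_inode *i)  { (void)i; }

/* Drain in-flight direct I/O *before* taking the cluster lock, so
 * a dio completion that itself needs the lock can always finish. */
static void setattr_fixed_order(struct demo_inode *inode)
{
	wait_for_dio(inode);       /* 1: nothing left that needs the lock */
	take_cluster_lock(inode);  /* 2: now safe to lock */
	/* ... attribute update ... */
	drop_cluster_lock(inode);
}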
void _out_dns_mark_bad(conn_t out) {
if (out->s2s->dns_bad_timeout > 0) {
dnsres_t bad;
char *ipport;
/* mark this host as bad */
ipport = dns_make_ipport(out->ip, out->port);
bad = xhash_get(out->s2s->dns_bad, ipport);
if (bad == NULL) {
bad = (dnsres_t) calloc(1, sizeof(struct dnsres_st));
bad->key = ipport;
xhash_put(out->s2s->dns_bad, ipport, bad);
}
bad->expiry = time(NULL) + out->s2s->dns_bad_timeout;
}
} | 0 | [
"CWE-20"
] | jabberd2 | aabcffae560d5fd00cd1d2ffce5d760353cf0a4d | 149,899,026,295,932,530,000,000,000,000,000,000,000 | 16 | Fixed possibility of Unsolicited Dialback Attacks |
xmlParseCharRef(xmlParserCtxtPtr ctxt) {
int val = 0;
int count = 0;
/*
* Using RAW/CUR/NEXT is okay since we are working on ASCII range here
*/
if ((RAW == '&') && (NXT(1) == '#') &&
(NXT(2) == 'x')) {
SKIP(3);
GROW;
while (RAW != ';') { /* loop blocked by count */
if (count++ > 20) {
count = 0;
GROW;
if (ctxt->instate == XML_PARSER_EOF)
return(0);
}
if ((RAW >= '0') && (RAW <= '9'))
val = val * 16 + (CUR - '0');
else if ((RAW >= 'a') && (RAW <= 'f') && (count < 20))
val = val * 16 + (CUR - 'a') + 10;
else if ((RAW >= 'A') && (RAW <= 'F') && (count < 20))
val = val * 16 + (CUR - 'A') + 10;
else {
xmlFatalErr(ctxt, XML_ERR_INVALID_HEX_CHARREF, NULL);
val = 0;
break;
}
if (val > 0x110000)
val = 0x110000;
NEXT;
count++;
}
if (RAW == ';') {
/* on purpose to avoid reentrancy problems with NEXT and SKIP */
ctxt->input->col++;
ctxt->input->cur++;
}
} else if ((RAW == '&') && (NXT(1) == '#')) {
SKIP(2);
GROW;
while (RAW != ';') { /* loop blocked by count */
if (count++ > 20) {
count = 0;
GROW;
if (ctxt->instate == XML_PARSER_EOF)
return(0);
}
if ((RAW >= '0') && (RAW <= '9'))
val = val * 10 + (CUR - '0');
else {
xmlFatalErr(ctxt, XML_ERR_INVALID_DEC_CHARREF, NULL);
val = 0;
break;
}
if (val > 0x110000)
val = 0x110000;
NEXT;
count++;
}
if (RAW == ';') {
/* on purpose to avoid reentrancy problems with NEXT and SKIP */
ctxt->input->col++;
ctxt->input->cur++;
}
} else {
xmlFatalErr(ctxt, XML_ERR_INVALID_CHARREF, NULL);
}
/*
* [ WFC: Legal Character ]
* Characters referred to using character references must match the
* production for Char.
*/
if (val >= 0x110000) {
xmlFatalErrMsgInt(ctxt, XML_ERR_INVALID_CHAR,
"xmlParseCharRef: character reference out of bounds\n",
val);
} else if (IS_CHAR(val)) {
return(val);
} else {
xmlFatalErrMsgInt(ctxt, XML_ERR_INVALID_CHAR,
"xmlParseCharRef: invalid xmlChar value %d\n",
val);
}
return(0);
} | 0 | [
"CWE-776"
] | libxml2 | 8598060bacada41a0eb09d95c97744ff4e428f8e | 273,942,395,692,312,400,000,000,000,000,000,000,000 | 90 | Patch for security issue CVE-2021-3541
This is related to parameter entity expansion, following
the line of the billion laughs attack. Somehow in that path the
counting of parameters was missed and the normal algorithm based
on entities "density" was useless. |
test_bson_append_int64 (void)
{
bson_t *b;
bson_t *b2;
b = bson_new ();
BSON_ASSERT (bson_append_int64 (b, "a", -1, 100000000000000ULL));
b2 = get_bson ("test34.bson");
BSON_ASSERT_BSON_EQUAL (b, b2);
bson_destroy (b);
bson_destroy (b2);
} | 0 | [
"CWE-125"
] | libbson | 42900956dc461dfe7fb91d93361d10737c1602b3 | 144,799,353,842,194,730,000,000,000,000,000,000,000 | 12 | CDRIVER-2269 Check for zero string length in codewscope |
static void streamFile(char *filename) {
xmlTextReaderPtr reader;
int ret;
#ifdef HAVE_MMAP
int fd = -1;
struct stat info;
const char *base = NULL;
xmlParserInputBufferPtr input = NULL;
if (memory) {
if (stat(filename, &info) < 0)
return;
if ((fd = open(filename, O_RDONLY)) < 0)
return;
base = mmap(NULL, info.st_size, PROT_READ, MAP_SHARED, fd, 0) ;
if (base == (void *) MAP_FAILED) {
close(fd);
fprintf(stderr, "mmap failure for file %s\n", filename);
progresult = XMLLINT_ERR_RDFILE;
return;
}
reader = xmlReaderForMemory(base, info.st_size, filename,
NULL, options);
} else
#endif
reader = xmlReaderForFile(filename, NULL, options);
#ifdef LIBXML_PATTERN_ENABLED
if (pattern != NULL) {
patternc = xmlPatterncompile((const xmlChar *) pattern, NULL, 0, NULL);
if (patternc == NULL) {
xmlGenericError(xmlGenericErrorContext,
"Pattern %s failed to compile\n", pattern);
progresult = XMLLINT_ERR_SCHEMAPAT;
pattern = NULL;
}
}
if (patternc != NULL) {
patstream = xmlPatternGetStreamCtxt(patternc);
if (patstream != NULL) {
ret = xmlStreamPush(patstream, NULL, NULL);
if (ret < 0) {
fprintf(stderr, "xmlStreamPush() failure\n");
xmlFreeStreamCtxt(patstream);
patstream = NULL;
}
}
}
#endif
if (reader != NULL) {
#ifdef LIBXML_VALID_ENABLED
if (valid)
xmlTextReaderSetParserProp(reader, XML_PARSER_VALIDATE, 1);
else
#endif /* LIBXML_VALID_ENABLED */
if (loaddtd)
xmlTextReaderSetParserProp(reader, XML_PARSER_LOADDTD, 1);
#ifdef LIBXML_SCHEMAS_ENABLED
if (relaxng != NULL) {
if ((timing) && (!repeat)) {
startTimer();
}
ret = xmlTextReaderRelaxNGValidate(reader, relaxng);
if (ret < 0) {
xmlGenericError(xmlGenericErrorContext,
"Relax-NG schema %s failed to compile\n", relaxng);
progresult = XMLLINT_ERR_SCHEMACOMP;
relaxng = NULL;
}
if ((timing) && (!repeat)) {
endTimer("Compiling the schemas");
}
}
if (schema != NULL) {
if ((timing) && (!repeat)) {
startTimer();
}
ret = xmlTextReaderSchemaValidate(reader, schema);
if (ret < 0) {
xmlGenericError(xmlGenericErrorContext,
"XSD schema %s failed to compile\n", schema);
progresult = XMLLINT_ERR_SCHEMACOMP;
schema = NULL;
}
if ((timing) && (!repeat)) {
endTimer("Compiling the schemas");
}
}
#endif
/*
* Process all nodes in sequence
*/
if ((timing) && (!repeat)) {
startTimer();
}
ret = xmlTextReaderRead(reader);
while (ret == 1) {
if ((debug)
#ifdef LIBXML_PATTERN_ENABLED
|| (patternc)
#endif
)
processNode(reader);
ret = xmlTextReaderRead(reader);
}
if ((timing) && (!repeat)) {
#ifdef LIBXML_SCHEMAS_ENABLED
if (relaxng != NULL)
endTimer("Parsing and validating");
else
#endif
#ifdef LIBXML_VALID_ENABLED
if (valid)
endTimer("Parsing and validating");
else
#endif
endTimer("Parsing");
}
#ifdef LIBXML_VALID_ENABLED
if (valid) {
if (xmlTextReaderIsValid(reader) != 1) {
xmlGenericError(xmlGenericErrorContext,
"Document %s does not validate\n", filename);
progresult = XMLLINT_ERR_VALID;
}
}
#endif /* LIBXML_VALID_ENABLED */
#ifdef LIBXML_SCHEMAS_ENABLED
if ((relaxng != NULL) || (schema != NULL)) {
if (xmlTextReaderIsValid(reader) != 1) {
fprintf(stderr, "%s fails to validate\n", filename);
progresult = XMLLINT_ERR_VALID;
} else {
if (!quiet) {
fprintf(stderr, "%s validates\n", filename);
}
}
}
#endif
/*
* Done, cleanup and status
*/
xmlFreeTextReader(reader);
if (ret != 0) {
fprintf(stderr, "%s : failed to parse\n", filename);
progresult = XMLLINT_ERR_UNCLASS;
}
} else {
fprintf(stderr, "Unable to open %s\n", filename);
progresult = XMLLINT_ERR_UNCLASS;
}
#ifdef LIBXML_PATTERN_ENABLED
if (patstream != NULL) {
xmlFreeStreamCtxt(patstream);
patstream = NULL;
}
#endif
#ifdef HAVE_MMAP
if (memory) {
xmlFreeParserInputBuffer(input);
munmap((char *) base, info.st_size);
close(fd);
}
#endif
} | 0 | [
"CWE-416"
] | libxml2 | 1358d157d0bd83be1dfe356a69213df9fac0b539 | 328,293,334,190,481,700,000,000,000,000,000,000,000 | 169 | Fix use-after-free with `xmllint --html --push`
Call htmlCtxtUseOptions to make sure that names aren't stored in
dictionaries.
Note that this issue only affects xmllint using the HTML push parser.
Fixes #230. |
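This one can be shown against the real libxml2 API (assuming the libxml2 development headers are available): create the HTML push parser context, then immediately apply the options through htmlCtxtUseOptions(), which per the message keeps parsed names out of dictionaries that could outlive them. Error handling is elided.

#include <libxml/HTMLparser.h>
#include <libxml/encoding.h>

htmlParserCtxtPtr make_html_push_ctxt(int options)
{
	htmlParserCtxtPtr ctxt =
	    htmlCreatePushParserCtxt(NULL, NULL, NULL, 0, NULL,
	                             XML_CHAR_ENCODING_NONE);

	if (ctxt != NULL)
		htmlCtxtUseOptions(ctxt, options); /* apply options up front */
	return ctxt;
}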
btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path, u64 dir,
u64 objectid, const char *name, int name_len,
int mod)
{
int ret;
struct btrfs_key key;
int ins_len = mod < 0 ? -1 : 0;
int cow = mod != 0;
key.objectid = dir;
key.type = BTRFS_DIR_INDEX_KEY;
key.offset = objectid;
ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow);
if (ret < 0)
return ERR_PTR(ret);
if (ret > 0)
return ERR_PTR(-ENOENT);
return btrfs_match_dir_item_name(root, path, name, name_len);
} | 0 | [
"CWE-416",
"CWE-362"
] | linux | 5f5bc6b1e2d5a6f827bc860ef2dc5b6f365d1339 | 230,753,926,637,106,900,000,000,000,000,000,000,000 | 22 | Btrfs: make xattr replace operations atomic
Replacing a xattr consists of doing a lookup for its existing value, delete
the current value from the respective leaf, release the search path and then
finally insert the new value. This leaves a time window where readers (getxattr,
listxattrs) won't see any value for the xattr. Xattrs are used to store ACLs,
so this has security implications.
This change also fixes 2 other existing issues which were:
*) Deleting the old xattr value without verifying first if the new xattr will
fit in the existing leaf item (in case multiple xattrs are packed in the
same item due to name hash collision);
*) Returning -EEXIST when the flag XATTR_CREATE is given and the xattr doesn't
exist but we have have an existing item that packs muliple xattrs with
the same name hash as the input xattr. In this case we should return ENOSPC.
A test case for xfstests follows soon.
Thanks to Alexandre Oliva for reporting the non-atomicity of the xattr replace
implementation.
Reported-by: Alexandre Oliva <[email protected]>
Signed-off-by: Filipe Manana <[email protected]>
Signed-off-by: Chris Mason <[email protected]> |
static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
{
struct ip_esp_hdr *esph;
struct esp_data *esp = x->data;
struct crypto_aead *aead = esp->aead;
struct aead_request *req;
struct sk_buff *trailer;
int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead);
int nfrags;
int ret = 0;
void *tmp;
u8 *iv;
struct scatterlist *sg;
struct scatterlist *asg;
if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead))) {
ret = -EINVAL;
goto out;
}
if (elen <= 0) {
ret = -EINVAL;
goto out;
}
if ((nfrags = skb_cow_data(skb, 0, &trailer)) < 0) {
ret = -EINVAL;
goto out;
}
ret = -ENOMEM;
tmp = esp_alloc_tmp(aead, nfrags + 1);
if (!tmp)
goto out;
ESP_SKB_CB(skb)->tmp = tmp;
iv = esp_tmp_iv(aead, tmp);
req = esp_tmp_req(aead, iv);
asg = esp_req_sg(aead, req);
sg = asg + 1;
skb->ip_summed = CHECKSUM_NONE;
esph = (struct ip_esp_hdr *)skb->data;
/* Get ivec. This can be wrong, check against another impls. */
iv = esph->enc_data;
sg_init_table(sg, nfrags);
skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen);
sg_init_one(asg, esph, sizeof(*esph));
aead_request_set_callback(req, 0, esp_input_done, skb);
aead_request_set_crypt(req, sg, sg, elen, iv);
aead_request_set_assoc(req, asg, sizeof(*esph));
ret = crypto_aead_decrypt(req);
if (ret == -EINPROGRESS)
goto out;
ret = esp_input_done2(skb, ret);
out:
return ret;
} | 0 | [
"CWE-16"
] | linux-2.6 | 920fc941a9617f95ccb283037fe6f8a38d95bb69 | 195,006,693,569,356,900,000,000,000,000,000,000,000 | 65 | [ESP]: Ensure IV is in linear part of the skb to avoid BUG() due to OOB access
ESP does not account for the IV size when calling pskb_may_pull() to
ensure everything it accesses directly is within the linear part of a
potential fragment. This results in a BUG() being triggered when
both the IPv4 and IPv6 ESP stacks are fed with an skb where the first
fragment ends between the end of the esp header and the end of the IV.
This bug was found by Dirk Nehring <[email protected]> .
Signed-off-by: Thomas Graf <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
void vQueueDelete( QueueHandle_t xQueue )
{
Queue_t * const pxQueue = xQueue;
configASSERT( pxQueue );
traceQUEUE_DELETE( pxQueue );
#if ( configQUEUE_REGISTRY_SIZE > 0 )
{
vQueueUnregisterQueue( pxQueue );
}
#endif
#if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) )
{
/* The queue can only have been allocated dynamically - free it
* again. */
vPortFree( pxQueue );
}
#elif ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
{
/* The queue could have been allocated statically or dynamically, so
* check before attempting to free the memory. */
if( pxQueue->ucStaticallyAllocated == ( uint8_t ) pdFALSE )
{
vPortFree( pxQueue );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#else /* if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) ) */
{
/* The queue must have been statically allocated, so is not going to be
* deleted. Avoid compiler warnings about the unused parameter. */
( void ) pxQueue;
}
#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
}
| 0 | [
"CWE-200",
"CWE-190"
] | FreeRTOS-Kernel | 47338393f1f79558f6144213409f09f81d7c4837 | 248,899,252,856,479,450,000,000,000,000,000,000,000 | 40 | add assert for addition overflow on queue creation (#225) |
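The overflow guard the title refers to can be sketched as a pair of checks before the allocation size is computed; the exact upstream assert may be phrased differently, and configASSERT() is replaced here by <assert.h>.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Compute header + length * item_size, asserting that neither the
 * multiplication nor the addition can wrap around. */
static size_t queue_alloc_size(size_t len, size_t item, size_t hdr)
{
	assert(len > 0);
	assert(item == 0 || len <= SIZE_MAX / item); /* len * item safe */
	assert(len * item <= SIZE_MAX - hdr);        /* + header safe  */
	return hdr + len * item;
}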
xmlFreeTextReader(xmlTextReaderPtr reader) {
if (reader == NULL)
return;
#ifdef LIBXML_SCHEMAS_ENABLED
if (reader->rngSchemas != NULL) {
xmlRelaxNGFree(reader->rngSchemas);
reader->rngSchemas = NULL;
}
if (reader->rngValidCtxt != NULL) {
if (! reader->rngPreserveCtxt)
xmlRelaxNGFreeValidCtxt(reader->rngValidCtxt);
reader->rngValidCtxt = NULL;
}
if (reader->xsdPlug != NULL) {
xmlSchemaSAXUnplug(reader->xsdPlug);
reader->xsdPlug = NULL;
}
if (reader->xsdValidCtxt != NULL) {
if (! reader->xsdPreserveCtxt)
xmlSchemaFreeValidCtxt(reader->xsdValidCtxt);
reader->xsdValidCtxt = NULL;
}
if (reader->xsdSchemas != NULL) {
xmlSchemaFree(reader->xsdSchemas);
reader->xsdSchemas = NULL;
}
#endif
#ifdef LIBXML_XINCLUDE_ENABLED
if (reader->xincctxt != NULL)
xmlXIncludeFreeContext(reader->xincctxt);
#endif
#ifdef LIBXML_PATTERN_ENABLED
if (reader->patternTab != NULL) {
int i;
for (i = 0;i < reader->patternNr;i++) {
if (reader->patternTab[i] != NULL)
xmlFreePattern(reader->patternTab[i]);
}
xmlFree(reader->patternTab);
}
#endif
if (reader->faketext != NULL) {
xmlFreeNode(reader->faketext);
}
if (reader->ctxt != NULL) {
if (reader->dict == reader->ctxt->dict)
reader->dict = NULL;
if (reader->ctxt->myDoc != NULL) {
if (reader->preserve == 0)
xmlTextReaderFreeDoc(reader, reader->ctxt->myDoc);
reader->ctxt->myDoc = NULL;
}
if ((reader->ctxt->vctxt.vstateTab != NULL) &&
(reader->ctxt->vctxt.vstateMax > 0)){
xmlFree(reader->ctxt->vctxt.vstateTab);
reader->ctxt->vctxt.vstateTab = NULL;
reader->ctxt->vctxt.vstateMax = 0;
}
if (reader->allocs & XML_TEXTREADER_CTXT)
xmlFreeParserCtxt(reader->ctxt);
}
if (reader->sax != NULL)
xmlFree(reader->sax);
if ((reader->input != NULL) && (reader->allocs & XML_TEXTREADER_INPUT))
xmlFreeParserInputBuffer(reader->input);
if (reader->buffer != NULL)
xmlBufFree(reader->buffer);
if (reader->entTab != NULL)
xmlFree(reader->entTab);
if (reader->dict != NULL)
xmlDictFree(reader->dict);
xmlFree(reader);
} | 0 | [
"CWE-399"
] | libxml2 | 213f1fe0d76d30eaed6e5853057defc43e6df2c9 | 108,240,696,622,087,130,000,000,000,000,000,000,000 | 73 | CVE-2015-1819 Enforce the reader to run in constant memory
One of the operation on the reader could resolve entities
leading to the classic expansion issue. Make sure the
buffer used for xmlreader operation is bounded.
Introduce a new allocation type for the buffers for this effect. |
static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
{
struct kmem_cache_node *n;
/*
* Set up the kmem_cache_node for cpu before we can
* begin anything. Make sure some other cpu on this
* node has not already allocated this
*/
n = get_node(cachep, node);
if (n) {
spin_lock_irq(&n->list_lock);
n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount +
cachep->num;
spin_unlock_irq(&n->list_lock);
return 0;
}
n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node);
if (!n)
return -ENOMEM;
kmem_cache_node_init(n);
n->next_reap = jiffies + REAPTIMEOUT_NODE +
((unsigned long)cachep) % REAPTIMEOUT_NODE;
n->free_limit =
(1 + nr_cpus_node(node)) * cachep->batchcount + cachep->num;
/*
* The kmem_cache_nodes don't come and go as CPUs
* come and go. slab_mutex is sufficient
* protection here.
*/
cachep->node[node] = n;
return 0;
} | 0 | [
"CWE-703"
] | linux | c4e490cf148e85ead0d1b1c2caaba833f1d5b29f | 157,412,306,211,779,670,000,000,000,000,000,000,000 | 39 | mm/slab.c: fix SLAB freelist randomization duplicate entries
This patch fixes a bug in the freelist randomization code. When a high
random number is used, the freelist will contain duplicate entries. It
will result in different allocations sharing the same chunk.
It will result in odd behaviours and crashes. It should be uncommon but
it depends on the machines. We saw it happening more often on some
machines (every few hours of running tests).
Fixes: c7ce4f60ac19 ("mm: SLAB freelist randomization")
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: John Sperbeck <[email protected]>
Signed-off-by: Thomas Garnier <[email protected]>
Cc: Christoph Lameter <[email protected]>
Cc: Pekka Enberg <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Joonsoo Kim <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
find_func(char_u *name, int is_global)
{
ufunc_T *fp = find_func_even_dead(name, is_global ? FFED_IS_GLOBAL : 0);
if (fp != NULL && (fp->uf_flags & FC_DEAD) == 0)
return fp;
return NULL;
} | 0 | [
"CWE-416"
] | vim | 91c7cbfe31bbef57d5fcf7d76989fc159f73ef15 | 135,501,918,926,601,420,000,000,000,000,000,000,000 | 8 | patch 9.0.0225: using freed memory with multiple line breaks in expression
Problem: Using freed memory with multiple line breaks in expression.
Solution: Free eval_tofree later. |
static void WM_FreePatches(void) {
int i;
struct _patch * tmp_patch;
struct _sample * tmp_sample;
_WM_Lock(&_WM_patch_lock);
for (i = 0; i < 128; i++) {
while (_WM_patch[i]) {
while (_WM_patch[i]->first_sample) {
tmp_sample = _WM_patch[i]->first_sample->next;
free(_WM_patch[i]->first_sample->data);
free(_WM_patch[i]->first_sample);
_WM_patch[i]->first_sample = tmp_sample;
}
free(_WM_patch[i]->filename);
tmp_patch = _WM_patch[i]->next;
free(_WM_patch[i]);
_WM_patch[i] = tmp_patch;
}
}
_WM_Unlock(&_WM_patch_lock);
} | 0 | [
"CWE-200",
"CWE-119"
] | wildmidi | 814f31d8eceda8401eb812fc2e94ed143fdad0ab | 303,134,758,167,712,600,000,000,000,000,000,000,000 | 22 | wildmidi_lib.c (WildMidi_Open, WildMidi_OpenBuffer): refuse to proceed if less than 18 bytes of input
Fixes bug #178. |
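The gate this fix adds is a plain minimum-length check before any header parsing; a sketch, with the 18-byte floor taken from the commit title:

#include <stddef.h>

#define MIDI_MIN_INPUT_BYTES 18 /* smallest input the loaders accept */

/* Both open paths bail out early when the buffer cannot even hold
 * the fixed-size headers. */
static int midi_input_ok(const unsigned char *data, size_t size)
{
	return data != NULL && size >= MIDI_MIN_INPUT_BYTES;
}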
static long fuse_file_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
return fuse_ioctl_common(file, cmd, arg, 0);
} | 0 | [
"CWE-459"
] | linux | 5d069dbe8aaf2a197142558b6fb2978189ba3454 | 318,508,941,170,088,120,000,000,000,000,000,000,000 | 5 | fuse: fix bad inode
Jan Kara's analysis of the syzbot report (edited):
The reproducer opens a directory on FUSE filesystem, it then attaches
dnotify mark to the open directory. After that a fuse_do_getattr() call
finds that attributes returned by the server are inconsistent, and calls
make_bad_inode() which, among other things does:
inode->i_mode = S_IFREG;
This then confuses dnotify which doesn't tear down its structures
properly and eventually crashes.
Avoid calling make_bad_inode() on a live inode: switch to a private flag on
the fuse inode. Also add the test to ops which the bad_inode_ops would
have caught.
This bug goes back to the initial merge of fuse in 2.6.14...
Reported-by: [email protected]
Signed-off-by: Miklos Szeredi <[email protected]>
Tested-by: Jan Kara <[email protected]>
Cc: <[email protected]> |
void Flush() {
HandleScope scope;
Local<Value> cb = handle_->Get(on_headers_sym);
if (!cb->IsFunction())
return;
Local<Value> argv[2] = {
CreateHeaders(),
url_.ToString()
};
Local<Value> r = Local<Function>::Cast(cb)->Call(handle_, 2, argv);
if (r.IsEmpty())
got_exception_ = true;
url_.Reset();
have_flushed_ = true;
} | 0 | [] | node | 7b3fb22290c3b6acb497ca85cf2f1648d75c8154 | 267,767,791,949,057,740,000,000,000,000,000,000,000 | 21 | typo in node_http_parser |
static int mpls_route_del(struct mpls_route_config *cfg,
struct netlink_ext_ack *extack)
{
struct net *net = cfg->rc_nlinfo.nl_net;
unsigned index;
int err = -EINVAL;
index = cfg->rc_label;
if (!mpls_label_ok(net, &index, extack))
goto errout;
mpls_route_update(net, index, NULL, &cfg->rc_nlinfo);
err = 0;
errout:
return err;
} | 0 | [] | net | 6c8991f41546c3c472503dff1ea9daaddf9331c2 | 128,181,297,747,638,420,000,000,000,000,000,000,000 | 18 | net: ipv6_stub: use ip6_dst_lookup_flow instead of ip6_dst_lookup
ipv6_stub uses the ip6_dst_lookup function to allow other modules to
perform IPv6 lookups. However, this function skips the XFRM layer
entirely.
All users of ipv6_stub->ip6_dst_lookup use ip_route_output_flow (via the
ip_route_output_key and ip_route_output helpers) for their IPv4 lookups,
which calls xfrm_lookup_route(). This patch fixes this inconsistent
behavior by switching the stub to ip6_dst_lookup_flow, which also calls
xfrm_lookup_route().
This requires some changes in all the callers, as these two functions
take different arguments and have different return types.
Fixes: 5f81bd2e5d80 ("ipv6: export a stub for IPv6 symbols used by vxlan")
Reported-by: Xiumei Mu <[email protected]>
Signed-off-by: Sabrina Dubroca <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
static void p54u_disconnect(struct usb_interface *intf)
{
struct ieee80211_hw *dev = usb_get_intfdata(intf);
struct p54u_priv *priv;
if (!dev)
return;
priv = dev->priv;
wait_for_completion(&priv->fw_wait_load);
p54_unregister_common(dev);
release_firmware(priv->fw);
p54_free_common(dev);
} | 0 | [
"CWE-416"
] | linux | 6e41e2257f1094acc37618bf6c856115374c6922 | 167,382,408,257,419,370,000,000,000,000,000,000,000 | 15 | p54usb: Fix race between disconnect and firmware loading
The syzbot fuzzer found a bug in the p54 USB wireless driver. The
issue involves a race between disconnect and the firmware-loader
callback routine, and it has several aspects.
One big problem is that when the firmware can't be loaded, the
callback routine tries to unbind the driver from the USB _device_ (by
calling device_release_driver) instead of from the USB _interface_ to
which it is actually bound (by calling usb_driver_release_interface).
The race involves access to the private data structure. The driver's
disconnect handler waits for a completion that is signalled by the
firmware-loader callback routine. As soon as the completion is
signalled, you have to assume that the private data structure may have
been deallocated by the disconnect handler -- even if the firmware was
loaded without errors. However, the callback routine does access the
private data several times after that point.
Another problem is that, in order to ensure that the USB device
structure hasn't been freed when the callback routine runs, the driver
takes a reference to it. This isn't good enough any more, because now
that the callback routine calls usb_driver_release_interface, it has
to ensure that the interface structure hasn't been freed.
Finally, the driver takes an unnecessary reference to the USB device
structure in the probe function and drops the reference in the
disconnect handler. This extra reference doesn't accomplish anything,
because the USB core already guarantees that a device structure won't
be deallocated while a driver is still bound to any of its interfaces.
To fix these problems, this patch makes the following changes:
Call usb_driver_release_interface() rather than
device_release_driver().
Don't signal the completion until after the important
information has been copied out of the private data structure,
and don't refer to the private data at all thereafter.
Lock udev (the interface's parent) before unbinding the driver
instead of locking udev->parent.
During the firmware loading process, take a reference to the
USB interface instead of the USB device.
Don't take an unnecessary reference to the device during probe
(and then don't drop it during disconnect).
Signed-off-by: Alan Stern <[email protected]>
Reported-and-tested-by: [email protected]
CC: <[email protected]>
Acked-by: Christian Lamparter <[email protected]>
Signed-off-by: Kalle Valo <[email protected]> |
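A sketch of two of the changes listed above; driver and variable names follow the message where possible and are otherwise assumed:

/* Probe path: pin the interface (not the device) for the duration of
 * the asynchronous firmware load. */
usb_get_intf(intf);

/* Firmware-callback failure path: unbind from the interface we are
 * actually bound to, with udev (the interface's parent) locked. */
device_lock(&udev->dev);
usb_driver_release_interface(&p54u_driver, intf);
device_unlock(&udev->dev);
usb_put_intf(intf);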
uint32_t mg_crc32(uint32_t crc, const char *buf, size_t len) {
int i;
crc = ~crc;
while (len--) {
crc ^= *(unsigned char *) buf++;
for (i = 0; i < 8; i++) crc = crc & 1 ? (crc >> 1) ^ 0xedb88320 : crc >> 1;
}
return ~crc;
} | 0 | [
"CWE-552"
] | mongoose | c65c8fdaaa257e0487ab0aaae9e8f6b439335945 | 174,498,408,006,741,500,000,000,000,000,000,000,000 | 9 | Protect against the directory traversal in mg_upload() |
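The crc32 helper above is unrelated to the fix; the message concerns mg_upload(). A self-contained sketch of the kind of filename check a directory-traversal fix needs — this is not the actual mongoose patch:

#include <string.h>

/* Return 1 if the client-supplied name is safe to join to the upload
 * directory: no absolute paths, no drive prefixes, no ".." components. */
static int is_safe_upload_name(const char *name) {
  const char *p;
  if (name[0] == '\0' || name[0] == '/' || name[0] == '\\' ||
      strchr(name, ':') != NULL)
    return 0;
  for (p = name; (p = strstr(p, "..")) != NULL; p += 2) {
    int at_start = (p == name) || (p[-1] == '/') || (p[-1] == '\\');
    int at_end = (p[2] == '\0') || (p[2] == '/') || (p[2] == '\\');
    if (at_start && at_end) return 0;   /* a ".." path component */
  }
  return 1;
}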
int tls_collect_extensions(SSL *s, PACKET *packet, unsigned int context,
RAW_EXTENSION **res, size_t *len, int init)
{
PACKET extensions = *packet;
size_t i = 0;
size_t num_exts;
custom_ext_methods *exts = &s->cert->custext;
RAW_EXTENSION *raw_extensions = NULL;
const EXTENSION_DEFINITION *thisexd;
*res = NULL;
/*
* Initialise server side custom extensions. Client side is done during
* construction of extensions for the ClientHello.
*/
if ((context & SSL_EXT_CLIENT_HELLO) != 0)
custom_ext_init(&s->cert->custext);
num_exts = OSSL_NELEM(ext_defs) + (exts != NULL ? exts->meths_count : 0);
raw_extensions = OPENSSL_zalloc(num_exts * sizeof(*raw_extensions));
if (raw_extensions == NULL) {
SSLfatal(s, SSL_AD_INTERNAL_ERROR, SSL_F_TLS_COLLECT_EXTENSIONS,
ERR_R_MALLOC_FAILURE);
return 0;
}
i = 0;
while (PACKET_remaining(&extensions) > 0) {
unsigned int type, idx;
PACKET extension;
RAW_EXTENSION *thisex;
if (!PACKET_get_net_2(&extensions, &type) ||
!PACKET_get_length_prefixed_2(&extensions, &extension)) {
SSLfatal(s, SSL_AD_DECODE_ERROR, SSL_F_TLS_COLLECT_EXTENSIONS,
SSL_R_BAD_EXTENSION);
goto err;
}
/*
* Verify this extension is allowed. We only check duplicates for
* extensions that we recognise. We also have a special case for the
* PSK extension, which must be the last one in the ClientHello.
*/
if (!verify_extension(s, context, type, exts, raw_extensions, &thisex)
|| (thisex != NULL && thisex->present == 1)
|| (type == TLSEXT_TYPE_psk
&& (context & SSL_EXT_CLIENT_HELLO) != 0
&& PACKET_remaining(&extensions) != 0)) {
SSLfatal(s, SSL_AD_ILLEGAL_PARAMETER, SSL_F_TLS_COLLECT_EXTENSIONS,
SSL_R_BAD_EXTENSION);
goto err;
}
idx = thisex - raw_extensions;
/*-
* Check that we requested this extension (if appropriate). Requests can
* be sent in the ClientHello and CertificateRequest. Unsolicited
* extensions can be sent in the NewSessionTicket. We only do this for
* the built-in extensions. Custom extensions have a different but
* similar check elsewhere.
* Special cases:
* - The HRR cookie extension is unsolicited
* - The renegotiate extension is unsolicited (the client signals
* support via an SCSV)
* - The signed_certificate_timestamp extension can be provided by a
* custom extension or by the built-in version. We let the extension
* itself handle unsolicited response checks.
*/
if (idx < OSSL_NELEM(ext_defs)
&& (context & (SSL_EXT_CLIENT_HELLO
| SSL_EXT_TLS1_3_CERTIFICATE_REQUEST
| SSL_EXT_TLS1_3_NEW_SESSION_TICKET)) == 0
&& type != TLSEXT_TYPE_cookie
&& type != TLSEXT_TYPE_renegotiate
&& type != TLSEXT_TYPE_signed_certificate_timestamp
&& (s->ext.extflags[idx] & SSL_EXT_FLAG_SENT) == 0
#ifndef OPENSSL_NO_GOST
&& !((context & SSL_EXT_TLS1_2_SERVER_HELLO) != 0
&& type == TLSEXT_TYPE_cryptopro_bug)
#endif
) {
SSLfatal(s, SSL_AD_UNSUPPORTED_EXTENSION,
SSL_F_TLS_COLLECT_EXTENSIONS, SSL_R_UNSOLICITED_EXTENSION);
goto err;
}
if (thisex != NULL) {
thisex->data = extension;
thisex->present = 1;
thisex->type = type;
thisex->received_order = i++;
if (s->ext.debug_cb)
s->ext.debug_cb(s, !s->server, thisex->type,
PACKET_data(&thisex->data),
PACKET_remaining(&thisex->data),
s->ext.debug_arg);
}
}
if (init) {
/*
* Initialise all known extensions relevant to this context,
* whether we have found them or not
*/
for (thisexd = ext_defs, i = 0; i < OSSL_NELEM(ext_defs);
i++, thisexd++) {
if (thisexd->init != NULL && (thisexd->context & context) != 0
&& extension_is_relevant(s, thisexd->context, context)
&& !thisexd->init(s, context)) {
/* SSLfatal() already called */
goto err;
}
}
}
*res = raw_extensions;
if (len != NULL)
*len = num_exts;
return 1;
err:
OPENSSL_free(raw_extensions);
return 0;
} | 0 | [
"CWE-476"
] | openssl | fb9fa6b51defd48157eeb207f52181f735d96148 | 194,182,538,725,248,530,000,000,000,000,000,000,000 | 123 | ssl sigalg extension: fix NULL pointer dereference
As the variable peer_sigalgslen is not cleared on ssl rehandshake, it's
possible to crash an OpenSSL TLS-secured server remotely by sending a
manipulated hello message in a rehandshake.
On such a manipulated rehandshake, tls1_set_shared_sigalgs() calls
tls12_shared_sigalgs() with the peer_sigalgslen of the previous
handshake, while the peer_sigalgs has been freed.
As a result tls12_shared_sigalgs() walks over the available
peer_sigalgs and tries to access data of a NULL pointer.
This issue was introduced by c589c34e61 (Add support for the TLS 1.3
signature_algorithms_cert extension, 2018-01-11).
Signed-off-by: Peter Kästle <[email protected]>
Signed-off-by: Samuel Sapalski <[email protected]>
CVE-2021-3449
CLA: trivial
Reviewed-by: Tomas Mraz <[email protected]>
Reviewed-by: Paul Dale <[email protected]>
Reviewed-by: Matt Caswell <[email protected]> |
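A sketch of the shape of the fix described: wherever the previous handshake's peer signature algorithms are freed, the cached length must be reset as well. The field names follow the message; their exact location in the SSL object is an assumption:

OPENSSL_free(s->s3->tmp.peer_sigalgs);
s->s3->tmp.peer_sigalgs = NULL;
s->s3->tmp.peer_sigalgslen = 0;  /* the missing reset: without it a
                                  * rehandshake walks freed memory */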
hook_process_add_to_buffer (struct t_hook *hook_process, int index_buffer,
const char *buffer, int size)
{
if (HOOK_PROCESS(hook_process, buffer_size[index_buffer]) + size > HOOK_PROCESS_BUFFER_SIZE)
hook_process_send_buffers (hook_process, WEECHAT_HOOK_PROCESS_RUNNING);
memcpy (HOOK_PROCESS(hook_process, buffer[index_buffer]) +
HOOK_PROCESS(hook_process, buffer_size[index_buffer]),
buffer, size);
HOOK_PROCESS(hook_process, buffer_size[index_buffer]) += size;
} | 0 | [
"CWE-20"
] | weechat | c265cad1c95b84abfd4e8d861f25926ef13b5d91 | 99,630,524,320,888,980,000,000,000,000,000,000,000 | 11 | Fix verification of SSL certificates by calling gnutls verify callback (patch #7459) |
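A sketch of wiring a GnuTLS verify callback as the message describes; the callback body is illustrative, not WeeChat's actual certificate check:

static int check_certificate (gnutls_session_t session)
{
    unsigned int status = 0;

    if (gnutls_certificate_verify_peers2 (session, &status) < 0)
        return GNUTLS_E_CERTIFICATE_ERROR;
    /* non-zero status means the chain failed verification */
    return (status == 0) ? 0 : GNUTLS_E_CERTIFICATE_ERROR;
}

/* registered once on the credentials object: */
gnutls_certificate_set_verify_function (xcred, check_certificate);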
lseg_parallel(PG_FUNCTION_ARGS)
{
LSEG *l1 = PG_GETARG_LSEG_P(0);
LSEG *l2 = PG_GETARG_LSEG_P(1);
#ifdef NOT_USED
PG_RETURN_BOOL(FPeq(l1->m, l2->m));
#endif
PG_RETURN_BOOL(FPeq(point_sl(&l1->p[0], &l1->p[1]),
point_sl(&l2->p[0], &l2->p[1])));
} | 0 | [
"CWE-703",
"CWE-189"
] | postgres | 31400a673325147e1205326008e32135a78b4d8a | 323,170,212,879,772,400,000,000,000,000,000,000,000 | 11 | Predict integer overflow to avoid buffer overruns.
Several functions, mostly type input functions, calculated an allocation
size such that the calculation wrapped to a small positive value when
arguments implied a sufficiently-large requirement. Writes past the end
of the inadvertent small allocation followed shortly thereafter.
Coverity identified the path_in() vulnerability; code inspection led to
the rest. In passing, add check_stack_depth() to prevent stack overflow
in related functions.
Back-patch to 8.4 (all supported versions). The non-comment hstore
changes touch code that did not exist in 8.4, so that part stops at 9.0.
Noah Misch and Heikki Linnakangas, reviewed by Tom Lane.
Security: CVE-2014-0064 |
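A sketch of the predicted-overflow check the message describes, modeled on the path_in() case; the error codes and struct layout are PostgreSQL conventions but the exact fragment is an assumption:

/* Refuse point counts whose size computation would wrap. */
if (npts <= 0 ||
    npts >= (int) ((INT_MAX - offsetof(PATH, p)) / sizeof(Point)))
    ereport(ERROR,
            (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
             errmsg("invalid number of points in external \"path\" value")));

size = offsetof(PATH, p) + sizeof(Point) * npts;
path = (PATH *) palloc(size);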
remove_timers (GSManager *manager)
{
remove_lock_timer (manager);
remove_cycle_timer (manager);
} | 0 | [] | gnome-screensaver | 2f597ea9f1f363277fd4dfc109fa41bbc6225aca | 162,589,882,926,627,130,000,000,000,000,000,000,000 | 5 | Fix adding monitors
Make sure to show windows that are added, and fix an off-by-one bug. |
UnicodeString::countChar32(int32_t start, int32_t length) const {
pinIndices(start, length);
// if(isBogus()) then fArray==0 and start==0 - u_countChar32() checks for NULL
return u_countChar32(getArrayStart()+start, length);
} | 0 | [
"CWE-190",
"CWE-787"
] | icu | b7d08bc04a4296982fcef8b6b8a354a9e4e7afca | 3,739,006,231,853,787,600,000,000,000,000,000,000 | 5 | ICU-20958 Prevent SEGV_MAPERR in append
See #971 |
static inline size_t xfrm_sa_len(struct xfrm_state *x)
{
size_t l = 0;
if (x->aead)
l += nla_total_size(aead_len(x->aead));
if (x->aalg) {
l += nla_total_size(sizeof(struct xfrm_algo) +
(x->aalg->alg_key_len + 7) / 8);
l += nla_total_size(xfrm_alg_auth_len(x->aalg));
}
if (x->ealg)
l += nla_total_size(xfrm_alg_len(x->ealg));
if (x->calg)
l += nla_total_size(sizeof(*x->calg));
if (x->encap)
l += nla_total_size(sizeof(*x->encap));
if (x->tfcpad)
l += nla_total_size(sizeof(x->tfcpad));
if (x->replay_esn)
l += nla_total_size(xfrm_replay_state_esn_len(x->replay_esn));
if (x->security)
l += nla_total_size(sizeof(struct xfrm_user_sec_ctx) +
x->security->ctx_len);
if (x->coaddr)
l += nla_total_size(sizeof(*x->coaddr));
/* Must count x->lastused as it may become non-zero behind our back. */
l += nla_total_size(sizeof(u64));
return l;
} | 0 | [
"CWE-200"
] | linux | 1f86840f897717f86d523a13e99a447e6a5d2fa5 | 112,235,709,194,035,000,000,000,000,000,000,000,000 | 31 | xfrm_user: fix info leak in copy_to_user_tmpl()
The memory used for the template copy is a local stack variable. As
struct xfrm_user_tmpl contains multiple holes added by the compiler for
alignment, not initializing the memory will lead to leaking stack bytes
to userland. Add an explicit memset(0) to avoid the info leak.
Initial version of the patch by Brad Spengler.
Cc: Brad Spengler <[email protected]>
Signed-off-by: Mathias Krause <[email protected]>
Acked-by: Steffen Klassert <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
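A condensed sketch of the fixed function as the message describes it; only the memset is the change, and the elided field copies are assumed:

static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
{
	struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
	int i;

	if (xp->xfrm_nr == 0)
		return 0;

	memset(vec, 0, sizeof(vec));	/* the added memset: zero the
					 * compiler's padding holes */
	for (i = 0; i < xp->xfrm_nr; i++) {
		struct xfrm_user_tmpl *up = &vec[i];
		/* ... field-by-field copy as before ... */
	}
	return nla_put(skb, XFRMA_TMPL, sizeof(vec[0]) * xp->xfrm_nr, vec);
}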
static inline char *
expand_string_to_string_internal (string, quoted, func)
char *string;
int quoted;
EXPFUNC *func;
{
WORD_LIST *list;
char *ret;
if (string == 0 || *string == '\0')
return ((char *)NULL);
list = (*func) (string, quoted);
if (list)
{
ret = string_list (list);
dispose_words (list);
}
else
ret = (char *)NULL;
return (ret);
} | 0 | [
"CWE-20"
] | bash | 4f747edc625815f449048579f6e65869914dd715 | 25,948,974,384,613,560,000,000,000,000,000,000,000 | 22 | Bash-4.4 patch 7 |
static void *spl_ptr_llist_last(spl_ptr_llist *llist) /* {{{ */
{
spl_ptr_llist_element *tail = llist->tail;
if (tail == NULL) {
return NULL;
} else {
return tail->data;
}
} | 0 | [] | php-src | df78c48354f376cf419d7a97f88ca07d572f00fb | 72,025,521,363,507,530,000,000,000,000,000,000,000 | 10 | Fixed Bug #67538 (SPL Iterators use-after-free) |
struct dce_hwseq *dcn20_hwseq_create(
struct dc_context *ctx)
{
struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
if (hws) {
hws->ctx = ctx;
hws->regs = &hwseq_reg;
hws->shifts = &hwseq_shift;
hws->masks = &hwseq_mask;
}
return hws;
} | 0 | [
"CWE-400",
"CWE-703",
"CWE-401"
] | linux | 055e547478a11a6360c7ce05e2afc3e366968a12 | 89,759,929,247,264,670,000,000,000,000,000,000,000 | 13 | drm/amd/display: memory leak
In dcn*_clock_source_create, when dcn20_clk_src_construct fails, the allocated
clk_src needs to be released.
Signed-off-by: Navid Emamdoost <[email protected]>
Signed-off-by: Alex Deucher <[email protected]> |
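A sketch of the failure-path fix described; the constructor's argument list is an assumption:

struct dce110_clk_src *clk_src = kzalloc(sizeof(*clk_src), GFP_KERNEL);

if (!clk_src)
	return NULL;

if (!dcn20_clk_src_construct(clk_src, ctx, id, regs,
			     &cs_shift, &cs_mask)) {
	kfree(clk_src);		/* previously leaked on this path */
	BREAK_TO_DEBUGGER();
	return NULL;
}
return &clk_src->base;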
fr_window_get_batch_title (FrWindow *window)
{
return window->priv->batch_title;
} | 0 | [
"CWE-22"
] | file-roller | b147281293a8307808475e102a14857055f81631 | 138,530,960,839,758,480,000,000,000,000,000,000,000 | 4 | libarchive: sanitize filenames before extracting |
parse_keyblock_image (iobuf_t iobuf, int pk_no, int uid_no,
const u32 *sigstatus, kbnode_t *r_keyblock)
{
gpg_error_t err;
PACKET *pkt;
kbnode_t keyblock = NULL;
kbnode_t node, *tail;
int in_cert, save_mode;
u32 n_sigs;
int pk_count, uid_count;
*r_keyblock = NULL;
pkt = xtrymalloc (sizeof *pkt);
if (!pkt)
return gpg_error_from_syserror ();
init_packet (pkt);
save_mode = set_packet_list_mode (0);
in_cert = 0;
n_sigs = 0;
tail = NULL;
pk_count = uid_count = 0;
while ((err = parse_packet (iobuf, pkt)) != -1)
{
if (gpg_err_code (err) == GPG_ERR_UNKNOWN_PACKET)
{
free_packet (pkt);
init_packet (pkt);
continue;
}
if (err)
{
log_error ("parse_keyblock_image: read error: %s\n",
gpg_strerror (err));
err = gpg_error (GPG_ERR_INV_KEYRING);
break;
}
if (pkt->pkttype == PKT_COMPRESSED)
{
log_error ("skipped compressed packet in keybox blob\n");
free_packet(pkt);
init_packet(pkt);
continue;
}
if (pkt->pkttype == PKT_RING_TRUST)
{
log_info ("skipped ring trust packet in keybox blob\n");
free_packet(pkt);
init_packet(pkt);
continue;
}
if (!in_cert && pkt->pkttype != PKT_PUBLIC_KEY)
{
log_error ("parse_keyblock_image: first packet in a keybox blob "
"is not a public key packet\n");
err = gpg_error (GPG_ERR_INV_KEYRING);
break;
}
if (in_cert && (pkt->pkttype == PKT_PUBLIC_KEY
|| pkt->pkttype == PKT_SECRET_KEY))
{
log_error ("parse_keyblock_image: "
"multiple keyblocks in a keybox blob\n");
err = gpg_error (GPG_ERR_INV_KEYRING);
break;
}
in_cert = 1;
if (pkt->pkttype == PKT_SIGNATURE && sigstatus)
{
PKT_signature *sig = pkt->pkt.signature;
n_sigs++;
if (n_sigs > sigstatus[0])
{
log_error ("parse_keyblock_image: "
"more signatures than found in the meta data\n");
err = gpg_error (GPG_ERR_INV_KEYRING);
break;
}
if (sigstatus[n_sigs])
{
sig->flags.checked = 1;
if (sigstatus[n_sigs] == 1 )
; /* missing key */
else if (sigstatus[n_sigs] == 2 )
; /* bad signature */
else if (sigstatus[n_sigs] < 0x10000000)
; /* bad flag */
else
{
sig->flags.valid = 1;
/* Fixme: Shall we set the expired flag here? */
}
}
}
node = new_kbnode (pkt);
switch (pkt->pkttype)
{
case PKT_PUBLIC_KEY:
case PKT_PUBLIC_SUBKEY:
case PKT_SECRET_KEY:
case PKT_SECRET_SUBKEY:
if (++pk_count == pk_no)
node->flag |= 1;
break;
case PKT_USER_ID:
if (++uid_count == uid_no)
node->flag |= 2;
break;
default:
break;
}
if (!keyblock)
keyblock = node;
else
*tail = node;
tail = &node->next;
pkt = xtrymalloc (sizeof *pkt);
if (!pkt)
{
err = gpg_error_from_syserror ();
break;
}
init_packet (pkt);
}
set_packet_list_mode (save_mode);
if (err == -1 && keyblock)
err = 0; /* Got the entire keyblock. */
if (!err && sigstatus && n_sigs != sigstatus[0])
{
log_error ("parse_keyblock_image: signature count does not match\n");
err = gpg_error (GPG_ERR_INV_KEYRING);
}
if (err)
release_kbnode (keyblock);
else
*r_keyblock = keyblock;
free_packet (pkt);
xfree (pkt);
return err;
} | 1 | [
"CWE-416"
] | gnupg | f0f71a721ccd7ab9e40b8b6b028b59632c0cc648 | 46,344,756,122,396,080,000,000,000,000,000,000,000 | 152 | gpg: Prevent an invalid memory read using a garbled keyring.
* g10/keyring.c (keyring_get_keyblock): Whitelist allowed packet
types.
* g10/keydb.c (parse_keyblock_image): Ditto.
--
The keyring DB code did not reject packets which don't belong into a
keyring. If for example the keyblock contains a literal data packet
it is expected that the processing code stops at the data packet and
reads from the input stream which is referenced from the data packets.
Obviously the keyring processing code does not and cannot do that.
However, when exporting this messes up the IOBUF and leads to an
invalid read of sizeof (int).
We now skip all packets which are not allowed in a keyring.
Reported-by: Hanno Böck <[email protected]>
Test data:
gpg2 --no-default-keyring --keyring FILE --export >/dev/null
With this unpacked data for FILE:
-----BEGIN PGP ARMORED FILE-----
mI0EVNP2zQEEALvETPVDCJDBXkegF4esiV1fqlne40yJnCmJeDEJYocwFPXfFA86
sSGjInzgDbpbC9gQPwq91Qe9x3Vy81CkyVonPOejhINlzfpzqAAa3A6viJccZTwt
DJ8E/I9jg53sbYW8q+VgfLn1hlggH/XQRT0HkXMP5y9ClURYnTsNwJhXABEBAAGs
CXRlc3QgdGVzdIi5BBMBCgAjBQJU0/bNAhsDBwsJCAcDAgEGFQgCCQoLBBYCAwEC
HgECF4AACgkQlsmuCapsqYLvtQP/byY0tM0Lc3moftbHQZ2eHj9ykLjsCjeMDfPx
kZUUtUS3HQaqgZLZOeqPjM7XgGh5hJsd9pfhmRWJ0x+iGB47XQNpRTtdLBV/WMCS
l5z3uW7e9Md7QVUVuSlJnBgQHTS6EgP8JQadPkAiF+jgpJZXP+gFs2j3gobS0qUF
eyTtxs+wAgAD
=uIt9
-----END PGP ARMORED FILE-----
Signed-off-by: Werner Koch <[email protected]> |
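A sketch of the whitelist described above: only packet types that belong in a keyring are kept, and everything else is skipped. Condensed from the shape of the loop in the record, not a verbatim excerpt of the patch:

switch (pkt->pkttype)
  {
  case PKT_PUBLIC_KEY:
  case PKT_PUBLIC_SUBKEY:
  case PKT_SECRET_KEY:
  case PKT_SECRET_SUBKEY:
  case PKT_USER_ID:
  case PKT_ATTRIBUTE:
  case PKT_SIGNATURE:
    break; /* Allowed per RFC 4880.  */
  default:
    /* Skip anything else: it does not belong in a keyring, and
     * following it would desynchronize the input stream.  */
    free_packet (pkt);
    init_packet (pkt);
    continue;
  }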
static Image *ReadVIFFImage(const ImageInfo *image_info,
ExceptionInfo *exception)
{
#define VFF_CM_genericRGB 15
#define VFF_CM_ntscRGB 1
#define VFF_CM_NONE 0
#define VFF_DEP_DECORDER 0x4
#define VFF_DEP_NSORDER 0x8
#define VFF_DES_RAW 0
#define VFF_LOC_IMPLICIT 1
#define VFF_MAPTYP_NONE 0
#define VFF_MAPTYP_1_BYTE 1
#define VFF_MAPTYP_2_BYTE 2
#define VFF_MAPTYP_4_BYTE 4
#define VFF_MAPTYP_FLOAT 5
#define VFF_MAPTYP_DOUBLE 7
#define VFF_MS_NONE 0
#define VFF_MS_ONEPERBAND 1
#define VFF_MS_SHARED 3
#define VFF_TYP_BIT 0
#define VFF_TYP_1_BYTE 1
#define VFF_TYP_2_BYTE 2
#define VFF_TYP_4_BYTE 4
#define VFF_TYP_FLOAT 5
#define VFF_TYP_DOUBLE 9
typedef struct _ViffInfo
{
unsigned char
identifier,
file_type,
release,
version,
machine_dependency,
reserve[3];
char
comment[512];
unsigned int
rows,
columns,
subrows;
int
x_offset,
y_offset;
float
x_bits_per_pixel,
y_bits_per_pixel;
unsigned int
location_type,
location_dimension,
number_of_images,
number_data_bands,
data_storage_type,
data_encode_scheme,
map_scheme,
map_storage_type,
map_rows,
map_columns,
map_subrows,
map_enable,
maps_per_cycle,
color_space_model;
} ViffInfo;
double
min_value,
scale_factor,
value;
Image
*image;
int
bit;
MagickBooleanType
status;
MagickSizeType
number_pixels;
register ssize_t
x;
register Quantum
*q;
register ssize_t
i;
register unsigned char
*p;
size_t
bytes_per_pixel,
max_packets,
quantum;
ssize_t
count,
y;
unsigned char
*pixels;
unsigned long
lsb_first;
ViffInfo
viff_info;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage(image_info,exception);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Read VIFF header (1024 bytes).
*/
count=ReadBlob(image,1,&viff_info.identifier);
do
{
/*
Verify VIFF identifier.
*/
if ((count != 1) || ((unsigned char) viff_info.identifier != 0xab))
ThrowReaderException(CorruptImageError,"NotAVIFFImage");
/*
Initialize VIFF image.
*/
(void) ReadBlob(image,sizeof(viff_info.file_type),&viff_info.file_type);
(void) ReadBlob(image,sizeof(viff_info.release),&viff_info.release);
(void) ReadBlob(image,sizeof(viff_info.version),&viff_info.version);
(void) ReadBlob(image,sizeof(viff_info.machine_dependency),
&viff_info.machine_dependency);
(void) ReadBlob(image,sizeof(viff_info.reserve),viff_info.reserve);
count=ReadBlob(image,512,(unsigned char *) viff_info.comment);
if (count != 512)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
viff_info.comment[511]='\0';
if (strlen(viff_info.comment) > 4)
(void) SetImageProperty(image,"comment",viff_info.comment,exception);
if ((viff_info.machine_dependency == VFF_DEP_DECORDER) ||
(viff_info.machine_dependency == VFF_DEP_NSORDER))
image->endian=LSBEndian;
else
image->endian=MSBEndian;
viff_info.rows=ReadBlobLong(image);
viff_info.columns=ReadBlobLong(image);
viff_info.subrows=ReadBlobLong(image);
viff_info.x_offset=ReadBlobSignedLong(image);
viff_info.y_offset=ReadBlobSignedLong(image);
viff_info.x_bits_per_pixel=(float) ReadBlobLong(image);
viff_info.y_bits_per_pixel=(float) ReadBlobLong(image);
viff_info.location_type=ReadBlobLong(image);
viff_info.location_dimension=ReadBlobLong(image);
viff_info.number_of_images=ReadBlobLong(image);
viff_info.number_data_bands=ReadBlobLong(image);
viff_info.data_storage_type=ReadBlobLong(image);
viff_info.data_encode_scheme=ReadBlobLong(image);
viff_info.map_scheme=ReadBlobLong(image);
viff_info.map_storage_type=ReadBlobLong(image);
viff_info.map_rows=ReadBlobLong(image);
viff_info.map_columns=ReadBlobLong(image);
viff_info.map_subrows=ReadBlobLong(image);
viff_info.map_enable=ReadBlobLong(image);
viff_info.maps_per_cycle=ReadBlobLong(image);
viff_info.color_space_model=ReadBlobLong(image);
for (i=0; i < 420; i++)
(void) ReadBlobByte(image);
if (EOFBlob(image) != MagickFalse)
ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
image->columns=viff_info.rows;
image->rows=viff_info.columns;
image->depth=viff_info.x_bits_per_pixel <= 8 ? 8UL :
MAGICKCORE_QUANTUM_DEPTH;
image->alpha_trait=viff_info.number_data_bands == 4 ? BlendPixelTrait :
UndefinedPixelTrait;
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
(void) SetImageBackgroundColor(image,exception);
/*
Verify that we can read this VIFF image.
*/
number_pixels=(MagickSizeType) viff_info.columns*viff_info.rows;
if (number_pixels != (size_t) number_pixels)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
if (number_pixels == 0)
ThrowReaderException(CoderError,"ImageColumnOrRowSizeIsNotSupported");
if ((viff_info.number_data_bands < 1) || (viff_info.number_data_bands > 4))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if ((viff_info.data_storage_type != VFF_TYP_BIT) &&
(viff_info.data_storage_type != VFF_TYP_1_BYTE) &&
(viff_info.data_storage_type != VFF_TYP_2_BYTE) &&
(viff_info.data_storage_type != VFF_TYP_4_BYTE) &&
(viff_info.data_storage_type != VFF_TYP_FLOAT) &&
(viff_info.data_storage_type != VFF_TYP_DOUBLE))
ThrowReaderException(CoderError,"DataStorageTypeIsNotSupported");
if (viff_info.data_encode_scheme != VFF_DES_RAW)
ThrowReaderException(CoderError,"DataEncodingSchemeIsNotSupported");
if ((viff_info.map_storage_type != VFF_MAPTYP_NONE) &&
(viff_info.map_storage_type != VFF_MAPTYP_1_BYTE) &&
(viff_info.map_storage_type != VFF_MAPTYP_2_BYTE) &&
(viff_info.map_storage_type != VFF_MAPTYP_4_BYTE) &&
(viff_info.map_storage_type != VFF_MAPTYP_FLOAT) &&
(viff_info.map_storage_type != VFF_MAPTYP_DOUBLE))
ThrowReaderException(CoderError,"MapStorageTypeIsNotSupported");
if ((viff_info.color_space_model != VFF_CM_NONE) &&
(viff_info.color_space_model != VFF_CM_ntscRGB) &&
(viff_info.color_space_model != VFF_CM_genericRGB))
ThrowReaderException(CoderError,"ColorspaceModelIsNotSupported");
if (viff_info.location_type != VFF_LOC_IMPLICIT)
ThrowReaderException(CoderError,"LocationTypeIsNotSupported");
if (viff_info.number_of_images != 1)
ThrowReaderException(CoderError,"NumberOfImagesIsNotSupported");
if (viff_info.map_rows == 0)
viff_info.map_scheme=VFF_MS_NONE;
switch ((int) viff_info.map_scheme)
{
case VFF_MS_NONE:
{
if (viff_info.number_data_bands < 3)
{
/*
Create linear color ramp.
*/
if (viff_info.data_storage_type == VFF_TYP_BIT)
image->colors=2;
else
if (viff_info.data_storage_type == VFF_MAPTYP_1_BYTE)
image->colors=256UL;
else
image->colors=image->depth <= 8 ? 256UL : 65536UL;
status=AcquireImageColormap(image,image->colors,exception);
if (status == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
}
break;
}
case VFF_MS_ONEPERBAND:
case VFF_MS_SHARED:
{
unsigned char
*viff_colormap;
/*
Allocate VIFF colormap.
*/
switch ((int) viff_info.map_storage_type)
{
case VFF_MAPTYP_1_BYTE: bytes_per_pixel=1; break;
case VFF_MAPTYP_2_BYTE: bytes_per_pixel=2; break;
case VFF_MAPTYP_4_BYTE: bytes_per_pixel=4; break;
case VFF_MAPTYP_FLOAT: bytes_per_pixel=4; break;
case VFF_MAPTYP_DOUBLE: bytes_per_pixel=8; break;
default: bytes_per_pixel=1; break;
}
image->colors=viff_info.map_columns;
if ((MagickSizeType) (viff_info.map_rows*image->colors) > GetBlobSize(image))
ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
if (AcquireImageColormap(image,image->colors,exception) == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
if ((MagickSizeType) viff_info.map_rows > GetBlobSize(image))
ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
if ((MagickSizeType) viff_info.map_rows >
(viff_info.map_rows*bytes_per_pixel*sizeof(*viff_colormap)))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
viff_colormap=(unsigned char *) AcquireQuantumMemory(image->colors,
viff_info.map_rows*bytes_per_pixel*sizeof(*viff_colormap));
if (viff_colormap == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
/*
Read VIFF raster colormap.
*/
count=ReadBlob(image,bytes_per_pixel*image->colors*viff_info.map_rows,
viff_colormap);
lsb_first=1;
if (*(char *) &lsb_first &&
((viff_info.machine_dependency != VFF_DEP_DECORDER) &&
(viff_info.machine_dependency != VFF_DEP_NSORDER)))
switch ((int) viff_info.map_storage_type)
{
case VFF_MAPTYP_2_BYTE:
{
MSBOrderShort(viff_colormap,(bytes_per_pixel*image->colors*
viff_info.map_rows));
break;
}
case VFF_MAPTYP_4_BYTE:
case VFF_MAPTYP_FLOAT:
{
MSBOrderLong(viff_colormap,(bytes_per_pixel*image->colors*
viff_info.map_rows));
break;
}
default: break;
}
for (i=0; i < (ssize_t) (viff_info.map_rows*image->colors); i++)
{
switch ((int) viff_info.map_storage_type)
{
case VFF_MAPTYP_2_BYTE: value=1.0*((short *) viff_colormap)[i]; break;
case VFF_MAPTYP_4_BYTE: value=1.0*((int *) viff_colormap)[i]; break;
case VFF_MAPTYP_FLOAT: value=((float *) viff_colormap)[i]; break;
case VFF_MAPTYP_DOUBLE: value=((double *) viff_colormap)[i]; break;
default: value=1.0*viff_colormap[i]; break;
}
if (i < (ssize_t) image->colors)
{
image->colormap[i].red=(MagickRealType)
ScaleCharToQuantum((unsigned char) value);
image->colormap[i].green=(MagickRealType)
ScaleCharToQuantum((unsigned char) value);
image->colormap[i].blue=(MagickRealType)
ScaleCharToQuantum((unsigned char) value);
}
else
if (i < (ssize_t) (2*image->colors))
image->colormap[i % image->colors].green=(MagickRealType)
ScaleCharToQuantum((unsigned char) value);
else
if (i < (ssize_t) (3*image->colors))
image->colormap[i % image->colors].blue=(MagickRealType)
ScaleCharToQuantum((unsigned char) value);
}
viff_colormap=(unsigned char *) RelinquishMagickMemory(viff_colormap);
break;
}
default:
ThrowReaderException(CoderError,"ColormapTypeNotSupported");
}
if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0))
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
if (viff_info.data_storage_type == VFF_TYP_BIT)
{
/*
Create bi-level colormap.
*/
image->colors=2;
if (AcquireImageColormap(image,image->colors,exception) == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
image->colorspace=GRAYColorspace;
}
/*
Allocate VIFF pixels.
*/
switch ((int) viff_info.data_storage_type)
{
case VFF_TYP_2_BYTE: bytes_per_pixel=2; break;
case VFF_TYP_4_BYTE: bytes_per_pixel=4; break;
case VFF_TYP_FLOAT: bytes_per_pixel=4; break;
case VFF_TYP_DOUBLE: bytes_per_pixel=8; break;
default: bytes_per_pixel=1; break;
}
if (viff_info.data_storage_type == VFF_TYP_BIT)
{
if (HeapOverflowSanityCheck((image->columns+7UL) >> 3UL,image->rows) != MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
max_packets=((image->columns+7UL) >> 3UL)*image->rows;
}
else
{
if (HeapOverflowSanityCheck((size_t) number_pixels,viff_info.number_data_bands) != MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
max_packets=(size_t) (number_pixels*viff_info.number_data_bands);
}
if ((MagickSizeType) (bytes_per_pixel*max_packets) > GetBlobSize(image))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
pixels=(unsigned char *) AcquireQuantumMemory((size_t) MagickMax(
number_pixels,max_packets),bytes_per_pixel*sizeof(*pixels));
if (pixels == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
(void) memset(pixels,0,MagickMax(number_pixels,max_packets)*
bytes_per_pixel*sizeof(*pixels));
count=ReadBlob(image,bytes_per_pixel*max_packets,pixels);
lsb_first=1;
if (*(char *) &lsb_first &&
((viff_info.machine_dependency != VFF_DEP_DECORDER) &&
(viff_info.machine_dependency != VFF_DEP_NSORDER)))
switch ((int) viff_info.data_storage_type)
{
case VFF_TYP_2_BYTE:
{
MSBOrderShort(pixels,bytes_per_pixel*max_packets);
break;
}
case VFF_TYP_4_BYTE:
case VFF_TYP_FLOAT:
{
MSBOrderLong(pixels,bytes_per_pixel*max_packets);
break;
}
default: break;
}
min_value=0.0;
scale_factor=1.0;
if ((viff_info.data_storage_type != VFF_TYP_1_BYTE) &&
(viff_info.map_scheme == VFF_MS_NONE))
{
double
max_value;
/*
Determine scale factor.
*/
switch ((int) viff_info.data_storage_type)
{
case VFF_TYP_2_BYTE: value=1.0*((short *) pixels)[0]; break;
case VFF_TYP_4_BYTE: value=1.0*((int *) pixels)[0]; break;
case VFF_TYP_FLOAT: value=((float *) pixels)[0]; break;
case VFF_TYP_DOUBLE: value=((double *) pixels)[0]; break;
default: value=1.0*pixels[0]; break;
}
max_value=value;
min_value=value;
for (i=0; i < (ssize_t) max_packets; i++)
{
switch ((int) viff_info.data_storage_type)
{
case VFF_TYP_2_BYTE: value=1.0*((short *) pixels)[i]; break;
case VFF_TYP_4_BYTE: value=1.0*((int *) pixels)[i]; break;
case VFF_TYP_FLOAT: value=((float *) pixels)[i]; break;
case VFF_TYP_DOUBLE: value=((double *) pixels)[i]; break;
default: value=1.0*pixels[i]; break;
}
if (value > max_value)
max_value=value;
else
if (value < min_value)
min_value=value;
}
if ((min_value == 0) && (max_value == 0))
scale_factor=0;
else
if (min_value == max_value)
{
scale_factor=(double) QuantumRange/min_value;
min_value=0;
}
else
scale_factor=(double) QuantumRange/(max_value-min_value);
}
/*
Convert pixels to Quantum size.
*/
p=(unsigned char *) pixels;
for (i=0; i < (ssize_t) max_packets; i++)
{
switch ((int) viff_info.data_storage_type)
{
case VFF_TYP_2_BYTE: value=1.0*((short *) pixels)[i]; break;
case VFF_TYP_4_BYTE: value=1.0*((int *) pixels)[i]; break;
case VFF_TYP_FLOAT: value=((float *) pixels)[i]; break;
case VFF_TYP_DOUBLE: value=((double *) pixels)[i]; break;
default: value=1.0*pixels[i]; break;
}
if (viff_info.map_scheme == VFF_MS_NONE)
{
value=(value-min_value)*scale_factor;
if (value > QuantumRange)
value=QuantumRange;
else
if (value < 0)
value=0;
}
*p=(unsigned char) ((Quantum) value);
p++;
}
/*
Convert VIFF raster image to pixel packets.
*/
p=(unsigned char *) pixels;
if (viff_info.data_storage_type == VFF_TYP_BIT)
{
/*
Convert bitmap scanline.
*/
for (y=0; y < (ssize_t) image->rows; y++)
{
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) (image->columns-7); x+=8)
{
for (bit=0; bit < 8; bit++)
{
quantum=(size_t) ((*p) & (0x01 << bit) ? 0 : 1);
SetPixelRed(image,quantum == 0 ? 0 : QuantumRange,q);
SetPixelGreen(image,quantum == 0 ? 0 : QuantumRange,q);
SetPixelBlue(image,quantum == 0 ? 0 : QuantumRange,q);
if (image->storage_class == PseudoClass)
SetPixelIndex(image,(Quantum) quantum,q);
q+=GetPixelChannels(image);
}
p++;
}
if ((image->columns % 8) != 0)
{
for (bit=0; bit < (int) (image->columns % 8); bit++)
{
quantum=(size_t) ((*p) & (0x01 << bit) ? 0 : 1);
SetPixelRed(image,quantum == 0 ? 0 : QuantumRange,q);
SetPixelGreen(image,quantum == 0 ? 0 : QuantumRange,q);
SetPixelBlue(image,quantum == 0 ? 0 : QuantumRange,q);
if (image->storage_class == PseudoClass)
SetPixelIndex(image,(Quantum) quantum,q);
q+=GetPixelChannels(image);
}
p++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
}
else
if (image->storage_class == PseudoClass)
for (y=0; y < (ssize_t) image->rows; y++)
{
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelIndex(image,*p++,q);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
else
{
/*
Convert DirectColor scanline.
*/
number_pixels=(MagickSizeType) image->columns*image->rows;
for (y=0; y < (ssize_t) image->rows; y++)
{
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(image,ScaleCharToQuantum(*p),q);
SetPixelGreen(image,ScaleCharToQuantum(*(p+number_pixels)),q);
SetPixelBlue(image,ScaleCharToQuantum(*(p+2*number_pixels)),q);
if (image->colors != 0)
{
ssize_t
index;
index=(ssize_t) GetPixelRed(image,q);
SetPixelRed(image,ClampToQuantum(image->colormap[
ConstrainColormapIndex(image,index,exception)].red),q);
index=(ssize_t) GetPixelGreen(image,q);
SetPixelGreen(image,ClampToQuantum(image->colormap[
ConstrainColormapIndex(image,index,exception)].green),q);
index=(ssize_t) GetPixelBlue(image,q);
SetPixelBlue(image,ClampToQuantum(image->colormap[
ConstrainColormapIndex(image,index,exception)].blue),q);
}
SetPixelAlpha(image,image->alpha_trait != UndefinedPixelTrait ?
ScaleCharToQuantum(*(p+number_pixels*3)) : OpaqueAlpha,q);
p++;
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
}
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
if (image->storage_class == PseudoClass)
(void) SyncImage(image,exception);
if (EOFBlob(image) != MagickFalse)
{
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
break;
}
/*
Proceed to next image.
*/
if (image_info->number_scenes != 0)
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
count=ReadBlob(image,1,&viff_info.identifier);
if ((count == 1) && (viff_info.identifier == 0xab))
{
/*
Allocate next image structure.
*/
AcquireNextImage(image_info,image,exception);
if (GetNextImageInList(image) == (Image *) NULL)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
image=SyncNextImageInList(image);
status=SetImageProgress(image,LoadImagesTag,TellBlob(image),
GetBlobSize(image));
if (status == MagickFalse)
break;
}
} while ((count != 0) && (viff_info.identifier == 0xab));
(void) CloseBlob(image);
if (status == MagickFalse)
return(DestroyImageList(image));
return(GetFirstImageInList(image));
} | 1 | [
"CWE-400"
] | ImageMagick | 23acdce3651447328c6ef71ede20ee60637ba39d | 101,804,520,406,687,290,000,000,000,000,000,000,000 | 648 | https://github.com/ImageMagick/ImageMagick/issues/1286 |
gnutls_global_set_mem_functions(gnutls_alloc_function alloc_func,
gnutls_alloc_function secure_alloc_func,
gnutls_is_secure_function is_secure_func,
gnutls_realloc_function realloc_func,
gnutls_free_function free_func)
{
_gnutls_debug_log("called the deprecated gnutls_global_set_mem_functions()\n");
} | 0 | [
"CWE-20"
] | gnutls | b0a3048e56611a2deee4976aeba3b8c0740655a6 | 82,825,337,665,688,160,000,000,000,000,000,000,000 | 8 | env: use secure_getenv when reading environment variables |
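A sketch of the pattern the message describes: prefer glibc's secure_getenv(3) so environment-driven behavior is ignored in setuid/setgid processes. The HAVE_SECURE_GETENV macro, the fallback, and the variable name are assumptions:

#define _GNU_SOURCE
#include <stdlib.h>

#ifdef HAVE_SECURE_GETENV
# define secure_getenv_compat secure_getenv
#else
# define secure_getenv_compat getenv   /* platforms without it */
#endif

const char *debug = secure_getenv_compat("GNUTLS_DEBUG_LEVEL");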
void KaxSimpleBlock::SetParent(KaxCluster & aParentCluster) {
KaxInternalBlock::SetParent( aParentCluster );
} | 0 | [
"CWE-200",
"CWE-399",
"CWE-119"
] | libmatroska | 0a2d3e3644a7453b6513db2f9bc270f77943573f | 226,368,017,995,053,200,000,000,000,000,000,000,000 | 3 | KaxBlockInternal: check EBML lace sizes against available buffer space |
lou_getEmphClasses(const char *tableList) {
const char *names[MAX_EMPH_CLASSES + 1];
unsigned int count = 0;
const TranslationTableHeader *table = _lou_getTranslationTable(tableList);
if (!table) return NULL;
while (count < MAX_EMPH_CLASSES) {
char const *name = table->emphClassNames[count];
if (!name) break;
names[count++] = name;
}
names[count++] = NULL;
{
unsigned int size = count * sizeof(names[0]);
char const **result = malloc(size);
if (!result) return NULL;
/* The void* cast is necessary to stop MSVC from warning about
* different 'const' qualifiers (C4090). */
memcpy((void *)result, names, size);
return result;
}
} | 0 | [
"CWE-787"
] | liblouis | 2e4772befb2b1c37cb4b9d6572945115ee28630a | 258,557,068,565,850,900,000,000,000,000,000,000,000 | 23 | Prevent invalid memory writes in compileRule
Thanks to Han Zheng for reporting it
Fixes #1214 |
bool SetOpAttrScalar(TFE_Context* ctx, TFE_Op* op, const char* key,
PyObject* py_value, TF_AttrType type,
tensorflow::gtl::FlatMap<string, int64_t>* attr_list_sizes,
TF_Status* status) {
if (type == TF_ATTR_STRING) {
tensorflow::StringPiece value;
if (!ParseStringValue(key, py_value, status, &value)) return false;
TFE_OpSetAttrString(op, key, value.data(), value.size());
} else if (type == TF_ATTR_INT) {
int64_t value;
if (!ParseInt64Value(key, py_value, status, &value)) return false;
TFE_OpSetAttrInt(op, key, value);
// attr_list_sizes is set for all int attributes (since at this point we are
// not aware if that attribute might be used to calculate the size of an
// output list or not).
if (attr_list_sizes != nullptr) (*attr_list_sizes)[key] = value;
} else if (type == TF_ATTR_FLOAT) {
float value;
if (!ParseFloatValue(key, py_value, status, &value)) return false;
TFE_OpSetAttrFloat(op, key, value);
} else if (type == TF_ATTR_BOOL) {
unsigned char value;
if (!ParseBoolValue(key, py_value, status, &value)) return false;
TFE_OpSetAttrBool(op, key, value);
} else if (type == TF_ATTR_TYPE) {
int value;
if (!ParseTypeValue(key, py_value, status, &value)) return false;
TFE_OpSetAttrType(op, key, static_cast<TF_DataType>(value));
} else if (type == TF_ATTR_SHAPE) {
if (py_value == Py_None) {
TFE_OpSetAttrShape(op, key, nullptr, -1, status);
} else {
if (!PySequence_Check(py_value)) {
TF_SetStatus(status, TF_INVALID_ARGUMENT,
tensorflow::strings::StrCat(
"Expecting None or sequence value for attr", key,
", got ", py_value->ob_type->tp_name)
.c_str());
return false;
}
const auto num_dims = TensorShapeNumDims(py_value);
if (num_dims == -1) {
TFE_OpSetAttrShape(op, key, nullptr, -1, status);
return true;
}
std::unique_ptr<int64_t[]> dims(new int64_t[num_dims]);
for (int i = 0; i < num_dims; ++i) {
tensorflow::Safe_PyObjectPtr inner_py_value(
PySequence_ITEM(py_value, i));
if (inner_py_value.get() == Py_None) {
dims[i] = -1;
} else if (!ParseDimensionValue(key, inner_py_value.get(), status,
&dims[i])) {
return false;
}
}
TFE_OpSetAttrShape(op, key, dims.get(), num_dims, status);
}
if (!status->status.ok()) return false;
} else if (type == TF_ATTR_FUNC) {
// Allow:
// (1) String function name, OR
// (2) A Python object with a .name attribute
// (A crude test for being a
// tensorflow.python.framework.function._DefinedFunction)
// (which is what the various "defun" or "Defun" decorators do).
// And in the future also allow an object that can encapsulate
// the function name and its attribute values.
tensorflow::StringPiece func_name;
if (!ParseStringValue(key, py_value, status, &func_name)) {
PyObject* name_attr = PyObject_GetAttrString(py_value, "name");
if (name_attr == nullptr ||
!ParseStringValue(key, name_attr, status, &func_name)) {
TF_SetStatus(
status, TF_INVALID_ARGUMENT,
tensorflow::strings::StrCat(
"unable to set function value attribute from a ",
py_value->ob_type->tp_name,
" object. If you think this is an error, please file an issue "
"at https://github.com/tensorflow/tensorflow/issues/new")
.c_str());
return false;
}
}
TF_SetStatus(status, TF_OK, "");
TFE_OpSetAttrFunctionName(op, key, func_name.data(), func_name.size());
} else {
TF_SetStatus(
status, TF_UNIMPLEMENTED,
tensorflow::strings::StrCat("Attr ", key, " has unhandled type ", type)
.c_str());
return false;
}
return true;
} | 1 | [
"CWE-476",
"CWE-908"
] | tensorflow | 237822b59fc504dda2c564787f5d3ad9c4aa62d9 | 9,094,187,509,084,024,000,000,000,000,000,000,000 | 95 | Fix tf.compat.v1.placeholder_with_default vulnerability with quantized types.
When iterating through the tensor to extract shape values, an underlying missing kernel
(`StridedSlice` for quantized types) causes an error, which then results in a `nullptr`
being passed to `ParseDimensionValue()`, causing a segfault.
The `nullptr` check allows the missing kernel error to propagate.
Adding the missing kernel registrations allows the shape values
to be extracted successfully.
PiperOrigin-RevId: 445045957 |
static void ib_uverbs_remove_one(struct ib_device *device, void *client_data)
{
struct ib_uverbs_device *uverbs_dev = client_data;
int wait_clients = 1;
if (!uverbs_dev)
return;
cdev_device_del(&uverbs_dev->cdev, &uverbs_dev->dev);
ida_free(&uverbs_ida, uverbs_dev->devnum);
if (device->ops.disassociate_ucontext) {
/* We disassociate HW resources and immediately return.
* Userspace will see a EIO errno for all future access.
* Upon returning, ib_device may be freed internally and is not
* valid any more.
* uverbs_device is still available until all clients close
* their files, then the uverbs device ref count will be zero
* and its resources will be freed.
* Note: At this point no more files can be opened since the
* cdev was deleted, however active clients can still issue
* commands and close their open files.
*/
ib_uverbs_free_hw_resources(uverbs_dev, device);
wait_clients = 0;
}
if (atomic_dec_and_test(&uverbs_dev->refcount))
ib_uverbs_comp_dev(uverbs_dev);
if (wait_clients)
wait_for_completion(&uverbs_dev->comp);
put_device(&uverbs_dev->dev);
} | 0 | [
"CWE-362",
"CWE-703",
"CWE-667"
] | linux | 04f5866e41fb70690e28397487d8bd8eea7d712a | 204,403,931,201,881,820,000,000,000,000,000,000,000 | 34 | coredump: fix race condition between mmget_not_zero()/get_task_mm() and core dumping
The core dumping code has always run without holding the mmap_sem for
writing, despite that is the only way to ensure that the entire vma
layout will not change from under it. Only using some signal
serialization on the processes belonging to the mm is not nearly enough.
This was pointed out earlier. For example in Hugh's post from Jul 2017:
https://lkml.kernel.org/r/[email protected]
"Not strictly relevant here, but a related note: I was very surprised
to discover, only quite recently, how handle_mm_fault() may be called
without down_read(mmap_sem) - when core dumping. That seems a
misguided optimization to me, which would also be nice to correct"
In particular because the growsdown and growsup can move the
vm_start/vm_end the various loops the core dump does around the vma will
not be consistent if page faults can happen concurrently.
Pretty much all users calling mmget_not_zero()/get_task_mm() and then
taking the mmap_sem had the potential to introduce unexpected side
effects in the core dumping code.
Adding mmap_sem for writing around the ->core_dump invocation is a
viable long term fix, but it requires removing all copy user and page
faults and to replace them with get_dump_page() for all binary formats
which is not suitable as a short term fix.
For the time being this solution manually covers the places that can
confuse the core dump either by altering the vma layout or the vma flags
while it runs. Once ->core_dump runs under mmap_sem for writing the
function mmget_still_valid() can be dropped.
Allowing mmap_sem protected sections to run in parallel with the
coredump provides some minor parallelism advantage to the swapoff code
(which seems to be safe enough by never mangling any vma field and can
keep doing swapins in parallel to the core dumping) and to some other
corner case.
In order to facilitate the backporting I added "Fixes: 86039bd3b4e6"
however the side effect of this same race condition in /proc/pid/mem
should be reproducible since before 2.6.12-rc2 so I couldn't add any
other "Fixes:" because there's no hash beyond the git genesis commit.
Because find_extend_vma() is the only location outside of the process
context that could modify the "mm" structures under mmap_sem for
reading, by adding the mmget_still_valid() check to it, all other cases
that take the mmap_sem for reading don't need the new check after
mmget_not_zero()/get_task_mm(). The expand_stack() in page fault
context also doesn't need the new check, because all tasks under core
dumping are frozen.
Link: http://lkml.kernel.org/r/[email protected]
Fixes: 86039bd3b4e6 ("userfaultfd: add new syscall to provide memory externalization")
Signed-off-by: Andrea Arcangeli <[email protected]>
Reported-by: Jann Horn <[email protected]>
Suggested-by: Oleg Nesterov <[email protected]>
Acked-by: Peter Xu <[email protected]>
Reviewed-by: Mike Rapoport <[email protected]>
Reviewed-by: Oleg Nesterov <[email protected]>
Reviewed-by: Jann Horn <[email protected]>
Acked-by: Jason Gunthorpe <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
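A sketch of the interim check the message introduces: mmap_sem readers that obtained the mm via mmget_not_zero()/get_task_mm() bail out if a core dump is in flight. Condensed; the helper's exact body and the caller are assumptions:

static inline bool mmget_still_valid(struct mm_struct *mm)
{
	return likely(!mm->core_state);	/* no core dump running */
}

/* caller pattern: */
down_read(&mm->mmap_sem);
if (!mmget_still_valid(mm))
	goto out_unlock;	/* vma layout may change under the dump */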
inline Padding3DValues ComputePadding3DValues(
int stride_height, int stride_width, int stride_depth,
int dilation_rate_height, int dilation_rate_width, int dilation_rate_depth,
int in_height, int in_width, int in_depth, int filter_height,
int filter_width, int filter_depth, TfLitePadding padding, int* out_height,
int* out_width, int* out_depth) {
*out_width = ComputeOutSize(padding, in_width, filter_width, stride_width,
dilation_rate_width);
*out_height = ComputeOutSize(padding, in_height, filter_height, stride_height,
dilation_rate_height);
*out_depth = ComputeOutSize(padding, in_depth, filter_depth, stride_depth,
dilation_rate_depth);
Padding3DValues padding_values;
int offset = 0;
padding_values.depth =
ComputePaddingWithOffset(stride_depth, dilation_rate_depth, in_depth,
filter_depth, *out_depth, &offset);
padding_values.depth_offset = offset;
padding_values.height =
ComputePaddingWithOffset(stride_height, dilation_rate_height, in_height,
filter_height, *out_height, &offset);
padding_values.height_offset = offset;
padding_values.width =
ComputePaddingWithOffset(stride_width, dilation_rate_width, in_width,
filter_width, *out_width, &offset);
padding_values.width_offset = offset;
return padding_values;
} | 0 | [
"CWE-369"
] | tensorflow | 49847ae69a4e1a97ae7f2db5e217c77721e37948 | 270,006,874,560,540,600,000,000,000,000,000,000,000 | 29 | Fix division by zero in TFLite padding.
PiperOrigin-RevId: 370777494
Change-Id: Ic1331e4a1603b9e4c8aa183012a6c8237410aa0f |
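A sketch of the guard this fix adds, written as C-compatible code; the surrounding ComputeOutSize() body is reconstructed from the padding math above and is an assumption:

int ComputeOutSize(TfLitePadding padding, int image_size, int filter_size,
                   int stride, int dilation_rate) {
  int effective_filter_size = (filter_size - 1) * dilation_rate + 1;
  if (stride == 0) return 0;          /* the added check: avoid x / 0 */
  switch (padding) {
    case kTfLitePaddingSame:
      return (image_size + stride - 1) / stride;
    case kTfLitePaddingValid:
      return (image_size + stride - effective_filter_size) / stride;
    default:
      return 0;
  }
}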
static void update_and_free_pages_bulk(struct hstate *h, struct list_head *list)
{
struct page *page, *t_page;
list_for_each_entry_safe(page, t_page, list, lru) {
update_and_free_page(h, page, false);
cond_resched();
}
} | 0 | [] | linux | a4a118f2eead1d6c49e00765de89878288d4b890 | 138,401,509,228,572,800,000,000,000,000,000,000,000 | 9 | hugetlbfs: flush TLBs correctly after huge_pmd_unshare
When __unmap_hugepage_range() calls to huge_pmd_unshare() succeed, a TLB
flush is missing. This TLB flush must be performed before releasing the
i_mmap_rwsem, in order to prevent an unshared PMDs page from being
released and reused before the TLB flush took place.
Arguably, a comprehensive solution would use mmu_gather interface to
batch the TLB flushes and the PMDs page release, however it is not an
easy solution: (1) try_to_unmap_one() and try_to_migrate_one() also call
huge_pmd_unshare() and they cannot use the mmu_gather interface; and (2)
deferring the release of the page reference for the PMDs page until
after i_mmap_rwsem is dropeed can confuse huge_pmd_unshare() into
thinking PMDs are shared when they are not.
Fix __unmap_hugepage_range() by adding the missing TLB flush, and
forcing a flush when unshare is successful.
Fixes: 24669e58477e ("hugetlb: use mmu_gather instead of a temporary linked list for accumulating pages") # 3.6
Signed-off-by: Nadav Amit <[email protected]>
Reviewed-by: Mike Kravetz <[email protected]>
Cc: Aneesh Kumar K.V <[email protected]>
Cc: KAMEZAWA Hiroyuki <[email protected]>
Cc: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
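A sketch of the described fix: track whether any huge_pmd_unshare() succeeded during the walk and force a TLB flush before the i_mmap lock is dropped. Condensed, with the surrounding walk assumed:

bool force_flush = false;

/* inside the pte walk: */
if (huge_pmd_unshare(mm, vma, &address, ptep)) {
	tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
	force_flush = true;
	continue;
}

/* after the walk, before releasing i_mmap_rwsem: */
if (force_flush)
	tlb_flush_mmu_tlbonly(tlb);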
static int dtls1_process_buffered_records(SSL *s)
{
pitem *item;
item = pqueue_peek(s->d1->unprocessed_rcds.q);
if (item) {
/* Check if epoch is current. */
if (s->d1->unprocessed_rcds.epoch != s->d1->r_epoch)
return (1); /* Nothing to do. */
/* Process all the records. */
while (pqueue_peek(s->d1->unprocessed_rcds.q)) {
dtls1_get_unprocessed_record(s);
if (!dtls1_process_record(s))
return (0);
if (dtls1_buffer_record(s, &(s->d1->processed_rcds),
s->s3->rrec.seq_num) < 0)
return -1;
}
}
/*
* sync epoch numbers once all the unprocessed records have been
* processed
*/
s->d1->processed_rcds.epoch = s->d1->r_epoch;
s->d1->unprocessed_rcds.epoch = s->d1->r_epoch + 1;
return (1);
} | 1 | [
"CWE-189"
] | openssl | fa75569758298e2930c78989b516cac937118acc | 288,222,067,568,821,400,000,000,000,000,000,000,000 | 30 | Fix DTLS unprocessed records bug
During a DTLS handshake we may get records destined for the next epoch
arrive before we have processed the CCS. In that case we can't decrypt or
verify the record yet, so we buffer it for later use. When we do receive
the CCS we work through the queue of unprocessed records and process them.
Unfortunately the act of processing wipes out any existing packet data
that we were still working through. This includes any records from the new
epoch that were in the same packet as the CCS. We should only process the
buffered records if we've not got any data left.
Reviewed-by: Richard Levitte <[email protected]> |
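A sketch of the guard described: leave the buffered queue alone while unread bytes of the current packet are still pending, so records from the new epoch that rode in alongside the CCS are not wiped out. The pending-data test shown is an assumption for this code base:

item = pqueue_peek(s->d1->unprocessed_rcds.q);
if (item) {
    if (s->d1->unprocessed_rcds.epoch != s->d1->r_epoch)
        return (1);             /* Nothing to do. */

    /* Don't drain the queue while the current packet still has unread
     * data: processing would clobber it. */
    if (s->packet_length > 0 || s->s3->rbuf.left > 0)
        return (1);

    /* ... process the queued records as before ... */
}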