func (string, lengths 0–484k) | target (int64, 0–1) | cwe (list, lengths 0–4) | project (string, 799 classes) | commit_id (string, length 40) | hash (float64) | size (int64, 1–24k) | message (string, lengths 0–13.3k) |
---|---|---|---|---|---|---|---|
int credssp_skip_ts_request(int length)
{
length += ber_skip_integer(2);
length += ber_skip_contextual_tag(3);
length += der_skip_sequence_tag(length);
return length;
}
| 1 |
[
"CWE-476"
] |
FreeRDP
|
0dc22d5a30a1c7d146b2a835b2032668127c33e9
| 239,449,554,855,092,540,000,000,000,000,000,000,000 | 7 |
Fixed a range of BER boundary encoding bugs which would occur when any NLA packet hit the 127 character mark. Removed ber#get_content_length as it was not behaving deterministically.
|
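The commit message above locates the bug class precisely: DER/BER definite-length encoding changes shape at 128 bytes, so length-skipping arithmetic that assumes a single length octet goes wrong as soon as an NLA packet crosses the 127-byte mark. A minimal sketch of that boundary (illustrative helper, not FreeRDP's ber_* API):

#include <stddef.h>

/* Number of octets a DER definite length occupies. Lengths 0..127 use
 * the short form (one octet); 128 and above use the long form (0x8N
 * prefix followed by N length octets) -- the 127/128 boundary the
 * commit message refers to. */
static int der_length_octets(size_t length)
{
    if (length <= 0x7F)
        return 1;               /* short form: 0xLL */
    else if (length <= 0xFF)
        return 2;               /* long form: 0x81 LL */
    else
        return 3;               /* long form: 0x82 LL LL (up to 65535) */
}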
static int netfront_probe(struct xenbus_device *dev,
const struct xenbus_device_id *id)
{
int err;
struct net_device *netdev;
struct netfront_info *info;
netdev = xennet_create_dev(dev);
if (IS_ERR(netdev)) {
err = PTR_ERR(netdev);
xenbus_dev_fatal(dev, err, "creating netdev");
return err;
}
info = netdev_priv(netdev);
dev_set_drvdata(&dev->dev, info);
#ifdef CONFIG_SYSFS
info->netdev->sysfs_groups[0] = &xennet_dev_group;
#endif
return 0;
}
| 0 |
[] |
linux
|
f63c2c2032c2e3caad9add3b82cc6e91c376fd26
| 46,533,134,299,272,760,000,000,000,000,000,000,000 | 22 |
xen-netfront: restore __skb_queue_tail() positioning in xennet_get_responses()
The commit referenced below moved the invocation past the "next" label,
without any explanation. In fact this allows misbehaving backends undue
control over the domain the frontend runs in, as earlier detected errors
require the skb to not be freed (it may be retained for later processing
via xennet_move_rx_slot(), or it may simply be unsafe to have it freed).
This is CVE-2022-33743 / XSA-405.
Fixes: 6c5aa6fc4def ("xen networking: add basic XDP support for xen-netfront")
Signed-off-by: Jan Beulich <[email protected]>
Reviewed-by: Juergen Gross <[email protected]>
Signed-off-by: Juergen Gross <[email protected]>
|
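The ordering point in the message above -- the skb must be queued before the code can jump to the shared "next" label -- is a general pattern: take ownership of a buffer before any branch whose error handling assumes the buffer is retained. A self-contained schematic of that pattern, not the actual xennet_get_responses() code:

#include <stddef.h>

struct buf { struct buf *next; };
struct queue { struct buf *head; };

static void queue_push(struct queue *q, struct buf *b)
{
    b->next = q->head;
    q->head = b;
}

/* Retain the buffer first; only then run checks that may bail out.
 * If the push happened after the checks, every early exit would free
 * or leak a buffer that later processing still expects to find queued. */
static int handle_slot(struct queue *tmpq, struct buf *b, int err)
{
    queue_push(tmpq, b);
    if (err)
        return err;
    /* normal processing of b happens here */
    return 0;
}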
rleUncompress (int inLength, int maxLength, const signed char in[], char out[])
{
char *outStart = out;
while (inLength > 0)
{
if (*in < 0)
{
int count = -((int)*in++);
inLength -= count + 1;
if (0 > (maxLength -= count))
return 0;
memcpy(out, in, count);
out += count;
in += count;
}
else
{
int count = *in++;
inLength -= 2;
if (0 > (maxLength -= count + 1))
return 0;
memset(out, *(char*)in, count+1);
out += count+1;
in++;
}
}
return out - outStart;
}
| 1 |
[
"CWE-125"
] |
openexr
|
e79d2296496a50826a15c667bf92bdc5a05518b4
| 150,910,726,358,335,670,000,000,000,000,000,000,000 | 35 |
fix memory leaks and invalid memory accesses
Signed-off-by: Peter Hillman <[email protected]>
|
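The rleUncompress() above is the pre-fix sample (target 1, CWE-125): both branches consume count (or count+1) bytes from in[] after checking only the output budget (maxLength), never the input budget (inLength), so a crafted run reads past the end of the input. A hedged sketch of the input-side checks the fix message implies, not the verbatim upstream patch:

#include <string.h>

static int rle_uncompress_checked(int inLength, int maxLength,
                                  const signed char in[], char out[])
{
    char *outStart = out;
    while (inLength > 0)
    {
        if (*in < 0)                    /* literal run: copy count bytes */
        {
            int count = -((int)*in++);
            if (count > --inLength)     /* input must actually contain them */
                return 0;
            if (count > maxLength)      /* output budget, as before */
                return 0;
            memcpy(out, in, (size_t)count);
            out += count; in += count;
            inLength -= count; maxLength -= count;
        }
        else                            /* repeat run: one byte, count+1 times */
        {
            int count = *in++;
            if (--inLength < 1)         /* the repeated byte must exist */
                return 0;
            if (count + 1 > maxLength)
                return 0;
            memset(out, *in, (size_t)(count + 1));
            out += count + 1; maxLength -= count + 1;
            ++in; --inLength;
        }
    }
    return (int)(out - outStart);
}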
void FilterManager::encodeTrailers(ActiveStreamEncoderFilter* filter,
ResponseTrailerMap& trailers) {
filter_manager_callbacks_.resetIdleTimer();
// Filter iteration may start at the current filter.
std::list<ActiveStreamEncoderFilterPtr>::iterator entry =
commonEncodePrefix(filter, true, FilterIterationStartState::CanStartFromCurrent);
for (; entry != encoder_filters_.end(); entry++) {
(*entry)->maybeEvaluateMatchTreeWithNewData(
[&](auto& matching_data) { matching_data.onResponseTrailers(trailers); });
if ((*entry)->skipFilter()) {
continue;
}
// If the filter pointed by entry has stopped for all frame type, return now.
if ((*entry)->stoppedAll()) {
return;
}
ASSERT(!(state_.filter_call_state_ & FilterCallState::EncodeTrailers));
state_.filter_call_state_ |= FilterCallState::EncodeTrailers;
FilterTrailersStatus status = (*entry)->handle_->encodeTrailers(trailers);
(*entry)->handle_->encodeComplete();
(*entry)->end_stream_ = true;
state_.filter_call_state_ &= ~FilterCallState::EncodeTrailers;
ENVOY_STREAM_LOG(trace, "encode trailers called: filter={} status={}", *this,
static_cast<const void*>((*entry).get()), static_cast<uint64_t>(status));
if (!(*entry)->commonHandleAfterTrailersCallback(status)) {
return;
}
}
filter_manager_callbacks_.encodeTrailers(trailers);
if (state_.saw_downstream_reset_) {
return;
}
maybeEndEncode(true);
}
| 0 |
[
"CWE-416"
] |
envoy
|
148de954ed3585d8b4298b424aa24916d0de6136
| 25,699,476,610,674,015,000,000,000,000,000,000,000 | 38 |
CVE-2021-43825
Response filter manager crash
Signed-off-by: Yan Avlasov <[email protected]>
|
static void nfs_set_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, int open_flags)
{
write_seqlock(&state->seqlock);
nfs_set_open_stateid_locked(state, stateid, open_flags);
write_sequnlock(&state->seqlock);
}
| 1 |
[
"CWE-703"
] |
linux
|
dc0b027dfadfcb8a5504f7d8052754bf8d501ab9
| 274,178,314,137,326,730,000,000,000,000,000,000,000 | 6 |
NFSv4: Convert the open and close ops to use fmode
Signed-off-by: Trond Myklebust <[email protected]>
|
static int multiSelect(
Parse *pParse, /* Parsing context */
Select *p, /* The right-most of SELECTs to be coded */
SelectDest *pDest /* What to do with query results */
){
int rc = SQLITE_OK; /* Success code from a subroutine */
Select *pPrior; /* Another SELECT immediately to our left */
Vdbe *v; /* Generate code to this VDBE */
SelectDest dest; /* Alternative data destination */
Select *pDelete = 0; /* Chain of simple selects to delete */
sqlite3 *db; /* Database connection */
/* Make sure there is no ORDER BY or LIMIT clause on prior SELECTs. Only
** the last (right-most) SELECT in the series may have an ORDER BY or LIMIT.
*/
assert( p && p->pPrior ); /* Calling function guarantees this much */
assert( (p->selFlags & SF_Recursive)==0 || p->op==TK_ALL || p->op==TK_UNION );
assert( p->selFlags & SF_Compound );
db = pParse->db;
pPrior = p->pPrior;
dest = *pDest;
if( pPrior->pOrderBy || pPrior->pLimit ){
sqlite3ErrorMsg(pParse,"%s clause should come after %s not before",
pPrior->pOrderBy!=0 ? "ORDER BY" : "LIMIT", selectOpName(p->op));
rc = 1;
goto multi_select_end;
}
v = sqlite3GetVdbe(pParse);
assert( v!=0 ); /* The VDBE already created by calling function */
/* Create the destination temporary table if necessary
*/
if( dest.eDest==SRT_EphemTab ){
assert( p->pEList );
sqlite3VdbeAddOp2(v, OP_OpenEphemeral, dest.iSDParm, p->pEList->nExpr);
dest.eDest = SRT_Table;
}
/* Special handling for a compound-select that originates as a VALUES clause.
*/
if( p->selFlags & SF_MultiValue ){
rc = multiSelectValues(pParse, p, &dest);
if( rc>=0 ) goto multi_select_end;
rc = SQLITE_OK;
}
/* Make sure all SELECTs in the statement have the same number of elements
** in their result sets.
*/
assert( p->pEList && pPrior->pEList );
assert( p->pEList->nExpr==pPrior->pEList->nExpr );
#ifndef SQLITE_OMIT_CTE
if( p->selFlags & SF_Recursive ){
generateWithRecursiveQuery(pParse, p, &dest);
}else
#endif
/* Compound SELECTs that have an ORDER BY clause are handled separately.
*/
if( p->pOrderBy ){
return multiSelectOrderBy(pParse, p, pDest);
}else{
#ifndef SQLITE_OMIT_EXPLAIN
if( pPrior->pPrior==0 ){
ExplainQueryPlan((pParse, 1, "COMPOUND QUERY"));
ExplainQueryPlan((pParse, 1, "LEFT-MOST SUBQUERY"));
}
#endif
/* Generate code for the left and right SELECT statements.
*/
switch( p->op ){
case TK_ALL: {
int addr = 0;
int nLimit;
assert( !pPrior->pLimit );
pPrior->iLimit = p->iLimit;
pPrior->iOffset = p->iOffset;
pPrior->pLimit = p->pLimit;
rc = sqlite3Select(pParse, pPrior, &dest);
p->pLimit = 0;
if( rc ){
goto multi_select_end;
}
p->pPrior = 0;
p->iLimit = pPrior->iLimit;
p->iOffset = pPrior->iOffset;
if( p->iLimit ){
addr = sqlite3VdbeAddOp1(v, OP_IfNot, p->iLimit); VdbeCoverage(v);
VdbeComment((v, "Jump ahead if LIMIT reached"));
if( p->iOffset ){
sqlite3VdbeAddOp3(v, OP_OffsetLimit,
p->iLimit, p->iOffset+1, p->iOffset);
}
}
ExplainQueryPlan((pParse, 1, "UNION ALL"));
rc = sqlite3Select(pParse, p, &dest);
testcase( rc!=SQLITE_OK );
pDelete = p->pPrior;
p->pPrior = pPrior;
p->nSelectRow = sqlite3LogEstAdd(p->nSelectRow, pPrior->nSelectRow);
if( pPrior->pLimit
&& sqlite3ExprIsInteger(pPrior->pLimit->pLeft, &nLimit)
&& nLimit>0 && p->nSelectRow > sqlite3LogEst((u64)nLimit)
){
p->nSelectRow = sqlite3LogEst((u64)nLimit);
}
if( addr ){
sqlite3VdbeJumpHere(v, addr);
}
break;
}
case TK_EXCEPT:
case TK_UNION: {
int unionTab; /* Cursor number of the temp table holding result */
u8 op = 0; /* One of the SRT_ operations to apply to self */
int priorOp; /* The SRT_ operation to apply to prior selects */
Expr *pLimit; /* Saved values of p->nLimit */
int addr;
SelectDest uniondest;
testcase( p->op==TK_EXCEPT );
testcase( p->op==TK_UNION );
priorOp = SRT_Union;
if( dest.eDest==priorOp ){
/* We can reuse a temporary table generated by a SELECT to our
** right.
*/
assert( p->pLimit==0 ); /* Not allowed on leftward elements */
unionTab = dest.iSDParm;
}else{
/* We will need to create our own temporary table to hold the
** intermediate results.
*/
unionTab = pParse->nTab++;
assert( p->pOrderBy==0 );
addr = sqlite3VdbeAddOp2(v, OP_OpenEphemeral, unionTab, 0);
assert( p->addrOpenEphm[0] == -1 );
p->addrOpenEphm[0] = addr;
findRightmost(p)->selFlags |= SF_UsesEphemeral;
assert( p->pEList );
}
/* Code the SELECT statements to our left
*/
assert( !pPrior->pOrderBy );
sqlite3SelectDestInit(&uniondest, priorOp, unionTab);
rc = sqlite3Select(pParse, pPrior, &uniondest);
if( rc ){
goto multi_select_end;
}
/* Code the current SELECT statement
*/
if( p->op==TK_EXCEPT ){
op = SRT_Except;
}else{
assert( p->op==TK_UNION );
op = SRT_Union;
}
p->pPrior = 0;
pLimit = p->pLimit;
p->pLimit = 0;
uniondest.eDest = op;
ExplainQueryPlan((pParse, 1, "%s USING TEMP B-TREE",
selectOpName(p->op)));
rc = sqlite3Select(pParse, p, &uniondest);
testcase( rc!=SQLITE_OK );
/* Query flattening in sqlite3Select() might refill p->pOrderBy.
** Be sure to delete p->pOrderBy, therefore, to avoid a memory leak. */
sqlite3ExprListDelete(db, p->pOrderBy);
pDelete = p->pPrior;
p->pPrior = pPrior;
p->pOrderBy = 0;
if( p->op==TK_UNION ){
p->nSelectRow = sqlite3LogEstAdd(p->nSelectRow, pPrior->nSelectRow);
}
sqlite3ExprDelete(db, p->pLimit);
p->pLimit = pLimit;
p->iLimit = 0;
p->iOffset = 0;
/* Convert the data in the temporary table into whatever form
** it is that we currently need.
*/
assert( unionTab==dest.iSDParm || dest.eDest!=priorOp );
if( dest.eDest!=priorOp ){
int iCont, iBreak, iStart;
assert( p->pEList );
iBreak = sqlite3VdbeMakeLabel(pParse);
iCont = sqlite3VdbeMakeLabel(pParse);
computeLimitRegisters(pParse, p, iBreak);
sqlite3VdbeAddOp2(v, OP_Rewind, unionTab, iBreak); VdbeCoverage(v);
iStart = sqlite3VdbeCurrentAddr(v);
selectInnerLoop(pParse, p, unionTab,
0, 0, &dest, iCont, iBreak);
sqlite3VdbeResolveLabel(v, iCont);
sqlite3VdbeAddOp2(v, OP_Next, unionTab, iStart); VdbeCoverage(v);
sqlite3VdbeResolveLabel(v, iBreak);
sqlite3VdbeAddOp2(v, OP_Close, unionTab, 0);
}
break;
}
default: assert( p->op==TK_INTERSECT ); {
int tab1, tab2;
int iCont, iBreak, iStart;
Expr *pLimit;
int addr;
SelectDest intersectdest;
int r1;
/* INTERSECT is different from the others since it requires
** two temporary tables. Hence it has its own case. Begin
** by allocating the tables we will need.
*/
tab1 = pParse->nTab++;
tab2 = pParse->nTab++;
assert( p->pOrderBy==0 );
addr = sqlite3VdbeAddOp2(v, OP_OpenEphemeral, tab1, 0);
assert( p->addrOpenEphm[0] == -1 );
p->addrOpenEphm[0] = addr;
findRightmost(p)->selFlags |= SF_UsesEphemeral;
assert( p->pEList );
/* Code the SELECTs to our left into temporary table "tab1".
*/
sqlite3SelectDestInit(&intersectdest, SRT_Union, tab1);
rc = sqlite3Select(pParse, pPrior, &intersectdest);
if( rc ){
goto multi_select_end;
}
/* Code the current SELECT into temporary table "tab2"
*/
addr = sqlite3VdbeAddOp2(v, OP_OpenEphemeral, tab2, 0);
assert( p->addrOpenEphm[1] == -1 );
p->addrOpenEphm[1] = addr;
p->pPrior = 0;
pLimit = p->pLimit;
p->pLimit = 0;
intersectdest.iSDParm = tab2;
ExplainQueryPlan((pParse, 1, "%s USING TEMP B-TREE",
selectOpName(p->op)));
rc = sqlite3Select(pParse, p, &intersectdest);
testcase( rc!=SQLITE_OK );
pDelete = p->pPrior;
p->pPrior = pPrior;
if( p->nSelectRow>pPrior->nSelectRow ){
p->nSelectRow = pPrior->nSelectRow;
}
sqlite3ExprDelete(db, p->pLimit);
p->pLimit = pLimit;
/* Generate code to take the intersection of the two temporary
** tables.
*/
assert( p->pEList );
iBreak = sqlite3VdbeMakeLabel(pParse);
iCont = sqlite3VdbeMakeLabel(pParse);
computeLimitRegisters(pParse, p, iBreak);
sqlite3VdbeAddOp2(v, OP_Rewind, tab1, iBreak); VdbeCoverage(v);
r1 = sqlite3GetTempReg(pParse);
iStart = sqlite3VdbeAddOp2(v, OP_RowData, tab1, r1);
sqlite3VdbeAddOp4Int(v, OP_NotFound, tab2, iCont, r1, 0);
VdbeCoverage(v);
sqlite3ReleaseTempReg(pParse, r1);
selectInnerLoop(pParse, p, tab1,
0, 0, &dest, iCont, iBreak);
sqlite3VdbeResolveLabel(v, iCont);
sqlite3VdbeAddOp2(v, OP_Next, tab1, iStart); VdbeCoverage(v);
sqlite3VdbeResolveLabel(v, iBreak);
sqlite3VdbeAddOp2(v, OP_Close, tab2, 0);
sqlite3VdbeAddOp2(v, OP_Close, tab1, 0);
break;
}
}
#ifndef SQLITE_OMIT_EXPLAIN
if( p->pNext==0 ){
ExplainQueryPlanPop(pParse);
}
#endif
}
if( pParse->nErr ) goto multi_select_end;
/* Compute collating sequences used by
** temporary tables needed to implement the compound select.
** Attach the KeyInfo structure to all temporary tables.
**
** This section is run by the right-most SELECT statement only.
** SELECT statements to the left always skip this part. The right-most
** SELECT might also skip this part if it has no ORDER BY clause and
** no temp tables are required.
*/
if( p->selFlags & SF_UsesEphemeral ){
int i; /* Loop counter */
KeyInfo *pKeyInfo; /* Collating sequence for the result set */
Select *pLoop; /* For looping through SELECT statements */
CollSeq **apColl; /* For looping through pKeyInfo->aColl[] */
int nCol; /* Number of columns in result set */
assert( p->pNext==0 );
nCol = p->pEList->nExpr;
pKeyInfo = sqlite3KeyInfoAlloc(db, nCol, 1);
if( !pKeyInfo ){
rc = SQLITE_NOMEM_BKPT;
goto multi_select_end;
}
for(i=0, apColl=pKeyInfo->aColl; i<nCol; i++, apColl++){
*apColl = multiSelectCollSeq(pParse, p, i);
if( 0==*apColl ){
*apColl = db->pDfltColl;
}
}
for(pLoop=p; pLoop; pLoop=pLoop->pPrior){
for(i=0; i<2; i++){
int addr = pLoop->addrOpenEphm[i];
if( addr<0 ){
/* If [0] is unused then [1] is also unused. So we can
** always safely abort as soon as the first unused slot is found */
assert( pLoop->addrOpenEphm[1]<0 );
break;
}
sqlite3VdbeChangeP2(v, addr, nCol);
sqlite3VdbeChangeP4(v, addr, (char*)sqlite3KeyInfoRef(pKeyInfo),
P4_KEYINFO);
pLoop->addrOpenEphm[i] = -1;
}
}
sqlite3KeyInfoUnref(pKeyInfo);
}
multi_select_end:
pDest->iSdst = dest.iSdst;
pDest->nSdst = dest.nSdst;
sqlite3SelectDelete(db, pDelete);
return rc;
}
| 0 |
[
"CWE-476"
] |
sqlite
|
8428b3b437569338a9d1e10c4cd8154acbe33089
| 142,655,477,716,831,860,000,000,000,000,000,000,000 | 343 |
Continuation of [e2bddcd4c55ba3cb]: Add another spot where it is necessary
to abort early due to prior errors in sqlite3WindowRewrite().
FossilOrigin-Name: cba2a2a44cdf138a629109bb0ad088ed4ef67fc66bed3e0373554681a39615d2
|
Pong(const std::string& cookie, const std::string& server = "")
: ClientProtocol::Message("PONG", ServerInstance->Config->GetServerName())
{
if (server.empty())
PushParamRef(ServerInstance->Config->GetServerName());
else
PushParam(server);
PushParamRef(cookie);
}
| 0 |
[
"CWE-200",
"CWE-732"
] |
inspircd
|
4350a11c663b0d75f8119743bffb7736d87abd4d
| 221,906,848,297,919,620,000,000,000,000,000,000,000 | 9 |
Fix sending malformed pong messages in some cases.
|
int SSL_CTX_get_quiet_shutdown(const SSL_CTX *ctx)
{
return (ctx->quiet_shutdown);
}
| 0 |
[
"CWE-310"
] |
openssl
|
56f1acf5ef8a432992497a04792ff4b3b2c6f286
| 196,426,916,310,974,570,000,000,000,000,000,000,000 | 4 |
Disable SSLv2 default build, default negotiation and weak ciphers.
SSLv2 is by default disabled at build-time. Builds that are not
configured with "enable-ssl2" will not support SSLv2. Even if
"enable-ssl2" is used, users who want to negotiate SSLv2 via the
version-flexible SSLv23_method() will need to explicitly call either
of:
SSL_CTX_clear_options(ctx, SSL_OP_NO_SSLv2);
or
SSL_clear_options(ssl, SSL_OP_NO_SSLv2);
as appropriate. Even if either of those is used, or the application
explicitly uses the version-specific SSLv2_method() or its client
or server variants, SSLv2 ciphers vulnerable to exhaustive search
key recovery have been removed. Specifically, the SSLv2 40-bit
EXPORT ciphers, and SSLv2 56-bit DES are no longer available.
Mitigation for CVE-2016-0800
Reviewed-by: Emilia Käsper <[email protected]>
|
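The message spells out the opt-in API exactly; in application code the context-wide form looks like the sketch below (the OpenSSL calls are real for that era, the wrapper function is illustrative):

#include <openssl/ssl.h>

/* Only meaningful on a build configured with "enable-ssl2": the
 * version-flexible method still refuses SSLv2 until the application
 * clears SSL_OP_NO_SSLv2 explicitly. */
static SSL_CTX *make_ctx_with_sslv2_optin(void)
{
    SSL_CTX *ctx = SSL_CTX_new(SSLv23_method());
    if (ctx != NULL)
        SSL_CTX_clear_options(ctx, SSL_OP_NO_SSLv2);
    return ctx;
}

/* The per-connection form is SSL_clear_options(ssl, SSL_OP_NO_SSLv2). */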
void set_key_field_ptr(KEY *key_info, const uchar *new_buf,
const uchar *old_buf)
{
KEY_PART_INFO *key_part= key_info->key_part;
uint key_parts= key_info->user_defined_key_parts;
uint i= 0;
my_ptrdiff_t diff= (new_buf - old_buf);
DBUG_ENTER("set_key_field_ptr");
do
{
key_part->field->move_field_offset(diff);
key_part++;
} while (++i < key_parts);
DBUG_VOID_RETURN;
}
| 0 |
[
"CWE-416"
] |
server
|
c02ebf3510850ba78a106be9974c94c3b97d8585
| 108,241,139,683,824,270,000,000,000,000,000,000,000 | 16 |
MDEV-24176 Preparations
1. moved fix_vcol_exprs() call to open_table()
mysql_alter_table() doesn't do lock_tables() so it cannot win from
fix_vcol_exprs() from there. Tests affected: main.default_session
2. Vanilla cleanups and comments.
|
void ComputeAsyncImpl(OpKernelContext* c, CollectiveExecutor* col_exec,
DoneCallback done) override {
// Allocate output on the first pass through this function. This must be
// done immediately, while we're still in the executor thread. Otherwise
// the memory is not guaranteed to be unused by any concurrently executing
// GPU kernel.
if (c->mutable_output(0) == nullptr) {
// Allocate the output tensor, trying to reuse the input.
Tensor* output = nullptr;
OP_REQUIRES_OK_ASYNC(c,
c->forward_input_or_allocate_output(
{0}, 0, c->input(0).shape(), &output),
done);
col_params_->instance.shape = c->input(0).shape();
}
if (!CanProceedWithCompute(c, col_exec, done)) return;
auto actual_done = [c, col_params = col_params_, done](const Status& s) {
VLOG(1) << "CollectiveReduceOpKernel ExecuteAsync done for collective "
<< c->op_kernel().name() << " device " << c->device()->name()
<< " group " << col_params->group.group_key << " instance "
<< col_params->instance.instance_key << " status " << s;
col_params->Unref();
OP_REQUIRES_OK_ASYNC(c, s, done);
done();
};
VLOG(1) << "CollectiveReduceOpKernel ExecuteAsync start for collective "
<< col_params_->name << " device " << c->device()->name()
<< " group " << col_params_->group.group_key << " instance "
<< col_params_->instance.instance_key;
col_params_->Ref();
col_exec->ExecuteAsync(c, col_params_, GetCollectiveKey(c), actual_done);
}
| 0 |
[
"CWE-416"
] |
tensorflow
|
ca38dab9d3ee66c5de06f11af9a4b1200da5ef75
| 156,135,762,445,643,800,000,000,000,000,000,000,000 | 33 |
Fix undefined behavior in CollectiveReduceV2 and others
We should not call done after it's moved.
PiperOrigin-RevId: 400838185
Change-Id: Ifc979740054b8f8c6f4d50acc89472fe60c4fdb1
|
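"We should not call done after it's moved" is the entire bug class: once the callback is moved into the continuation, any later use of the original done variable in the enclosing scope operates on a moved-from std::function. A standalone C++ sketch of the pattern, independent of TensorFlow's actual code:

#include <functional>
#include <iostream>
#include <utility>

using DoneCallback = std::function<void()>;

void compute_async(DoneCallback done) {
    // The continuation takes ownership of 'done' by move...
    auto actual_done = [d = std::move(done)]() { d(); };
    // ...so 'done' is now moved-from (typically empty); calling it here,
    // as the buggy revision effectively did, would throw
    // std::bad_function_call.
    if (!done)
        std::cout << "'done' is empty after the move\n";
    actual_done();   // the only remaining valid way to signal completion
}

int main() {
    compute_async([] { std::cout << "done\n"; });
}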
*/
YY_BUFFER_STATE re_yy_scan_string (yyconst char * yystr , yyscan_t yyscanner)
{
return re_yy_scan_bytes(yystr,strlen(yystr) ,yyscanner);
| 0 |
[
"CWE-476",
"CWE-703",
"CWE-125"
] |
yara
|
3119b232c9c453c98d8fa8b6ae4e37ba18117cd4
| 276,779,344,896,011,900,000,000,000,000,000,000,000 | 5 |
re_lexer: Make reading escape sequences more robust (#586)
* Add test for issue #503
* re_lexer: Make reading escape sequences more robust
This commit fixes parsing incomplete escape sequences at the end of a
regular expression and parsing things like \xxy (invalid hex digits)
which before were silently turned into (char)255.
Close #503
* Update re_lexer.c
|
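The fix description names two failure modes: an escape sequence truncated at the end of the pattern, and \xXY with non-hex digits being silently turned into (char)255. Both come down to validating before consuming. A hedged C-style sketch of robust \xHH parsing, not yara's actual lexer code:

#include <ctype.h>

/* Returns 0 and writes the byte to *out on success; -1 on a truncated
 * sequence ("\x" at end of input) or non-hex digits (e.g. "\xxy"). */
static int parse_hex_escape(const char *p, const char *end, unsigned char *out)
{
    if (end - p < 2)
        return -1;
    if (!isxdigit((unsigned char)p[0]) || !isxdigit((unsigned char)p[1]))
        return -1;
    unsigned hi = isdigit((unsigned char)p[0])
        ? (unsigned)(p[0] - '0')
        : (unsigned)(tolower((unsigned char)p[0]) - 'a' + 10);
    unsigned lo = isdigit((unsigned char)p[1])
        ? (unsigned)(p[1] - '0')
        : (unsigned)(tolower((unsigned char)p[1]) - 'a' + 10);
    *out = (unsigned char)((hi << 4) | lo);
    return 0;
}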
CImg(const t *const values, const unsigned int size_x, const unsigned int size_y,
const unsigned int size_z, const unsigned int size_c,
const char *const axes_order):_data(0),_is_shared(false) {
const size_t siz = safe_size(size_x,size_y,size_z,size_c);
if (values && siz) {
unsigned char s_code[4] = { 0,1,2,3 }, n_code[4] = { 0 };
for (unsigned int l = 0; axes_order[l]; ++l) {
int c = cimg::lowercase(axes_order[l]);
if (l>=4 || (c!='x' && c!='y' && c!='z' && c!='c')) { *s_code = 4; break; }
else { ++n_code[c%=4]; s_code[l] = c; }
}
if (*axes_order && *s_code<4 && *n_code<=1 && n_code[1]<=1 && n_code[2]<=1 && n_code[3]<=1) {
const unsigned int code = (s_code[0]<<12) | (s_code[1]<<8) | (s_code[2]<<4) | (s_code[3]);
int s0 = 0, s1 = 0, s2 = 0, s3 = 0;
const char *inv_order = 0;
switch (code) {
case 0x0123 : inv_order = "xyzc"; s0 = size_x; s1 = size_y; s2 = size_z; s3 = size_c; break; // xyzc
case 0x0132 : inv_order = "xyzc"; s0 = size_x; s1 = size_y; s2 = size_c; s3 = size_z; break; // xycz
case 0x0213 : inv_order = "xzyc"; s0 = size_x; s1 = size_z; s2 = size_y; s3 = size_c; break; // xzyc
case 0x0231 : inv_order = "xcyz"; s0 = size_x; s1 = size_z; s2 = size_c; s3 = size_y; break; // xzcy
case 0x0312 : inv_order = "xzcy"; s0 = size_x; s1 = size_c; s2 = size_y; s3 = size_z; break; // xcyz
case 0x0321 : inv_order = "xczy"; s0 = size_x; s1 = size_c; s2 = size_z; s3 = size_y; break; // xczy
case 0x1023 : inv_order = "yxzc"; s0 = size_y; s1 = size_x; s2 = size_z; s3 = size_c; break; // yxzc
case 0x1032 : inv_order = "yxcz"; s0 = size_y; s1 = size_x; s2 = size_c; s3 = size_z; break; // yxcz
case 0x1203 : inv_order = "zxyc"; s0 = size_y; s1 = size_z; s2 = size_x; s3 = size_c; break; // yzxc
case 0x1230 : inv_order = "cxyz"; s0 = size_y; s1 = size_z; s2 = size_c; s3 = size_x; break; // yzcx
case 0x1302 : inv_order = "zxcy"; s0 = size_y; s1 = size_c; s2 = size_x; s3 = size_z; break; // ycxz
case 0x1320 : inv_order = "cxzy"; s0 = size_y; s1 = size_c; s2 = size_z; s3 = size_x; break; // yczx
case 0x2013 : inv_order = "yzxc"; s0 = size_z; s1 = size_x; s2 = size_y; s3 = size_c; break; // zxyc
case 0x2031 : inv_order = "ycxz"; s0 = size_z; s1 = size_x; s2 = size_c; s3 = size_y; break; // zxcy
case 0x2103 : inv_order = "zyxc"; s0 = size_z; s1 = size_y; s2 = size_x; s3 = size_c; break; // zyxc
case 0x2130 : inv_order = "cyxz"; s0 = size_z; s1 = size_y; s2 = size_c; s3 = size_x; break; // zycx
case 0x2301 : inv_order = "zcxy"; s0 = size_z; s1 = size_c; s2 = size_x; s3 = size_y; break; // zcxy
case 0x2310 : inv_order = "czxy"; s0 = size_z; s1 = size_c; s2 = size_y; s3 = size_x; break; // zcyx
case 0x3012 : inv_order = "yzcx"; s0 = size_c; s1 = size_x; s2 = size_y; s3 = size_z; break; // cxyz
case 0x3021 : inv_order = "yczx"; s0 = size_c; s1 = size_x; s2 = size_z; s3 = size_y; break; // cxzy
case 0x3102 : inv_order = "zycx"; s0 = size_c; s1 = size_y; s2 = size_x; s3 = size_z; break; // cyxz
case 0x3120 : inv_order = "cyzx"; s0 = size_c; s1 = size_y; s2 = size_z; s3 = size_x; break; // cyzx
case 0x3201 : inv_order = "zcyx"; s0 = size_c; s1 = size_z; s2 = size_x; s3 = size_y; break; // czxy
case 0x3210 : inv_order = "czyx"; s0 = size_c; s1 = size_z; s2 = size_y; s3 = size_x; break; // czyx
}
CImg<t>(values,s0,s1,s2,s3,true).get_permute_axes(inv_order).move_to(*this);
} else {
_width = _height = _depth = _spectrum = 0; _data = 0;
throw CImgArgumentException(_cimg_instance
"CImg(): Invalid specified axes order '%s'.",
cimg_instance,
axes_order);
}
} else { _width = _height = _depth = _spectrum = 0; _is_shared = false; _data = 0; }
}
| 0 |
[
"CWE-770"
] |
cimg
|
619cb58dd90b4e03ac68286c70ed98acbefd1c90
| 190,136,719,436,184,900,000,000,000,000,000,000,000 | 51 |
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in the file do not exceed the file size.
|
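The one-line fix description -- dimensions decoded from the header must not imply more pixel data than the file holds -- is a generic loader invariant. A hedged, overflow-safe sketch (names illustrative, not CImg's internals):

#include <cstdint>
#include <stdexcept>

// Reject files whose declared geometry implies more payload bytes than
// remain in the file. Dividing instead of multiplying keeps the check
// itself from overflowing.
void check_declared_size(std::uint64_t width, std::uint64_t height,
                         std::uint64_t bytes_per_pixel,
                         std::uint64_t bytes_remaining)
{
    if (width == 0 || height == 0 || bytes_per_pixel == 0)
        return;   // nothing to read; callers handle empty images separately
    if (height > bytes_remaining / width / bytes_per_pixel)
        throw std::runtime_error("declared image dimensions exceed file size");
}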
int GetFlatIndex(int index, EvalData<T>* eval_data) {
int flat_index = 0;
int64_t left_pad = 0, right_pad = 0, dimension_index, index_in_input;
for (int i = 0; i < eval_data->num_dims; ++i) {
switch (eval_data->padding_matrix->type) {
case kTfLiteInt32:
GetPadding(eval_data->padding_matrix->data.i32, i, &left_pad,
&right_pad);
break;
case kTfLiteInt64:
GetPadding(eval_data->padding_matrix->data.i64, i, &left_pad,
&right_pad);
break;
default:
break;
}
dimension_index = index / (*eval_data->output_dims_num_elements)[i];
index_in_input =
GetInputDimension(dimension_index, left_pad, right_pad,
eval_data->input_dims->data[i], eval_data->offset);
flat_index += index_in_input * (*eval_data->input_dims_num_elements)[i];
index %= (*eval_data->output_dims_num_elements)[i];
}
return flat_index;
}
| 0 |
[
"CWE-125",
"CWE-787"
] |
tensorflow
|
1970c2158b1ffa416d159d03c3370b9a462aee35
| 16,956,164,945,226,420,000,000,000,000,000,000,000 | 25 |
[tflite]: Insert `nullptr` checks when obtaining tensors.
As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages.
We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`).
PiperOrigin-RevId: 332521299
Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
|
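The message describes a mechanical pattern: every tensor obtained via tflite::GetInput / tflite::GetOutput gets a null check before first use. In a kernel it reads like this sketch (TF_LITE_ENSURE and the accessors are TFLite's real API; the Prepare() body itself is illustrative):

#include "tensorflow/lite/kernels/kernel_util.h"

// Illustrative Prepare() showing the inserted nullptr checks.
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input = tflite::GetInput(context, node, 0);
  TF_LITE_ENSURE(context, input != nullptr);    // inserted check
  TfLiteTensor* output = tflite::GetOutput(context, node, 0);
  TF_LITE_ENSURE(context, output != nullptr);   // inserted check
  return context->ResizeTensor(context, output,
                               TfLiteIntArrayCopy(input->dims));
}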
unsigned evaluate_verbatim_subframe_(
FLAC__StreamEncoder *encoder,
const FLAC__int32 signal[],
unsigned blocksize,
unsigned subframe_bps,
FLAC__Subframe *subframe
)
{
unsigned estimate;
subframe->type = FLAC__SUBFRAME_TYPE_VERBATIM;
subframe->data.verbatim.data = signal;
estimate = FLAC__SUBFRAME_ZERO_PAD_LEN + FLAC__SUBFRAME_TYPE_LEN + FLAC__SUBFRAME_WASTED_BITS_FLAG_LEN + subframe->wasted_bits + (blocksize * subframe_bps);
#if SPOTCHECK_ESTIMATE
spotcheck_subframe_estimate_(encoder, blocksize, subframe_bps, subframe, estimate);
#else
(void)encoder;
#endif
return estimate;
}
| 0 |
[] |
flac
|
c06a44969c1145242a22f75fc8fb2e8b54c55303
| 252,700,098,001,757,180,000,000,000,000,000,000,000 | 24 |
flac : Fix for https://sourceforge.net/p/flac/bugs/425/
* flac/encode.c : Validate num_tracks field of cuesheet.
* libFLAC/stream_encoder.c : Add check for a NULL pointer.
* flac/encode.c : Improve bounds checking.
Closes: https://sourceforge.net/p/flac/bugs/425/
|
static int sysctl_follow_link(struct ctl_table_header **phead,
struct ctl_table **pentry)
{
struct ctl_table_header *head;
struct ctl_table_root *root;
struct ctl_table_set *set;
struct ctl_table *entry;
struct ctl_dir *dir;
int ret;
ret = 0;
spin_lock(&sysctl_lock);
root = (*pentry)->data;
set = lookup_header_set(root);
dir = xlate_dir(set, (*phead)->parent);
if (IS_ERR(dir))
ret = PTR_ERR(dir);
else {
const char *procname = (*pentry)->procname;
head = NULL;
entry = find_entry(&head, dir, procname, strlen(procname));
ret = -ENOENT;
if (entry && use_table(head)) {
unuse_table(*phead);
*phead = head;
*pentry = entry;
ret = 0;
}
}
spin_unlock(&sysctl_lock);
return ret;
}
| 0 |
[
"CWE-20",
"CWE-399"
] |
linux
|
93362fa47fe98b62e4a34ab408c4a418432e7939
| 186,367,979,372,816,530,000,000,000,000,000,000,000 | 33 |
sysctl: Drop reference added by grab_header in proc_sys_readdir
Fixes CVE-2016-9191, proc_sys_readdir doesn't drop reference
added by grab_header when return from !dir_emit_dots path.
It can cause any path called unregister_sysctl_table will
wait forever.
The calltrace of CVE-2016-9191:
[ 5535.960522] Call Trace:
[ 5535.963265] [<ffffffff817cdaaf>] schedule+0x3f/0xa0
[ 5535.968817] [<ffffffff817d33fb>] schedule_timeout+0x3db/0x6f0
[ 5535.975346] [<ffffffff817cf055>] ? wait_for_completion+0x45/0x130
[ 5535.982256] [<ffffffff817cf0d3>] wait_for_completion+0xc3/0x130
[ 5535.988972] [<ffffffff810d1fd0>] ? wake_up_q+0x80/0x80
[ 5535.994804] [<ffffffff8130de64>] drop_sysctl_table+0xc4/0xe0
[ 5536.001227] [<ffffffff8130de17>] drop_sysctl_table+0x77/0xe0
[ 5536.007648] [<ffffffff8130decd>] unregister_sysctl_table+0x4d/0xa0
[ 5536.014654] [<ffffffff8130deff>] unregister_sysctl_table+0x7f/0xa0
[ 5536.021657] [<ffffffff810f57f5>] unregister_sched_domain_sysctl+0x15/0x40
[ 5536.029344] [<ffffffff810d7704>] partition_sched_domains+0x44/0x450
[ 5536.036447] [<ffffffff817d0761>] ? __mutex_unlock_slowpath+0x111/0x1f0
[ 5536.043844] [<ffffffff81167684>] rebuild_sched_domains_locked+0x64/0xb0
[ 5536.051336] [<ffffffff8116789d>] update_flag+0x11d/0x210
[ 5536.057373] [<ffffffff817cf61f>] ? mutex_lock_nested+0x2df/0x450
[ 5536.064186] [<ffffffff81167acb>] ? cpuset_css_offline+0x1b/0x60
[ 5536.070899] [<ffffffff810fce3d>] ? trace_hardirqs_on+0xd/0x10
[ 5536.077420] [<ffffffff817cf61f>] ? mutex_lock_nested+0x2df/0x450
[ 5536.084234] [<ffffffff8115a9f5>] ? css_killed_work_fn+0x25/0x220
[ 5536.091049] [<ffffffff81167ae5>] cpuset_css_offline+0x35/0x60
[ 5536.097571] [<ffffffff8115aa2c>] css_killed_work_fn+0x5c/0x220
[ 5536.104207] [<ffffffff810bc83f>] process_one_work+0x1df/0x710
[ 5536.110736] [<ffffffff810bc7c0>] ? process_one_work+0x160/0x710
[ 5536.117461] [<ffffffff810bce9b>] worker_thread+0x12b/0x4a0
[ 5536.123697] [<ffffffff810bcd70>] ? process_one_work+0x710/0x710
[ 5536.130426] [<ffffffff810c3f7e>] kthread+0xfe/0x120
[ 5536.135991] [<ffffffff817d4baf>] ret_from_fork+0x1f/0x40
[ 5536.142041] [<ffffffff810c3e80>] ? kthread_create_on_node+0x230/0x230
One cgroup maintainer mentioned that "cgroup is trying to offline
a cpuset css, which takes place under cgroup_mutex. The offlining
ends up trying to drain active usages of a sysctl table which apparently
is not happening."
The real reason is that proc_sys_readdir doesn't drop reference added
by grab_header when return from !dir_emit_dots path. So this cpuset
offline path will wait here forever.
See here for details: http://www.openwall.com/lists/oss-security/2016/11/04/13
Fixes: f0c3b5093add ("[readdir] convert procfs")
Cc: [email protected]
Reported-by: CAI Qian <[email protected]>
Tested-by: Yang Shukui <[email protected]>
Signed-off-by: Zhou Chengming <[email protected]>
Acked-by: Al Viro <[email protected]>
Signed-off-by: Eric W. Biederman <[email protected]>
|
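The root cause is an acquire/early-return mismatch: grab_header() takes a reference that the !dir_emit_dots path returns without dropping, so unregister_sysctl_table() later waits forever on a count that never reaches zero. A self-contained schematic of the bug class and the fix shape, not the verbatim kernel patch:

struct header { int refcount; };

static void grab(struct header *h) { h->refcount++; }
static void drop(struct header *h) { h->refcount--; }

/* Every early return must pair with the drop that the normal exit path
 * already performs; the missing pairing is exactly what pinned the
 * sysctl table's usage count forever. */
static int readdir_like(struct header *h, int emit_dots_ok)
{
    grab(h);
    if (!emit_dots_ok) {
        drop(h);   /* the fix: this release was missing */
        return 0;
    }
    /* ... iterate entries ... */
    drop(h);
    return 0;
}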
static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
struct ipmi_smi_msg *msg)
{
struct cmd_rcvr *rcvr;
int rv = 0;
unsigned char netfn;
unsigned char cmd;
unsigned char chan;
struct ipmi_user *user = NULL;
struct ipmi_system_interface_addr *smi_addr;
struct ipmi_recv_msg *recv_msg;
/*
* We expect the OEM SW to perform error checking
* so we just do some basic sanity checks
*/
if (msg->rsp_size < 4) {
/* Message not big enough, just ignore it. */
ipmi_inc_stat(intf, invalid_commands);
return 0;
}
if (msg->rsp[2] != 0) {
/* An error getting the response, just ignore it. */
return 0;
}
/*
* This is an OEM Message so the OEM needs to know how
* handle the message. We do no interpretation.
*/
netfn = msg->rsp[0] >> 2;
cmd = msg->rsp[1];
chan = msg->rsp[3] & 0xf;
rcu_read_lock();
rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
if (rcvr) {
user = rcvr->user;
kref_get(&user->refcount);
} else
user = NULL;
rcu_read_unlock();
if (user == NULL) {
/* We didn't find a user, just give up. */
ipmi_inc_stat(intf, unhandled_commands);
/*
* Don't do anything with these messages, just allow
* them to be freed.
*/
rv = 0;
} else {
recv_msg = ipmi_alloc_recv_msg();
if (!recv_msg) {
/*
* We couldn't allocate memory for the
* message, so requeue it for handling
* later.
*/
rv = 1;
kref_put(&user->refcount, free_user);
} else {
/*
* OEM Messages are expected to be delivered via
* the system interface to SMS software. We might
* need to visit this again depending on OEM
* requirements
*/
smi_addr = ((struct ipmi_system_interface_addr *)
&recv_msg->addr);
smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
smi_addr->channel = IPMI_BMC_CHANNEL;
smi_addr->lun = msg->rsp[0] & 3;
recv_msg->user = user;
recv_msg->user_msg_data = NULL;
recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
recv_msg->msg.netfn = msg->rsp[0] >> 2;
recv_msg->msg.cmd = msg->rsp[1];
recv_msg->msg.data = recv_msg->msg_data;
/*
* The message starts at byte 4 which follows the
* Channel Byte in the "GET MESSAGE" command
*/
recv_msg->msg.data_len = msg->rsp_size - 4;
memcpy(recv_msg->msg_data, &msg->rsp[4],
msg->rsp_size - 4);
if (deliver_response(intf, recv_msg))
ipmi_inc_stat(intf, unhandled_commands);
else
ipmi_inc_stat(intf, handled_commands);
}
}
return rv;
}
| 0 |
[
"CWE-416",
"CWE-284"
] |
linux
|
77f8269606bf95fcb232ee86f6da80886f1dfae8
| 104,133,242,453,626,180,000,000,000,000,000,000,000 | 100 |
ipmi: fix use-after-free of user->release_barrier.rda
When we do the following test, we got oops in ipmi_msghandler driver
while((1))
do
service ipmievd restart & service ipmievd restart
done
---------------------------------------------------------------
[ 294.230186] Unable to handle kernel paging request at virtual address 0000803fea6ea008
[ 294.230188] Mem abort info:
[ 294.230190] ESR = 0x96000004
[ 294.230191] Exception class = DABT (current EL), IL = 32 bits
[ 294.230193] SET = 0, FnV = 0
[ 294.230194] EA = 0, S1PTW = 0
[ 294.230195] Data abort info:
[ 294.230196] ISV = 0, ISS = 0x00000004
[ 294.230197] CM = 0, WnR = 0
[ 294.230199] user pgtable: 4k pages, 48-bit VAs, pgdp = 00000000a1c1b75a
[ 294.230201] [0000803fea6ea008] pgd=0000000000000000
[ 294.230204] Internal error: Oops: 96000004 [#1] SMP
[ 294.235211] Modules linked in: nls_utf8 isofs rpcrdma ib_iser ib_srpt target_core_mod ib_srp scsi_transport_srp ib_ipoib rdma_ucm ib_umad rdma_cm ib_cm iw_cm dm_mirror dm_region_hash dm_log dm_mod aes_ce_blk crypto_simd cryptd aes_ce_cipher ghash_ce sha2_ce ses sha256_arm64 sha1_ce hibmc_drm hisi_sas_v2_hw enclosure sg hisi_sas_main sbsa_gwdt ip_tables mlx5_ib ib_uverbs marvell ib_core mlx5_core ixgbe ipmi_si mdio hns_dsaf ipmi_devintf ipmi_msghandler hns_enet_drv hns_mdio
[ 294.277745] CPU: 3 PID: 0 Comm: swapper/3 Kdump: loaded Not tainted 5.0.0-rc2+ #113
[ 294.285511] Hardware name: Huawei TaiShan 2280 /BC11SPCD, BIOS 1.37 11/21/2017
[ 294.292835] pstate: 80000005 (Nzcv daif -PAN -UAO)
[ 294.297695] pc : __srcu_read_lock+0x38/0x58
[ 294.301940] lr : acquire_ipmi_user+0x2c/0x70 [ipmi_msghandler]
[ 294.307853] sp : ffff00001001bc80
[ 294.311208] x29: ffff00001001bc80 x28: ffff0000117e5000
[ 294.316594] x27: 0000000000000000 x26: dead000000000100
[ 294.321980] x25: dead000000000200 x24: ffff803f6bd06800
[ 294.327366] x23: 0000000000000000 x22: 0000000000000000
[ 294.332752] x21: ffff00001001bd04 x20: ffff80df33d19018
[ 294.338137] x19: ffff80df33d19018 x18: 0000000000000000
[ 294.343523] x17: 0000000000000000 x16: 0000000000000000
[ 294.348908] x15: 0000000000000000 x14: 0000000000000002
[ 294.354293] x13: 0000000000000000 x12: 0000000000000000
[ 294.359679] x11: 0000000000000000 x10: 0000000000100000
[ 294.365065] x9 : 0000000000000000 x8 : 0000000000000004
[ 294.370451] x7 : 0000000000000000 x6 : ffff80df34558678
[ 294.375836] x5 : 000000000000000c x4 : 0000000000000000
[ 294.381221] x3 : 0000000000000001 x2 : 0000803fea6ea000
[ 294.386607] x1 : 0000803fea6ea008 x0 : 0000000000000001
[ 294.391994] Process swapper/3 (pid: 0, stack limit = 0x0000000083087293)
[ 294.398791] Call trace:
[ 294.401266] __srcu_read_lock+0x38/0x58
[ 294.405154] acquire_ipmi_user+0x2c/0x70 [ipmi_msghandler]
[ 294.410716] deliver_response+0x80/0xf8 [ipmi_msghandler]
[ 294.416189] deliver_local_response+0x28/0x68 [ipmi_msghandler]
[ 294.422193] handle_one_recv_msg+0x158/0xcf8 [ipmi_msghandler]
[ 294.432050] handle_new_recv_msgs+0xc0/0x210 [ipmi_msghandler]
[ 294.441984] smi_recv_tasklet+0x8c/0x158 [ipmi_msghandler]
[ 294.451618] tasklet_action_common.isra.5+0x88/0x138
[ 294.460661] tasklet_action+0x2c/0x38
[ 294.468191] __do_softirq+0x120/0x2f8
[ 294.475561] irq_exit+0x134/0x140
[ 294.482445] __handle_domain_irq+0x6c/0xc0
[ 294.489954] gic_handle_irq+0xb8/0x178
[ 294.497037] el1_irq+0xb0/0x140
[ 294.503381] arch_cpu_idle+0x34/0x1a8
[ 294.510096] do_idle+0x1d4/0x290
[ 294.516322] cpu_startup_entry+0x28/0x30
[ 294.523230] secondary_start_kernel+0x184/0x1d0
[ 294.530657] Code: d538d082 d2800023 8b010c81 8b020021 (c85f7c25)
[ 294.539746] ---[ end trace 8a7a880dee570b29 ]---
[ 294.547341] Kernel panic - not syncing: Fatal exception in interrupt
[ 294.556837] SMP: stopping secondary CPUs
[ 294.563996] Kernel Offset: disabled
[ 294.570515] CPU features: 0x002,21006008
[ 294.577638] Memory Limit: none
[ 294.587178] Starting crashdump kernel...
[ 294.594314] Bye!
Because the user->release_barrier.rda is freed in ipmi_destroy_user(), but
the refcount is not zero, when acquire_ipmi_user() uses user->release_barrier.rda
in __srcu_read_lock(), it causes oops.
Fix this by calling cleanup_srcu_struct() when the refcount is zero.
Fixes: e86ee2d44b44 ("ipmi: Rework locking and shutdown for hot remove")
Cc: [email protected] # 4.18
Signed-off-by: Yang Yingliang <[email protected]>
Signed-off-by: Corey Minyard <[email protected]>
|
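The message states the fix directly: cleanup_srcu_struct(&user->release_barrier) must not run until the user's refcount is zero, i.e. it belongs in the kref release callback rather than in ipmi_destroy_user(). A schematic of where the call moves (kref, container_of and cleanup_srcu_struct are real kernel APIs; the struct layout is assumed from the trace above):

/* Sketch: the release function runs only when no reader can still be
 * inside __srcu_read_lock() on release_barrier, so this is the one
 * safe place to tear the srcu_struct down. */
static void free_user(struct kref *ref)
{
    struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
    cleanup_srcu_struct(&user->release_barrier);   /* moved here */
    kfree(user);
}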
tar_sparse_extract_region (struct tar_sparse_file *file, size_t i)
{
if (file->optab->extract_region)
return file->optab->extract_region (file, i);
return false;
}
| 0 |
[] |
tar
|
c15c42ccd1e2377945fd0414eca1a49294bff454
| 177,901,471,073,697,970,000,000,000,000,000,000,000 | 6 |
Fix CVE-2018-20482
* NEWS: Update.
* src/sparse.c (sparse_dump_region): Handle short read condition.
(sparse_extract_region,check_data_region): Fix dumped_size calculation.
Handle short read condition.
(pax_decode_header): Fix dumped_size calculation.
* tests/Makefile.am: Add new testcases.
* tests/testsuite.at: Likewise.
* tests/sptrcreat.at: New file.
* tests/sptrdiff00.at: New file.
* tests/sptrdiff01.at: New file.
|
int hostap_set_antsel(local_info_t *local)
{
u16 val;
int ret = 0;
if (local->antsel_tx != HOSTAP_ANTSEL_DO_NOT_TOUCH &&
local->func->cmd(local->dev, HFA384X_CMDCODE_READMIF,
HFA386X_CR_TX_CONFIGURE,
NULL, &val) == 0) {
val &= ~(BIT(2) | BIT(1));
switch (local->antsel_tx) {
case HOSTAP_ANTSEL_DIVERSITY:
val |= BIT(1);
break;
case HOSTAP_ANTSEL_LOW:
break;
case HOSTAP_ANTSEL_HIGH:
val |= BIT(2);
break;
}
if (local->func->cmd(local->dev, HFA384X_CMDCODE_WRITEMIF,
HFA386X_CR_TX_CONFIGURE, &val, NULL)) {
printk(KERN_INFO "%s: setting TX AntSel failed\n",
local->dev->name);
ret = -1;
}
}
if (local->antsel_rx != HOSTAP_ANTSEL_DO_NOT_TOUCH &&
local->func->cmd(local->dev, HFA384X_CMDCODE_READMIF,
HFA386X_CR_RX_CONFIGURE,
NULL, &val) == 0) {
val &= ~(BIT(1) | BIT(0));
switch (local->antsel_rx) {
case HOSTAP_ANTSEL_DIVERSITY:
break;
case HOSTAP_ANTSEL_LOW:
val |= BIT(0);
break;
case HOSTAP_ANTSEL_HIGH:
val |= BIT(0) | BIT(1);
break;
}
if (local->func->cmd(local->dev, HFA384X_CMDCODE_WRITEMIF,
HFA386X_CR_RX_CONFIGURE, &val, NULL)) {
printk(KERN_INFO "%s: setting RX AntSel failed\n",
local->dev->name);
ret = -1;
}
}
return ret;
}
| 0 |
[
"CWE-703",
"CWE-264"
] |
linux
|
550fd08c2cebad61c548def135f67aba284c6162
| 81,146,764,554,417,200,000,000,000,000,000,000,000 | 55 |
net: Audit drivers to identify those needing IFF_TX_SKB_SHARING cleared
After the last patch, we are left in a state in which only drivers calling
ether_setup have IFF_TX_SKB_SHARING set (we assume that drivers touching real
hardware call ether_setup for their net_devices and don't hold any state in
their skbs). There are a handful of drivers that violate this assumption of
course, and need to be fixed up. This patch identifies those drivers, and marks
them as not being able to support the safe transmission of skbs by clearing the
IFF_TX_SKB_SHARING flag in priv_flags
Signed-off-by: Neil Horman <[email protected]>
CC: Karsten Keil <[email protected]>
CC: "David S. Miller" <[email protected]>
CC: Jay Vosburgh <[email protected]>
CC: Andy Gospodarek <[email protected]>
CC: Patrick McHardy <[email protected]>
CC: Krzysztof Halasa <[email protected]>
CC: "John W. Linville" <[email protected]>
CC: Greg Kroah-Hartman <[email protected]>
CC: Marcel Holtmann <[email protected]>
CC: Johannes Berg <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
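The audit the message describes reduces to one line per offending driver: after ether_setup() installs its defaults (which include IFF_TX_SKB_SHARING), a driver that keeps state in its skbs clears the flag again. A kernel driver fragment as a sketch (the flag and field are the real kernel names; the setup function is illustrative):

static void example_driver_setup(struct net_device *dev)
{
    ether_setup(dev);                          /* sets IFF_TX_SKB_SHARING */
    dev->priv_flags &= ~IFF_TX_SKB_SHARING;    /* this driver holds state in skbs */
}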
userauth_hostbased(struct ssh *ssh)
{
Authctxt *authctxt = ssh->authctxt;
struct sshbuf *b;
struct sshkey *key = NULL;
char *pkalg, *cuser, *chost;
u_char *pkblob, *sig;
size_t alen, blen, slen;
int r, pktype, authenticated = 0;
/* XXX use sshkey_froms() */
if ((r = sshpkt_get_cstring(ssh, &pkalg, &alen)) != 0 ||
(r = sshpkt_get_string(ssh, &pkblob, &blen)) != 0 ||
(r = sshpkt_get_cstring(ssh, &chost, NULL)) != 0 ||
(r = sshpkt_get_cstring(ssh, &cuser, NULL)) != 0 ||
(r = sshpkt_get_string(ssh, &sig, &slen)) != 0)
fatal("%s: packet parsing: %s", __func__, ssh_err(r));
debug("%s: cuser %s chost %s pkalg %s slen %zu", __func__,
cuser, chost, pkalg, slen);
#ifdef DEBUG_PK
debug("signature:");
sshbuf_dump_data(sig, slen, stderr);
#endif
pktype = sshkey_type_from_name(pkalg);
if (pktype == KEY_UNSPEC) {
/* this is perfectly legal */
logit("%s: unsupported public key algorithm: %s",
__func__, pkalg);
goto done;
}
if ((r = sshkey_from_blob(pkblob, blen, &key)) != 0) {
error("%s: key_from_blob: %s", __func__, ssh_err(r));
goto done;
}
if (key == NULL) {
error("%s: cannot decode key: %s", __func__, pkalg);
goto done;
}
if (key->type != pktype) {
error("%s: type mismatch for decoded key "
"(received %d, expected %d)", __func__, key->type, pktype);
goto done;
}
if (sshkey_type_plain(key->type) == KEY_RSA &&
(ssh->compat & SSH_BUG_RSASIGMD5) != 0) {
error("Refusing RSA key because peer uses unsafe "
"signature format");
goto done;
}
if (match_pattern_list(pkalg, options.hostbased_key_types, 0) != 1) {
logit("%s: key type %s not in HostbasedAcceptedKeyTypes",
__func__, sshkey_type(key));
goto done;
}
if (!authctxt->valid || authctxt->user == NULL) {
debug2("%s: disabled because of invalid user", __func__);
goto done;
}
if ((b = sshbuf_new()) == NULL)
fatal("%s: sshbuf_new failed", __func__);
/* reconstruct packet */
if ((r = sshbuf_put_string(b, session_id2, session_id2_len)) != 0 ||
(r = sshbuf_put_u8(b, SSH2_MSG_USERAUTH_REQUEST)) != 0 ||
(r = sshbuf_put_cstring(b, authctxt->user)) != 0 ||
(r = sshbuf_put_cstring(b, authctxt->service)) != 0 ||
(r = sshbuf_put_cstring(b, "hostbased")) != 0 ||
(r = sshbuf_put_string(b, pkalg, alen)) != 0 ||
(r = sshbuf_put_string(b, pkblob, blen)) != 0 ||
(r = sshbuf_put_cstring(b, chost)) != 0 ||
(r = sshbuf_put_cstring(b, cuser)) != 0)
fatal("%s: buffer error: %s", __func__, ssh_err(r));
#ifdef DEBUG_PK
sshbuf_dump(b, stderr);
#endif
auth2_record_info(authctxt,
"client user \"%.100s\", client host \"%.100s\"", cuser, chost);
/* test for allowed key and correct signature */
authenticated = 0;
if (PRIVSEP(hostbased_key_allowed(authctxt->pw, cuser, chost, key)) &&
PRIVSEP(sshkey_verify(key, sig, slen,
sshbuf_ptr(b), sshbuf_len(b), pkalg, ssh->compat)) == 0)
authenticated = 1;
auth2_record_key(authctxt, authenticated, key);
sshbuf_free(b);
done:
debug2("%s: authenticated %d", __func__, authenticated);
sshkey_free(key);
free(pkalg);
free(pkblob);
free(cuser);
free(chost);
free(sig);
return authenticated;
}
| 0 |
[
"CWE-200",
"CWE-362",
"CWE-703"
] |
src
|
779974d35b4859c07bc3cb8a12c74b43b0a7d1e0
| 122,076,013,245,002,240,000,000,000,000,000,000,000 | 100 |
delay bailout for invalid authenticating user until after the packet
containing the request has been fully parsed. Reported by Dariusz Tytko
and Michał Sajdak; ok deraadt
|
length_base_file_name(tiffsep_device * pdev, bool *double_f)
{
int base_filename_length = strlen(pdev->fname);
#define REMOVE_TIF_FROM_BASENAME 1
#if REMOVE_TIF_FROM_BASENAME
if (base_filename_length > 4 &&
pdev->fname[base_filename_length - 4] == '.' &&
toupper(pdev->fname[base_filename_length - 3]) == 'T' &&
toupper(pdev->fname[base_filename_length - 2]) == 'I' &&
toupper(pdev->fname[base_filename_length - 1]) == 'F') {
base_filename_length -= 4;
*double_f = false;
}
else if (base_filename_length > 5 &&
pdev->fname[base_filename_length - 5] == '.' &&
toupper(pdev->fname[base_filename_length - 4]) == 'T' &&
toupper(pdev->fname[base_filename_length - 3]) == 'I' &&
toupper(pdev->fname[base_filename_length - 2]) == 'F' &&
toupper(pdev->fname[base_filename_length - 1]) == 'F') {
base_filename_length -= 5;
*double_f = true;
}
#endif
#undef REMOVE_TIF_FROM_BASENAME
return base_filename_length;
}
| 0 |
[
"CWE-476"
] |
ghostpdl
|
aadb53eb834b3def3ef68d78865ff87a68901804
| 205,516,631,850,290,900,000,000,000,000,000,000,000 | 28 |
Tiffsep and Tiffsep1 - abort on multi-page input without %d OutputFile
Bug #701821 "Segmentation fault at tiff//libtiff/tif_dirinfo.c:513 in TIFFFindField"
The tiffsep and tiffsep1 only set 'code' to an error when an attempt is
made to write a second output file without using %d in the OutputFile
specification.
This causes problems later when attempting to process the files. The
devices should exit without trying to further process the pages under
these conditions and this commit returns immediately on error.
The other devices like this already return immediately on error, it
looks like the code here was copied between devices without realising
that the tiffsep and tiffsep1 devices didn't exit when the error
condition was set.
|
static void posix_cpu_timers_init_group(struct signal_struct *sig)
{
/* Thread group counters. */
thread_group_cputime_init(sig);
/* Expiration times and increments. */
sig->it[CPUCLOCK_PROF].expires = cputime_zero;
sig->it[CPUCLOCK_PROF].incr = cputime_zero;
sig->it[CPUCLOCK_VIRT].expires = cputime_zero;
sig->it[CPUCLOCK_VIRT].incr = cputime_zero;
/* Cached expiration times. */
sig->cputime_expires.prof_exp = cputime_zero;
sig->cputime_expires.virt_exp = cputime_zero;
sig->cputime_expires.sched_exp = 0;
if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
sig->cputime_expires.prof_exp =
secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
sig->cputimer.running = 1;
}
/* The timer lists. */
INIT_LIST_HEAD(&sig->cpu_timers[0]);
INIT_LIST_HEAD(&sig->cpu_timers[1]);
INIT_LIST_HEAD(&sig->cpu_timers[2]);
}
| 0 |
[
"CWE-20",
"CWE-703",
"CWE-400"
] |
linux
|
b69f2292063d2caf37ca9aec7d63ded203701bf3
| 41,062,502,672,899,470,000,000,000,000,000,000,000 | 27 |
block: Fix io_context leak after failure of clone with CLONE_IO
With CLONE_IO, parent's io_context->nr_tasks is incremented, but never
decremented whenever copy_process() fails afterwards, which prevents
exit_io_context() from calling IO schedulers exit functions.
Give a task_struct to exit_io_context(), and call exit_io_context() instead of
put_io_context() in copy_process() cleanup path.
Signed-off-by: Louis Rilling <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
|
ServiceProtoQueryAliases(ServiceConnection *conn,
ProtoRequest *req)
{
VGAuthError err;
gchar *packet;
int num;
ServiceAlias *aList;
/*
* The alias code will do argument validation.
*/
err = ServiceAliasQueryAliases(req->reqData.queryAliases.userName,
&num,
&aList);
if (err != VGAUTH_E_OK) {
packet = Proto_MakeErrorReply(conn, req, err, "queryAliases failed");
} else {
int i;
gchar *endPacket;
packet = g_markup_printf_escaped(VGAUTH_QUERYALIASES_REPLY_FORMAT_START,
req->sequenceNumber);
// now the aliases
for (i = 0; i < num; i++) {
gchar *certPacket;
int j;
certPacket = g_markup_printf_escaped(VGAUTH_ALIAS_FORMAT_START,
aList[i].pemCert);
packet = Proto_ConcatXMLStrings(packet, certPacket);
for (j = 0; j < aList[i].num; j++) {
gchar *aiPacket;
ServiceAliasInfo *ai = &(aList[i].infos[j]);
if (ai->type == SUBJECT_TYPE_ANY) {
aiPacket = g_markup_printf_escaped(VGAUTH_ANYALIASINFO_FORMAT,
ai->comment);
} else if (ai->type == SUBJECT_TYPE_NAMED) {
aiPacket = g_markup_printf_escaped(VGAUTH_NAMEDALIASINFO_FORMAT,
ai->name,
ai->comment);
} else {
aiPacket = NULL;
ASSERT(0);
}
packet = Proto_ConcatXMLStrings(packet, aiPacket);
}
packet = Proto_ConcatXMLStrings(packet,
g_markup_printf_escaped(VGAUTH_ALIAS_FORMAT_END));
}
// now the end of the reply
endPacket = g_markup_printf_escaped(VGAUTH_QUERYALIASES_REPLY_FORMAT_END);
packet = Proto_ConcatXMLStrings(packet, endPacket);
ServiceAliasFreeAliasList(num, aList);
}
err = ServiceNetworkWriteData(conn, strlen(packet), packet);
if (err != VGAUTH_E_OK) {
Warning("%s: failed to send QueryAliases reply\n", __FUNCTION__);
}
g_free(packet);
return err;
}
| 0 |
[] |
open-vm-tools
|
70a74758bfe0042c27f15ce590fb21a2bc54d745
| 143,890,118,700,174,840,000,000,000,000,000,000,000 | 69 |
Properly check authorization on incoming guestOps requests.
Fix public pipe request checks. Only a SessionRequest type should
be accepted on the public pipe.
|
int32_t UpperOffset() const { return upper_offset_; }
| 0 |
[] |
node
|
fd80a31e0697d6317ce8c2d289575399f4e06d21
| 20,525,927,409,716,914,000,000,000,000,000,000,000 | 1 |
deps: backport 5f836c from v8 upstream
Original commit message:
Fix Hydrogen bounds check elimination
When combining bounds checks, they must all be moved before the first load/store
that they are guarding.
BUG=chromium:344186
LOG=y
[email protected]
Review URL: https://codereview.chromium.org/172093002
git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@19475 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
fix #8070
|
ri_CheckTrigger(FunctionCallInfo fcinfo, const char *funcname, int tgkind)
{
TriggerData *trigdata = (TriggerData *) fcinfo->context;
if (!CALLED_AS_TRIGGER(fcinfo))
ereport(ERROR,
(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
errmsg("function \"%s\" was not called by trigger manager", funcname)));
/*
* Check proper event
*/
if (!TRIGGER_FIRED_AFTER(trigdata->tg_event) ||
!TRIGGER_FIRED_FOR_ROW(trigdata->tg_event))
ereport(ERROR,
(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
errmsg("function \"%s\" must be fired AFTER ROW", funcname)));
switch (tgkind)
{
case RI_TRIGTYPE_INSERT:
if (!TRIGGER_FIRED_BY_INSERT(trigdata->tg_event))
ereport(ERROR,
(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
errmsg("function \"%s\" must be fired for INSERT", funcname)));
break;
case RI_TRIGTYPE_UPDATE:
if (!TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event))
ereport(ERROR,
(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
errmsg("function \"%s\" must be fired for UPDATE", funcname)));
break;
case RI_TRIGTYPE_DELETE:
if (!TRIGGER_FIRED_BY_DELETE(trigdata->tg_event))
ereport(ERROR,
(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
errmsg("function \"%s\" must be fired for DELETE", funcname)));
break;
}
}
| 0 |
[
"CWE-209"
] |
postgres
|
804b6b6db4dcfc590a468e7be390738f9f7755fb
| 277,595,334,372,886,940,000,000,000,000,000,000,000 | 40 |
Fix column-privilege leak in error-message paths
While building error messages to return to the user,
BuildIndexValueDescription, ExecBuildSlotValueDescription and
ri_ReportViolation would happily include the entire key or entire row in
the result returned to the user, even if the user didn't have access to
view all of the columns being included.
Instead, include only those columns which the user is providing or which
the user has select rights on. If the user does not have any rights
to view the table or any of the columns involved then no detail is
provided and a NULL value is returned from BuildIndexValueDescription
and ExecBuildSlotValueDescription. Note that, for key cases, the user
must have access to all of the columns for the key to be shown; a
partial key will not be returned.
Further, in master only, do not return any data for cases where row
security is enabled on the relation and row security should be applied
for the user. This required a bit of refactoring and moving of things
around related to RLS- note the addition of utils/misc/rls.c.
Back-patch all the way, as column-level privileges are now in all
supported versions.
This has been assigned CVE-2014-8161, but since the issue and the patch
have already been publicized on pgsql-hackers, there's no point in trying
to hide this commit.
|
static int ntop_stats_get_samplings_of_hours_from_epoch(lua_State *vm) {
time_t epoch_start, epoch_end;
int num_hours;
int ifid;
NetworkInterface* iface;
StatsManager *sm;
struct statsManagerRetrieval retvals;
ntop->getTrace()->traceEvent(TRACE_DEBUG, "%s() called", __FUNCTION__);
if(ntop_lua_check(vm, __FUNCTION__, 1, LUA_TNUMBER)) return(CONST_LUA_ERROR);
ifid = lua_tointeger(vm, 1);
if(ifid < 0)
return(CONST_LUA_ERROR);
if(ntop_lua_check(vm, __FUNCTION__, 2, LUA_TNUMBER)) return(CONST_LUA_ERROR);
epoch_end = lua_tointeger(vm, 2);
epoch_end -= (epoch_end % 60);
if(epoch_end < 0)
return(CONST_LUA_ERROR);
if(ntop_lua_check(vm, __FUNCTION__, 3, LUA_TNUMBER)) return(CONST_LUA_ERROR);
num_hours = lua_tointeger(vm, 3);
if(num_hours < 0)
return(CONST_LUA_ERROR);
if(!(iface = ntop->getNetworkInterface(ifid)) ||
!(sm = iface->getStatsManager()))
return (CONST_LUA_ERROR);
epoch_start = epoch_end - (num_hours * 60 * 60);
if(sm->retrieveHourStatsInterval(epoch_start, epoch_end, &retvals))
return(CONST_LUA_ERROR);
lua_newtable(vm);
for (unsigned i = 0 ; i < retvals.rows.size() ; i++)
lua_push_str_table_entry(vm, retvals.rows[i].c_str(), (char*)"");
return(CONST_LUA_OK);
}
| 0 |
[
"CWE-476"
] |
ntopng
|
01f47e04fd7c8d54399c9e465f823f0017069f8f
| 155,199,961,810,772,080,000,000,000,000,000,000,000 | 41 |
Security fix: prevents empty host from being used
|
static void do_under_overlay_for_page(
QPDF& pdf,
Options& o,
UnderOverlay& uo,
std::map<int, std::vector<int> >& pagenos,
size_t page_idx,
std::map<int, QPDFObjectHandle>& fo,
std::vector<QPDFPageObjectHelper>& pages,
QPDFPageObjectHelper& dest_page,
bool before)
{
int pageno = 1 + QIntC::to_int(page_idx);
if (! pagenos.count(pageno))
{
return;
}
std::string content;
int min_suffix = 1;
QPDFObjectHandle resources = dest_page.getAttribute("/Resources", true);
for (std::vector<int>::iterator iter = pagenos[pageno].begin();
iter != pagenos[pageno].end(); ++iter)
{
int from_pageno = *iter;
if (o.verbose)
{
std::cout << " " << uo.which << " " << from_pageno << std::endl;
}
if (0 == fo.count(from_pageno))
{
fo[from_pageno] =
pdf.copyForeignObject(
pages.at(QIntC::to_size(from_pageno - 1)).
getFormXObjectForPage());
}
// If the same page is overlaid or underlaid multiple times,
// we'll generate multiple names for it, but that's harmless
// and also a pretty goofy case that's not worth coding
// around.
std::string name = resources.getUniqueResourceName("/Fx", min_suffix);
std::string new_content = dest_page.placeFormXObject(
fo[from_pageno], name,
dest_page.getTrimBox().getArrayAsRectangle());
if (! new_content.empty())
{
resources.mergeResources(
QPDFObjectHandle::parse("<< /XObject << >> >>"));
resources.getKey("/XObject").replaceKey(name, fo[from_pageno]);
++min_suffix;
content += new_content;
}
}
if (! content.empty())
{
if (before)
{
dest_page.addPageContents(
QPDFObjectHandle::newStream(&pdf, content), true);
}
else
{
dest_page.addPageContents(
QPDFObjectHandle::newStream(&pdf, "q\n"), true);
dest_page.addPageContents(
QPDFObjectHandle::newStream(&pdf, "\nQ\n" + content), false);
}
}
}
| 0 |
[
"CWE-787"
] |
qpdf
|
d71f05ca07eb5c7cfa4d6d23e5c1f2a800f52e8e
| 57,872,566,321,071,450,000,000,000,000,000,000,000 | 68 |
Fix sign and conversion warnings (major)
This makes all integer type conversions that have potential data loss
explicit with calls that do range checks and raise an exception. After
this commit, qpdf builds with no warnings when -Wsign-conversion
-Wconversion is used with gcc or clang or when -W3 -Wd4800 is used
with MSVC. This significantly reduces the likelihood of potential
crashes from bogus integer values.
There are some parts of the code that take int when they should take
size_t or an offset. Such places would make qpdf not support files
with more than 2^31 of something that usually wouldn't be so large. In
the event that such a file shows up and is valid, at least qpdf would
raise an error in the right spot so the issue could be legitimately
addressed rather than failing in some weird way because of a silent
overflow condition.
|
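The policy described above -- every narrowing integer conversion goes through a call that range-checks and throws -- is what the QIntC::to_int / QIntC::to_size calls in the code further up implement. A generic self-contained sketch of such a checked cast, not qpdf's actual QIntC implementation:

#include <stdexcept>
#include <string>

// Round-trip test: if converting to the target type and back changes
// the value, or flips its sign, the target cannot represent it.
template <typename To, typename From>
To checked_cast(From v)
{
    To t = static_cast<To>(v);
    if (static_cast<From>(t) != v || ((t < To(0)) != (v < From(0))))
        throw std::range_error("integer value out of range: " +
                               std::to_string(v));
    return t;
}

// e.g. checked_cast<int>(page_idx) throws instead of silently wrapping.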
static int add_lengths(int *out, int a, int b)
{
/* sk_FOO_num(NULL) returns -1 but is effectively 0 when iterating. */
if (a < 0)
a = 0;
if (b < 0)
b = 0;
if (a > INT_MAX - b)
return 0;
*out = a + b;
return 1;
}
| 0 |
[
"CWE-125"
] |
openssl
|
8393de42498f8be75cf0353f5c9f906a43a748d2
| 29,080,525,842,475,774,000,000,000,000,000,000,000 | 13 |
Fix the name constraints code to not assume NUL terminated strings
ASN.1 strings may not be NUL terminated. Don't assume they are.
CVE-2021-3712
Reviewed-by: Viktor Dukhovni <[email protected]>
Reviewed-by: Paul Dale <[email protected]>
|
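"ASN.1 strings may not be NUL terminated" means any code that passes ASN1_STRING data to the strlen()/strcmp() family is wrong; the data pointer plus the explicit length is the only safe interface. A small sketch using the real accessors:

#include <openssl/asn1.h>
#include <string.h>

/* Compare an ASN1_STRING against a C string using the explicit length,
 * never strlen() on the ASN.1 data (the CVE-2021-3712 bug class). */
static int asn1_string_equals(const ASN1_STRING *s, const char *ref)
{
    int len = ASN1_STRING_length(s);
    const unsigned char *data = ASN1_STRING_get0_data(s);
    return len >= 0 && (size_t)len == strlen(ref)
        && memcmp(data, ref, (size_t)len) == 0;
}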
void WebPImage::setComment(const std::string& /*comment*/)
{
// not supported
throw(Error(kerInvalidSettingForImage, "Image comment", "WebP"));
}
| 0 |
[
"CWE-190"
] |
exiv2
|
c73d1e27198a389ce7caf52ac30f8e2120acdafd
| 310,711,374,402,569,880,000,000,000,000,000,000,000 | 5 |
Avoid negative integer overflow when `filesize < io_->tell()`.
This fixes #791.
|
time_diff_ms(struct timeval *t1, struct timeval *t2)
{
// This handles wrapping of tv_usec correctly without any special case.
// Example of 2 pairs (tv_sec, tv_usec) with a duration of 5 ms:
// t1 = (1, 998000) t2 = (2, 3000) gives:
// (2 - 1) * 1000 + (3000 - 998000) / 1000 -> 5 ms.
return (t2->tv_sec - t1->tv_sec) * 1000
+ (t2->tv_usec - t1->tv_usec) / 1000;
}
| 0 |
[
"CWE-476",
"CWE-703"
] |
vim
|
80525751c5ce9ed82c41d83faf9ef38667bf61b1
| 338,353,347,845,413,080,000,000,000,000,000,000,000 | 9 |
patch 9.0.0259: crash with mouse click when not initialized
Problem: Crash with mouse click when not initialized.
Solution: Check TabPageIdxs[] is not NULL.
|
Pl_Count::Pl_Count(char const* identifier, Pipeline* next) :
Pipeline(identifier, next),
count(0),
last_char('\0')
{
}
| 0 |
[
"CWE-787"
] |
qpdf
|
d71f05ca07eb5c7cfa4d6d23e5c1f2a800f52e8e
| 259,842,002,008,520,840,000,000,000,000,000,000,000 | 6 |
Fix sign and conversion warnings (major)
This makes all integer type conversions that have potential data loss
explicit with calls that do range checks and raise an exception. After
this commit, qpdf builds with no warnings when -Wsign-conversion
-Wconversion is used with gcc or clang or when -W3 -Wd4800 is used
with MSVC. This significantly reduces the likelihood of potential
crashes from bogus integer values.
There are some parts of the code that take int when they should take
size_t or an offset. Such places would make qpdf not support files
with more than 2^31 of something that usually wouldn't be so large. In
the event that such a file shows up and is valid, at least qpdf would
raise an error in the right spot so the issue could be legitimately
addressed rather than failing in some weird way because of a silent
overflow condition.
|
evbuffer_setcb(struct evbuffer *buffer, evbuffer_cb cb, void *cbarg)
{
EVBUFFER_LOCK(buffer);
if (!LIST_EMPTY(&buffer->callbacks))
evbuffer_remove_all_callbacks(buffer);
if (cb) {
struct evbuffer_cb_entry *ent =
evbuffer_add_cb(buffer, NULL, cbarg);
ent->cb.cb_obsolete = cb;
ent->flags |= EVBUFFER_CB_OBSOLETE;
}
EVBUFFER_UNLOCK(buffer);
}
| 0 |
[
"CWE-189"
] |
libevent
|
841ecbd96105c84ac2e7c9594aeadbcc6fb38bc4
| 37,625,406,535,545,790,000,000,000,000,000,000,000 | 15 |
Fix CVE-2014-6272 in Libevent 2.1
For this fix, we need to make sure that passing too-large inputs to
the evbuffer functions can't make us do bad things with the heap.
Also, lower the maximum chunk size to the smaller of the off_t and size_t maxima.
This is necessary since otherwise we could get into an infinite loop
if we make a chunk that 'misalign' cannot index into.
|
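A sketch of that clamping rule, under the assumption of a 64-bit off_t; this is illustrative, not libevent's actual chunk allocator.

#include <stdint.h>

/* Cap a requested chunk size at the smaller of what size_t and off_t
 * can represent, so every offset ('misalign') can index into the chunk. */
static size_t clamp_chunk_size(uint64_t requested)
{
    uint64_t limit = (uint64_t)SIZE_MAX < (uint64_t)INT64_MAX
                         ? (uint64_t)SIZE_MAX
                         : (uint64_t)INT64_MAX; /* stand-in for OFF_T_MAX */

    return (size_t)(requested < limit ? requested : limit);
}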
int uwsgi_write_intfile(char *filename, int n) {
FILE *pidfile = fopen(filename, "w");
if (!pidfile) {
uwsgi_error_open(filename);
exit(1);
}
if (fprintf(pidfile, "%d\n", n) <= 0 || ferror(pidfile)) {
fclose(pidfile);
return -1;
}
if (fclose(pidfile)) {
return -1;
}
return 0;
}
| 0 |
[
"CWE-119",
"CWE-703",
"CWE-787"
] |
uwsgi
|
cb4636f7c0af2e97a4eef7a3cdcbd85a71247bfe
| 45,833,796,423,637,480,000,000,000,000,000,000,000 | 15 |
improve uwsgi_expand_path() to sanitize input, avoiding stack corruption and a potential security issue
|
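A hedged sketch of the sanitization this uwsgi commit describes: validate the input length before expanding into a fixed-size buffer instead of copying unconditionally. The helper below uses realpath() and is an illustration, not uwsgi's exact code.

#include <limits.h>
#include <stdlib.h>
#include <string.h>

/* Expand 'dir' into 'out' only if every intermediate fits; reject
 * oversized input up front so no fixed-size stack buffer can overflow.
 * (realpath() also requires the path to actually resolve.) */
static int expand_path_safe(const char *dir, char *out, size_t outsize)
{
    char resolved[PATH_MAX];

    if (dir == NULL || strlen(dir) >= PATH_MAX)
        return -1;                      /* refuse oversized input */
    if (realpath(dir, resolved) == NULL)
        return -1;                      /* path does not resolve */
    if (strlen(resolved) + 1 > outsize)
        return -1;                      /* caller's buffer too small */
    memcpy(out, resolved, strlen(resolved) + 1);
    return 0;
}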
isdn_ppp_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
int error = 0;
int len;
isdn_net_local *lp = netdev_priv(dev);
if (lp->p_encap != ISDN_NET_ENCAP_SYNCPPP)
return -EINVAL;
switch (cmd) {
#define PPP_VERSION "2.3.7"
case SIOCGPPPVER:
len = strlen(PPP_VERSION) + 1;
if (copy_to_user(ifr->ifr_data, PPP_VERSION, len))
error = -EFAULT;
break;
case SIOCGPPPSTATS:
error = isdn_ppp_dev_ioctl_stats(lp->ppp_slot, ifr, dev);
break;
default:
error = -EINVAL;
break;
}
return error;
}
| 0 |
[] |
linux
|
4ab42d78e37a294ac7bc56901d563c642e03c4ae
| 61,563,749,513,715,680,000,000,000,000,000,000,000 | 27 |
ppp, slip: Validate VJ compression slot parameters completely
Currently slhc_init() treats out-of-range values of rslots and tslots
as equivalent to 0, except that if tslots is too large it will
dereference a null pointer (CVE-2015-7799).
Add a range-check at the top of the function and make it return an
ERR_PTR() on error instead of NULL. Change the callers accordingly.
Compile-tested only.
Reported-by: 郭永刚 <[email protected]>
References: http://article.gmane.org/gmane.comp.security.oss.general/17908
Signed-off-by: Ben Hutchings <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
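A minimal kernel-style sketch of the range check the message describes. The bound of 255 is illustrative (the real limits live in the slhc headers), and the function is a stand-in, not the upstream diff.

#include <linux/err.h>

struct slcompress; /* opaque here; defined by the slhc code */

/* Validate both slot counts at the top and report failure via ERR_PTR()
 * so callers can distinguish bad parameters from allocation failure. */
static struct slcompress *slhc_init_checked(int rslots, int tslots)
{
    if (rslots < 0 || rslots > 255 || tslots < 0 || tslots > 255)
        return ERR_PTR(-EINVAL);

    /* ... original allocation and initialization would follow ... */
    return ERR_PTR(-ENOMEM); /* placeholder: real code returns the object */
}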
Stats::Scope& listenerScope() override { return stats_store_; }
| 0 |
[
"CWE-400"
] |
envoy
|
dfddb529e914d794ac552e906b13d71233609bf7
| 323,070,765,192,456,640,000,000,000,000,000,000,000 | 1 |
listener: Add configurable accepted connection limits (#153)
Add support for per-listener limits on accepted connections.
Signed-off-by: Tony Allen <[email protected]>
|
PJ_DEF(void) pjsip_restore_strict_route_set(pjsip_tx_data *tdata)
{
pjsip_route_hdr *first_route_hdr, *last_route_hdr;
    /* Check if we have found a strict route before */
if (tdata->saved_strict_route == NULL) {
/* This request doesn't contain strict route */
return;
}
/* Find the first "Route" headers from the message. */
first_route_hdr = (pjsip_route_hdr*)
pjsip_msg_find_hdr(tdata->msg, PJSIP_H_ROUTE, NULL);
if (first_route_hdr == NULL) {
/* User has modified message route? We don't expect this! */
pj_assert(!"Message route was modified?");
tdata->saved_strict_route = NULL;
return;
}
/* Find last Route header */
last_route_hdr = first_route_hdr;
while (last_route_hdr->next != (void*)&tdata->msg->hdr) {
pjsip_route_hdr *hdr;
hdr = (pjsip_route_hdr*)
pjsip_msg_find_hdr(tdata->msg, PJSIP_H_ROUTE,
last_route_hdr->next);
if (!hdr)
break;
last_route_hdr = hdr;
}
/* Put the last Route header as request URI, delete last Route
* header, and insert the saved strict route as the first Route.
*/
tdata->msg->line.req.uri = last_route_hdr->name_addr.uri;
pj_list_insert_before(first_route_hdr, tdata->saved_strict_route);
pj_list_erase(last_route_hdr);
/* Reset */
tdata->saved_strict_route = NULL;
}
| 0 |
[
"CWE-297",
"CWE-295"
] |
pjproject
|
67e46c1ac45ad784db5b9080f5ed8b133c122872
| 119,062,924,437,119,370,000,000,000,000,000,000,000 | 43 |
Merge pull request from GHSA-8hcp-hm38-mfph
* Check hostname during TLS transport selection
* revision based on feedback
* remove the code in create_request that has been moved
|
find_decl(
char_u *ptr,
int len,
int locally,
int thisblock,
int flags_arg) // flags passed to searchit()
{
char_u *pat;
pos_T old_pos;
pos_T par_pos;
pos_T found_pos;
int t;
int save_p_ws;
int save_p_scs;
int retval = OK;
int incll;
int searchflags = flags_arg;
int valid;
if ((pat = alloc(len + 7)) == NULL)
return FAIL;
// Put "\V" before the pattern to avoid that the special meaning of "."
// and "~" causes trouble.
sprintf((char *)pat, vim_iswordp(ptr) ? "\\V\\<%.*s\\>" : "\\V%.*s",
len, ptr);
old_pos = curwin->w_cursor;
save_p_ws = p_ws;
save_p_scs = p_scs;
p_ws = FALSE; // don't wrap around end of file now
p_scs = FALSE; // don't switch ignorecase off now
/*
* With "gD" go to line 1.
* With "gd" Search back for the start of the current function, then go
* back until a blank line. If this fails go to line 1.
*/
if (!locally || !findpar(&incll, BACKWARD, 1L, '{', FALSE))
{
setpcmark(); // Set in findpar() otherwise
curwin->w_cursor.lnum = 1;
par_pos = curwin->w_cursor;
}
else
{
par_pos = curwin->w_cursor;
while (curwin->w_cursor.lnum > 1 && *skipwhite(ml_get_curline()) != NUL)
--curwin->w_cursor.lnum;
}
curwin->w_cursor.col = 0;
// Search forward for the identifier, ignore comment lines.
CLEAR_POS(&found_pos);
for (;;)
{
t = searchit(curwin, curbuf, &curwin->w_cursor, NULL, FORWARD,
pat, 1L, searchflags, RE_LAST, NULL);
if (curwin->w_cursor.lnum >= old_pos.lnum)
t = FAIL; // match after start is failure too
if (thisblock && t != FAIL)
{
pos_T *pos;
// Check that the block the match is in doesn't end before the
// position where we started the search from.
if ((pos = findmatchlimit(NULL, '}', FM_FORWARD,
(int)(old_pos.lnum - curwin->w_cursor.lnum + 1))) != NULL
&& pos->lnum < old_pos.lnum)
{
// There can't be a useful match before the end of this block.
// Skip to the end.
curwin->w_cursor = *pos;
continue;
}
}
if (t == FAIL)
{
// If we previously found a valid position, use it.
if (found_pos.lnum != 0)
{
curwin->w_cursor = found_pos;
t = OK;
}
break;
}
if (get_leader_len(ml_get_curline(), NULL, FALSE, TRUE) > 0)
{
// Ignore this line, continue at start of next line.
++curwin->w_cursor.lnum;
curwin->w_cursor.col = 0;
continue;
}
valid = is_ident(ml_get_curline(), curwin->w_cursor.col);
// If the current position is not a valid identifier and a previous
// match is present, favor that one instead.
if (!valid && found_pos.lnum != 0)
{
curwin->w_cursor = found_pos;
break;
}
// Global search: use first valid match found
if (valid && !locally)
break;
if (valid && curwin->w_cursor.lnum >= par_pos.lnum)
{
// If we previously found a valid position, use it.
if (found_pos.lnum != 0)
curwin->w_cursor = found_pos;
break;
}
// For finding a local variable and the match is before the "{" or
// inside a comment, continue searching. For K&R style function
// declarations this skips the function header without types.
if (!valid)
CLEAR_POS(&found_pos);
else
found_pos = curwin->w_cursor;
// Remove SEARCH_START from flags to avoid getting stuck at one
// position.
searchflags &= ~SEARCH_START;
}
if (t == FAIL)
{
retval = FAIL;
curwin->w_cursor = old_pos;
}
else
{
curwin->w_set_curswant = TRUE;
// "n" searches forward now
reset_search_dir();
}
vim_free(pat);
p_ws = save_p_ws;
p_scs = save_p_scs;
return retval;
}
| 0 |
[
"CWE-416"
] |
vim
|
35a9a00afcb20897d462a766793ff45534810dc3
| 144,584,058,471,500,110,000,000,000,000,000,000,000 | 145 |
patch 8.2.3428: using freed memory when replacing
Problem: Using freed memory when replacing. (Dhiraj Mishra)
Solution: Get the line pointer after calling ins_copychar().
|
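The bug class behind this vim patch generalizes: any call that may reallocate a buffer invalidates previously fetched pointers into it, so the pointer must be re-read after the call. A minimal generic illustration (not vim's code):

#include <stdlib.h>

struct buf { char *line; size_t len; };

/* realloc() may move the allocation, invalidating old copies of b->line */
static int may_grow(struct buf *b)
{
    char *p = realloc(b->line, b->len + 16);

    if (p == NULL)
        return -1;
    b->line = p;
    b->len += 16;
    return 0;
}

static void edit(struct buf *b)
{
    if (may_grow(b) != 0)
        return;
    b->line[0] = 'x'; /* correct: read b->line *after* the call that can move it */
}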
void Magick::Image::colorSpaceType(const ColorspaceType colorSpace_)
{
modifyImage();
GetPPException;
SetImageColorspace(image(),colorSpace_,exceptionInfo);
ThrowImageException;
options()->colorspaceType(colorSpace_);
}
| 0 |
[
"CWE-416"
] |
ImageMagick
|
8c35502217c1879cb8257c617007282eee3fe1cc
| 281,606,970,656,998,800,000,000,000,000,000,000,000 | 8 |
Added missing return to avoid use after free.
|
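A generic sketch of the bug class this one-line ImageMagick fix addresses: once an error path has released a resource, the function must return instead of falling through to code that still uses it. Names are illustrative.

#include <stdlib.h>

struct ctx { char *buf; };

static void cleanup(struct ctx *c)
{
    free(c->buf);
    c->buf = NULL;
}

static void process(struct ctx *c, int failed)
{
    if (failed) {
        cleanup(c);
        return; /* the "missing return": without it, c->buf is used after free */
    }
    c->buf[0] = 'x';
}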
static CURLcode smtp_rcpt_to(struct connectdata *conn)
{
CURLcode result = CURLE_OK;
struct smtp_conn *smtpc = &conn->proto.smtpc;
/* send RCPT TO */
if(smtpc->rcpt) {
if(smtpc->rcpt->data[0] == '<')
result = Curl_pp_sendf(&conn->proto.smtpc.pp, "RCPT TO:%s",
smtpc->rcpt->data);
else
result = Curl_pp_sendf(&conn->proto.smtpc.pp, "RCPT TO:<%s>",
smtpc->rcpt->data);
if(!result)
state(conn, SMTP_RCPT);
}
return result;
}
| 0 |
[
"CWE-89"
] |
curl
|
75ca568fa1c19de4c5358fed246686de8467c238
| 209,148,880,502,295,540,000,000,000,000,000,000,000 | 19 |
URL sanitize: reject URLs containing bad data
Protocols (IMAP, POP3 and SMTP) that use the path part of a URL in a
decoded manner now use the new Curl_urldecode() function to reject URLs
with embedded control codes (anything that is or decodes to a byte value
less than 32).
URLs containing such codes could easily otherwise be used to do harm and
allow users to do unintended actions with otherwise innocent tools and
applications. For example, using a URL like
pop3://pop3.example.com/1%0d%0aDELE%201 when the app wants a URL to get
a mail and instead this would delete one.
This flaw is considered a security vulnerability: CVE-2012-0036
Security advisory at: http://curl.haxx.se/docs/adv_20120124.html
Reported by: Dan Fandrich
|
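A sketch of the rejection rule from the advisory: after percent-decoding, refuse any byte below 0x20 so CRLF sequences cannot be smuggled into protocol commands. This mirrors the intent of Curl_urldecode()'s rejection mode, not its exact implementation.

#include <stddef.h>

/* Returns nonzero if the decoded URL component embeds a control byte;
 * the caller should then fail the whole URL. */
static int contains_ctrl_bytes(const unsigned char *decoded, size_t len)
{
    size_t i;

    for (i = 0; i < len; i++)
        if (decoded[i] < 0x20)
            return 1;
    return 0;
}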
void RGWSetRequestPayment::pre_exec()
{
rgw_bucket_object_pre_exec(s);
}
| 0 |
[
"CWE-770"
] |
ceph
|
ab29bed2fc9f961fe895de1086a8208e21ddaddc
| 292,228,822,461,709,360,000,000,000,000,000,000,000 | 4 |
rgw: fix issues with 'enforce bounds' patch
The patch to enforce bounds on max-keys/max-uploads/max-parts had a few
issues that would prevent us from compiling it. Instead of changing the
code provided by the submitter, we're addressing them in a separate
commit to maintain the DCO.
Signed-off-by: Joao Eduardo Luis <[email protected]>
Signed-off-by: Abhishek Lekshmanan <[email protected]>
(cherry picked from commit 29bc434a6a81a2e5c5b8cfc4c8d5c82ca5bf538a)
mimic-specific fixes:
As the largish g_conf() change from master isn't in mimic yet, use the g_conf
global structure; also make rgw_op use the value from the req_info ceph context,
as we do for all requests
|
check_user_runchroot(void)
{
debug_decl(check_user_runchroot, SUDOERS_DEBUG_PLUGIN);
if (user_runchroot == NULL)
debug_return_bool(true);
sudo_debug_printf(SUDO_DEBUG_INFO|SUDO_DEBUG_LINENO,
"def_runchroot %s, user_runchroot %s",
def_runchroot ? def_runchroot : "none",
user_runchroot ? user_runchroot : "none");
if (def_runchroot == NULL || (strcmp(def_runchroot, "*") != 0 &&
strcmp(def_runchroot, user_runchroot) != 0)) {
log_warningx(SLOG_NO_STDERR|SLOG_AUDIT,
N_("user not allowed to change root directory to %s"),
user_runchroot);
sudo_warnx(U_("you are not permitted to use the -R option with %s"),
user_cmnd);
debug_return_bool(false);
}
free(def_runchroot);
if ((def_runchroot = strdup(user_runchroot)) == NULL) {
sudo_warnx(U_("%s: %s"), __func__, U_("unable to allocate memory"));
debug_return_int(-1);
}
debug_return_bool(true);
}
| 0 |
[
"CWE-193"
] |
sudo
|
1f8638577d0c80a4ff864a2aad80a0d95488e9a8
| 244,069,801,209,016,240,000,000,000,000,000,000,000 | 28 |
Fix potential buffer overflow when unescaping backslashes in user_args.
Also, do not try to unescape backslashes unless in run mode *and*
we are running the command via a shell.
Found by Qualys, this fixes CVE-2021-3156.
|
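A hedged sketch of a bounds-safe unescape loop in the spirit of this sudo fix: the writer can never run past the destination, and a trailing lone backslash cannot trigger a read past the end of the input. Illustrative code, not sudo's.

#include <stddef.h>

static int unescape_into(const char *in, char *out, size_t outsize)
{
    size_t o = 0;

    while (*in != '\0') {
        if (in[0] == '\\' && in[1] != '\0')
            in++;               /* consume the escape, keep the next byte */
        if (o + 1 >= outsize)
            return -1;          /* would overflow the destination */
        out[o++] = *in++;
    }
    out[o] = '\0';
    return 0;
}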
term_start_plot()
{
FPRINTF((stderr, "term_start_plot()\n"));
if (!term_initialised)
term_initialise();
if (!term_graphics) {
FPRINTF((stderr, "- calling term->graphics()\n"));
(*term->graphics) ();
term_graphics = TRUE;
} else if (multiplot && term_suspended) {
if (term->resume) {
FPRINTF((stderr, "- calling term->resume()\n"));
(*term->resume) ();
}
term_suspended = FALSE;
}
if (multiplot)
multiplot_count++;
/* Sync point for epslatex text positioning */
(*term->layer)(TERM_LAYER_RESET);
/* Because PostScript plots may be viewed out of order, make sure */
    /* each new plot makes no assumption about the previous palette. */
if (term->flags & TERM_IS_POSTSCRIPT)
invalidate_palette();
/* Set canvas size to full range of current terminal coordinates */
canvas.xleft = 0;
canvas.xright = term->xmax - 1;
canvas.ybot = 0;
canvas.ytop = term->ymax - 1;
}
| 0 |
[
"CWE-787"
] |
gnuplot
|
963c7df3e0c5266efff260d0dff757dfe03d3632
| 197,552,883,491,599,300,000,000,000,000,000,000,000 | 37 |
Better error handling for faulty font syntax
A missing close-quote in an enhanced text font specification could
cause a segfault.
Bug #2303
|
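A sketch of the guard this gnuplot fix needs: when scanning an enhanced-text font spec for a closing quote, stop at the terminating NUL instead of walking past the end of the string. Illustrative only.

#include <stddef.h>

/* Return a pointer to the closing quote, or NULL if the spec is
 * unterminated so the caller can report a syntax error safely. */
static const char *find_close_quote(const char *p)
{
    while (*p != '\0' && *p != '\'')
        p++;
    return (*p == '\'') ? p : NULL;
}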
static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
{
unsigned res = 0;
size_t size = i->count;
unsigned skip = i->iov_offset;
unsigned k;
for (k = 0; k < i->nr_segs; k++, skip = 0) {
size_t len = i->bvec[k].bv_len - skip;
res |= (unsigned long)i->bvec[k].bv_offset + skip;
if (len > size)
len = size;
res |= len;
size -= len;
if (!size)
break;
}
return res;
}
| 0 |
[
"CWE-665",
"CWE-284"
] |
linux
|
9d2231c5d74e13b2a0546fee6737ee4446017903
| 144,996,608,318,661,400,000,000,000,000,000,000,000 | 19 |
lib/iov_iter: initialize "flags" in new pipe_buffer
The functions copy_page_to_iter_pipe() and push_pipe() can both
allocate a new pipe_buffer, but the "flags" member initializer is
missing.
Fixes: 241699cd72a8 ("new iov_iter flavour: pipe-backed")
To: Alexander Viro <[email protected]>
To: [email protected]
To: [email protected]
Cc: [email protected]
Signed-off-by: Max Kellermann <[email protected]>
Signed-off-by: Al Viro <[email protected]>
|
ebews_set_photo (ESoapMessage *message,
EContact *contact)
{
}
| 0 |
[
"CWE-295"
] |
evolution-ews
|
915226eca9454b8b3e5adb6f2fff9698451778de
| 283,508,368,789,632,800,000,000,000,000,000,000,000 | 5 |
I#27 - SSL Certificates are not validated
This depends on https://gitlab.gnome.org/GNOME/evolution-data-server/commit/6672b8236139bd6ef41ecb915f4c72e2a052dba5 too.
Closes https://gitlab.gnome.org/GNOME/evolution-ews/issues/27
|
static int netif_alloc_rx_queues(struct net_device *dev)
{
unsigned int i, count = dev->num_rx_queues;
struct netdev_rx_queue *rx;
BUG_ON(count < 1);
rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
if (!rx) {
pr_err("netdev: Unable to allocate %u rx queues.\n", count);
return -ENOMEM;
}
dev->_rx = rx;
for (i = 0; i < count; i++)
rx[i].dev = dev;
return 0;
}
| 0 |
[
"CWE-264"
] |
linux
|
8909c9ad8ff03611c9c96c9a92656213e4bb495b
| 338,952,559,147,725,180,000,000,000,000,000,000,000 | 18 |
net: don't allow CAP_NET_ADMIN to load non-netdev kernel modules
Since a8f80e8ff94ecba629542d9b4b5f5a8ee3eb565c any process with
CAP_NET_ADMIN may load any module from /lib/modules/. This doesn't mean
that CAP_NET_ADMIN is a superset of CAP_SYS_MODULE as modules are
limited to /lib/modules/**. However, CAP_NET_ADMIN capability shouldn't
allow anybody load any module not related to networking.
This patch restricts the ability to autoload modules to netdev modules
with explicit aliases. This fixes CVE-2011-1019.
Arnd Bergmann suggested leaving untouched the old pre-v2.6.32 behavior
of loading netdev modules by name (without any prefix) for processes
with CAP_SYS_MODULE to maintain the compatibility with network scripts
that use autoloading netdev modules by aliases like "eth0", "wlan0".
Currently there are only three users of the feature in the upstream
kernel: ipip, ip_gre and sit.
root@albatros:~# capsh --drop=$(seq -s, 0 11),$(seq -s, 13 34) --
root@albatros:~# grep Cap /proc/$$/status
CapInh: 0000000000000000
CapPrm: fffffff800001000
CapEff: fffffff800001000
CapBnd: fffffff800001000
root@albatros:~# modprobe xfs
FATAL: Error inserting xfs
(/lib/modules/2.6.38-rc6-00001-g2bf4ca3/kernel/fs/xfs/xfs.ko): Operation not permitted
root@albatros:~# lsmod | grep xfs
root@albatros:~# ifconfig xfs
xfs: error fetching interface information: Device not found
root@albatros:~# lsmod | grep xfs
root@albatros:~# lsmod | grep sit
root@albatros:~# ifconfig sit
sit: error fetching interface information: Device not found
root@albatros:~# lsmod | grep sit
root@albatros:~# ifconfig sit0
sit0 Link encap:IPv6-in-IPv4
NOARP MTU:1480 Metric:1
root@albatros:~# lsmod | grep sit
sit 10457 0
tunnel4 2957 1 sit
For CAP_SYS_MODULE module loading is still relaxed:
root@albatros:~# grep Cap /proc/$$/status
CapInh: 0000000000000000
CapPrm: ffffffffffffffff
CapEff: ffffffffffffffff
CapBnd: ffffffffffffffff
root@albatros:~# ifconfig xfs
xfs: error fetching interface information: Device not found
root@albatros:~# lsmod | grep xfs
xfs 745319 0
Reference: https://lkml.org/lkml/2011/2/24/203
Signed-off-by: Vasiliy Kulikov <[email protected]>
Signed-off-by: Michael Tokarev <[email protected]>
Acked-by: David S. Miller <[email protected]>
Acked-by: Kees Cook <[email protected]>
Signed-off-by: James Morris <[email protected]>
|
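A minimal sketch of the dispatch the commit describes, using kernel APIs that do exist (capable(), request_module()); the exact call sites in the upstream diff differ.

#include <linux/capability.h>
#include <linux/kmod.h>

/* CAP_SYS_MODULE keeps the old by-name autoload; CAP_NET_ADMIN may only
 * load modules that explicitly declare a "netdev-" alias. */
static void request_netdev_module(const char *name)
{
    if (capable(CAP_SYS_MODULE))
        request_module("%s", name);
    else if (capable(CAP_NET_ADMIN))
        request_module("netdev-%s", name);
}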
explicit UnstageOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}
| 0 |
[
"CWE-20",
"CWE-703"
] |
tensorflow
|
cebe3c45d76357d201c65bdbbf0dbe6e8a63bbdb
| 61,454,011,869,277,335,000,000,000,000,000,000,000 | 1 |
Fix tf.raw_ops.StagePeek vulnerability with invalid `index`.
Check that input is actually a scalar before treating it as such.
PiperOrigin-RevId: 445524908
|
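The scalar check reads the same in any language; below is a minimal C sketch of the pattern, using a hypothetical tensor struct rather than TensorFlow's API.

#include <stddef.h>

struct tensor { int rank; const void *data; };

/* Validate the rank before dereferencing data as a single value, so an
 * attacker-controlled non-scalar input cannot cause a misread. */
static int read_scalar_i32(const struct tensor *t, int *out)
{
    if (t == NULL || t->rank != 0 || t->data == NULL)
        return -1; /* not a scalar: fail instead of misreading memory */
    *out = *(const int *)t->data;
    return 0;
}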
zip_get_local_file_header_size(struct archive_read *a, size_t extra)
{
const char *p;
ssize_t filename_length, extra_length;
if ((p = __archive_read_ahead(a, extra + 30, NULL)) == NULL) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Truncated ZIP file header");
return (ARCHIVE_WARN);
}
p += extra;
if (memcmp(p, "PK\003\004", 4) != 0) {
archive_set_error(&a->archive, -1, "Damaged Zip archive");
return ARCHIVE_WARN;
}
filename_length = archive_le16dec(p + 26);
extra_length = archive_le16dec(p + 28);
return (30 + filename_length + extra_length);
}
| 0 |
[
"CWE-20"
] |
libarchive
|
d0331e8e5b05b475f20b1f3101fe1ad772d7e7e7
| 32,693,561,794,677,005,000,000,000,000,000,000,000 | 21 |
Issue #656: Fix CVE-2016-1541, VU#862384
When reading OS X metadata entries in Zip archives that were stored
without compression, libarchive would use the uncompressed entry size
to allocate a buffer but would use the compressed entry size to limit
the amount of data copied into that buffer. Since the compressed
and uncompressed sizes are provided by data in the archive itself,
an attacker could manipulate these values to write data beyond
the end of the allocated buffer.
This fix provides three new checks to guard against such
manipulation and to make libarchive generally more robust when
handling this type of entry:
1. If an OS X metadata entry is stored without compression,
abort the entire archive if the compressed and uncompressed
data sizes do not match.
2. When sanity-checking the size of an OS X metadata entry,
abort this entry if either the compressed or uncompressed
size is larger than 4MB.
3. When copying data into the allocated buffer, check the copy
size against both the compressed entry size and uncompressed
entry size.
|
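The three guards enumerated above translate to a couple of small checks; the sketch below uses illustrative names, not libarchive's internal fields.

#include <stddef.h>

#define METADATA_MAX (4u * 1024u * 1024u) /* the 4MB cap from check 2 */

/* Checks 1 and 2: stored entries must have matching sizes, and neither
 * size may exceed the cap. */
static int metadata_sizes_ok(size_t compressed, size_t uncompressed,
                             int is_stored)
{
    if (is_stored && compressed != uncompressed)
        return 0;
    if (compressed > METADATA_MAX || uncompressed > METADATA_MAX)
        return 0;
    return 1;
}

/* Check 3: the copy must be bounded by both sizes, never just one. */
static size_t copy_limit(size_t compressed, size_t uncompressed)
{
    return compressed < uncompressed ? compressed : uncompressed;
}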
static int val_to_int64(const struct ldb_val *in, int64_t *v)
{
char *end;
char buf[64];
/* make sure we don't read past the end of the data */
if (in->length > sizeof(buf)-1) {
return LDB_ERR_INVALID_ATTRIBUTE_SYNTAX;
}
strncpy(buf, (char *)in->data, in->length);
buf[in->length] = 0;
	/* We have to use "strtoll" here to get the intended overflow behavior.
* Otherwise we may get "LONG_MAX" and the conversion is wrong. */
*v = (int64_t) strtoll(buf, &end, 0);
if (*end != 0) {
return LDB_ERR_INVALID_ATTRIBUTE_SYNTAX;
}
return LDB_SUCCESS;
}
| 0 |
[
"CWE-787"
] |
samba
|
fab6b79b7724f0b636963be528483e3e946884aa
| 268,619,812,757,786,240,000,000,000,000,000,000,000 | 20 |
CVE-2021-20277 ldb/attrib_handlers casefold: stay in bounds
For a string that had N spaces at the beginning, we would
try to move N bytes beyond the end of the string.
BUG: https://bugzilla.samba.org/show_bug.cgi?id=14655
Signed-off-by: Douglas Bagnall <[email protected]>
Reviewed-by: Andrew Bartlett <[email protected]>
(cherry-picked from commit for master)
|
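A sketch of staying in bounds while trimming: advance over leading spaces only within [0, len), so later copies can never start N bytes past the end of the value. Illustrative, not ldb's casefold handler.

#include <stddef.h>

/* Offset of the first non-space byte, never beyond len. */
static size_t skip_leading_spaces(const char *val, size_t len)
{
    size_t i = 0;

    while (i < len && val[i] == ' ')
        i++;
    return i;
}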
static void smack_task_getsecid_subj(struct task_struct *p, u32 *secid)
{
struct smack_known *skp = smk_of_task_struct_subj(p);
*secid = skp->smk_secid;
}
| 0 |
[
"CWE-416"
] |
linux
|
a3727a8bac0a9e77c70820655fd8715523ba3db7
| 248,775,005,160,541,900,000,000,000,000,000,000,000 | 6 |
selinux,smack: fix subjective/objective credential use mixups
Jann Horn reported a problem with commit eb1231f73c4d ("selinux:
clarify task subjective and objective credentials") where some LSM
hooks were attempting to access the subjective credentials of a task
other than the current task. Generally speaking, it is not safe to
access another task's subjective credentials and doing so can cause
a number of problems.
Further, while looking into the problem, I realized that Smack was
suffering from a similar problem brought about by a similar commit
1fb057dcde11 ("smack: differentiate between subjective and objective
task credentials").
This patch addresses this problem by restoring the use of the task's
objective credentials in those cases where the task is other than the
current executing task. Not only does this resolve the problem
reported by Jann, it is arguably the correct thing to do in these
cases.
Cc: [email protected]
Fixes: eb1231f73c4d ("selinux: clarify task subjective and objective credentials")
Fixes: 1fb057dcde11 ("smack: differentiate between subjective and objective task credentials")
Reported-by: Jann Horn <[email protected]>
Acked-by: Eric W. Biederman <[email protected]>
Acked-by: Casey Schaufler <[email protected]>
Signed-off-by: Paul Moore <[email protected]>
|
static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
struct nl_info *info,
struct netlink_ext_ack *extack)
{
struct fib6_info *leaf = rcu_dereference_protected(fn->leaf,
lockdep_is_held(&rt->fib6_table->tb6_lock));
struct fib6_info *iter = NULL;
struct fib6_info __rcu **ins;
struct fib6_info __rcu **fallback_ins = NULL;
int replace = (info->nlh &&
(info->nlh->nlmsg_flags & NLM_F_REPLACE));
int add = (!info->nlh ||
(info->nlh->nlmsg_flags & NLM_F_CREATE));
int found = 0;
bool rt_can_ecmp = rt6_qualify_for_ecmp(rt);
u16 nlflags = NLM_F_EXCL;
int err;
if (info->nlh && (info->nlh->nlmsg_flags & NLM_F_APPEND))
nlflags |= NLM_F_APPEND;
ins = &fn->leaf;
for (iter = leaf; iter;
iter = rcu_dereference_protected(iter->fib6_next,
lockdep_is_held(&rt->fib6_table->tb6_lock))) {
/*
* Search for duplicates
*/
if (iter->fib6_metric == rt->fib6_metric) {
/*
* Same priority level
*/
if (info->nlh &&
(info->nlh->nlmsg_flags & NLM_F_EXCL))
return -EEXIST;
nlflags &= ~NLM_F_EXCL;
if (replace) {
if (rt_can_ecmp == rt6_qualify_for_ecmp(iter)) {
found++;
break;
}
if (rt_can_ecmp)
fallback_ins = fallback_ins ?: ins;
goto next_iter;
}
if (rt6_duplicate_nexthop(iter, rt)) {
if (rt->fib6_nsiblings)
rt->fib6_nsiblings = 0;
if (!(iter->fib6_flags & RTF_EXPIRES))
return -EEXIST;
if (!(rt->fib6_flags & RTF_EXPIRES))
fib6_clean_expires(iter);
else
fib6_set_expires(iter, rt->expires);
if (rt->fib6_pmtu)
fib6_metric_set(iter, RTAX_MTU,
rt->fib6_pmtu);
return -EEXIST;
}
/* If we have the same destination and the same metric,
* but not the same gateway, then the route we try to
* add is sibling to this route, increment our counter
* of siblings, and later we will add our route to the
* list.
* Only static routes (which don't have flag
* RTF_EXPIRES) are used for ECMPv6.
*
		 * To avoid a long list, we only add siblings if the
		 * route has a gateway.
*/
if (rt_can_ecmp &&
rt6_qualify_for_ecmp(iter))
rt->fib6_nsiblings++;
}
if (iter->fib6_metric > rt->fib6_metric)
break;
next_iter:
ins = &iter->fib6_next;
}
if (fallback_ins && !found) {
/* No ECMP-able route found, replace first non-ECMP one */
ins = fallback_ins;
iter = rcu_dereference_protected(*ins,
lockdep_is_held(&rt->fib6_table->tb6_lock));
found++;
}
/* Reset round-robin state, if necessary */
if (ins == &fn->leaf)
fn->rr_ptr = NULL;
/* Link this route to others same route. */
if (rt->fib6_nsiblings) {
unsigned int fib6_nsiblings;
struct fib6_info *sibling, *temp_sibling;
/* Find the first route that have the same metric */
sibling = leaf;
while (sibling) {
if (sibling->fib6_metric == rt->fib6_metric &&
rt6_qualify_for_ecmp(sibling)) {
list_add_tail(&rt->fib6_siblings,
&sibling->fib6_siblings);
break;
}
sibling = rcu_dereference_protected(sibling->fib6_next,
lockdep_is_held(&rt->fib6_table->tb6_lock));
}
/* For each sibling in the list, increment the counter of
		 * siblings. BUG() if the counters do not match; the list of
		 * siblings is broken!
*/
fib6_nsiblings = 0;
list_for_each_entry_safe(sibling, temp_sibling,
&rt->fib6_siblings, fib6_siblings) {
sibling->fib6_nsiblings++;
BUG_ON(sibling->fib6_nsiblings != rt->fib6_nsiblings);
fib6_nsiblings++;
}
BUG_ON(fib6_nsiblings != rt->fib6_nsiblings);
rt6_multipath_rebalance(temp_sibling);
}
/*
* insert node
*/
if (!replace) {
if (!add)
pr_warn("NLM_F_CREATE should be set when creating new route\n");
add:
nlflags |= NLM_F_CREATE;
if (!info->skip_notify_kernel) {
err = call_fib6_entry_notifiers(info->nl_net,
FIB_EVENT_ENTRY_ADD,
rt, extack);
if (err) {
struct fib6_info *sibling, *next_sibling;
/* If the route has siblings, then it first
* needs to be unlinked from them.
*/
if (!rt->fib6_nsiblings)
return err;
list_for_each_entry_safe(sibling, next_sibling,
&rt->fib6_siblings,
fib6_siblings)
sibling->fib6_nsiblings--;
rt->fib6_nsiblings = 0;
list_del_init(&rt->fib6_siblings);
rt6_multipath_rebalance(next_sibling);
return err;
}
}
rcu_assign_pointer(rt->fib6_next, iter);
fib6_info_hold(rt);
rcu_assign_pointer(rt->fib6_node, fn);
rcu_assign_pointer(*ins, rt);
if (!info->skip_notify)
inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
info->nl_net->ipv6.rt6_stats->fib_rt_entries++;
if (!(fn->fn_flags & RTN_RTINFO)) {
info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
fn->fn_flags |= RTN_RTINFO;
}
} else {
int nsiblings;
if (!found) {
if (add)
goto add;
pr_warn("NLM_F_REPLACE set, but no existing node found!\n");
return -ENOENT;
}
if (!info->skip_notify_kernel) {
err = call_fib6_entry_notifiers(info->nl_net,
FIB_EVENT_ENTRY_REPLACE,
rt, extack);
if (err)
return err;
}
fib6_info_hold(rt);
rcu_assign_pointer(rt->fib6_node, fn);
rt->fib6_next = iter->fib6_next;
rcu_assign_pointer(*ins, rt);
if (!info->skip_notify)
inet6_rt_notify(RTM_NEWROUTE, rt, info, NLM_F_REPLACE);
if (!(fn->fn_flags & RTN_RTINFO)) {
info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
fn->fn_flags |= RTN_RTINFO;
}
nsiblings = iter->fib6_nsiblings;
iter->fib6_node = NULL;
fib6_purge_rt(iter, fn, info->nl_net);
if (rcu_access_pointer(fn->rr_ptr) == iter)
fn->rr_ptr = NULL;
fib6_info_release(iter);
if (nsiblings) {
/* Replacing an ECMP route, remove all siblings */
ins = &rt->fib6_next;
iter = rcu_dereference_protected(*ins,
lockdep_is_held(&rt->fib6_table->tb6_lock));
while (iter) {
if (iter->fib6_metric > rt->fib6_metric)
break;
if (rt6_qualify_for_ecmp(iter)) {
*ins = iter->fib6_next;
iter->fib6_node = NULL;
fib6_purge_rt(iter, fn, info->nl_net);
if (rcu_access_pointer(fn->rr_ptr) == iter)
fn->rr_ptr = NULL;
fib6_info_release(iter);
nsiblings--;
info->nl_net->ipv6.rt6_stats->fib_rt_entries--;
} else {
ins = &iter->fib6_next;
}
iter = rcu_dereference_protected(*ins,
lockdep_is_held(&rt->fib6_table->tb6_lock));
}
WARN_ON(nsiblings != 0);
}
}
return 0;
}
| 0 |
[
"CWE-755"
] |
linux
|
7b09c2d052db4b4ad0b27b97918b46a7746966fa
| 204,390,771,035,314,450,000,000,000,000,000,000,000 | 242 |
ipv6: fix a typo in fib6_rule_lookup()
Yi Ren reported an issue discovered by syzkaller, and bisected
to the cited commit.
Many thanks to Yi, this trivial patch does not reflect the patient
work that has been done.
Fixes: d64a1f574a29 ("ipv6: honor RT6_LOOKUP_F_DST_NOREF in rule lookup logic")
Signed-off-by: Eric Dumazet <[email protected]>
Acked-by: Wei Wang <[email protected]>
Bisected-and-reported-by: Yi Ren <[email protected]>
Signed-off-by: Jakub Kicinski <[email protected]>
|
static void __mark_reg_const_zero(struct bpf_reg_state *reg)
{
__mark_reg_known(reg, 0);
reg->type = SCALAR_VALUE;
}
| 0 |
[
"CWE-703",
"CWE-189"
] |
linux
|
979d63d50c0c0f7bc537bf821e056cc9fe5abd38
| 256,332,150,748,866,500,000,000,000,000,000,000,000 | 5 |
bpf: prevent out of bounds speculation on pointer arithmetic
Jann reported that the original commit back in b2157399cc98
("bpf: prevent out-of-bounds speculation") was not sufficient
to stop CPU from speculating out of bounds memory access:
While b2157399cc98 only focussed on masking array map access
for unprivileged users for tail calls and data access such
that the user provided index gets sanitized from BPF program
and syscall side, there is still a more generic form affected
from BPF programs that applies to most maps that hold user
data in relation to dynamic map access when dealing with
unknown scalars or "slow" known scalars as access offset, for
example:
- Load a map value pointer into R6
- Load an index into R7
- Do a slow computation (e.g. with a memory dependency) that
loads a limit into R8 (e.g. load the limit from a map for
high latency, then mask it to make the verifier happy)
- Exit if R7 >= R8 (mispredicted branch)
- Load R0 = R6[R7]
- Load R0 = R6[R0]
For unknown scalars there are two options in the BPF verifier
where we could derive knowledge from in order to guarantee
safe access to the memory: i) while the </>/<=/>= variants won't
allow deriving any lower or upper bounds from the unknown
scalar where it would be safe to add it to the map value
pointer, it is possible through ==/!= test however. ii) another
option is to transform the unknown scalar into a known scalar,
for example, through ALU ops combination such as R &= <imm>
followed by R |= <imm> or any similar combination where the
original information from the unknown scalar would be destroyed
entirely leaving R with a constant. The initial slow load still
precedes the latter ALU ops on that register, so the CPU
executes speculatively from that point. Once we have the known
scalar, any compare operation would work then. A third option
only involving registers with known scalars could be crafted
as described in [0] where a CPU port (e.g. Slow Int unit)
would be filled with many dependent computations such that
the subsequent condition depending on its outcome has to wait
for evaluation on its execution port and thereby executing
speculatively if the speculated code can be scheduled on a
different execution port, or any other form of mistraining
as described in [1], for example. Given this is not limited
to only unknown scalars, not only map but also stack access
is affected since both is accessible for unprivileged users
and could potentially be used for out of bounds access under
speculation.
In order to prevent any of these cases, the verifier is now
sanitizing pointer arithmetic on the offset such that any
out of bounds speculation would be masked in a way where the
pointer arithmetic result in the destination register will
stay unchanged, meaning offset masked into zero similar as
in array_index_nospec() case. With regards to implementation,
there are three options that were considered: i) new insn
for sanitation, ii) push/pop insn and sanitation as inlined
BPF, iii) reuse of ax register and sanitation as inlined BPF.
Option i) has the downside that we end up using from reserved
bits in the opcode space, but also that we would require
each JIT to emit masking as native arch opcodes meaning
mitigation would have slow adoption till everyone implements
it eventually which is counter-productive. Option ii) and iii)
have both in common that a temporary register is needed in
order to implement the sanitation as inlined BPF since we
are not allowed to modify the source register. While a push /
pop insn in ii) would be useful to have in any case, it
requires once again that every JIT needs to implement it
first. While possible, the amount of change needed would also
be unsuitable for a -stable patch. Therefore, the path which
has fewer changes, less BPF instructions for the mitigation
and does not require anything to be changed in the JITs is
option iii) which this work is pursuing. The ax register is
already mapped to a register in all JITs (modulo arm32 where
it's mapped to stack as various other BPF registers there)
and used in constant blinding for JITs-only so far. It can
be reused for verifier rewrites under certain constraints.
The interpreter's tmp "register" has therefore been remapped
into extending the register set with hidden ax register and
reusing that for a number of instructions that needed the
prior temporary variable internally (e.g. div, mod). This
allows for zero increase in stack space usage in the interpreter,
and enables (restricted) generic use in rewrites otherwise as
long as such a patchlet does not make use of these instructions.
The sanitation mask is dynamic and relative to the offset the
map value or stack pointer currently holds.
There are various cases that need to be taken under consideration
for the masking, e.g. such operation could look as follows:
ptr += val or val += ptr or ptr -= val. Thus, the value to be
sanitized could reside either in source or in destination
register, and the limit is different depending on whether
the ALU op is addition or subtraction and depending on the
current known and bounded offset. The limit is derived as
follows: limit := max_value_size - (smin_value + off). For
subtraction: limit := umax_value + off. This holds because
we do not allow any pointer arithmetic that would
temporarily go out of bounds or would have an unknown
value with mixed signed bounds where it is unclear at
verification time whether the actual runtime value would
be either negative or positive. For example, we have a
derived map pointer value with constant offset and bounded
one, so limit based on smin_value works because the verifier
requires that statically analyzed arithmetic on the pointer
must be in bounds, and thus it checks if resulting
smin_value + off and umax_value + off is still within map
value bounds at time of arithmetic in addition to time of
access. Similarly, for the case of stack access we derive
the limit as follows: MAX_BPF_STACK + off for subtraction
and -off for the case of addition where off := ptr_reg->off +
ptr_reg->var_off.value. Subtraction is a special case for
the masking which can be in form of ptr += -val, ptr -= -val,
or ptr -= val. In the first two cases where we know that
the value is negative, we need to temporarily negate the
value in order to do the sanitation on a positive value
where we later swap the ALU op, and restore original source
register if the value was in source.
The sanitation of pointer arithmetic alone is still not fully
sufficient as is, since a scenario like the following could
happen ...
PTR += 0x1000 (e.g. K-based imm)
PTR -= BIG_NUMBER_WITH_SLOW_COMPARISON
PTR += 0x1000
PTR -= BIG_NUMBER_WITH_SLOW_COMPARISON
[...]
... which under speculation could end up as ...
PTR += 0x1000
PTR -= 0 [ truncated by mitigation ]
PTR += 0x1000
PTR -= 0 [ truncated by mitigation ]
[...]
... and therefore still access out of bounds. To prevent such
case, the verifier is also analyzing safety for potential out
of bounds access under speculative execution. Meaning, it is
also simulating pointer access under truncation. We therefore
"branch off" and push the current verification state after the
ALU operation with known 0 to the verification stack for later
analysis. Given the current path analysis succeeded it is
likely that the one under speculation can be pruned. In any
case, it is also subject to existing complexity limits and
therefore anything beyond this point will be rejected. In
terms of pruning, it needs to be ensured that the verification
state from speculative execution simulation must never prune
a non-speculative execution path, therefore, we mark verifier
state accordingly at the time of push_stack(). If verifier
detects out of bounds access under speculative execution from
one of the possible paths that includes a truncation, it will
reject such program.
Given we mask every reg-based pointer arithmetic for
unprivileged programs, we've been looking into how it could
affect real-world programs in terms of size increase. As the
majority of programs are targeted for privileged-only use
case, we've unconditionally enabled masking (with its alu
restrictions on top of it) for privileged programs for the
sake of testing in order to check i) whether they get rejected
in its current form, and ii) by how much the number of
instructions and size will increase. We've tested this by
using Katran, Cilium and test_l4lb from the kernel selftests.
For Katran we've evaluated balancer_kern.o, Cilium bpf_lxc.o
and an older test object bpf_lxc_opt_-DUNKNOWN.o and l4lb
we've used test_l4lb.o as well as test_l4lb_noinline.o. We
found that none of the programs got rejected by the verifier
with this change, and that impact is rather minimal to none.
balancer_kern.o had 13,904 bytes (1,738 insns) xlated and
7,797 bytes JITed before and after the change. Most complex
program in bpf_lxc.o had 30,544 bytes (3,817 insns) xlated
and 18,538 bytes JITed before and after and none of the other
tail call programs in bpf_lxc.o had any changes either. For
the older bpf_lxc_opt_-DUNKNOWN.o object we found a small
increase from 20,616 bytes (2,576 insns) and 12,536 bytes JITed
before to 20,664 bytes (2,582 insns) and 12,558 bytes JITed
after the change. Other programs from that object file had
similar small increase. Both test_l4lb.o had no change and
remained at 6,544 bytes (817 insns) xlated and 3,401 bytes
JITed and for test_l4lb_noinline.o constant at 5,080 bytes
(634 insns) xlated and 3,313 bytes JITed. This can be explained
in that LLVM typically optimizes stack based pointer arithmetic
by using K-based operations and that use of dynamic map access
is not overly frequent. However, in future we may decide to
optimize the algorithm further under known guarantees from
branch and value speculation. Latter seems also unclear in
terms of prediction heuristics that today's CPUs apply as well
as whether there could be collisions in e.g. the predictor's
Value History/Pattern Table for triggering out of bounds access,
thus masking is performed unconditionally at this point but could
be subject to relaxation later on. We were generally also
brainstorming various other approaches for mitigation, but the
blocker was always lack of available registers at runtime and/or
overhead for runtime tracking of limits belonging to a specific
pointer. Thus, we found this to be minimally intrusive under
given constraints.
With that in place, a simple example with sanitized access on
unprivileged load at post-verification time looks as follows:
# bpftool prog dump xlated id 282
[...]
28: (79) r1 = *(u64 *)(r7 +0)
29: (79) r2 = *(u64 *)(r7 +8)
30: (57) r1 &= 15
31: (79) r3 = *(u64 *)(r0 +4608)
32: (57) r3 &= 1
33: (47) r3 |= 1
34: (2d) if r2 > r3 goto pc+19
35: (b4) (u32) r11 = (u32) 20479 |
36: (1f) r11 -= r2 | Dynamic sanitation for pointer
37: (4f) r11 |= r2 | arithmetic with registers
38: (87) r11 = -r11 | containing bounded or known
39: (c7) r11 s>>= 63 | scalars in order to prevent
40: (5f) r11 &= r2 | out of bounds speculation.
41: (0f) r4 += r11 |
42: (71) r4 = *(u8 *)(r4 +0)
43: (6f) r4 <<= r1
[...]
For the case where the scalar sits in the destination register
as opposed to the source register, the following code is emitted
for the above example:
[...]
16: (b4) (u32) r11 = (u32) 20479
17: (1f) r11 -= r2
18: (4f) r11 |= r2
19: (87) r11 = -r11
20: (c7) r11 s>>= 63
21: (5f) r2 &= r11
22: (0f) r2 += r0
23: (61) r0 = *(u32 *)(r2 +0)
[...]
JIT blinding example with non-conflicting use of r10:
[...]
d5: je 0x0000000000000106 _
d7: mov 0x0(%rax),%edi |
da: mov $0xf153246,%r10d | Index load from map value and
e0: xor $0xf153259,%r10 | (const blinded) mask with 0x1f.
e7: and %r10,%rdi |_
ea: mov $0x2f,%r10d |
f0: sub %rdi,%r10 | Sanitized addition. Both use r10
f3: or %rdi,%r10 | but do not interfere with each
f6: neg %r10 | other. (Neither do these instructions
f9: sar $0x3f,%r10 | interfere with the use of ax as temp
fd: and %r10,%rdi | in interpreter.)
100: add %rax,%rdi |_
103: mov 0x0(%rdi),%eax
[...]
Tested that it fixes Jann's reproducer, and also checked that test_verifier
and test_progs suite with interpreter, JIT and JIT with hardening enabled
on x86-64 and arm64 runs successfully.
[0] Speculose: Analyzing the Security Implications of Speculative
Execution in CPUs, Giorgi Maisuradze and Christian Rossow,
https://arxiv.org/pdf/1801.04084.pdf
[1] A Systematic Evaluation of Transient Execution Attacks and
Defenses, Claudio Canella, Jo Van Bulck, Michael Schwarz,
Moritz Lipp, Benjamin von Berg, Philipp Ortner, Frank Piessens,
Dmitry Evtyushkin, Daniel Gruss,
https://arxiv.org/pdf/1811.05441.pdf
Fixes: b2157399cc98 ("bpf: prevent out-of-bounds speculation")
Reported-by: Jann Horn <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
Acked-by: Alexei Starovoitov <[email protected]>
Signed-off-by: Alexei Starovoitov <[email protected]>
|
static int MOD_EXP_CTIME_COPY_TO_PREBUF(const BIGNUM *b, int top,
unsigned char *buf, int idx,
int width)
{
size_t i, j;
if (top > b->top)
top = b->top; /* this works because 'buf' is explicitly
* zeroed */
for (i = 0, j = idx; i < top * sizeof b->d[0]; i++, j += width) {
buf[j] = ((unsigned char *)b->d)[i];
}
return 1;
}
| 1 |
[
"CWE-200"
] |
openssl
|
d6482a82bc2228327aa4ba98aeeecd9979542a31
| 222,061,515,823,214,750,000,000,000,000,000,000,000 | 15 |
bn/bn_exp.c: constant-time MOD_EXP_CTIME_COPY_FROM_PREBUF.
Performance penalty varies from platform to platform, and even by
key length.
CVE-2016-0702
Reviewed-by: Richard Levitte <[email protected]>
Reviewed-by: Rich Salz <[email protected]>
|
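The corresponding constant-time read (MOD_EXP_CTIME_COPY_FROM_PREBUF, the function the CVE fix hardens) can be sketched as a masked gather that touches every row regardless of the secret index, so the cache-access pattern leaks nothing. Simplified here to byte buffers; not OpenSSL's exact code.

#include <stddef.h>

static void ct_select(unsigned char *out, const unsigned char *table,
                      size_t width, size_t rows, size_t secret_idx)
{
    size_t i, j;

    for (j = 0; j < width; j++)
        out[j] = 0;
    for (i = 0; i < rows; i++) {
        /* all-ones when i == secret_idx, all-zeros otherwise */
        unsigned char mask =
            (unsigned char)(0 - (unsigned char)(i == secret_idx));

        for (j = 0; j < width; j++)
            out[j] |= table[i * width + j] & mask;
    }
}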
int BN_mod_exp_mont_word(BIGNUM *rr, BN_ULONG a, const BIGNUM *p,
const BIGNUM *m, BN_CTX *ctx, BN_MONT_CTX *in_mont)
{
BN_MONT_CTX *mont = NULL;
int b, bits, ret = 0;
int r_is_one;
BN_ULONG w, next_w;
BIGNUM *d, *r, *t;
BIGNUM *swap_tmp;
#define BN_MOD_MUL_WORD(r, w, m) \
(BN_mul_word(r, (w)) && \
(/* BN_ucmp(r, (m)) < 0 ? 1 :*/ \
(BN_mod(t, r, m, ctx) && (swap_tmp = r, r = t, t = swap_tmp, 1))))
/*
* BN_MOD_MUL_WORD is only used with 'w' large, so the BN_ucmp test is
* probably more overhead than always using BN_mod (which uses BN_copy if
* a similar test returns true).
*/
/*
* We can use BN_mod and do not need BN_nnmod because our accumulator is
* never negative (the result of BN_mod does not depend on the sign of
* the modulus).
*/
#define BN_TO_MONTGOMERY_WORD(r, w, mont) \
(BN_set_word(r, (w)) && BN_to_montgomery(r, r, (mont), ctx))
if (BN_get_flags(p, BN_FLG_CONSTTIME) != 0) {
/* BN_FLG_CONSTTIME only supported by BN_mod_exp_mont() */
BNerr(BN_F_BN_MOD_EXP_MONT_WORD, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
return -1;
}
bn_check_top(p);
bn_check_top(m);
if (!BN_is_odd(m)) {
BNerr(BN_F_BN_MOD_EXP_MONT_WORD, BN_R_CALLED_WITH_EVEN_MODULUS);
return (0);
}
if (m->top == 1)
a %= m->d[0]; /* make sure that 'a' is reduced */
bits = BN_num_bits(p);
if (bits == 0) {
/* x**0 mod 1 is still zero. */
if (BN_is_one(m)) {
ret = 1;
BN_zero(rr);
} else {
ret = BN_one(rr);
}
return ret;
}
if (a == 0) {
BN_zero(rr);
ret = 1;
return ret;
}
BN_CTX_start(ctx);
d = BN_CTX_get(ctx);
r = BN_CTX_get(ctx);
t = BN_CTX_get(ctx);
if (d == NULL || r == NULL || t == NULL)
goto err;
if (in_mont != NULL)
mont = in_mont;
else {
if ((mont = BN_MONT_CTX_new()) == NULL)
goto err;
if (!BN_MONT_CTX_set(mont, m, ctx))
goto err;
}
r_is_one = 1; /* except for Montgomery factor */
/* bits-1 >= 0 */
/* The result is accumulated in the product r*w. */
w = a; /* bit 'bits-1' of 'p' is always set */
for (b = bits - 2; b >= 0; b--) {
/* First, square r*w. */
next_w = w * w;
if ((next_w / w) != w) { /* overflow */
if (r_is_one) {
if (!BN_TO_MONTGOMERY_WORD(r, w, mont))
goto err;
r_is_one = 0;
} else {
if (!BN_MOD_MUL_WORD(r, w, m))
goto err;
}
next_w = 1;
}
w = next_w;
if (!r_is_one) {
if (!BN_mod_mul_montgomery(r, r, r, mont, ctx))
goto err;
}
/* Second, multiply r*w by 'a' if exponent bit is set. */
if (BN_is_bit_set(p, b)) {
next_w = w * a;
if ((next_w / a) != w) { /* overflow */
if (r_is_one) {
if (!BN_TO_MONTGOMERY_WORD(r, w, mont))
goto err;
r_is_one = 0;
} else {
if (!BN_MOD_MUL_WORD(r, w, m))
goto err;
}
next_w = a;
}
w = next_w;
}
}
/* Finally, set r:=r*w. */
if (w != 1) {
if (r_is_one) {
if (!BN_TO_MONTGOMERY_WORD(r, w, mont))
goto err;
r_is_one = 0;
} else {
if (!BN_MOD_MUL_WORD(r, w, m))
goto err;
}
}
if (r_is_one) { /* can happen only if a == 1 */
if (!BN_one(rr))
goto err;
} else {
if (!BN_from_montgomery(rr, r, mont, ctx))
goto err;
}
ret = 1;
err:
if (in_mont == NULL)
BN_MONT_CTX_free(mont);
BN_CTX_end(ctx);
bn_check_top(rr);
return (ret);
}
| 0 |
[
"CWE-200"
] |
openssl
|
d6482a82bc2228327aa4ba98aeeecd9979542a31
| 138,341,720,810,584,420,000,000,000,000,000,000,000 | 146 |
bn/bn_exp.c: constant-time MOD_EXP_CTIME_COPY_FROM_PREBUF.
Performance penalty varies from platform to platform, and even by
key length.
CVE-2016-0702
Reviewed-by: Richard Levitte <[email protected]>
Reviewed-by: Rich Salz <[email protected]>
|
static int parseValuesReturnFilter (
Operation *op,
SlapReply *rs,
LDAPControl *ctrl )
{
BerElement *ber;
struct berval fstr = BER_BVNULL;
if ( op->o_valuesreturnfilter != SLAP_CONTROL_NONE ) {
rs->sr_text = "valuesReturnFilter control specified multiple times";
return LDAP_PROTOCOL_ERROR;
}
if ( BER_BVISNULL( &ctrl->ldctl_value )) {
rs->sr_text = "valuesReturnFilter control value is absent";
return LDAP_PROTOCOL_ERROR;
}
if ( BER_BVISEMPTY( &ctrl->ldctl_value )) {
rs->sr_text = "valuesReturnFilter control value is empty";
return LDAP_PROTOCOL_ERROR;
}
ber = ber_init( &(ctrl->ldctl_value) );
if (ber == NULL) {
rs->sr_text = "internal error";
return LDAP_OTHER;
}
rs->sr_err = get_vrFilter( op, ber,
(ValuesReturnFilter **)&(op->o_vrFilter), &rs->sr_text);
(void) ber_free( ber, 1 );
if( rs->sr_err != LDAP_SUCCESS ) {
if( rs->sr_err == SLAPD_DISCONNECT ) {
rs->sr_err = LDAP_PROTOCOL_ERROR;
send_ldap_disconnect( op, rs );
rs->sr_err = SLAPD_DISCONNECT;
} else {
send_ldap_result( op, rs );
}
if( op->o_vrFilter != NULL) vrFilter_free( op, op->o_vrFilter );
}
#ifdef LDAP_DEBUG
else {
vrFilter2bv( op, op->o_vrFilter, &fstr );
}
Debug( LDAP_DEBUG_ARGS, " vrFilter: %s\n",
fstr.bv_len ? fstr.bv_val : "empty", 0, 0 );
op->o_tmpfree( fstr.bv_val, op->o_tmpmemctx );
#endif
op->o_valuesreturnfilter = ctrl->ldctl_iscritical
? SLAP_CONTROL_CRITICAL
: SLAP_CONTROL_NONCRITICAL;
rs->sr_err = LDAP_SUCCESS;
return LDAP_SUCCESS;
}
| 1 |
[
"CWE-125"
] |
openldap
|
21981053a1195ae1555e23df4d9ac68d34ede9dd
| 309,722,197,629,018,400,000,000,000,000,000,000,000 | 61 |
ITS#9408 fix vrfilter double-free
|
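A generic illustration of the guard pattern relevant to a double-free like this one: clear the owning pointer immediately after freeing, so a second cleanup pass in an error path becomes a harmless no-op.

#include <stdlib.h>

static void free_once(void **slot)
{
    if (slot != NULL && *slot != NULL) {
        free(*slot);
        *slot = NULL; /* later frees see NULL and do nothing */
    }
}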
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
OpContext op_context(context, node);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), op_context.params->num_splits);
auto input_type = op_context.input->type;
TF_LITE_ENSURE(context,
input_type == kTfLiteFloat32 || input_type == kTfLiteUInt8 ||
input_type == kTfLiteInt16 || input_type == kTfLiteInt32 ||
input_type == kTfLiteInt64 || input_type == kTfLiteInt8);
for (int i = 0; i < NumOutputs(node); ++i) {
TfLiteTensor* tensor;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, i, &tensor));
tensor->type = input_type;
}
auto size_splits = op_context.size_splits;
TF_LITE_ENSURE_EQ(context, NumDimensions(size_splits), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), NumElements(size_splits));
// If we know the contents of the 'size_splits' tensor and the 'axis' tensor,
// resize all outputs. Otherwise, wait until Eval().
if (IsConstantTensor(op_context.size_splits) &&
IsConstantTensor(op_context.axis)) {
return ResizeOutputTensors(context, node, op_context.input,
op_context.size_splits, op_context.axis);
} else {
return UseDynamicOutputTensors(context, node);
}
}
| 0 |
[
"CWE-125",
"CWE-787"
] |
tensorflow
|
1970c2158b1ffa416d159d03c3370b9a462aee35
| 74,770,696,799,300,370,000,000,000,000,000,000,000 | 32 |
[tflite]: Insert `nullptr` checks when obtaining tensors.
As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages.
We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`).
PiperOrigin-RevId: 332521299
Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
|
static int ssl_get_server_cert_index(const SSL *s)
{
int idx;
idx = ssl_cipher_get_cert_index(s->s3->tmp.new_cipher);
if (idx == SSL_PKEY_RSA_ENC && !s->cert->pkeys[SSL_PKEY_RSA_ENC].x509)
idx = SSL_PKEY_RSA_SIGN;
if (idx == -1)
SSLerr(SSL_F_SSL_GET_SERVER_CERT_INDEX,ERR_R_INTERNAL_ERROR);
return idx;
}
| 0 |
[
"CWE-310"
] |
openssl
|
cf6da05304d554aaa885151451aa4ecaa977e601
| 177,281,569,785,983,600,000,000,000,000,000,000,000 | 10 |
Support TLS_FALLBACK_SCSV.
Reviewed-by: Stephen Henson <[email protected]>
|
void QPaintEngineEx::fillRect(const QRectF &r, const QBrush &brush)
{
qreal pts[] = { r.x(), r.y(), r.x() + r.width(), r.y(),
r.x() + r.width(), r.y() + r.height(), r.x(), r.y() + r.height() };
QVectorPath vp(pts, 4, nullptr, QVectorPath::RectangleHint);
fill(vp, brush);
}
| 0 |
[
"CWE-787"
] |
qtbase
|
6b400e3147dcfd8cc3a393ace1bd118c93762e0c
| 99,561,142,121,467,480,000,000,000,000,000,000,000 | 7 |
Improve fix for avoiding huge number of tiny dashes
Some pathological cases were not caught by the previous fix.
Fixes: QTBUG-95239
Pick-to: 6.2 6.1 5.15
Change-Id: I0337ee3923ff93ccb36c4d7b810a9c0667354cc5
Reviewed-by: Robert Löhning <[email protected]>
|
void qemu_chr_fe_deinit(CharBackend *b)
{
assert(b);
if (b->chr) {
qemu_chr_fe_set_handlers(b, NULL, NULL, NULL, NULL, NULL);
b->chr->avail_connections++;
b->chr->be = NULL;
if (b->chr->is_mux) {
MuxDriver *d = b->chr->opaque;
d->backends[b->tag] = NULL;
}
b->chr = NULL;
}
}
| 0 |
[
"CWE-416"
] |
qemu
|
a4afa548fc6dd9842ed86639b4d37d4d1c4ad480
| 44,607,584,300,603,490,000,000,000,000,000,000,000 | 15 |
char: move front end handlers in CharBackend
Since the handlers are associated with a CharBackend, rather than the
CharDriverState, it is more appropriate to store in CharBackend. This
avoids the handler copy dance in qemu_chr_fe_set_handlers() then
mux_chr_update_read_handler(), by storing the CharBackend pointer
directly.
Also a mux CharDriver should go through mux->backends[focused], since
chr->be will stay NULL. Before that, it was possible to call
chr->handler by mistake with surprising results, for example through
qemu_chr_be_can_write(), which would result in calling the last set
handler front end, not the one with focus.
Signed-off-by: Marc-André Lureau <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
tor_version_is_obsolete(const char *myversion, const char *versionlist)
{
tor_version_t mine, other;
int found_newer = 0, found_older = 0, found_newer_in_series = 0,
found_any_in_series = 0, r, same;
version_status_t ret = VS_UNRECOMMENDED;
smartlist_t *version_sl;
log_debug(LD_CONFIG,"Checking whether version '%s' is in '%s'",
myversion, versionlist);
if (tor_version_parse(myversion, &mine)) {
log_err(LD_BUG,"I couldn't parse my own version (%s)", myversion);
tor_assert(0);
}
version_sl = smartlist_create();
smartlist_split_string(version_sl, versionlist, ",", SPLIT_SKIP_SPACE, 0);
if (!strlen(versionlist)) { /* no authorities cared or agreed */
ret = VS_EMPTY;
goto done;
}
SMARTLIST_FOREACH(version_sl, const char *, cp, {
if (!strcmpstart(cp, "Tor "))
cp += 4;
if (tor_version_parse(cp, &other)) {
/* Couldn't parse other; it can't be a match. */
} else {
same = tor_version_same_series(&mine, &other);
if (same)
found_any_in_series = 1;
r = tor_version_compare(&mine, &other);
if (r==0) {
ret = VS_RECOMMENDED;
goto done;
} else if (r<0) {
found_newer = 1;
if (same)
found_newer_in_series = 1;
} else if (r>0) {
found_older = 1;
}
}
});
/* We didn't find the listed version. Is it new or old? */
if (found_any_in_series && !found_newer_in_series && found_newer) {
ret = VS_NEW_IN_SERIES;
} else if (found_newer && !found_older) {
ret = VS_OLD;
} else if (found_older && !found_newer) {
ret = VS_NEW;
} else {
ret = VS_UNRECOMMENDED;
}
done:
SMARTLIST_FOREACH(version_sl, char *, version, tor_free(version));
smartlist_free(version_sl);
return ret;
}
| 0 |
[
"CWE-399"
] |
tor
|
57e35ad3d91724882c345ac709666a551a977f0f
| 155,499,396,766,818,820,000,000,000,000,000,000,000 | 63 |
Avoid possible segfault when handling networkstatus vote with bad flavor
Fix for 6530; fix on 0.2.2.6-alpha.
|
void WebContents::Focus() {
// Focusing on WebContents does not automatically focus the window on macOS
// and Linux, do it manually to match the behavior on Windows.
#if defined(OS_MAC) || defined(OS_LINUX)
if (owner_window())
owner_window()->Focus(true);
#endif
web_contents()->Focus();
}
| 0 |
[
"CWE-200",
"CWE-668"
] |
electron
|
07a1c2a3e5845901f7e2eda9506695be58edc73c
| 68,005,801,298,712,770,000,000,000,000,000,000,000 | 9 |
fix: restrict sendToFrame to same-process frames by default (#26875)
|
virtual Item** addr(uint i) { return 0; }
| 0 |
[] |
mysql-server
|
f7316aa0c9a3909fc7498e7b95d5d3af044a7e21
| 157,888,721,586,680,510,000,000,000,000,000,000,000 | 1 |
Bug#26361149 MYSQL SERVER CRASHES AT: COL IN(IFNULL(CONST,
COL), NAME_CONST('NAME', NULL))
Backport of Bug#19143243 fix.
NAME_CONST item can return NULL_ITEM type in case of incorrect arguments.
NULL_ITEM has special processing in Item_func_in function.
In Item_func_in::fix_length_and_dec an array of possible comparators is
created. Since NAME_CONST function has NULL_ITEM type, corresponding
array element is empty. Then NAME_CONST is wrapped to ITEM_CACHE.
ITEM_CACHE can not return proper type(NULL_ITEM) in Item_func_in::val_int(),
so the NULL_ITEM is attempted compared with an empty comparator.
The fix is to disable the caching of Item_name_const item.
|
GF_Err mp4s_box_read(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_MPEGSampleEntryBox *ptr = (GF_MPEGSampleEntryBox *)s;
e = gf_isom_base_sample_entry_read((GF_SampleEntryBox *)ptr, bs);
if (e) return e;
ISOM_DECREASE_SIZE(ptr, 8);
return gf_isom_box_array_read(s, bs);
}
| 0 |
[
"CWE-476",
"CWE-787"
] |
gpac
|
b8f8b202d4fc23eb0ab4ce71ae96536ca6f5d3f8
| 18,649,896,409,994,287,000,000,000,000,000,000,000 | 11 |
fixed #1757
|
void X509_email_free(STACK_OF(OPENSSL_STRING) *sk)
{
sk_OPENSSL_STRING_pop_free(sk, str_free);
}
| 0 |
[
"CWE-125"
] |
openssl
|
bb4d2ed4091408404e18b3326e3df67848ef63d0
| 171,321,681,466,049,560,000,000,000,000,000,000,000 | 4 |
Fix append_ia5 function to not assume NUL terminated strings
ASN.1 strings may not be NUL terminated. Don't assume they are.
CVE-2021-3712
Reviewed-by: Viktor Dukhovni <[email protected]>
Reviewed-by: Paul Dale <[email protected]>
|
static int ext4_writepage(struct page *page,
struct writeback_control *wbc)
{
int ret = 0;
loff_t size;
unsigned int len;
struct buffer_head *page_bufs = NULL;
struct inode *inode = page->mapping->host;
struct ext4_io_submit io_submit;
bool keep_towrite = false;
trace_ext4_writepage(page);
size = i_size_read(inode);
if (page->index == size >> PAGE_CACHE_SHIFT)
len = size & ~PAGE_CACHE_MASK;
else
len = PAGE_CACHE_SIZE;
page_bufs = page_buffers(page);
/*
* We cannot do block allocation or other extent handling in this
* function. If there are buffers needing that, we have to redirty
* the page. But we may reach here when we do a journal commit via
* journal_submit_inode_data_buffers() and in that case we must write
* allocated buffers to achieve data=ordered mode guarantees.
*
* Also, if there is only one buffer per page (the fs block
* size == the page size), if one buffer needs block
* allocation or needs to modify the extent tree to clear the
* unwritten flag, we know that the page can't be written at
* all, so we might as well refuse the write immediately.
* Unfortunately if the block size != page size, we can't as
* easily detect this case using ext4_walk_page_buffers(), but
* for the extremely common case, this is an optimization that
* skips a useless round trip through ext4_bio_write_page().
*/
if (ext4_walk_page_buffers(NULL, page_bufs, 0, len, NULL,
ext4_bh_delay_or_unwritten)) {
redirty_page_for_writepage(wbc, page);
if ((current->flags & PF_MEMALLOC) ||
(inode->i_sb->s_blocksize == PAGE_CACHE_SIZE)) {
/*
* For memory cleaning there's no point in writing only
* some buffers. So just bail out. Warn if we came here
* from direct reclaim.
*/
WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD))
== PF_MEMALLOC);
unlock_page(page);
return 0;
}
keep_towrite = true;
}
if (PageChecked(page) && ext4_should_journal_data(inode))
/*
* It's mmapped pagecache. Add buffers and journal it. There
* doesn't seem much point in redirtying the page here.
*/
return __ext4_journalled_writepage(page, len);
ext4_io_submit_init(&io_submit, wbc);
io_submit.io_end = ext4_init_io_end(inode, GFP_NOFS);
if (!io_submit.io_end) {
redirty_page_for_writepage(wbc, page);
unlock_page(page);
return -ENOMEM;
}
ret = ext4_bio_write_page(&io_submit, page, len, wbc, keep_towrite);
ext4_io_submit(&io_submit);
/* Drop io_end reference we got from init */
ext4_put_io_end_defer(io_submit.io_end);
return ret;
}
| 0 |
[
"CWE-362"
] |
linux
|
ea3d7209ca01da209cda6f0dea8be9cc4b7a933b
| 149,602,611,694,067,370,000,000,000,000,000,000,000 | 74 |
ext4: fix races between page faults and hole punching
Currently, page faults and hole punching are completely unsynchronized.
This can result in page fault faulting in a page into a range that we
are punching after truncate_pagecache_range() has been called and thus
we can end up with a page mapped to disk blocks that will be shortly
freed. Filesystem corruption will shortly follow. Note that the same
race is avoided for truncate by checking page fault offset against
i_size but there isn't similar mechanism available for punching holes.
Fix the problem by creating new rw semaphore i_mmap_sem in inode and
grab it for writing over truncate, hole punching, and other functions
removing blocks from extent tree and for read over page faults. We
cannot easily use i_data_sem for this since that ranks below transaction
start and we need something ranking above it so that it can be held over
the whole truncate / hole punching operation. Also remove various
workarounds we had in the code to reduce race window when page fault
could have created pages with stale mapping information.
Signed-off-by: Jan Kara <[email protected]>
Signed-off-by: Theodore Ts'o <[email protected]>
|
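The ext4 fix above hinges on one locking rule: writers (truncate, hole punch) take a new rw semaphore exclusively while page faults take it shared. A userspace sketch of that discipline using pthread rwlocks (the struct and function names are invented; the real code uses i_mmap_sem and kernel rwsem primitives):

#include <pthread.h>

/* Illustrative inode carrying the new mapping lock. */
struct demo_inode {
    pthread_rwlock_t mmap_sem;
};

/* Hole punching holds the lock exclusively, so no fault can instantiate
 * a page in the range while its blocks are being freed. */
void punch_hole(struct demo_inode *inode)
{
    pthread_rwlock_wrlock(&inode->mmap_sem);
    /* truncate the page cache, then free the underlying blocks */
    pthread_rwlock_unlock(&inode->mmap_sem);
}

/* Page faults take it shared: many faults may run concurrently, but
 * never while a hole punch is in progress. */
void page_fault(struct demo_inode *inode)
{
    pthread_rwlock_rdlock(&inode->mmap_sem);
    /* map the page while the block mapping is guaranteed stable */
    pthread_rwlock_unlock(&inode->mmap_sem);
}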
static inline int wm_isupper(int c) {
return (c >= 'A' && c <= 'Z');
}
| 0 |
[
"CWE-200",
"CWE-119"
] |
wildmidi
|
814f31d8eceda8401eb812fc2e94ed143fdad0ab
| 12,419,229,928,650,938,000,000,000,000,000,000,000 | 3 |
wildmidi_lib.c (WildMidi_Open, WildMidi_OpenBuffer): refuse to proceed if less than 18 bytes of input
Fixes bug #178.
|

static MagickBooleanType DecodeLabImage(Image *image,ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
ssize_t
y;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register PixelPacket
*magick_restrict q;
register ssize_t
x;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
break;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
a,
b;
a=QuantumScale*GetPixela(q)+0.5;
if (a > 1.0)
a-=1.0;
b=QuantumScale*GetPixelb(q)+0.5;
if (b > 1.0)
b-=1.0;
SetPixela(q,QuantumRange*a);
SetPixelb(q,QuantumRange*b);
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
{
status=MagickFalse;
break;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
| 0 |
[
"CWE-125"
] |
ImageMagick6
|
d8d844c6f23f4d90d8fe893fe9225dd78fc1e6ef
| 276,701,743,348,616,400,000,000,000,000,000,000,000 | 52 |
https://github.com/ImageMagick/ImageMagick/issues/1532
|
static int chap_gen_challenge(
struct iscsi_conn *conn,
int caller,
char *c_str,
unsigned int *c_len)
{
int ret;
unsigned char challenge_asciihex[CHAP_CHALLENGE_LENGTH * 2 + 1];
struct iscsi_chap *chap = conn->auth_protocol;
memset(challenge_asciihex, 0, CHAP_CHALLENGE_LENGTH * 2 + 1);
ret = get_random_bytes_wait(chap->challenge, CHAP_CHALLENGE_LENGTH);
if (unlikely(ret))
return ret;
chap_binaryhex_to_asciihex(challenge_asciihex, chap->challenge,
CHAP_CHALLENGE_LENGTH);
/*
* Set CHAP_C, and copy the generated challenge into c_str.
*/
*c_len += sprintf(c_str + *c_len, "CHAP_C=0x%s", challenge_asciihex);
*c_len += 1;
pr_debug("[%s] Sending CHAP_C=0x%s\n\n", (caller) ? "server" : "client",
challenge_asciihex);
return 0;
}
| 1 |
[] |
linux
|
8c39e2699f8acb2e29782a834e56306da24937fe
| 62,660,565,309,547,400,000,000,000,000,000,000,000 | 27 |
scsi: target: iscsi: Use bin2hex instead of a re-implementation
Signed-off-by: Vincent Pelletier <[email protected]>
Reviewed-by: Mike Christie <[email protected]>
Signed-off-by: Martin K. Petersen <[email protected]>
|
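The replacement the commit message names, bin2hex(), does exactly what the open-coded chap_binaryhex_to_asciihex() above was re-implementing. A userspace sketch of that contract (illustrative, not the kernel source):

#include <stddef.h>

/* Expand src[0..len) into 2*len lowercase hex characters at dst.
 * The caller must provide room for 2*len bytes (plus a terminator if
 * one is wanted); this mirrors the contract of the kernel's bin2hex(). */
static char *demo_bin2hex(char *dst, const unsigned char *src, size_t len)
{
    static const char hex[] = "0123456789abcdef";
    for (size_t i = 0; i < len; i++) {
        *dst++ = hex[src[i] >> 4];
        *dst++ = hex[src[i] & 0x0f];
    }
    return dst;
}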
_shift_data_right_pages(struct page **pages, size_t pgto_base,
size_t pgfrom_base, size_t len)
{
struct page **pgfrom, **pgto;
char *vfrom, *vto;
size_t copy;
BUG_ON(pgto_base <= pgfrom_base);
if (!len)
return;
pgto_base += len;
pgfrom_base += len;
pgto = pages + (pgto_base >> PAGE_SHIFT);
pgfrom = pages + (pgfrom_base >> PAGE_SHIFT);
pgto_base &= ~PAGE_MASK;
pgfrom_base &= ~PAGE_MASK;
do {
/* Are any pointers crossing a page boundary? */
if (pgto_base == 0) {
pgto_base = PAGE_SIZE;
pgto--;
}
if (pgfrom_base == 0) {
pgfrom_base = PAGE_SIZE;
pgfrom--;
}
copy = len;
if (copy > pgto_base)
copy = pgto_base;
if (copy > pgfrom_base)
copy = pgfrom_base;
pgto_base -= copy;
pgfrom_base -= copy;
vto = kmap_atomic(*pgto);
if (*pgto != *pgfrom) {
vfrom = kmap_atomic(*pgfrom);
memcpy(vto + pgto_base, vfrom + pgfrom_base, copy);
kunmap_atomic(vfrom);
} else
memmove(vto + pgto_base, vto + pgfrom_base, copy);
flush_dcache_page(*pgto);
kunmap_atomic(vto);
} while ((len -= copy) != 0);
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
linux
|
6d1c0f3d28f98ea2736128ed3e46821496dc3a8c
| 77,469,967,329,455,560,000,000,000,000,000,000,000 | 52 |
sunrpc: Avoid a KASAN slab-out-of-bounds bug in xdr_set_page_base()
This seems to happen fairly easily during READ_PLUS testing on NFS v4.2.
I found that we could end up accessing xdr->buf->pages[pgnr] with a pgnr
greater than the number of pages in the array. So let's just return
early if we're setting base to a point at the end of the page data and
let xdr_set_tail_base() handle setting up the buffer pointers instead.
Signed-off-by: Anna Schumaker <[email protected]>
Fixes: 8d86e373b0ef ("SUNRPC: Clean up helpers xdr_set_iov() and xdr_set_page_base()")
Signed-off-by: Trond Myklebust <[email protected]>
|
c_find_fdoy(int y, double sg, int *rjd, int *ns)
{
int d, rm, rd;
for (d = 1; d < 31; d++)
if (c_valid_civil_p(y, 1, d, sg, &rm, &rd, rjd, ns))
return 1;
return 0;
}
| 0 |
[] |
date
|
3959accef8da5c128f8a8e2fd54e932a4fb253b0
| 234,980,589,353,821,300,000,000,000,000,000,000,000 | 9 |
Add length limit option for methods that parses date strings
`Date.parse` now raises an ArgumentError when a given date string is
longer than 128. You can configure the limit by giving `limit` keyword
arguments like `Date.parse(str, limit: 1000)`. If you pass `limit: nil`,
the limit is disabled.
Not only `Date.parse` but also the following methods are changed.
* Date._parse
* Date.parse
* DateTime.parse
* Date._iso8601
* Date.iso8601
* DateTime.iso8601
* Date._rfc3339
* Date.rfc3339
* DateTime.rfc3339
* Date._xmlschema
* Date.xmlschema
* DateTime.xmlschema
* Date._rfc2822
* Date.rfc2822
* DateTime.rfc2822
* Date._rfc822
* Date.rfc822
* DateTime.rfc822
* Date._jisx0301
* Date.jisx0301
* DateTime.jisx0301
|
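The mitigation above is a plain length gate in front of an expensive parser. The project is Ruby, but the guard itself is language-agnostic; a hedged C sketch with the 128-byte default from the message (parser hand-off elided):

#include <stddef.h>
#include <string.h>

#define DATE_PARSE_LIMIT 128  /* default cap named in the commit message */

int parse_date_checked(const char *str, size_t limit /* 0 = unlimited */)
{
    if (limit != 0 && strlen(str) > limit)
        return -1;            /* reject oversized input up front */
    /* ... hand off to the real parser here ... */
    return 0;
}

Callers opt out by passing limit 0, matching the `limit: nil` escape hatch described above.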
static void seq_set_overflow(struct seq_file *m)
{
m->count = m->size;
}
| 0 |
[
"CWE-120",
"CWE-787"
] |
linux
|
8cae8cd89f05f6de223d63e6d15e31c8ba9cf53b
| 236,637,116,741,935,280,000,000,000,000,000,000,000 | 4 |
seq_file: disallow extremely large seq buffer allocations
There is no reasonable need for a buffer larger than this, and it avoids
int overflow pitfalls.
Fixes: 058504edd026 ("fs/seq_file: fallback to vmalloc allocation")
Suggested-by: Al Viro <[email protected]>
Reported-by: Qualys Security Advisory <[email protected]>
Signed-off-by: Eric Sandeen <[email protected]>
Cc: [email protected]
Signed-off-by: Linus Torvalds <[email protected]>
|
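The fix reasoning: seq_file buffer sizes feed int arithmetic (as in seq_set_overflow above), so unbounded sizes can wrap. A sketch of the clamp idea with an invented limit constant (the kernel patch uses its own bound):

#include <stdlib.h>

#define SEQ_BUF_LIMIT (1UL << 20)  /* illustrative cap, not the kernel's constant */

/* Refuse pathological buffer sizes before they reach size doubling or
 * "m->count = m->size" style arithmetic that could overflow an int. */
static void *seq_buf_alloc_checked(size_t size)
{
    if (size == 0 || size > SEQ_BUF_LIMIT)
        return NULL;
    return malloc(size);
}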
void Filter::UpstreamCallbacks::onUpstreamData(Buffer::Instance& data, bool end_stream) {
if (parent_) {
parent_->onUpstreamData(data, end_stream);
} else {
drainer_->onData(data, end_stream);
}
}
| 0 |
[
"CWE-416"
] |
envoy
|
ce0ae309057a216aba031aff81c445c90c6ef145
| 45,870,804,746,010,000,000,000,000,000,000,000,000 | 7 |
CVE-2021-43826
Signed-off-by: Yan Avlasov <[email protected]>
|
static netdev_features_t xennet_fix_features(struct net_device *dev,
netdev_features_t features)
{
struct netfront_info *np = netdev_priv(dev);
if (features & NETIF_F_SG &&
!xenbus_read_unsigned(np->xbdev->otherend, "feature-sg", 0))
features &= ~NETIF_F_SG;
if (features & NETIF_F_IPV6_CSUM &&
!xenbus_read_unsigned(np->xbdev->otherend,
"feature-ipv6-csum-offload", 0))
features &= ~NETIF_F_IPV6_CSUM;
if (features & NETIF_F_TSO &&
!xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv4", 0))
features &= ~NETIF_F_TSO;
if (features & NETIF_F_TSO6 &&
!xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv6", 0))
features &= ~NETIF_F_TSO6;
return features;
}
| 0 |
[] |
linux
|
f63c2c2032c2e3caad9add3b82cc6e91c376fd26
| 126,350,172,860,603,160,000,000,000,000,000,000,000 | 24 |
xen-netfront: restore __skb_queue_tail() positioning in xennet_get_responses()
The commit referenced below moved the invocation past the "next" label,
without any explanation. In fact this allows misbehaving backends undue
control over the domain the frontend runs in, as earlier detected errors
require the skb to not be freed (it may be retained for later processing
via xennet_move_rx_slot(), or it may simply be unsafe to have it freed).
This is CVE-2022-33743 / XSA-405.
Fixes: 6c5aa6fc4def ("xen networking: add basic XDP support for xen-netfront")
Signed-off-by: Jan Beulich <[email protected]>
Reviewed-by: Juergen Gross <[email protected]>
Signed-off-by: Juergen Gross <[email protected]>
|
struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct rtattr *tab)
{
struct qdisc_rate_table *rtab;
for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
if (memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) == 0) {
rtab->refcnt++;
return rtab;
}
}
if (tab == NULL || r->rate == 0 || r->cell_log == 0 || RTA_PAYLOAD(tab) != 1024)
return NULL;
rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
if (rtab) {
rtab->rate = *r;
rtab->refcnt = 1;
memcpy(rtab->data, RTA_DATA(tab), 1024);
rtab->next = qdisc_rtab_list;
qdisc_rtab_list = rtab;
}
return rtab;
}
| 0 |
[
"CWE-200"
] |
linux-2.6
|
9ef1d4c7c7aca1cd436612b6ca785b726ffb8ed8
| 164,564,841,600,042,390,000,000,000,000,000,000,000 | 24 |
[NETLINK]: Missing initializations in dumped data
Mostly missing initialization of padding fields of 1 or 2 bytes length,
two instances of uninitialized nlmsgerr->msg of 16 bytes length.
Signed-off-by: Patrick McHardy <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
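The class of bug fixed here: structs copied out to userspace wholesale leak whatever was in their padding bytes unless the struct is zeroed first. A minimal illustration with an invented struct:

#include <string.h>

struct wire_msg {        /* invented example; 3 padding bytes follow 'type' */
    char  type;
    int   value;
};

void fill_msg(struct wire_msg *m, char type, int value)
{
    memset(m, 0, sizeof(*m));  /* zero the padding before the copy-out */
    m->type  = type;
    m->value = value;
}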
TEST_F(ConnectionHandlerTest, FallbackToWildcardListener) {
TestListener* test_listener1 = addListener(1, true, true, "test_listener1");
Network::MockListener* listener1 = new Network::MockListener();
Network::ListenerCallbacks* listener_callbacks1;
EXPECT_CALL(dispatcher_, createListener_(_, _, _))
.WillOnce(
Invoke([&](Network::Socket&, Network::ListenerCallbacks& cb, bool) -> Network::Listener* {
listener_callbacks1 = &cb;
return listener1;
}));
Network::Address::InstanceConstSharedPtr normal_address(
new Network::Address::Ipv4Instance("127.0.0.1", 10001));
EXPECT_CALL(test_listener1->socket_, localAddress()).WillRepeatedly(ReturnRef(normal_address));
handler_->addListener(*test_listener1);
TestListener* test_listener2 = addListener(1, false, false, "test_listener2");
Network::MockListener* listener2 = new Network::MockListener();
Network::ListenerCallbacks* listener_callbacks2;
EXPECT_CALL(dispatcher_, createListener_(_, _, _))
.WillOnce(
Invoke([&](Network::Socket&, Network::ListenerCallbacks& cb, bool) -> Network::Listener* {
listener_callbacks2 = &cb;
return listener2;
}));
Network::Address::InstanceConstSharedPtr any_address = Network::Utility::getIpv4AnyAddress();
EXPECT_CALL(test_listener2->socket_, localAddress()).WillRepeatedly(ReturnRef(any_address));
handler_->addListener(*test_listener2);
Network::MockListenerFilter* test_filter = new Network::MockListenerFilter();
EXPECT_CALL(*test_filter, destroy_());
Network::MockConnectionSocket* accepted_socket = new NiceMock<Network::MockConnectionSocket>();
bool redirected = false;
EXPECT_CALL(factory_, createListenerFilterChain(_))
.WillRepeatedly(Invoke([&](Network::ListenerFilterManager& manager) -> bool {
// Insert the Mock filter.
if (!redirected) {
manager.addAcceptFilter(Network::ListenerFilterPtr{test_filter});
redirected = true;
}
return true;
}));
// Zero port to match the port of AnyAddress
Network::Address::InstanceConstSharedPtr alt_address(
new Network::Address::Ipv4Instance("127.0.0.2", 0));
EXPECT_CALL(*test_filter, onAccept(_))
.WillOnce(Invoke([&](Network::ListenerFilterCallbacks& cb) -> Network::FilterStatus {
cb.socket().restoreLocalAddress(alt_address);
return Network::FilterStatus::Continue;
}));
EXPECT_CALL(*accepted_socket, restoreLocalAddress(alt_address));
EXPECT_CALL(*accepted_socket, localAddressRestored()).WillOnce(Return(true));
EXPECT_CALL(*accepted_socket, localAddress()).WillRepeatedly(ReturnRef(alt_address));
EXPECT_CALL(manager_, findFilterChain(_)).WillOnce(Return(filter_chain_.get()));
Network::MockConnection* connection = new NiceMock<Network::MockConnection>();
EXPECT_CALL(dispatcher_, createServerConnection_()).WillOnce(Return(connection));
EXPECT_CALL(factory_, createNetworkFilterChain(_, _)).WillOnce(Return(true));
listener_callbacks1->onAccept(Network::ConnectionSocketPtr{accepted_socket});
EXPECT_EQ(1UL, handler_->numConnections());
EXPECT_CALL(*listener2, onDestroy());
EXPECT_CALL(*listener1, onDestroy());
}
| 0 |
[
"CWE-835"
] |
envoy
|
c8de199e2971f79cbcbc6b5eadc8c566b28705d1
| 139,075,768,896,249,100,000,000,000,000,000,000,000 | 62 |
listener: clean up accept filter before creating connection (#8922)
Signed-off-by: Yuchen Dai <[email protected]>
|
static bool is_nan(const half val) { // Custom version that works with '-ffast-math'
if (sizeof(half)==2) {
short u;
std::memcpy(&u,&val,sizeof(short));
return (bool)((u&0x7fff)>0x7c00);
}
return cimg::type<float>::is_nan((float)val);
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
CImg
|
ac8003393569aba51048c9d67e1491559877b1d1
| 170,398,382,285,354,940,000,000,000,000,000,000,000 | 8 |
.
|
mono_dynamic_image_free (MonoDynamicImage *image)
{
MonoDynamicImage *di = image;
GList *list;
int i;
if (di->methodspec)
mono_g_hash_table_destroy (di->methodspec);
if (di->typespec)
g_hash_table_destroy (di->typespec);
if (di->typeref)
g_hash_table_destroy (di->typeref);
if (di->handleref)
g_hash_table_destroy (di->handleref);
if (di->handleref_managed)
mono_g_hash_table_destroy (di->handleref_managed);
if (di->tokens)
mono_g_hash_table_destroy (di->tokens);
if (di->generic_def_objects)
mono_g_hash_table_destroy (di->generic_def_objects);
if (di->blob_cache) {
g_hash_table_foreach (di->blob_cache, free_blob_cache_entry, NULL);
g_hash_table_destroy (di->blob_cache);
}
if (di->standalonesig_cache)
g_hash_table_destroy (di->standalonesig_cache);
for (list = di->array_methods; list; list = list->next) {
ArrayMethod *am = (ArrayMethod *)list->data;
g_free (am->sig);
g_free (am->name);
g_free (am);
}
g_list_free (di->array_methods);
if (di->gen_params) {
for (i = 0; i < di->gen_params->len; i++) {
GenericParamTableEntry *entry = g_ptr_array_index (di->gen_params, i);
mono_gc_deregister_root ((char*) &entry->gparam);
g_free (entry);
}
g_ptr_array_free (di->gen_params, TRUE);
}
if (di->token_fixups)
mono_g_hash_table_destroy (di->token_fixups);
if (di->method_to_table_idx)
g_hash_table_destroy (di->method_to_table_idx);
if (di->field_to_table_idx)
g_hash_table_destroy (di->field_to_table_idx);
if (di->method_aux_hash)
g_hash_table_destroy (di->method_aux_hash);
if (di->vararg_aux_hash)
g_hash_table_destroy (di->vararg_aux_hash);
g_free (di->strong_name);
g_free (di->win32_res);
if (di->public_key)
g_free (di->public_key);
/*g_print ("string heap destroy for image %p\n", di);*/
mono_dynamic_stream_reset (&di->sheap);
mono_dynamic_stream_reset (&di->code);
mono_dynamic_stream_reset (&di->resources);
mono_dynamic_stream_reset (&di->us);
mono_dynamic_stream_reset (&di->blob);
mono_dynamic_stream_reset (&di->tstream);
mono_dynamic_stream_reset (&di->guid);
for (i = 0; i < MONO_TABLE_NUM; ++i) {
g_free (di->tables [i].values);
}
}
| 0 |
[
"CWE-399",
"CWE-264"
] |
mono
|
89d1455a80ef13cddee5d79ec00c06055da3085c
| 326,813,641,627,872,660,000,000,000,000,000,000,000 | 68 |
Don't use finalization to cleanup dynamic methods.
* reflection.c: Use a reference queue to cleanup
dynamic methods instead of finalization.
* runtime.c: Shutdown the dynamic method queue
before runtime cleanup begins.
* DynamicMethod.cs: No longer finalizable.
* icall-def.h: Remove unused dynamic method icall.
Fixes #660422
|
static GF_Err gf_filter_process_check_alloc(GF_Filter *filter)
{
GF_Err e;
u32 nb_allocs=0, nb_callocs=0, nb_reallocs=0, nb_free=0;
u32 prev_nb_allocs=0, prev_nb_callocs=0, prev_nb_reallocs=0, prev_nb_free=0;
//reset alloc/realloc stats of filter
filter->session->nb_alloc_pck = 0;
filter->session->nb_realloc_pck = 0;
//get current alloc state
gf_mem_get_stats(&prev_nb_allocs, &prev_nb_callocs, &prev_nb_reallocs, &prev_nb_free);
e = filter->freg->process(filter);
//get new alloc state
gf_mem_get_stats(&nb_allocs, &nb_callocs, &nb_reallocs, &nb_free);
//remove prev alloc stats
nb_allocs -= prev_nb_allocs;
nb_callocs -= prev_nb_callocs;
nb_reallocs -= prev_nb_reallocs;
nb_free -= prev_nb_free;
//remove internal allocs/reallocs due to filter lib
if (nb_allocs>filter->session->nb_alloc_pck)
nb_allocs -= filter->session->nb_alloc_pck;
else
nb_allocs = 0;
if (nb_reallocs>filter->session->nb_realloc_pck)
nb_reallocs -= filter->session->nb_realloc_pck;
else
nb_reallocs = 0;
//we now have the number of allocs/reallocs used by the filter internally during its process
if (nb_allocs || nb_callocs || nb_reallocs /* || nb_free */) {
filter->stats_nb_alloc += nb_allocs;
filter->stats_nb_calloc += nb_callocs;
filter->stats_nb_realloc += nb_reallocs;
filter->stats_nb_free += nb_free;
} else {
filter->nb_consecutive_process ++;
}
filter->nb_process_since_reset++;
return e;
}
| 0 |
[
"CWE-787"
] |
gpac
|
da37ec8582266983d0ec4b7550ec907401ec441e
| 239,432,764,060,577,030,000,000,000,000,000,000,000 | 45 |
fixed crashes for very long path - cf #1908
|
onigenc_minimum_property_name_to_ctype(OnigEncoding enc, UChar* p, UChar* end)
{
static PosixBracketEntryType PBS[] = {
{ (UChar* )"Alnum", ONIGENC_CTYPE_ALNUM, 5 },
{ (UChar* )"Alpha", ONIGENC_CTYPE_ALPHA, 5 },
{ (UChar* )"Blank", ONIGENC_CTYPE_BLANK, 5 },
{ (UChar* )"Cntrl", ONIGENC_CTYPE_CNTRL, 5 },
{ (UChar* )"Digit", ONIGENC_CTYPE_DIGIT, 5 },
{ (UChar* )"Graph", ONIGENC_CTYPE_GRAPH, 5 },
{ (UChar* )"Lower", ONIGENC_CTYPE_LOWER, 5 },
{ (UChar* )"Print", ONIGENC_CTYPE_PRINT, 5 },
{ (UChar* )"Punct", ONIGENC_CTYPE_PUNCT, 5 },
{ (UChar* )"Space", ONIGENC_CTYPE_SPACE, 5 },
{ (UChar* )"Upper", ONIGENC_CTYPE_UPPER, 5 },
{ (UChar* )"XDigit", ONIGENC_CTYPE_XDIGIT, 6 },
{ (UChar* )"ASCII", ONIGENC_CTYPE_ASCII, 5 },
{ (UChar* )"Word", ONIGENC_CTYPE_WORD, 4 },
{ (UChar* )NULL, -1, 0 }
};
PosixBracketEntryType *pb;
int len;
len = onigenc_strlen(enc, p, end);
for (pb = PBS; IS_NOT_NULL(pb->name); pb++) {
if (len == pb->len &&
onigenc_with_ascii_strncmp(enc, p, end, pb->name, pb->len) == 0)
return pb->ctype;
}
return ONIGERR_INVALID_CHAR_PROPERTY_NAME;
}
| 0 |
[
"CWE-125"
] |
oniguruma
|
65a9b1aa03c9bc2dc01b074295b9603232cb3b78
| 5,328,204,779,499,409,000,000,000,000,000,000,000 | 32 |
onig-5.9.2
|
static void hns_nic_service_timer(unsigned long data)
{
struct hns_nic_priv *priv = (struct hns_nic_priv *)data;
(void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);
hns_nic_task_schedule(priv);
}
| 0 |
[
"CWE-416"
] |
linux
|
27463ad99f738ed93c7c8b3e2e5bc8c4853a2ff2
| 327,584,152,418,166,950,000,000,000,000,000,000,000 | 8 |
net: hns: Fix a skb used after free bug
skb maybe freed in hns_nic_net_xmit_hw() and return NETDEV_TX_OK,
which cause hns_nic_net_xmit to use a freed skb.
BUG: KASAN: use-after-free in hns_nic_net_xmit_hw+0x62c/0x940...
[17659.112635] alloc_debug_processing+0x18c/0x1a0
[17659.117208] __slab_alloc+0x52c/0x560
[17659.120909] kmem_cache_alloc_node+0xac/0x2c0
[17659.125309] __alloc_skb+0x6c/0x260
[17659.128837] tcp_send_ack+0x8c/0x280
[17659.132449] __tcp_ack_snd_check+0x9c/0xf0
[17659.136587] tcp_rcv_established+0x5a4/0xa70
[17659.140899] tcp_v4_do_rcv+0x27c/0x620
[17659.144687] tcp_prequeue_process+0x108/0x170
[17659.149085] tcp_recvmsg+0x940/0x1020
[17659.152787] inet_recvmsg+0x124/0x180
[17659.156488] sock_recvmsg+0x64/0x80
[17659.160012] SyS_recvfrom+0xd8/0x180
[17659.163626] __sys_trace_return+0x0/0x4
[17659.167506] INFO: Freed in kfree_skbmem+0xa0/0xb0 age=23 cpu=1 pid=13
[17659.174000] free_debug_processing+0x1d4/0x2c0
[17659.178486] __slab_free+0x240/0x390
[17659.182100] kmem_cache_free+0x24c/0x270
[17659.186062] kfree_skbmem+0xa0/0xb0
[17659.189587] __kfree_skb+0x28/0x40
[17659.193025] napi_gro_receive+0x168/0x1c0
[17659.197074] hns_nic_rx_up_pro+0x58/0x90
[17659.201038] hns_nic_rx_poll_one+0x518/0xbc0
[17659.205352] hns_nic_common_poll+0x94/0x140
[17659.209576] net_rx_action+0x458/0x5e0
[17659.213363] __do_softirq+0x1b8/0x480
[17659.217062] run_ksoftirqd+0x64/0x80
[17659.220679] smpboot_thread_fn+0x224/0x310
[17659.224821] kthread+0x150/0x170
[17659.228084] ret_from_fork+0x10/0x40
BUG: KASAN: use-after-free in hns_nic_net_xmit+0x8c/0xc0...
[17751.080490] __slab_alloc+0x52c/0x560
[17751.084188] kmem_cache_alloc+0x244/0x280
[17751.088238] __build_skb+0x40/0x150
[17751.091764] build_skb+0x28/0x100
[17751.095115] __alloc_rx_skb+0x94/0x150
[17751.098900] __napi_alloc_skb+0x34/0x90
[17751.102776] hns_nic_rx_poll_one+0x180/0xbc0
[17751.107097] hns_nic_common_poll+0x94/0x140
[17751.111333] net_rx_action+0x458/0x5e0
[17751.115123] __do_softirq+0x1b8/0x480
[17751.118823] run_ksoftirqd+0x64/0x80
[17751.122437] smpboot_thread_fn+0x224/0x310
[17751.126575] kthread+0x150/0x170
[17751.129838] ret_from_fork+0x10/0x40
[17751.133454] INFO: Freed in kfree_skbmem+0xa0/0xb0 age=19 cpu=7 pid=43
[17751.139951] free_debug_processing+0x1d4/0x2c0
[17751.144436] __slab_free+0x240/0x390
[17751.148051] kmem_cache_free+0x24c/0x270
[17751.152014] kfree_skbmem+0xa0/0xb0
[17751.155543] __kfree_skb+0x28/0x40
[17751.159022] napi_gro_receive+0x168/0x1c0
[17751.163074] hns_nic_rx_up_pro+0x58/0x90
[17751.167041] hns_nic_rx_poll_one+0x518/0xbc0
[17751.171358] hns_nic_common_poll+0x94/0x140
[17751.175585] net_rx_action+0x458/0x5e0
[17751.179373] __do_softirq+0x1b8/0x480
[17751.183076] run_ksoftirqd+0x64/0x80
[17751.186691] smpboot_thread_fn+0x224/0x310
[17751.190826] kthread+0x150/0x170
[17751.194093] ret_from_fork+0x10/0x40
Fixes: 13ac695e7ea1 ("net:hns: Add support of Hip06 SoC to the Hislicon Network Subsystem")
Signed-off-by: Yunsheng Lin <[email protected]>
Signed-off-by: lipeng <[email protected]>
Reported-by: Jun He <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
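The general remedy for the use-after-free described above is to copy any field you still need out of the skb before calling a transmit routine that may consume it. Sketched generically with placeholder names:

#include <stddef.h>

struct buf { size_t len; unsigned char *data; };

extern int  xmit_hw(struct buf *b);        /* may free 'b' on any path */
extern void account_tx_bytes(size_t len);

int xmit(struct buf *b)
{
    size_t len = b->len;      /* capture before ownership is handed off */
    int ret = xmit_hw(b);
    /* From here on, 'b' must not be dereferenced: use the saved copy. */
    if (ret == 0)
        account_tx_bytes(len);
    return ret;
}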
void sspi_SecBufferFree(PSecBuffer SecBuffer)
{
free(SecBuffer->pvBuffer);
SecBuffer->pvBuffer = NULL;
SecBuffer->cbBuffer = 0;
}
| 0 |
[
"CWE-476",
"CWE-125"
] |
FreeRDP
|
0773bb9303d24473fe1185d85a424dfe159aff53
| 162,730,397,330,526,630,000,000,000,000,000,000,000 | 6 |
nla: invalidate sec handle after creation
If sec pointer isn't invalidated after creation it is not possible
to check if the upper and lower pointers are valid.
This fixes a segfault in the server part if the client disconnects before
the authentication was finished.
|
lacks_deep_count (NautilusFile *file)
{
return file->details->deep_counts_status != NAUTILUS_REQUEST_DONE;
}
| 0 |
[] |
nautilus
|
7632a3e13874a2c5e8988428ca913620a25df983
| 78,903,865,148,681,870,000,000,000,000,000,000,000 | 4 |
Check for trusted desktop file launchers.
2009-02-24 Alexander Larsson <[email protected]>
* libnautilus-private/nautilus-directory-async.c:
Check for trusted desktop file launchers.
* libnautilus-private/nautilus-file-private.h:
* libnautilus-private/nautilus-file.c:
* libnautilus-private/nautilus-file.h:
Add nautilus_file_is_trusted_link.
Allow unsetting of custom display name.
* libnautilus-private/nautilus-mime-actions.c:
Display dialog when trying to launch a non-trusted desktop file.
svn path=/trunk/; revision=15003
|
static NTSTATUS dcesrv_lsa_CREDRREADDOMAINCREDENTIALS(struct dcesrv_call_state *dce_call, TALLOC_CTX *mem_ctx,
struct lsa_CREDRREADDOMAINCREDENTIALS *r)
{
DCESRV_FAULT(DCERPC_FAULT_OP_RNG_ERROR);
}
| 0 |
[
"CWE-200"
] |
samba
|
0a3aa5f908e351201dc9c4d4807b09ed9eedff77
| 126,296,114,602,851,680,000,000,000,000,000,000,000 | 5 |
CVE-2022-32746 ldb: Make use of functions for appending to an ldb_message
This aims to minimise usage of the error-prone pattern of searching for
a just-added message element in order to make modifications to it (and
potentially finding the wrong element).
BUG: https://bugzilla.samba.org/show_bug.cgi?id=15009
Signed-off-by: Joseph Sutton <[email protected]>
|
int fill_schema_column_privileges(THD *thd, TABLE_LIST *tables, Item *cond)
{
#ifndef NO_EMBEDDED_ACCESS_CHECKS
int error= 0;
uint index;
char buff[100];
TABLE *table= tables->table;
bool no_global_access= check_access(thd, SELECT_ACL, "mysql",
NULL, NULL, 1, 1);
char *curr_host= thd->security_ctx->priv_host_name();
DBUG_ENTER("fill_schema_table_privileges");
mysql_rwlock_rdlock(&LOCK_grant);
for (index=0 ; index < column_priv_hash.records ; index++)
{
const char *user, *host, *is_grantable= "YES";
GRANT_TABLE *grant_table= (GRANT_TABLE*) my_hash_element(&column_priv_hash,
index);
if (!(user=grant_table->user))
user= "";
if (!(host= grant_table->host.get_host()))
host= "";
if (no_global_access &&
(strcmp(thd->security_ctx->priv_user, user) ||
my_strcasecmp(system_charset_info, curr_host, host)))
continue;
ulong table_access= grant_table->cols;
if (table_access != 0)
{
if (!(grant_table->privs & GRANT_ACL))
is_grantable= "NO";
ulong test_access= table_access & ~GRANT_ACL;
strxmov(buff, "'", user, "'@'", host, "'", NullS);
if (!test_access)
continue;
else
{
ulong j;
int cnt;
for (cnt= 0, j= SELECT_ACL; j <= TABLE_ACLS; cnt++, j<<= 1)
{
if (test_access & j)
{
for (uint col_index=0 ;
col_index < grant_table->hash_columns.records ;
col_index++)
{
GRANT_COLUMN *grant_column = (GRANT_COLUMN*)
my_hash_element(&grant_table->hash_columns,col_index);
if ((grant_column->rights & j) && (table_access & j))
{
if (update_schema_privilege(thd, table, buff, grant_table->db,
grant_table->tname,
grant_column->column,
grant_column->key_length,
command_array[cnt],
command_lengths[cnt], is_grantable))
{
error= 1;
goto err;
}
}
}
}
}
}
}
}
err:
mysql_rwlock_unlock(&LOCK_grant);
DBUG_RETURN(error);
#else
return (0);
#endif
}
| 0 |
[] |
mysql-server
|
25d1b7e03b9b375a243fabdf0556c063c7282361
| 175,608,661,017,570,850,000,000,000,000,000,000,000 | 80 |
Bug #22722946: integer overflow may lead to wrong results in get_56_lenc_string
|
adium_info_get_version (GHashTable *info)
{
return tp_asv_get_int32 (info, "MessageViewVersion", NULL);
}
| 0 |
[
"CWE-79"
] |
empathy
|
739aca418457de752be13721218aaebc74bd9d36
| 23,251,976,301,883,783,000,000,000,000,000,000,000 | 4 |
theme_adium_append_message: escape alias before displaying it
Not doing so can lead to nasty HTML injection from hostile users.
https://bugzilla.gnome.org/show_bug.cgi?id=662035
|
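The fix escapes the attacker-controlled alias before it reaches the HTML theme. Real GLib code would use g_markup_escape_text(); this C sketch only shows the handful of substitutions involved:

#include <stdio.h>

/* Print 's' with the HTML-special characters replaced by entities. */
void html_escape_print(const char *s)
{
    for (; *s; s++) {
        switch (*s) {
        case '<':  fputs("&lt;", stdout);   break;
        case '>':  fputs("&gt;", stdout);   break;
        case '&':  fputs("&amp;", stdout);  break;
        case '"':  fputs("&quot;", stdout); break;
        default:   fputc(*s, stdout);       break;
        }
    }
}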
static int kdb_ps(int argc, const char **argv)
{
struct task_struct *g, *p;
const char *mask;
unsigned long cpu;
if (argc == 0)
kdb_ps_suppressed();
kdb_printf("%-*s Pid Parent [*] cpu State %-*s Command\n",
(int)(2*sizeof(void *))+2, "Task Addr",
(int)(2*sizeof(void *))+2, "Thread");
mask = argc ? argv[1] : kdbgetenv("PS");
/* Run the active tasks first */
for_each_online_cpu(cpu) {
if (KDB_FLAG(CMD_INTERRUPT))
return 0;
p = kdb_curr_task(cpu);
if (kdb_task_state(p, mask))
kdb_ps1(p);
}
kdb_printf("\n");
/* Now the real tasks */
for_each_process_thread(g, p) {
if (KDB_FLAG(CMD_INTERRUPT))
return 0;
if (kdb_task_state(p, mask))
kdb_ps1(p);
}
return 0;
}
| 0 |
[
"CWE-787"
] |
linux
|
eadb2f47a3ced5c64b23b90fd2a3463f63726066
| 46,875,831,158,246,090,000,000,000,000,000,000,000 | 31 |
lockdown: also lock down previous kgdb use
KGDB and KDB allow read and write access to kernel memory, and thus
should be restricted during lockdown. An attacker with access to a
serial port (for example, via a hypervisor console, which some cloud
vendors provide over the network) could trigger the debugger so it is
important that the debugger respect the lockdown mode when/if it is
triggered.
Fix this by integrating lockdown into kdb's existing permissions
mechanism. Unfortunately kgdb does not have any permissions mechanism
(although it certainly could be added later) so, for now, kgdb is simply
and brutally disabled by immediately exiting the gdb stub without taking
any action.
For lockdowns established early in the boot (e.g. the normal case) then
this should be fine but on systems where kgdb has set breakpoints before
the lockdown is enacted than "bad things" will happen.
CVE: CVE-2022-21499
Co-developed-by: Stephen Brennan <[email protected]>
Signed-off-by: Stephen Brennan <[email protected]>
Reviewed-by: Douglas Anderson <[email protected]>
Signed-off-by: Daniel Thompson <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
static u64 kvm_s390_get_initial_cpuid(void)
{
struct cpuid cpuid;
get_cpu_id(&cpuid);
cpuid.version = 0xff;
return *((u64 *) &cpuid);
}
| 0 |
[
"CWE-416"
] |
linux
|
0774a964ef561b7170d8d1b1bfe6f88002b6d219
| 234,909,435,052,118,930,000,000,000,000,000,000,000 | 8 |
KVM: Fix out of range accesses to memslots
Reset the LRU slot if it becomes invalid when deleting a memslot to fix
an out-of-bounds/use-after-free access when searching through memslots.
Explicitly check for there being no used slots in search_memslots(), and
in the caller of s390's approximation variant.
Fixes: 36947254e5f9 ("KVM: Dynamically size memslot array based on number of used slots")
Reported-by: Qian Cai <[email protected]>
Cc: Peter Xu <[email protected]>
Signed-off-by: Sean Christopherson <[email protected]>
Message-Id: <[email protected]>
Acked-by: Christian Borntraeger <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
int json_array_clear(json_t *json)
{
json_array_t *array;
size_t i;
if(!json_is_array(json))
return -1;
array = json_to_array(json);
for(i = 0; i < array->entries; i++)
json_decref(array->table[i]);
array->entries = 0;
return 0;
}
| 0 |
[
"CWE-310"
] |
jansson
|
8f80c2d83808150724d31793e6ade92749b1faa4
| 241,026,846,143,959,800,000,000,000,000,000,000,000 | 15 |
CVE-2013-6401: Change hash function, randomize hashes
Thanks to Florian Weimer and Eric Sesterhenn for reporting, reviewing
and testing.
|
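CVE-2013-6401 is a hash-flooding issue: with a fixed, public hash function an attacker can precompute colliding object keys. Seeding the hash with a per-process random value breaks that precomputation; a sketch using seeded FNV-1a (jansson's actual function differs):

#include <stddef.h>
#include <stdint.h>

static uint32_t hash_seed;   /* initialized once from a random source at startup */

uint32_t seeded_hash(const char *key, size_t len)
{
    uint32_t h = 2166136261u ^ hash_seed;  /* FNV offset basis xor seed */
    for (size_t i = 0; i < len; i++) {
        h ^= (unsigned char)key[i];
        h *= 16777619u;                    /* FNV prime */
    }
    return h;
}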
cgraph_edge_brings_all_scalars_for_node (struct cgraph_edge *cs,
struct cgraph_node *node)
{
class ipa_node_params *dest_info = IPA_NODE_REF (node);
int count = ipa_get_param_count (dest_info);
class ipa_node_params *caller_info;
class ipa_edge_args *args;
int i;
caller_info = IPA_NODE_REF (cs->caller);
args = IPA_EDGE_REF (cs);
for (i = 0; i < count; i++)
{
struct ipa_jump_func *jump_func;
tree val, t;
val = dest_info->known_csts[i];
if (!val)
continue;
if (i >= ipa_get_cs_argument_count (args))
return false;
jump_func = ipa_get_ith_jump_func (args, i);
t = ipa_value_from_jfunc (caller_info, jump_func,
ipa_get_type (dest_info, i));
if (!t || !values_equal_for_ipcp_p (val, t))
return false;
}
return true;
}
| 0 |
[
"CWE-20"
] |
gcc
|
a09ccc22459c565814f79f96586fe4ad083fe4eb
| 243,329,360,023,034,570,000,000,000,000,000,000,000 | 30 |
Avoid segfault when doing IPA-VRP but not IPA-CP (PR 93015)
2019-12-21 Martin Jambor <[email protected]>
PR ipa/93015
* ipa-cp.c (ipcp_store_vr_results): Check that info exists
testsuite/
* gcc.dg/lto/pr93015_0.c: New test.
From-SVN: r279695
|
static int64_t asf_read_timestamp(AVFormatContext *s, int stream_index,
int64_t *pos, int64_t pos_limit)
{
ASFContext *asf = s->priv_data;
int64_t pkt_pos = *pos, pkt_offset, dts = AV_NOPTS_VALUE, data_end;
AVPacket pkt;
int n;
data_end = asf->data_offset + asf->data_size;
n = (pkt_pos - asf->first_packet_offset + asf->packet_size - 1) /
asf->packet_size;
n = av_clip(n, 0, ((data_end - asf->first_packet_offset) / asf->packet_size - 1));
pkt_pos = asf->first_packet_offset + n * asf->packet_size;
avio_seek(s->pb, pkt_pos, SEEK_SET);
pkt_offset = pkt_pos;
reset_packet_state(s);
while (avio_tell(s->pb) < data_end) {
int i, ret, st_found;
av_init_packet(&pkt);
pkt_offset = avio_tell(s->pb);
if ((ret = asf_read_packet(s, &pkt)) < 0) {
dts = AV_NOPTS_VALUE;
return ret;
}
// ASFPacket may contain fragments of packets belonging to different streams,
// pkt_offset is the offset of the first fragment within it.
if ((pkt_offset >= (pkt_pos + asf->packet_size)))
pkt_pos += asf->packet_size;
for (i = 0; i < asf->nb_streams; i++) {
ASFStream *st = asf->asf_st[i];
st_found = 0;
if (pkt.flags & AV_PKT_FLAG_KEY) {
dts = pkt.dts;
if (dts) {
av_add_index_entry(s->streams[pkt.stream_index], pkt_pos,
dts, pkt.size, 0, AVINDEX_KEYFRAME);
if (stream_index == st->index) {
st_found = 1;
break;
}
}
}
}
if (st_found)
break;
av_packet_unref(&pkt);
}
*pos = pkt_pos;
av_packet_unref(&pkt);
return dts;
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
FFmpeg
|
2b46ebdbff1d8dec7a3d8ea280a612b91a582869
| 242,790,625,958,569,800,000,000,000,000,000,000,000 | 58 |
avformat/asfdec_o: Check size_bmp more fully
Fixes: integer overflow and out of array access
Fixes: asfo-crash-46080c4341572a7137a162331af77f6ded45cbd7
Found-by: Paul Ch <[email protected]>
Signed-off-by: Michael Niedermayer <[email protected]>
|
copy_rrset(const struct ub_packed_rrset_key* key, struct regional* region)
{
struct ub_packed_rrset_key* ck = regional_alloc(region,
sizeof(struct ub_packed_rrset_key));
struct packed_rrset_data* d;
struct packed_rrset_data* data = key->entry.data;
size_t dsize, i;
uint8_t* nextrdata;
/* derived from packed_rrset_copy_region(), but don't use
* packed_rrset_sizeof() and do exclude RRSIGs */
if(!ck)
return NULL;
ck->id = key->id;
memset(&ck->entry, 0, sizeof(ck->entry));
ck->entry.hash = key->entry.hash;
ck->entry.key = ck;
ck->rk = key->rk;
ck->rk.dname = regional_alloc_init(region, key->rk.dname,
key->rk.dname_len);
if(!ck->rk.dname)
return NULL;
dsize = sizeof(struct packed_rrset_data) + data->count *
(sizeof(size_t)+sizeof(uint8_t*)+sizeof(time_t));
for(i=0; i<data->count; i++)
dsize += data->rr_len[i];
d = regional_alloc(region, dsize);
if(!d)
return NULL;
*d = *data;
d->rrsig_count = 0;
ck->entry.data = d;
/* derived from packed_rrset_ptr_fixup() with copying the data */
d->rr_len = (size_t*)((uint8_t*)d + sizeof(struct packed_rrset_data));
d->rr_data = (uint8_t**)&(d->rr_len[d->count]);
d->rr_ttl = (time_t*)&(d->rr_data[d->count]);
nextrdata = (uint8_t*)&(d->rr_ttl[d->count]);
for(i=0; i<d->count; i++) {
d->rr_len[i] = data->rr_len[i];
d->rr_ttl[i] = data->rr_ttl[i];
d->rr_data[i] = nextrdata;
memcpy(d->rr_data[i], data->rr_data[i], data->rr_len[i]);
nextrdata += d->rr_len[i];
}
return ck;
}
| 1 |
[
"CWE-190"
] |
unbound
|
02080f6b180232f43b77f403d0c038e9360a460f
| 158,811,822,716,503,160,000,000,000,000,000,000,000 | 49 |
- Fix Integer Overflows in Size Calculations,
reported by X41 D-Sec.
|
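In the vulnerable function above, dsize accumulates count * (fixed per-RR sizes) plus per-RR lengths with unchecked size_t arithmetic, which can wrap. A sketch of the checked computation (helper names invented):

#include <stddef.h>
#include <stdint.h>

/* Add 'b' to '*acc'; return 0 on overflow instead of wrapping. */
static int size_add(size_t *acc, size_t b)
{
    if (b > SIZE_MAX - *acc)
        return 0;
    *acc += b;
    return 1;
}

/* dsize = header + count * per_rr + sum(rr_len[i]), all checked. */
static int rrset_size(size_t count, const size_t *rr_len,
                      size_t header, size_t per_rr, size_t *out)
{
    size_t dsize = header;
    if (per_rr != 0 && count > SIZE_MAX / per_rr)
        return 0;
    if (!size_add(&dsize, count * per_rr))
        return 0;
    for (size_t i = 0; i < count; i++)
        if (!size_add(&dsize, rr_len[i]))
            return 0;
    *out = dsize;
    return 1;
}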
static int check_subprogs(struct bpf_verifier_env *env)
{
int i, ret, subprog_start, subprog_end, off, cur_subprog = 0;
struct bpf_subprog_info *subprog = env->subprog_info;
struct bpf_insn *insn = env->prog->insnsi;
int insn_cnt = env->prog->len;
/* Add entry function. */
ret = add_subprog(env, 0);
if (ret < 0)
return ret;
/* determine subprog starts. The end is one before the next starts */
for (i = 0; i < insn_cnt; i++) {
if (insn[i].code != (BPF_JMP | BPF_CALL))
continue;
if (insn[i].src_reg != BPF_PSEUDO_CALL)
continue;
if (!env->allow_ptr_leaks) {
verbose(env, "function calls to other bpf functions are allowed for root only\n");
return -EPERM;
}
ret = add_subprog(env, i + insn[i].imm + 1);
if (ret < 0)
return ret;
}
/* Add a fake 'exit' subprog which could simplify subprog iteration
* logic. 'subprog_cnt' should not be increased.
*/
subprog[env->subprog_cnt].start = insn_cnt;
if (env->log.level & BPF_LOG_LEVEL2)
for (i = 0; i < env->subprog_cnt; i++)
verbose(env, "func#%d @%d\n", i, subprog[i].start);
/* now check that all jumps are within the same subprog */
subprog_start = subprog[cur_subprog].start;
subprog_end = subprog[cur_subprog + 1].start;
for (i = 0; i < insn_cnt; i++) {
u8 code = insn[i].code;
if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32)
goto next;
if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
goto next;
off = i + insn[i].off + 1;
if (off < subprog_start || off >= subprog_end) {
verbose(env, "jump out of range from insn %d to %d\n", i, off);
return -EINVAL;
}
next:
if (i == subprog_end - 1) {
/* to avoid fall-through from one subprog into another
* the last insn of the subprog should be either exit
* or unconditional jump back
*/
if (code != (BPF_JMP | BPF_EXIT) &&
code != (BPF_JMP | BPF_JA)) {
verbose(env, "last insn is not an exit or jmp\n");
return -EINVAL;
}
subprog_start = subprog_end;
cur_subprog++;
if (cur_subprog < env->subprog_cnt)
subprog_end = subprog[cur_subprog + 1].start;
}
}
return 0;
}
| 0 |
[] |
linux
|
294f2fc6da27620a506e6c050241655459ccd6bd
| 125,778,697,780,668,320,000,000,000,000,000,000,000 | 70 |
bpf: Verifer, adjust_scalar_min_max_vals to always call update_reg_bounds()
Currently, for all op verification we call __red_deduce_bounds() and
__red_bound_offset() but we only call __update_reg_bounds() in bitwise
ops. However, we could benefit from calling __update_reg_bounds() in
BPF_ADD, BPF_SUB, and BPF_MUL cases as well.
For example, a register with state 'R1_w=invP0' when we subtract from
it,
w1 -= 2
Before coerce we will now have an smin_value=S64_MIN, smax_value=U64_MAX
and unsigned bounds umin_value=0, umax_value=U64_MAX. These will then
be clamped to S32_MIN, U32_MAX values by coerce in the case of alu32 op
as done in above example. However tnum will be a constant because the
ALU op is done on a constant.
Without update_reg_bounds() we have a scenario where tnum is a const
but our unsigned bounds do not reflect this. By calling update_reg_bounds
after coerce to 32bit we further refine the umin_value to U64_MAX in the
alu64 case or U32_MAX in the alu32 case above.
Signed-off-by: John Fastabend <[email protected]>
Signed-off-by: Alexei Starovoitov <[email protected]>
Link: https://lore.kernel.org/bpf/158507151689.15666.566796274289413203.stgit@john-Precision-5820-Tower
|
evbuffer_add(struct evbuffer *buf, const void *data, size_t datlen)
{
size_t used = buf->misalign + buf->off;
size_t oldoff = buf->off;
if (buf->totallen - used < datlen) {
if (evbuffer_expand(buf, datlen) == -1)
return (-1);
}
memcpy(buf->buffer + buf->off, data, datlen);
buf->off += datlen;
if (datlen && buf->cb != NULL)
(*buf->cb)(buf, oldoff, buf->off, buf->cbarg);
return (0);
}
| 0 |
[
"CWE-189"
] |
libevent
|
7b21c4eabf1f3946d3f63cce1319c490caab8ecf
| 322,287,499,074,946,200,000,000,000,000,000,000,000 | 18 |
Fix CVE-2014-6272 in Libevent 1.4
For this fix, we need to make sure that passing too-large inputs to
the evbuffer functions can't make us do bad things with the heap.
|
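The CVE-2014-6272 fix is about the arithmetic around evbuffer_add()/evbuffer_expand(): misalign + off + datlen must not wrap before it is compared against totallen. The essential guard, sketched (not the literal libevent patch):

#include <stddef.h>
#include <stdint.h>

/* Reject requests whose total requirement cannot be represented. */
int check_expand(size_t used, size_t datlen)
{
    if (datlen > SIZE_MAX - used)
        return -1;            /* would overflow: fail instead of wrapping */
    return 0;
}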
virtual longlong val_int_endpoint(bool left_endp, bool *incl_endp)
{ DBUG_ASSERT(0); return 0; }
| 0 |
[] |
mysql-server
|
f7316aa0c9a3909fc7498e7b95d5d3af044a7e21
| 220,732,808,622,722,140,000,000,000,000,000,000,000 | 2 |
Bug#26361149 MYSQL SERVER CRASHES AT: COL IN(IFNULL(CONST,
COL), NAME_CONST('NAME', NULL))
Backport of Bug#19143243 fix.
NAME_CONST item can return NULL_ITEM type in case of incorrect arguments.
NULL_ITEM has special processing in Item_func_in function.
In Item_func_in::fix_length_and_dec an array of possible comparators is
created. Since NAME_CONST function has NULL_ITEM type, corresponding
array element is empty. Then NAME_CONST is wrapped to ITEM_CACHE.
ITEM_CACHE can not return proper type(NULL_ITEM) in Item_func_in::val_int(),
so the NULL_ITEM is attempted compared with an empty comparator.
The fix is to disable the caching of Item_name_const item.
|
static void defer_open(struct share_mode_lock *lck,
struct timeval request_time,
struct timeval timeout,
struct smb_request *req,
struct deferred_open_record *state)
{
DEBUG(10,("defer_open_sharing_error: time [%u.%06u] adding deferred "
"open entry for mid %llu\n",
(unsigned int)request_time.tv_sec,
(unsigned int)request_time.tv_usec,
(unsigned long long)req->mid));
if (!push_deferred_open_message_smb(req, request_time, timeout,
state->id, (char *)state, sizeof(*state))) {
TALLOC_FREE(lck);
exit_server("push_deferred_open_message_smb failed");
}
if (lck) {
struct defer_open_state *watch_state;
struct tevent_req *watch_req;
bool ret;
watch_state = talloc(req->sconn, struct defer_open_state);
if (watch_state == NULL) {
exit_server("talloc failed");
}
watch_state->sconn = req->sconn;
watch_state->mid = req->mid;
DEBUG(10, ("defering mid %llu\n",
(unsigned long long)req->mid));
watch_req = dbwrap_record_watch_send(
watch_state, req->sconn->ev_ctx, lck->data->record,
req->sconn->msg_ctx);
if (watch_req == NULL) {
exit_server("Could not watch share mode record");
}
tevent_req_set_callback(watch_req, defer_open_done,
watch_state);
ret = tevent_req_set_endtime(
watch_req, req->sconn->ev_ctx,
timeval_sum(&request_time, &timeout));
SMB_ASSERT(ret);
}
}
| 0 |
[] |
samba
|
60f922bf1bd8816eacbb32c24793ad1f97a1d9f2
| 172,698,715,921,210,200,000,000,000,000,000,000,000 | 47 |
Fix bug #10229 - No access check verification on stream files.
https://bugzilla.samba.org/show_bug.cgi?id=10229
We need to check if the requested access mask
could be used to open the underlying file (if
it existed), as we're passing in zero for the
access mask to the base filename.
Signed-off-by: Jeremy Allison <[email protected]>
Reviewed-by: Stefan Metzmacher <[email protected]>
Reviewed-by: David Disseldorp <[email protected]>
|
latin_ptr2cells_len(char_u *p UNUSED, int size UNUSED)
{
return 1;
}
| 0 |
[
"CWE-122",
"CWE-787"
] |
vim
|
f6d39c31d2177549a986d170e192d8351bd571e2
| 300,667,544,487,391,980,000,000,000,000,000,000,000 | 4 |
patch 9.0.0220: invalid memory access with for loop over NULL string
Problem: Invalid memory access with for loop over NULL string.
Solution: Make sure mb_ptr2len() consistently returns zero for NUL.
|
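The vim patch makes the ptr2len family return 0 at the terminating NUL so byte-stepping loops cannot walk past the end of the string. The wrapper pattern, sketched with an assumed underlying routine:

extern int raw_ptr2len(const unsigned char *p);  /* assumed codec routine */

/* Illustrative wrapper: a ptr2len-style helper guaranteed to report 0
 * at the string terminator, so callers' advance loops stop there. */
int safe_ptr2len(const unsigned char *p)
{
    if (*p == '\0')
        return 0;             /* never claim bytes past the NUL */
    return raw_ptr2len(p);
}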
static inline int has_transparent_hugepage(void)
{
return cpu_has_pse;
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
linux
|
027ef6c87853b0a9df53175063028edb4950d476
| 264,065,179,294,079,150,000,000,000,000,000,000,000 | 4 |
mm: thp: fix pmd_present for split_huge_page and PROT_NONE with THP
In many places !pmd_present has been converted to pmd_none. For pmds
that's equivalent and pmd_none is quicker so using pmd_none is better.
However (unless we delete pmd_present) we should provide an accurate
pmd_present too. This will avoid the risk of code thinking the pmd is non
present because it's under __split_huge_page_map, see the pmd_mknotpresent
there and the comment above it.
If the page has been mprotected as PROT_NONE, it would also lead to a
pmd_present false negative in the same way as the race with
split_huge_page.
Because the PSE bit stays on at all times (both during split_huge_page and
when the _PAGE_PROTNONE bit get set), we could only check for the PSE bit,
but checking the PROTNONE bit too is still good to remember pmd_present
must always keep PROT_NONE into account.
This explains a not reproducible BUG_ON that was seldom reported on the
lists.
The same issue is in pmd_large, it would go wrong with both PROT_NONE and
if it races with split_huge_page.
Signed-off-by: Andrea Arcangeli <[email protected]>
Acked-by: Rik van Riel <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
static int doBundleInstance(struct nc_state_t *nc, ncMetadata * pMeta, char *instanceId, char *bucketName, char *filePrefix, char *walrusURL,
char *userPublicKey, char *S3Policy, char *S3PolicySig)
{
// sanity checking
if (instanceId == NULL || bucketName == NULL || filePrefix == NULL || walrusURL == NULL || userPublicKey == NULL || S3Policy == NULL || S3PolicySig == NULL) {
LOGERROR("[%s] bundling instance called with invalid parameters\n", ((instanceId == NULL) ? "UNKNOWN" : instanceId));
return EUCA_ERROR;
}
// find the instance
ncInstance *instance = find_instance(&global_instances, instanceId);
if (instance == NULL) {
LOGERROR("[%s] instance not found\n", instanceId);
return EUCA_NOT_FOUND_ERROR;
}
// "marshall" thread parameters
struct bundling_params_t *params = EUCA_ZALLOC(1, sizeof(struct bundling_params_t));
if (params == NULL)
return cleanup_bundling_task(instance, params, BUNDLING_FAILED);
params->instance = instance;
params->bucketName = strdup(bucketName);
params->filePrefix = strdup(filePrefix);
params->walrusURL = strdup(walrusURL);
params->userPublicKey = strdup(userPublicKey);
params->S3Policy = strdup(S3Policy);
params->S3PolicySig = strdup(S3PolicySig);
params->eucalyptusHomePath = strdup(nc->home);
params->ncBundleUploadCmd = strdup(nc->ncBundleUploadCmd);
params->ncCheckBucketCmd = strdup(nc->ncCheckBucketCmd);
params->ncDeleteBundleCmd = strdup(nc->ncDeleteBundleCmd);
params->workPath = strdup(instance->instancePath);
// terminate the instance
sem_p(inst_sem);
instance->bundlingTime = time(NULL);
change_state(instance, BUNDLING_SHUTDOWN);
change_bundling_state(instance, BUNDLING_IN_PROGRESS);
sem_v(inst_sem);
int err = find_and_terminate_instance(instanceId);
sem_p(inst_sem);
copy_instances();
sem_v(inst_sem);
if (err != EUCA_OK) {
EUCA_FREE(params);
return err;
}
// do the rest in a thread
pthread_attr_t tattr;
pthread_t tid;
pthread_attr_init(&tattr);
pthread_attr_setdetachstate(&tattr, PTHREAD_CREATE_DETACHED);
if (pthread_create(&tid, &tattr, bundling_thread, (void *)params) != 0) {
LOGERROR("[%s] failed to start VM bundling thread\n", instanceId);
return cleanup_bundling_task(instance, params, BUNDLING_FAILED);
}
return EUCA_OK;
}
| 1 |
[] |
eucalyptus
|
c252889a46f41b4c396b89e005ec89836f2524be
| 124,942,277,933,644,910,000,000,000,000,000,000,000 | 62 |
Input validation, shellout hardening on back-end
- validating bucketName and bucketPath in BundleInstance
- validating device name in Attach and DetachVolume
- removed some uses of system() and popen()
Fixes EUCA-7572, EUCA-7520
|
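The hardening notes above mention removing system() and popen(): interpolating fields like bucketName into a shell command line invites injection. The safer shape is fork + exec with a discrete argument vector; a sketch with an invented tool path:

#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

/* Run the upload tool without a shell: bucket/prefix are passed as
 * discrete argv entries, so shell metacharacters have no effect. */
int run_bundle_upload(const char *bucket, const char *prefix)
{
    pid_t pid = fork();
    if (pid == 0) {
        execl("/usr/lib/euca/bundle-upload",   /* invented path */
              "bundle-upload", "-b", bucket, "-p", prefix, (char *)0);
        _exit(127);                            /* exec failed */
    }
    int status;
    if (pid < 0 || waitpid(pid, &status, 0) < 0)
        return -1;
    return WIFEXITED(status) ? WEXITSTATUS(status) : -1;
}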
R_API RList *r_bin_java_find_cp_const_by_val(RBinJavaObj *bin_obj, const ut8 *bytes, ut32 len, const char t) {
switch (t) {
case R_BIN_JAVA_CP_UTF8: return r_bin_java_find_cp_const_by_val_utf8 (bin_obj, bytes, len);
case R_BIN_JAVA_CP_INTEGER: return r_bin_java_find_cp_const_by_val_int (bin_obj, bytes, len);
case R_BIN_JAVA_CP_FLOAT: return r_bin_java_find_cp_const_by_val_float (bin_obj, bytes, len);
case R_BIN_JAVA_CP_LONG: return r_bin_java_find_cp_const_by_val_long (bin_obj, bytes, len);
case R_BIN_JAVA_CP_DOUBLE: return r_bin_java_find_cp_const_by_val_double (bin_obj, bytes, len);
case R_BIN_JAVA_CP_UNKNOWN:
default:
eprintf ("Failed to perform the search for: %s\n", bytes);
return r_list_new ();
}
}
| 0 |
[
"CWE-119",
"CWE-788"
] |
radare2
|
6c4428f018d385fc80a33ecddcb37becea685dd5
| 314,409,543,360,069,600,000,000,000,000,000,000,000 | 13 |
Improve boundary checks to fix oobread segfaults ##crash
* Reported by Cen Zhang via huntr.dev
* Reproducer: bins/fuzzed/javaoob-havoc.class
|
int tm_poll(
tm_event_t poll_event,
tm_event_t *result_event,
int wait,
int *tm_errno)
{
int num, i;
int ret, mtype, nnodes;
int prot, protver;
int *obitvalp;
event_info *ep = NULL;
tm_task_id tid, *tidp;
tm_event_t nevent;
tm_node_id node;
char *jobid = NULL;
char *info = NULL;
struct tm_roots *roots;
struct taskhold *thold;
struct infohold *ihold;
struct reschold *rhold;
extern time_t pbs_tcp_timeout;
if (!init_done)
{
return(TM_BADINIT);
}
if (result_event == NULL)
return(TM_EBADENVIRONMENT);
*result_event = TM_ERROR_EVENT;
if (poll_event != TM_NULL_EVENT)
return(TM_ENOTIMPLEMENTED);
if (tm_errno == NULL)
return(TM_EBADENVIRONMENT);
if (event_count == 0)
{
TM_DBPRT(("%s: no events waiting\n",
__func__))
return(TM_ENOTFOUND);
}
if (local_conn < 0)
{
TM_DBPRT(("%s: INTERNAL ERROR %d events but no connection (%d)\n",
__func__, event_count, local_conn))
if (static_chan != NULL)
{
DIS_tcp_cleanup(static_chan);
static_chan = NULL;
}
return(TM_ENOTCONNECTED);
}
if ((static_chan == NULL) && ((static_chan = DIS_tcp_setup(local_conn)) == NULL))
{
TM_DBPRT(("%s: Error allocating memory for sock buffer %d", __func__, PBSE_MEM_MALLOC))
return TM_BADINIT;
}
/*
** Setup tcp dis routines with a wait value appropriate for
** the value of wait the user set.
*/
pbs_tcp_timeout = wait ? FOREVER : 1;
prot = disrsi(static_chan, &ret);
if (ret == DIS_EOD)
{
*result_event = TM_NULL_EVENT;
DIS_tcp_cleanup(static_chan);
static_chan = NULL;
return TM_SUCCESS;
}
else if (ret != DIS_SUCCESS)
{
TM_DBPRT(("%s: protocol number dis error %d\n", __func__, ret))
goto tm_poll_error;
}
if (prot != TM_PROTOCOL)
{
TM_DBPRT(("%s: bad protocol number %d\n", __func__, prot))
goto tm_poll_error;
}
/*
** We have seen the start of a message. Set the timeout value
** so we wait for the remaining data of a message.
*/
pbs_tcp_timeout = FOREVER;
protver = disrsi(static_chan, &ret);
if (ret != DIS_SUCCESS)
{
TM_DBPRT(("%s: protocol version dis error %d\n", __func__, ret))
goto tm_poll_error;
}
if (protver != TM_PROTOCOL_VER)
{
TM_DBPRT(("%s: bad protocol version %d\n", __func__, protver))
goto tm_poll_error;
}
mtype = disrsi(static_chan, &ret);
if (ret != DIS_SUCCESS)
{
TM_DBPRT(("%s: mtype dis error %d\n", __func__, ret))
goto tm_poll_error;
}
nevent = disrsi(static_chan, &ret);
if (ret != DIS_SUCCESS)
{
TM_DBPRT(("%s: event dis error %d\n", __func__, ret))
goto tm_poll_error;
}
*result_event = nevent;
TM_DBPRT(("%s: got event %d return %d\n", __func__, nevent, mtype))
if ((ep = find_event(nevent)) == NULL)
{
TM_DBPRT(("%s: No event found for number %d\n", __func__, nevent));
DIS_tcp_close(static_chan);
static_chan = NULL;
local_conn = -1;
return TM_ENOEVENT;
}
if (mtype == TM_ERROR) /* problem, read error num */
{
*tm_errno = disrsi(static_chan, &ret);
TM_DBPRT(("%s: event %d error %d\n", __func__, nevent, *tm_errno));
goto tm_poll_done;
}
*tm_errno = TM_SUCCESS;
switch (ep->e_mtype)
{
/*
** auxiliary info (
** number of nodes int;
** nodeid[0] int;
** ...
** nodeid[n-1] int;
** parent jobid string;
** parent nodeid int;
** parent taskid int;
** )
*/
case TM_INIT:
nnodes = disrsi(static_chan, &ret);
if (ret != DIS_SUCCESS)
{
TM_DBPRT(("%s: INIT failed nnodes\n", __func__))
goto tm_poll_error;
}
node_table = (tm_node_id *)calloc(nnodes + 1,
sizeof(tm_node_id));
if (node_table == NULL)
{
perror("Memory allocation failed");
goto tm_poll_error;
}
TM_DBPRT(("%s: INIT nodes %d\n", __func__, nnodes))
for (i = 0; i < nnodes; i++)
{
node_table[i] = disrsi(static_chan, &ret);
if (ret != DIS_SUCCESS)
{
TM_DBPRT(("%s: INIT failed nodeid %d\n", __func__, i))
goto tm_poll_error;
}
}
node_table[nnodes] = TM_ERROR_NODE;
jobid = disrst(static_chan, &ret);
if (ret != DIS_SUCCESS)
{
TM_DBPRT(("%s: INIT failed jobid\n", __func__))
goto tm_poll_error;
}
TM_DBPRT(("%s: INIT daddy jobid %s\n", __func__, jobid))
node = disrsi(static_chan, &ret);
if (ret != DIS_SUCCESS)
{
TM_DBPRT(("%s: INIT failed parent nodeid\n", __func__))
goto tm_poll_error;
}
TM_DBPRT(("%s: INIT daddy node %d\n", __func__, node))
tid = disrsi(static_chan, &ret);
if (ret != DIS_SUCCESS)
{
TM_DBPRT(("%s: INIT failed parent taskid\n", __func__))
goto tm_poll_error;
}
TM_DBPRT(("%s: INIT daddy tid %lu\n", __func__, (unsigned long)tid))
roots = (struct tm_roots *)ep->e_info;
roots->tm_parent = new_task(jobid, node, tid);
roots->tm_me = new_task(tm_jobid,
tm_jobndid,
tm_jobtid);
roots->tm_nnodes = nnodes;
roots->tm_ntasks = 0; /* TODO */
roots->tm_taskpoolid = -1; /* what? */
roots->tm_tasklist = NULL; /* TODO */
break;
case TM_TASKS:
thold = (struct taskhold *)ep->e_info;
tidp = thold->list;
num = thold->size;
for (i = 0;; i++)
{
tid = disrsi(static_chan, &ret);
if (tid == TM_NULL_TASK)
break;
if (ret != DIS_SUCCESS)
goto tm_poll_error;
if (i < num)
{
tidp[i] = new_task(tm_jobid,
ep->e_node, tid);
}
}
if (i < num)
tidp[i] = TM_NULL_TASK;
*(thold->ntasks) = i;
break;
case TM_SPAWN:
tid = disrsi(static_chan, &ret);
if (ret != DIS_SUCCESS)
{
TM_DBPRT(("%s: SPAWN failed tid\n", __func__))
goto tm_poll_error;
}
tidp = (tm_task_id *)ep->e_info;
*tidp = new_task(tm_jobid, ep->e_node, tid);
break;
case TM_SIGNAL:
break;
case TM_OBIT:
obitvalp = (int *)ep->e_info;
*obitvalp = disrsi(static_chan, &ret);
if (ret != DIS_SUCCESS)
{
TM_DBPRT(("%s: OBIT failed obitval\n", __func__))
goto tm_poll_error;
}
break;
case TM_POSTINFO:
break;
case TM_GETINFO:
ihold = (struct infohold *)ep->e_info;
info = disrcs(static_chan, (size_t *)ihold->info_len, &ret);
if (ret != DIS_SUCCESS)
{
if (info != NULL)
free(info);
TM_DBPRT(("%s: GETINFO failed info\n", __func__))
break;
}
memcpy(ihold->info, info, MIN(*ihold->info_len, ihold->len));
free(info);
break;
case TM_RESOURCES:
rhold = (struct reschold *)ep->e_info;
info = disrst(static_chan, &ret);
if (ret != DIS_SUCCESS)
{
if (info != NULL)
free(info);
break;
}
snprintf(rhold->resc, rhold->len, "%s", info);
free(info);
break;
default:
TM_DBPRT(("%s: unknown event command %d\n", __func__, ep->e_mtype))
goto tm_poll_error;
}
DIS_tcp_wflush(static_chan);
tm_poll_done:
if (jobid != NULL)
free(jobid);
del_event(ep);
if (tcp_chan_has_data(static_chan) == FALSE)
{
DIS_tcp_cleanup(static_chan);
static_chan = NULL;
}
return TM_SUCCESS;
tm_poll_error:
if (jobid != NULL)
free(jobid);
if (ep)
del_event(ep);
close(local_conn);
DIS_tcp_cleanup(static_chan);
static_chan = NULL;
local_conn = -1;
return TM_ENOTCONNECTED;
}
| 0 |
[
"CWE-264"
] |
torque
|
f2f4c950f3d461a249111c8826da3beaafccace9
| 172,037,914,577,507,980,000,000,000,000,000,000,000 | 379 |
TRQ-2885 - limit tm_adopt() to only adopt a session id that
is owned by the calling user.
|
point_right(PG_FUNCTION_ARGS)
{
Point *pt1 = PG_GETARG_POINT_P(0);
Point *pt2 = PG_GETARG_POINT_P(1);
PG_RETURN_BOOL(FPgt(pt1->x, pt2->x));
}
| 0 |
[
"CWE-703",
"CWE-189"
] |
postgres
|
31400a673325147e1205326008e32135a78b4d8a
| 29,628,556,499,866,233,000,000,000,000,000,000,000 | 7 |
Predict integer overflow to avoid buffer overruns.
Several functions, mostly type input functions, calculated an allocation
size such that the calculation wrapped to a small positive value when
arguments implied a sufficiently-large requirement. Writes past the end
of the inadvertent small allocation followed shortly thereafter.
Coverity identified the path_in() vulnerability; code inspection led to
the rest. In passing, add check_stack_depth() to prevent stack overflow
in related functions.
Back-patch to 8.4 (all supported versions). The non-comment hstore
changes touch code that did not exist in 8.4, so that part stops at 9.0.
Noah Misch and Heikki Linnakangas, reviewed by Tom Lane.
Security: CVE-2014-0064
|
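The CVE-2014-0064 pattern: an allocation size computed as count * elemsize wraps to a small positive value, and writes then run past the short buffer. The standard remedy is a bound check before the multiply, e.g.:

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

/* Allocate count elements of size elemsize, refusing to let the
 * multiplication wrap (mirrors what a checked palloc wrapper does). */
void *alloc_array_checked(size_t count, size_t elemsize)
{
    if (elemsize != 0 && count > SIZE_MAX / elemsize)
        return NULL;          /* would overflow: caller sees failure */
    return malloc(count * elemsize);
}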
uchar *in_time::get_value(Item *item)
{
tmp.val= item->val_time_packed(current_thd);
if (item->null_value)
return 0;
tmp.unsigned_flag= 1L;
return (uchar*) &tmp;
}
| 0 |
[
"CWE-617"
] |
server
|
807945f2eb5fa22e6f233cc17b85a2e141efe2c8
| 24,445,875,696,049,393,000,000,000,000,000,000,000 | 8 |
MDEV-26402: A SEGV in Item_field::used_tables/update_depend_map_for_order...
When doing condition pushdown from HAVING into WHERE,
Item_equal::create_pushable_equalities() calls
item->set_extraction_flag(IMMUTABLE_FL) for constant items.
Then, Item::cleanup_excluding_immutables_processor() checks for this flag
to see if it should call item->cleanup() or leave the item as-is.
The failure happens when a constant item has a non-constant one inside it,
like:
(tbl.col=0 AND impossible_cond)
item->walk(cleanup_excluding_immutables_processor) works in a bottom-up
way so it
1. will call Item_func_eq(tbl.col=0)->cleanup()
2. will not call Item_cond_and->cleanup (as the AND is constant)
This creates an item tree where a fixed Item has an un-fixed Item inside
it which eventually causes an assertion failure.
Fixed by introducing this rule: instead of just calling
item->set_extraction_flag(IMMUTABLE_FL);
we call Item::walk() to set the flag for all sub-items of the item.
|