func (string, length 0 to 484k) | target (int64, 0 or 1) | cwe (list, length 0 to 4) | project (string, 799 classes) | commit_id (string, length 40) | hash (float64) | size (int64, 1 to 24k) | message (string, length 0 to 13.3k)
---|---|---|---|---|---|---|---|
xfs_attr_leaf_addname(xfs_da_args_t *args)
{
xfs_inode_t *dp;
struct xfs_buf *bp;
int retval, error, committed, forkoff;
trace_xfs_attr_leaf_addname(args);
/*
* Read the (only) block in the attribute list in.
*/
dp = args->dp;
args->blkno = 0;
error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, -1, &bp);
if (error)
return error;
/*
* Look up the given attribute in the leaf block. Figure out if
* the given flags produce an error or call for an atomic rename.
*/
retval = xfs_attr3_leaf_lookup_int(bp, args);
if ((args->flags & ATTR_REPLACE) && (retval == ENOATTR)) {
xfs_trans_brelse(args->trans, bp);
return retval;
} else if (retval == EEXIST) {
if (args->flags & ATTR_CREATE) { /* pure create op */
xfs_trans_brelse(args->trans, bp);
return retval;
}
trace_xfs_attr_leaf_replace(args);
/* save the attribute state for later removal*/
args->op_flags |= XFS_DA_OP_RENAME; /* an atomic rename */
args->blkno2 = args->blkno; /* set 2nd entry info*/
args->index2 = args->index;
args->rmtblkno2 = args->rmtblkno;
args->rmtblkcnt2 = args->rmtblkcnt;
args->rmtvaluelen2 = args->rmtvaluelen;
/*
* clear the remote attr state now that it is saved so that the
* values reflect the state of the attribute we are about to
* add, not the attribute we just found and will remove later.
*/
args->rmtblkno = 0;
args->rmtblkcnt = 0;
args->rmtvaluelen = 0;
}
/*
* Add the attribute to the leaf block, transitioning to a Btree
* if required.
*/
retval = xfs_attr3_leaf_add(bp, args);
if (retval == ENOSPC) {
/*
* Promote the attribute list to the Btree format, then
* Commit that transaction so that the node_addname() call
* can manage its own transactions.
*/
xfs_bmap_init(args->flist, args->firstblock);
error = xfs_attr3_leaf_to_node(args);
if (!error) {
error = xfs_bmap_finish(&args->trans, args->flist,
&committed);
}
if (error) {
ASSERT(committed);
args->trans = NULL;
xfs_bmap_cancel(args->flist);
return(error);
}
/*
* bmap_finish() may have committed the last trans and started
* a new one. We need the inode to be in all transactions.
*/
if (committed)
xfs_trans_ijoin(args->trans, dp, 0);
/*
* Commit the current trans (including the inode) and start
* a new one.
*/
error = xfs_trans_roll(&args->trans, dp);
if (error)
return (error);
/*
* Fob the whole rest of the problem off on the Btree code.
*/
error = xfs_attr_node_addname(args);
return(error);
}
/*
* Commit the transaction that added the attr name so that
* later routines can manage their own transactions.
*/
error = xfs_trans_roll(&args->trans, dp);
if (error)
return (error);
/*
* If there was an out-of-line value, allocate the blocks we
* identified for its storage and copy the value. This is done
* after we create the attribute so that we don't overflow the
* maximum size of a transaction and/or hit a deadlock.
*/
if (args->rmtblkno > 0) {
error = xfs_attr_rmtval_set(args);
if (error)
return(error);
}
/*
* If this is an atomic rename operation, we must "flip" the
* incomplete flags on the "new" and "old" attribute/value pairs
* so that one disappears and one appears atomically. Then we
* must remove the "old" attribute/value pair.
*/
if (args->op_flags & XFS_DA_OP_RENAME) {
/*
* In a separate transaction, set the incomplete flag on the
* "old" attr and clear the incomplete flag on the "new" attr.
*/
error = xfs_attr3_leaf_flipflags(args);
if (error)
return(error);
/*
* Dismantle the "old" attribute/value pair by removing
* a "remote" value (if it exists).
*/
args->index = args->index2;
args->blkno = args->blkno2;
args->rmtblkno = args->rmtblkno2;
args->rmtblkcnt = args->rmtblkcnt2;
args->rmtvaluelen = args->rmtvaluelen2;
if (args->rmtblkno) {
error = xfs_attr_rmtval_remove(args);
if (error)
return(error);
}
/*
* Read in the block containing the "old" attr, then
* remove the "old" attr from that block (neat, huh!)
*/
error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno,
-1, &bp);
if (error)
return error;
xfs_attr3_leaf_remove(bp, args);
/*
* If the result is small enough, shrink it all into the inode.
*/
if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) {
xfs_bmap_init(args->flist, args->firstblock);
error = xfs_attr3_leaf_to_shortform(bp, args, forkoff);
/* bp is gone due to xfs_da_shrink_inode */
if (!error) {
error = xfs_bmap_finish(&args->trans,
args->flist,
&committed);
}
if (error) {
ASSERT(committed);
args->trans = NULL;
xfs_bmap_cancel(args->flist);
return(error);
}
/*
* bmap_finish() may have committed the last trans
* and started a new one. We need the inode to be
* in all transactions.
*/
if (committed)
xfs_trans_ijoin(args->trans, dp, 0);
}
/*
* Commit the remove and start the next trans in series.
*/
error = xfs_trans_roll(&args->trans, dp);
} else if (args->rmtblkno > 0) {
/*
* Added a "remote" value, just clear the incomplete flag.
*/
error = xfs_attr3_leaf_clearflag(args);
}
return error;
}
| 0 |
[
"CWE-241",
"CWE-19"
] |
linux
|
8275cdd0e7ac550dcce2b3ef6d2fb3b808c1ae59
| 159,404,131,818,770,100,000,000,000,000,000,000,000 | 199 |
xfs: remote attribute overwrite causes transaction overrun
Commit e461fcb ("xfs: remote attribute lookups require the value
length") passes the remote attribute length in the xfs_da_args
structure on lookup so that CRC calculations and validity checking
can be performed correctly by related code. This, unfortunately has
the side effect of changing the args->valuelen parameter in cases
where it shouldn't.
That is, when we replace a remote attribute, the incoming
replacement stores the value and length in args->value and
args->valuelen, but then the lookup which finds the existing remote
attribute overwrites args->valuelen with the length of the remote
attribute being replaced. Hence when we go to create the new
attribute, we create it of the size of the existing remote
attribute, not the size it is supposed to be. When the new attribute
is much smaller than the old attribute, this results in a
transaction overrun and an ASSERT() failure on a debug kernel:
XFS: Assertion failed: tp->t_blk_res_used <= tp->t_blk_res, file: fs/xfs/xfs_trans.c, line: 331
Fix this by keeping the remote attribute value length separate to
the attribute value length in the xfs_da_args structure. This enables
us to pass the length of the remote attribute to be removed without
overwriting the new attribute's length.
Also, ensure that when we save remote block contexts for a later
rename we zero the original state variables so that we don't confuse
the state of the attribute to be removed with the state of the new
attribute that we just added. [Spotted by Brian Foster.]
Signed-off-by: Dave Chinner <[email protected]>
Reviewed-by: Brian Foster <[email protected]>
Signed-off-by: Dave Chinner <[email protected]>
|
void CalendarRegressionTest::test4035301()
{
UErrorCode status = U_ZERO_ERROR;
GregorianCalendar *c = new GregorianCalendar(98, 8, 7,status);
GregorianCalendar *d = new GregorianCalendar(98, 8, 7,status);
if (c->after(*d,status) ||
c->after(*c,status) ||
c->before(*d,status) ||
c->before(*c,status) ||
*c != *c ||
*c != *d)
dataerrln("Fail");
delete c;
delete d;
}
| 0 |
[
"CWE-190"
] |
icu
|
71dd84d4ffd6600a70e5bca56a22b957e6642bd4
| 313,035,345,759,953,960,000,000,000,000,000,000,000 | 15 |
ICU-12504 in ICU4C Persian cal, use int64_t math for one operation to avoid overflow; add tests in C and J
X-SVN-Rev: 40654
|
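The ICU-12504 message above describes widening one intermediate multiplication to 64-bit math so a large calendar value cannot overflow a 32-bit int. A minimal C sketch of that pattern follows; the function name, constant, and calendar arithmetic are hypothetical stand-ins, not ICU's actual Persian calendar code.

#include <stdint.h>

/* Hypothetical helper: computing an epoch-day count from a year.
 * Casting one operand to int64_t forces the multiplication to happen
 * in 64 bits, so (year - 1) * 365 cannot wrap for extreme year values.
 * A real implementation would range-check before narrowing the result. */
static int32_t days_for_year(int32_t year, int32_t day_of_year)
{
    int64_t days = (int64_t)(year - 1) * 365 + day_of_year;
    return (int32_t)days;
}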
static xmlNodePtr guess_array_map(encodeTypePtr type, zval *data, int style, xmlNodePtr parent TSRMLS_DC)
{
encodePtr enc = NULL;
if (data && Z_TYPE_P(data) == IS_ARRAY) {
if (is_map(data)) {
enc = get_conversion(APACHE_MAP);
} else {
enc = get_conversion(SOAP_ENC_ARRAY);
}
}
if (!enc) {
enc = get_conversion(IS_NULL);
}
return master_to_xml(enc, data, style, parent TSRMLS_CC);
}
| 0 |
[
"CWE-19"
] |
php-src
|
c8eaca013a3922e8383def6158ece2b63f6ec483
| 249,215,165,626,074,970,000,000,000,000,000,000,000 | 17 |
Added type checks
|
START_TEST(virgl_test_transfer_to_staging_with_iov_succeeds)
{
static const unsigned bufsize = 50;
struct virgl_context ctx = {0};
struct virgl_resource res = {0};
struct pipe_box box = {.width = bufsize, .height = 1, .depth = 1};
int ret;
ret = testvirgl_init_ctx_cmdbuf(&ctx);
ck_assert_int_eq(ret, 0);
ret = testvirgl_create_backed_simple_buffer(&res, 1, bufsize, VIRGL_BIND_STAGING);
ck_assert_int_eq(ret, 0);
virgl_renderer_ctx_attach_resource(ctx.ctx_id, res.handle);
box.width = bufsize;
virgl_encoder_transfer(&ctx, &res, 0, 0, &box, 0, VIRGL_TRANSFER_TO_HOST);
ret = virgl_renderer_submit_cmd(ctx.cbuf->buf, ctx.ctx_id, ctx.cbuf->cdw);
ck_assert_int_eq(ret, 0);
virgl_renderer_ctx_detach_resource(ctx.ctx_id, res.handle);
testvirgl_destroy_backed_res(&res);
testvirgl_fini_ctx_cmdbuf(&ctx);
}
| 0 |
[
"CWE-909"
] |
virglrenderer
|
b05bb61f454eeb8a85164c8a31510aeb9d79129c
| 330,089,740,769,447,200,000,000,000,000,000,000,000 | 25 |
vrend: clear memory when allocating a host-backed memory resource
Closes: #249
Signed-off-by: Gert Wollny <[email protected]>
Reviewed-by: Chia-I Wu <[email protected]>
|
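The vrend commit message above says the fix is to clear memory when allocating a host-backed resource (CWE-909, missing initialization). A minimal sketch of that pattern, assuming a hypothetical allocator name:

#include <stdlib.h>

/* Zero the backing store at allocation time so stale heap contents can
 * never be read back by the guest through a transfer. calloc() gives the
 * same effect as malloc() followed by memset(ptr, 0, size). */
static void *alloc_host_backed_zeroed(size_t size)
{
    return calloc(1, size);
}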
int lgff_init(struct hid_device* hid)
{
struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
struct input_dev *dev = hidinput->input;
const signed short *ff_bits = ff_joystick;
int error;
int i;
/* Check that the report looks ok */
if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7))
return -ENODEV;
for (i = 0; i < ARRAY_SIZE(devices); i++) {
if (dev->id.vendor == devices[i].idVendor &&
dev->id.product == devices[i].idProduct) {
ff_bits = devices[i].ff;
break;
}
}
for (i = 0; ff_bits[i] >= 0; i++)
set_bit(ff_bits[i], dev->ffbit);
error = input_ff_create_memless(dev, NULL, hid_lgff_play);
if (error)
return error;
if ( test_bit(FF_AUTOCENTER, dev->ffbit) )
dev->ff->set_autocenter = hid_lgff_set_autocenter;
pr_info("Force feedback for Logitech force feedback devices by Johann Deneux <[email protected]>\n");
return 0;
}
| 1 |
[
"CWE-787"
] |
linux
|
d9d4b1e46d9543a82c23f6df03f4ad697dab361b
| 154,267,941,060,945,360,000,000,000,000,000,000,000 | 34 |
HID: Fix assumption that devices have inputs
The syzbot fuzzer found a slab-out-of-bounds write bug in the hid-gaff
driver. The problem is caused by the driver's assumption that the
device must have an input report. While this will be true for all
normal HID input devices, a suitably malicious device can violate the
assumption.
The same assumption is present in over a dozen other HID drivers.
This patch fixes them by checking that the list of hid_inputs for the
hid_device is nonempty before allowing it to be used.
Reported-and-tested-by: [email protected]
Signed-off-by: Alan Stern <[email protected]>
CC: <[email protected]>
Signed-off-by: Benjamin Tissoires <[email protected]>
|
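The commit message above describes the fix pattern: verify that hid->inputs is non-empty before taking its first entry, instead of assuming every HID device has an input report. A hedged, illustrative fragment of that guard as it would read near the top of lgff_init(); this mirrors the described pattern, not the literal upstream diff.

	struct hid_input *hidinput;

	if (list_empty(&hid->inputs)) {
		hid_err(hid, "no inputs found\n");
		return -ENODEV;
	}
	hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
	dev = hidinput->input;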
static int cipso_v4_cache_init(void)
{
u32 iter;
cipso_v4_cache = kcalloc(CIPSO_V4_CACHE_BUCKETS,
sizeof(struct cipso_v4_map_cache_bkt),
GFP_KERNEL);
if (cipso_v4_cache == NULL)
return -ENOMEM;
for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) {
spin_lock_init(&cipso_v4_cache[iter].lock);
cipso_v4_cache[iter].size = 0;
INIT_LIST_HEAD(&cipso_v4_cache[iter].list);
}
return 0;
}
| 0 |
[
"CWE-362"
] |
linux-2.6
|
f6d8bd051c391c1c0458a30b2a7abcd939329259
| 54,929,704,826,221,530,000,000,000,000,000,000,000 | 18 |
inet: add RCU protection to inet->opt
We lack proper synchronization to manipulate inet->opt ip_options
Problem is ip_make_skb() calls ip_setup_cork() and
ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options),
without any protection against another thread manipulating inet->opt.
Another thread can change inet->opt pointer and free old one under us.
Use RCU to protect inet->opt (changed to inet->inet_opt).
Instead of handling atomic refcounts, just copy ip_options when
necessary, to avoid cache line dirtying.
We can't insert an rcu_head in struct ip_options since it's included in
skb->cb[], so this patch is large because I had to introduce a new
ip_options_rcu structure.
Signed-off-by: Eric Dumazet <[email protected]>
Cc: Herbert Xu <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
static int obj_trust(int id, X509 *x, int flags)
{
X509_CERT_AUX *ax = x->aux;
int i;
if (!ax)
return X509_TRUST_UNTRUSTED;
if (ax->reject) {
for (i = 0; i < sk_ASN1_OBJECT_num(ax->reject); i++) {
ASN1_OBJECT *obj = sk_ASN1_OBJECT_value(ax->reject, i);
int nid = OBJ_obj2nid(obj);
if (nid == id || nid == NID_anyExtendedKeyUsage)
return X509_TRUST_REJECTED;
}
}
if (ax->trust) {
for (i = 0; i < sk_ASN1_OBJECT_num(ax->trust); i++) {
ASN1_OBJECT *obj = sk_ASN1_OBJECT_value(ax->trust, i);
int nid = OBJ_obj2nid(obj);
if (nid == id || nid == NID_anyExtendedKeyUsage)
return X509_TRUST_TRUSTED;
}
/*
* Reject when explicit trust EKU are set and none match.
*
* Returning untrusted is enough for full chains that end in
* self-signed roots, because when explicit trust is specified it
* suppresses the default blanket trust of self-signed objects.
*
* But for partial chains, this is not enough, because absent a similar
* trust-self-signed policy, non matching EKUs are indistinguishable
* from lack of EKU constraints.
*
* Therefore, failure to match any trusted purpose must trigger an
* explicit reject.
*/
return X509_TRUST_REJECTED;
}
return X509_TRUST_UNTRUSTED;
}
| 1 |
[] |
openssl
|
33cc5dde478ba5ad79f8fd4acd8737f0e60e236e
| 330,659,957,301,291,160,000,000,000,000,000,000,000 | 42 |
Compat self-signed trust with reject-only aux data
When auxiliary data contains only reject entries, continue to trust
self-signed objects just as when no auxiliary data is present.
This makes it possible to reject specific uses without changing
what's accepted (and thus overriding the underlying EKU).
Added new supported certs and doubled test count from 38 to 76.
Reviewed-by: Dr. Stephen Henson <[email protected]>
|
authzPretty(
Syntax *syntax,
struct berval *val,
struct berval *out,
void *ctx)
{
int rc;
Debug( LDAP_DEBUG_TRACE, ">>> authzPretty: <%s>\n",
val->bv_val, 0, 0 );
rc = authzPrettyNormal( val, out, ctx, 0 );
Debug( LDAP_DEBUG_TRACE, "<<< authzPretty: <%s> (%d)\n",
out->bv_val ? out->bv_val : "(null)" , rc, 0 );
return rc;
}
| 0 |
[
"CWE-617"
] |
openldap
|
02dfc32d658fadc25e4040f78e36592f6e1e1ca0
| 95,270,106,901,840,400,000,000,000,000,000,000,000 | 18 |
ITS#9406 fix debug msg
|
static int ath6kl_wmi_ready_event_rx(struct wmi *wmi, u8 *datap, int len)
{
struct wmi_ready_event_2 *ev = (struct wmi_ready_event_2 *) datap;
if (len < sizeof(struct wmi_ready_event_2))
return -EINVAL;
ath6kl_ready_event(wmi->parent_dev, ev->mac_addr,
le32_to_cpu(ev->sw_version),
le32_to_cpu(ev->abi_version), ev->phy_cap);
return 0;
}
| 0 |
[
"CWE-125"
] |
linux
|
5d6751eaff672ea77642e74e92e6c0ac7f9709ab
| 221,210,390,347,381,180,000,000,000,000,000,000,000 | 13 |
ath6kl: add some bounds checking
The "ev->traffic_class" and "reply->ac" variables come from the network
and they're used as an offset into the wmi->stream_exist_for_ac[] array.
Those variables are u8 so they can be 0-255 but the stream_exist_for_ac[]
array only has WMM_NUM_AC (4) elements. We need to add a couple bounds
checks to prevent array overflows.
I also modified one existing check from "if (traffic_class > 3) {" to
"if (traffic_class >= WMM_NUM_AC) {" just to make them all consistent.
Fixes: bdcd81707973 (" Add ath6kl cleaned up driver")
Signed-off-by: Dan Carpenter <[email protected]>
Signed-off-by: Kalle Valo <[email protected]>
|
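The commit message above spells out the fix: a u8 class value taken from the network can be 0-255, so it must be checked against WMM_NUM_AC (4) before indexing stream_exist_for_ac[]. A self-contained sketch of that bounds check with simplified stand-in types:

#include <errno.h>
#include <stdint.h>

#define WMM_NUM_AC 4

struct wmi_state {
    uint8_t stream_exist_for_ac[WMM_NUM_AC];
};

/* Reject out-of-range traffic classes before they are used as an index. */
static int set_stream_exists(struct wmi_state *wmi, uint8_t traffic_class)
{
    if (traffic_class >= WMM_NUM_AC)
        return -EINVAL;
    wmi->stream_exist_for_ac[traffic_class] = 1;
    return 0;
}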
store_two(int c, char *s)
{
s[0] = (char)((c >> 8) & 255);
s[1] = (char)(c & 255);
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
t1utils
|
6b9d1aafcb61a3663c883663eb19ccdbfcde8d33
| 317,746,582,784,733,800,000,000,000,000,000,000,000 | 5 |
Security fixes.
- Don't overflow the small cs_start buffer (reported by Niels
Thykier via the debian tracker (Jakub Wilk), found with a
fuzzer ("American fuzzy lop")).
- Cast arguments to <ctype.h> functions to unsigned char.
|
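The t1utils commit message above names two classic fixes: bound the copy into the small cs_start buffer, and cast char arguments to <ctype.h> functions to unsigned char (passing a possibly negative plain char is undefined behavior). A small illustrative sketch of both, with a hypothetical buffer size and function name:

#include <ctype.h>
#include <stdio.h>
#include <string.h>

static void handle_token(const char *src)
{
    char cs_start[10];

    /* Safe ctype usage: cast through unsigned char before classification. */
    if (isalpha((unsigned char)src[0]))
        printf("token starts with a letter\n");

    /* Bounded copy: never write past the fixed-size buffer. */
    strncpy(cs_start, src, sizeof(cs_start) - 1);
    cs_start[sizeof(cs_start) - 1] = '\0';
}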
lou_setDataPath (char *path)
{
dataPathPtr = NULL;
if (path == NULL)
return NULL;
strcpy (dataPath, path);
dataPathPtr = dataPath;
return dataPathPtr;
}
| 0 |
[] |
liblouis
|
dc97ef791a4fae9da11592c79f9f79e010596e0c
| 69,509,947,786,985,300,000,000,000,000,000,000,000 | 9 |
Merge branch 'table_resolver'
|
int my_wc_mb_utf8_escape_double_quote_and_backslash(CHARSET_INFO *cs,
my_wc_t wc,
uchar *str, uchar *end)
{
return my_wc_mb_utf8_escape(cs, wc, str, end, '"', '\\');
}
| 0 |
[
"CWE-476"
] |
server
|
3a52569499e2f0c4d1f25db1e81617a9d9755400
| 73,827,947,712,842,940,000,000,000,000,000,000,000 | 6 |
MDEV-25636: Bug report: abortion in sql/sql_parse.cc:6294
The asserion failure was caused by this query
select /*id=1*/ from t1
where
col= ( select /*id=2*/ from ... where corr_cond1
union
select /*id=4*/ from ... where corr_cond2)
Here,
- select with id=2 was correlated due to corr_cond1.
- select with id=4 was initially correlated due to corr_cond2, but then
the optimizer optimized away the correlation, making the select with id=4
uncorrelated.
However, since select with id=2 remained correlated, the execution had to
re-compute the whole UNION. When it tried to execute select with id=4, it
hit an assertion (join buffer already free'd).
This is because select with id=4 has freed its execution structures after
it has been executed once. The select is uncorrelated, so it did not expect
it would need to be executed for the second time.
Fixed this by adding this logic in
st_select_lex::optimize_unflattened_subqueries():
If a member of a UNION is correlated, mark all its members as
correlated, so that they are prepared to be executed multiple times.
|
static int crypto_report_one(struct crypto_alg *alg,
struct crypto_user_alg *ualg, struct sk_buff *skb)
{
memcpy(&ualg->cru_name, &alg->cra_name, sizeof(ualg->cru_name));
memcpy(&ualg->cru_driver_name, &alg->cra_driver_name,
sizeof(ualg->cru_driver_name));
memcpy(&ualg->cru_module_name, module_name(alg->cra_module),
CRYPTO_MAX_ALG_NAME);
ualg->cru_flags = alg->cra_flags;
ualg->cru_refcnt = atomic_read(&alg->cra_refcnt);
if (nla_put_u32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority))
goto nla_put_failure;
if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
struct crypto_report_larval rl;
snprintf(rl.type, CRYPTO_MAX_ALG_NAME, "%s", "larval");
if (nla_put(skb, CRYPTOCFGA_REPORT_LARVAL,
sizeof(struct crypto_report_larval), &rl))
goto nla_put_failure;
goto out;
}
if (alg->cra_type && alg->cra_type->report) {
if (alg->cra_type->report(skb, alg))
goto nla_put_failure;
goto out;
}
switch (alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL)) {
case CRYPTO_ALG_TYPE_CIPHER:
if (crypto_report_cipher(skb, alg))
goto nla_put_failure;
break;
case CRYPTO_ALG_TYPE_COMPRESS:
if (crypto_report_comp(skb, alg))
goto nla_put_failure;
break;
}
out:
return 0;
nla_put_failure:
return -EMSGSIZE;
}
| 1 |
[
"CWE-310"
] |
linux
|
9a5467bf7b6e9e02ec9c3da4e23747c05faeaac6
| 164,974,576,304,277,160,000,000,000,000,000,000,000 | 51 |
crypto: user - fix info leaks in report API
Three errors resulting in kernel memory disclosure:
1/ The structures used for the netlink based crypto algorithm report API
are located on the stack. As snprintf() does not fill the remainder of
the buffer with null bytes, those stack bytes will be disclosed to users
of the API. Switch to strncpy() to fix this.
2/ crypto_report_one() does not initialize all field of struct
crypto_user_alg. Fix this to fix the heap info leak.
3/ For the module name we should copy only as many bytes as
module_name() returns -- not as much as the destination buffer could
hold. But the current code does not and therefore copies random data
from behind the end of the module name, as the module name is always
shorter than CRYPTO_MAX_ALG_NAME.
Also switch to use strncpy() to copy the algorithm's name and
driver_name. They are strings, after all.
Signed-off-by: Mathias Krause <[email protected]>
Cc: Steffen Klassert <[email protected]>
Signed-off-by: Herbert Xu <[email protected]>
|
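The commit message above prescribes three things: start from fully initialized report structures, bound name copies by the destination size, and never copy more than the string itself. A user-space sketch of that copy discipline; the struct below is a stand-in, not the kernel's struct crypto_user_alg:

#include <string.h>

#define NAME_MAX_LEN 64

struct alg_report {
    char name[NAME_MAX_LEN];
    char driver_name[NAME_MAX_LEN];
    unsigned int flags;
};

static void fill_report(struct alg_report *r,
                        const char *name, const char *driver, unsigned int flags)
{
    memset(r, 0, sizeof(*r));                      /* no uninitialized bytes leak */
    strncpy(r->name, name, sizeof(r->name) - 1);   /* stops at the source NUL */
    strncpy(r->driver_name, driver, sizeof(r->driver_name) - 1);
    r->flags = flags;
}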
void comps_rtree_values_walk(COMPS_RTree * rt, void* udata,
void (*walk_f)(void*, void*)) {
COMPS_HSList *tmplist, *tmp_subnodes;
COMPS_HSListItem *it;
tmplist = comps_hslist_create();
comps_hslist_init(tmplist, NULL, NULL, NULL);
comps_hslist_append(tmplist, rt->subnodes, 0);
while (tmplist->first != NULL) {
it = tmplist->first;
comps_hslist_remove(tmplist, tmplist->first);
tmp_subnodes = (COMPS_HSList*)it->data;
for (it = tmp_subnodes->first; it != NULL; it=it->next) {
if (((COMPS_RTreeData*)it->data)->subnodes->first) {
comps_hslist_append(tmplist,
((COMPS_RTreeData*)it->data)->subnodes, 0);
}
if (((COMPS_RTreeData*)it->data)->data != NULL) {
walk_f(udata, ((COMPS_RTreeData*)it->data)->data);
}
}
}
comps_hslist_destroy(&tmplist);
}
| 0 |
[
"CWE-416",
"CWE-862"
] |
libcomps
|
e3a5d056633677959ad924a51758876d415e7046
| 182,680,194,217,803,800,000,000,000,000,000,000,000 | 23 |
Fix UAF in comps_objmrtree_unite function
The added field is not used at all in many places and it is probably the
left-over of some copy-paste.
|
void set_seek_pre_roll(uint64_t seek_pre_roll) {
seek_pre_roll_ = seek_pre_roll;
}
| 0 |
[
"CWE-20"
] |
libvpx
|
f00890eecdf8365ea125ac16769a83aa6b68792d
| 86,462,849,020,995,020,000,000,000,000,000,000,000 | 3 |
update libwebm to libwebm-1.0.0.27-352-g6ab9fcf
https://chromium.googlesource.com/webm/libwebm/+log/af81f26..6ab9fcf
Change-Id: I9d56e1fbaba9b96404b4fbabefddc1a85b79c25d
|
cupsdSendError(cupsd_client_t *con, /* I - Connection */
http_status_t code, /* I - Error code */
int auth_type)/* I - Authentication type */
{
char location[HTTP_MAX_VALUE]; /* Location field */
cupsdLogClient(con, CUPSD_LOG_DEBUG2, "cupsdSendError code=%d, auth_type=%d", code, auth_type);
#ifdef HAVE_SSL
/*
* Force client to upgrade for authentication if that is how the
* server is configured...
*/
if (code == HTTP_STATUS_UNAUTHORIZED &&
DefaultEncryption == HTTP_ENCRYPTION_REQUIRED &&
_cups_strcasecmp(httpGetHostname(con->http, NULL, 0), "localhost") &&
!httpIsEncrypted(con->http))
{
code = HTTP_STATUS_UPGRADE_REQUIRED;
}
#endif /* HAVE_SSL */
/*
* Put the request in the access_log file...
*/
cupsdLogRequest(con, code);
/*
* To work around bugs in some proxies, don't use Keep-Alive for some
* error messages...
*
* Kerberos authentication doesn't work without Keep-Alive, so
* never disable it in that case.
*/
strlcpy(location, httpGetField(con->http, HTTP_FIELD_LOCATION), sizeof(location));
httpClearFields(con->http);
httpClearCookie(con->http);
httpSetField(con->http, HTTP_FIELD_LOCATION, location);
if (code >= HTTP_STATUS_BAD_REQUEST && con->type != CUPSD_AUTH_NEGOTIATE)
httpSetKeepAlive(con->http, HTTP_KEEPALIVE_OFF);
if (httpGetVersion(con->http) >= HTTP_VERSION_1_1 &&
httpGetKeepAlive(con->http) == HTTP_KEEPALIVE_OFF)
httpSetField(con->http, HTTP_FIELD_CONNECTION, "close");
if (code >= HTTP_STATUS_BAD_REQUEST)
{
/*
* Send a human-readable error message.
*/
char message[4096], /* Message for user */
urltext[1024], /* URL redirection text */
redirect[1024]; /* Redirection link */
const char *text; /* Status-specific text */
redirect[0] = '\0';
if (code == HTTP_STATUS_UNAUTHORIZED)
text = _cupsLangString(con->language,
_("Enter your username and password or the "
"root username and password to access this "
"page. If you are using Kerberos authentication, "
"make sure you have a valid Kerberos ticket."));
else if (code == HTTP_STATUS_UPGRADE_REQUIRED)
{
text = urltext;
snprintf(urltext, sizeof(urltext),
_cupsLangString(con->language,
_("You must access this page using the URL "
"<A HREF=\"https://%s:%d%s\">"
"https://%s:%d%s</A>.")),
con->servername, con->serverport, con->uri,
con->servername, con->serverport, con->uri);
snprintf(redirect, sizeof(redirect),
"<META HTTP-EQUIV=\"Refresh\" "
"CONTENT=\"3;URL=https://%s:%d%s\">\n",
con->servername, con->serverport, con->uri);
}
else if (code == HTTP_STATUS_CUPS_WEBIF_DISABLED)
text = _cupsLangString(con->language,
_("The web interface is currently disabled. Run "
"\"cupsctl WebInterface=yes\" to enable it."));
else
text = "";
snprintf(message, sizeof(message),
"<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.01 Transitional//EN\" "
"\"http://www.w3.org/TR/html4/loose.dtd\">\n"
"<HTML>\n"
"<HEAD>\n"
"\t<META HTTP-EQUIV=\"Content-Type\" "
"CONTENT=\"text/html; charset=utf-8\">\n"
"\t<TITLE>%s - " CUPS_SVERSION "</TITLE>\n"
"\t<LINK REL=\"STYLESHEET\" TYPE=\"text/css\" "
"HREF=\"/cups.css\">\n"
"%s"
"</HEAD>\n"
"<BODY>\n"
"<H1>%s</H1>\n"
"<P>%s</P>\n"
"</BODY>\n"
"</HTML>\n",
_httpStatus(con->language, code), redirect,
_httpStatus(con->language, code), text);
/*
* Send an error message back to the client. If the error code is a
* 400 or 500 series, make sure the message contains some text, too!
*/
size_t length = strlen(message); /* Length of message */
httpSetLength(con->http, length);
if (!cupsdSendHeader(con, code, "text/html", auth_type))
return (0);
if (httpWrite2(con->http, message, length) < 0)
return (0);
if (httpFlushWrite(con->http) < 0)
return (0);
}
else
{
httpSetField(con->http, HTTP_FIELD_CONTENT_LENGTH, "0");
if (!cupsdSendHeader(con, code, NULL, auth_type))
return (0);
}
return (1);
}
| 0 |
[
"CWE-120"
] |
cups
|
f24e6cf6a39300ad0c3726a41a4aab51ad54c109
| 197,949,406,139,255,100,000,000,000,000,000,000,000 | 144 |
Fix multiple security/disclosure issues:
- CVE-2019-8696 and CVE-2019-8675: Fixed SNMP buffer overflows (rdar://51685251)
- Fixed IPP buffer overflow (rdar://50035411)
- Fixed memory disclosure issue in the scheduler (rdar://51373853)
- Fixed DoS issues in the scheduler (rdar://51373929)
|
**/
CImg<T>& operator--() {
if (is_empty()) return *this;
cimg_pragma_openmp(parallel for cimg_openmp_if(size()>=524288))
cimg_rof(*this,ptrd,T) *ptrd = *ptrd - (T)1;
return *this;
| 0 |
[
"CWE-125"
] |
CImg
|
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
| 309,476,662,199,022,200,000,000,000,000,000,000,000 | 6 |
Fix other issues in 'CImg<T>::load_bmp()'.
|
static int pb0100_probe(struct sd *sd)
{
u16 sensor;
int err;
err = stv06xx_read_sensor(sd, PB_IDENT, &sensor);
if (err < 0)
return -ENODEV;
if ((sensor >> 8) != 0x64)
return -ENODEV;
pr_info("Photobit pb0100 sensor detected\n");
sd->gspca_dev.cam.cam_mode = pb0100_mode;
sd->gspca_dev.cam.nmodes = ARRAY_SIZE(pb0100_mode);
return 0;
}
| 0 |
[
"CWE-476"
] |
linux
|
485b06aadb933190f4bc44e006076bc27a23f205
| 188,531,083,005,782,580,000,000,000,000,000,000,000 | 19 |
media: stv06xx: add missing descriptor sanity checks
Make sure to check that we have two alternate settings and at least one
endpoint before accessing the second altsetting structure and
dereferencing the endpoint arrays.
This specifically avoids dereferencing NULL-pointers or corrupting
memory when a device does not have the expected descriptors.
Note that the sanity checks in stv06xx_start() and pb0100_start() are
not redundant as the driver is mixing looking up altsettings by index
and by number, which may not coincide.
Fixes: 8668d504d72c ("V4L/DVB (12082): gspca_stv06xx: Add support for st6422 bridge and sensor")
Fixes: c0b33bdc5b8d ("[media] gspca-stv06xx: support bandwidth changing")
Cc: stable <[email protected]> # 2.6.31
Cc: Hans de Goede <[email protected]>
Signed-off-by: Johan Hovold <[email protected]>
Signed-off-by: Hans Verkuil <[email protected]>
Signed-off-by: Mauro Carvalho Chehab <[email protected]>
|
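The commit message above describes the added sanity checks: make sure the interface actually has two altsettings and that the altsetting has at least one endpoint before dereferencing either. A hedged fragment showing that shape of check (illustrative, not the literal upstream diff):

	struct usb_host_interface *alt;

	if (intf->num_altsetting < 2)
		return -ENODEV;
	alt = &intf->altsetting[1];
	if (alt->desc.bNumEndpoints < 1)
		return -ENODEV;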
virtual Status checkAuthForCommand(Client* client,
const std::string& dbname,
const BSONObj& cmdObj) const {
return auth::checkAuthForUsersInfoCommand(client, dbname, cmdObj);
}
| 0 |
[
"CWE-613"
] |
mongo
|
e55d6e2292e5dbe2f97153251d8193d1cc89f5d7
| 69,509,678,923,186,810,000,000,000,000,000,000,000 | 5 |
SERVER-38984 Validate unique User ID on UserCache hit
|
maybe_update (GifContext *context,
gint x,
gint y,
gint width,
gint height)
{
if (clip_frame (context, &x, &y, &width, &height))
(*context->update_func) (context->frame->pixbuf,
x, y, width, height,
context->user_data);
}
| 0 |
[] |
gdk-pixbuf
|
f8569bb13e2aa1584dde61ca545144750f7a7c98
| 319,166,093,026,649,480,000,000,000,000,000,000,000 | 11 |
GIF: Don't return a partially initialized pixbuf structure
It was found that gdk-pixbuf GIF image loader gdk_pixbuf__gif_image_load()
routine did not properly handle certain return values from their subroutines.
A remote attacker could provide a specially-crafted GIF image, which once
opened in an application, linked against gdk-pixbuf would lead to gdk-pixbuf
to return partially initialized pixbuf structure, possibly having huge
width and height, leading to that particular application termination due to
excessive memory use.
The CVE identifier of CVE-2011-2485 has been assigned to this issue.
|
void buildHeader(SSL& ssl, RecordLayerHeader& rlHeader, const Message& msg)
{
ProtocolVersion pv = ssl.getSecurity().get_connection().version_;
rlHeader.type_ = msg.get_type();
rlHeader.version_.major_ = pv.major_;
rlHeader.version_.minor_ = pv.minor_;
rlHeader.length_ = msg.get_length();
}
| 0 |
[] |
mysql-server
|
b9768521bdeb1a8069c7b871f4536792b65fd79b
| 197,915,201,589,692,000,000,000,000,000,000,000,000 | 8 |
Updated yassl to yassl-2.3.8
(cherry picked from commit 7f9941eab55ed672bfcccd382dafbdbcfdc75aaa)
|
inline void set_killed(killed_state killed_arg,
int killed_errno_arg= 0,
const char *killed_err_msg_arg= 0)
{
mysql_mutex_lock(&LOCK_thd_kill);
set_killed_no_mutex(killed_arg, killed_errno_arg, killed_err_msg_arg);
mysql_mutex_unlock(&LOCK_thd_kill);
}
| 0 |
[
"CWE-416"
] |
server
|
4681b6f2d8c82b4ec5cf115e83698251963d80d5
| 147,819,916,342,922,100,000,000,000,000,000,000,000 | 8 |
MDEV-26281 ASAN use-after-poison when complex conversion is involved in blob
the bug was that in_vector array in Item_func_in was allocated in the
statement arena, not in the table->expr_arena.
revert part of the 5acd391e8b2d. Instead, change the arena correctly
in fix_all_session_vcol_exprs().
Remove TABLE_ARENA, that was introduced in 5acd391e8b2d to force
item tree changes to be rolled back (because they were allocated in the
wrong arena and didn't persist. now they do)
|
agoo_ws_expand(agooText t) {
uint8_t buf[16];
uint8_t *b = buf;
uint8_t opcode = t->bin ? AGOO_WS_OP_BIN : AGOO_WS_OP_TEXT;
*b++ = 0x80 | (uint8_t)opcode;
// send unmasked
if (125 >= t->len) {
*b++ = (uint8_t)t->len;
} else if (0xFFFF >= t->len) {
*b++ = (uint8_t)0x7E;
*b++ = (uint8_t)((t->len >> 8) & 0xFF);
*b++ = (uint8_t)(t->len & 0xFF);
} else {
int i;
*b++ = (uint8_t)0x7F;
for (i = 56; 0 <= i; i -= 8) {
*b++ = (uint8_t)((t->len >> i) & 0xFF);
}
}
return agoo_text_prepend(t, (const char*)buf, (int)(b - buf));
}
| 0 |
[
"CWE-444",
"CWE-61"
] |
agoo
|
23d03535cf7b50d679a60a953a0cae9519a4a130
| 227,956,035,486,365,540,000,000,000,000,000,000,000 | 23 |
Remote addr (#99)
* REMOTE_ADDR added
* Ready for merge
|
static int tls1_prf( unsigned char *secret, size_t slen, char *label,
unsigned char *random, size_t rlen,
unsigned char *dstbuf, size_t dlen )
{
size_t nb, hs;
size_t i, j, k;
unsigned char *S1, *S2;
unsigned char tmp[128];
unsigned char h_i[20];
if( sizeof( tmp ) < 20 + strlen( label ) + rlen )
return( POLARSSL_ERR_SSL_BAD_INPUT_DATA );
hs = ( slen + 1 ) / 2;
S1 = secret;
S2 = secret + slen - hs;
nb = strlen( label );
memcpy( tmp + 20, label, nb );
memcpy( tmp + 20 + nb, random, rlen );
nb += rlen;
/*
* First compute P_md5(secret,label+random)[0..dlen]
*/
md5_hmac( S1, hs, tmp + 20, nb, 4 + tmp );
for( i = 0; i < dlen; i += 16 )
{
md5_hmac( S1, hs, 4 + tmp, 16 + nb, h_i );
md5_hmac( S1, hs, 4 + tmp, 16, 4 + tmp );
k = ( i + 16 > dlen ) ? dlen % 16 : 16;
for( j = 0; j < k; j++ )
dstbuf[i + j] = h_i[j];
}
/*
* XOR out with P_sha1(secret,label+random)[0..dlen]
*/
sha1_hmac( S2, hs, tmp + 20, nb, tmp );
for( i = 0; i < dlen; i += 20 )
{
sha1_hmac( S2, hs, tmp, 20 + nb, h_i );
sha1_hmac( S2, hs, tmp, 20, tmp );
k = ( i + 20 > dlen ) ? dlen % 20 : 20;
for( j = 0; j < k; j++ )
dstbuf[i + j] = (unsigned char)( dstbuf[i + j] ^ h_i[j] );
}
memset( tmp, 0, sizeof( tmp ) );
memset( h_i, 0, sizeof( h_i ) );
return( 0 );
}
| 0 |
[
"CWE-310"
] |
polarssl
|
4582999be608c9794d4518ae336b265084db9f93
| 203,158,634,350,645,260,000,000,000,000,000,000,000 | 59 |
Fixed timing difference resulting from badly formatted padding.
|
ec_verify(krb5_context context, krb5_data *req_pkt, krb5_kdc_req *request,
krb5_enc_tkt_part *enc_tkt_reply, krb5_pa_data *data,
krb5_kdcpreauth_callbacks cb, krb5_kdcpreauth_rock rock,
krb5_kdcpreauth_moddata moddata,
krb5_kdcpreauth_verify_respond_fn respond, void *arg)
{
krb5_error_code retval = 0;
krb5_enc_data *enc = NULL;
krb5_data scratch, plain;
krb5_keyblock *armor_key = cb->fast_armor(context, rock);
krb5_pa_enc_ts *ts = NULL;
krb5_keyblock *client_keys = NULL;
krb5_keyblock *challenge_key = NULL;
krb5_keyblock *kdc_challenge_key;
krb5_kdcpreauth_modreq modreq = NULL;
int i = 0;
char *ai = NULL, *realmstr = NULL;
krb5_data realm = request->server->realm;
plain.data = NULL;
if (armor_key == NULL) {
retval = ENOENT;
k5_setmsg(context, ENOENT,
_("Encrypted Challenge used outside of FAST tunnel"));
}
scratch.data = (char *) data->contents;
scratch.length = data->length;
if (retval == 0)
retval = decode_krb5_enc_data(&scratch, &enc);
if (retval == 0) {
plain.data = malloc(enc->ciphertext.length);
plain.length = enc->ciphertext.length;
if (plain.data == NULL)
retval = ENOMEM;
}
/* Check for a configured FAST ec auth indicator. */
if (retval == 0)
realmstr = k5memdup0(realm.data, realm.length, &retval);
if (realmstr != NULL)
retval = profile_get_string(context->profile, KRB5_CONF_REALMS,
realmstr,
KRB5_CONF_ENCRYPTED_CHALLENGE_INDICATOR,
NULL, &ai);
if (retval == 0)
retval = cb->client_keys(context, rock, &client_keys);
if (retval == 0) {
for (i = 0; client_keys[i].enctype&& (retval == 0); i++ ) {
retval = krb5_c_fx_cf2_simple(context,
armor_key, "clientchallengearmor",
&client_keys[i], "challengelongterm",
&challenge_key);
if (retval == 0)
retval = krb5_c_decrypt(context, challenge_key,
KRB5_KEYUSAGE_ENC_CHALLENGE_CLIENT,
NULL, enc, &plain);
if (challenge_key)
krb5_free_keyblock(context, challenge_key);
challenge_key = NULL;
if (retval == 0)
break;
/*We failed to decrypt. Try next key*/
retval = 0;
}
if (client_keys[i].enctype == 0) {
retval = KRB5KDC_ERR_PREAUTH_FAILED;
k5_setmsg(context, retval,
_("Incorrect password in encrypted challenge"));
}
}
if (retval == 0)
retval = decode_krb5_pa_enc_ts(&plain, &ts);
if (retval == 0)
retval = krb5_check_clockskew(context, ts->patimestamp);
if (retval == 0) {
enc_tkt_reply->flags |= TKT_FLG_PRE_AUTH;
/*
* If this fails, we won't generate a reply to the client. That may
* cause the client to fail, but at this point the KDC has considered
* this a success, so the return value is ignored.
*/
if (krb5_c_fx_cf2_simple(context, armor_key, "kdcchallengearmor",
&client_keys[i], "challengelongterm",
&kdc_challenge_key) == 0) {
modreq = (krb5_kdcpreauth_modreq)kdc_challenge_key;
if (ai != NULL)
cb->add_auth_indicator(context, rock, ai);
}
}
cb->free_keys(context, rock, client_keys);
if (plain.data)
free(plain.data);
if (enc)
krb5_free_enc_data(context, enc);
if (ts)
krb5_free_pa_enc_ts(context, ts);
free(realmstr);
free(ai);
(*respond)(arg, retval, modreq, NULL, NULL);
}
| 0 |
[
"CWE-476"
] |
krb5
|
fc98f520caefff2e5ee9a0026fdf5109944b3562
| 229,187,359,353,437,430,000,000,000,000,000,000,000 | 103 |
Fix KDC null deref on bad encrypted challenge
The function ec_verify() in src/kdc/kdc_preauth_ec.c contains a check
to avoid further processing if the armor key is NULL. However, this
check is bypassed by a call to k5memdup0() which overwrites retval
with 0 if the allocation succeeds. If the armor key is NULL, a call
to krb5_c_fx_cf2_simple() will then dereference it, resulting in a
crash. Add a check before the k5memdup0() call to avoid overwriting
retval.
CVE-2021-36222:
In MIT krb5 releases 1.16 and later, an unauthenticated attacker can
cause a null dereference in the KDC by sending a request containing a
PA-ENCRYPTED-CHALLENGE padata element without using FAST.
[[email protected]: trimmed patch; added test case; edited commit
message]
ticket: 9007 (new)
tags: pullup
target_version: 1.19-next
target_version: 1.18-next
|
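The CVE-2021-36222 message above pinpoints the hazard: a helper that writes its own status into retval can overwrite an earlier error with 0, defeating a guard placed earlier in the function. A small self-contained C sketch of the pattern and of the added check, with generic names standing in for k5memdup0() and ec_verify():

#include <errno.h>
#include <stdlib.h>
#include <string.h>

/* Duplicates len bytes and reports its own status through *status,
 * clobbering whatever value was there before (like k5memdup0()). */
static char *dup_bytes(const char *data, size_t len, int *status)
{
    char *p = malloc(len + 1);
    if (p == NULL) {
        *status = ENOMEM;
        return NULL;
    }
    memcpy(p, data, len);
    p[len] = '\0';
    *status = 0;
    return p;
}

static int process(const char *data, size_t len, int retval)
{
    char *copy = NULL;

    /* The fix: only call the helper when no earlier error is pending,
     * so a prior failure (e.g. a missing armor key) is not silently cleared. */
    if (retval == 0)
        copy = dup_bytes(data, len, &retval);
    free(copy);
    return retval;
}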
void lftp_ssl_gnutls::global_init()
{
if(!instance)
instance=new lftp_ssl_gnutls_instance();
}
| 0 |
[
"CWE-310"
] |
lftp
|
6357bed2583171b7515af6bb6585cf56d2117e3f
| 107,121,702,308,312,470,000,000,000,000,000,000,000 | 5 |
use hostmatch function from latest curl (addresses CVE-2014-0139)
|
onigenc_unicode_property_name_to_ctype(OnigEncoding enc, UChar* name, UChar* end)
{
int len;
hash_data_type ctype;
UChar buf[PROPERTY_NAME_MAX_SIZE];
UChar *p;
OnigCodePoint code;
p = name;
len = 0;
while (p < end) {
code = ONIGENC_MBC_TO_CODE(enc, p, end);
if (code >= 0x80)
return ONIGERR_INVALID_CHAR_PROPERTY_NAME;
buf[len++] = (UChar )code;
if (len >= PROPERTY_NAME_MAX_SIZE)
return ONIGERR_INVALID_CHAR_PROPERTY_NAME;
p += enclen(enc, p);
}
buf[len] = 0;
if (NameTableInited == 0) init_name_ctype_table();
if (onig_st_lookup_strend(NameCtypeTable, buf, buf + len, &ctype) == 0) {
return ONIGERR_INVALID_CHAR_PROPERTY_NAME;
}
return (int )ctype;
}
| 0 |
[
"CWE-125"
] |
oniguruma
|
65a9b1aa03c9bc2dc01b074295b9603232cb3b78
| 234,063,347,778,842,150,000,000,000,000,000,000,000 | 32 |
onig-5.9.2
|
dump_raw_buffer_be(const gs_memory_t *mem, int num_rows, int width, int n_chan,
int plane_stride, int rowstride,
char filename[],const byte *Buffer, bool deep)
{
do_dump_raw_buffer(mem, num_rows, width, n_chan, plane_stride,
rowstride, filename, Buffer, deep, 1);
}
| 0 |
[
"CWE-476"
] |
ghostpdl
|
7870f4951bcc6a153f317e3439e14d0e929fd231
| 27,843,413,684,076,200,000,000,000,000,000,000,000 | 7 |
Bug 701795: Segv due to image mask issue
|
static char *get_pid_environ_val(pid_t pid,char *val){
int temp_size = 500;
char *temp = malloc(temp_size);
int i=0;
int foundit=0;
FILE *fp;
sprintf(temp,"/proc/%d/environ",pid);
fp=fopen(temp,"r");
if(fp==NULL)
return NULL;
for(;;){
if (i >= temp_size) {
temp_size *= 2;
temp = realloc(temp, temp_size);
}
temp[i]=fgetc(fp);
if(foundit==1 && (temp[i]==0 || temp[i]=='\0' || temp[i]==EOF)){
char *ret;
temp[i]=0;
ret=malloc(strlen(temp)+10);
sprintf(ret,"%s",temp);
fclose(fp);
return ret;
}
switch(temp[i]){
case EOF:
fclose(fp);
return NULL;
case '=':
temp[i]=0;
if(!strcmp(temp,val)){
foundit=1;
}
i=0;
break;
case '\0':
i=0;
break;
default:
i++;
}
}
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
das_watchdog
|
bd20bb02e75e2c0483832b52f2577253febfb690
| 266,256,142,573,941,520,000,000,000,000,000,000,000 | 52 |
Fix memory overflow if the name of an environment variable is larger than 500 characters. Bug found by Adam Sampson.
|
static int netlink_bind(struct socket *sock, struct sockaddr *addr,
int addr_len)
{
struct sock *sk = sock->sk;
struct net *net = sock_net(sk);
struct netlink_sock *nlk = nlk_sk(sk);
struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
int err;
if (nladdr->nl_family != AF_NETLINK)
return -EINVAL;
/* Only superuser is allowed to listen multicasts */
if (nladdr->nl_groups) {
if (!netlink_capable(sock, NL_NONROOT_RECV))
return -EPERM;
err = netlink_realloc_groups(sk);
if (err)
return err;
}
if (nlk->pid) {
if (nladdr->nl_pid != nlk->pid)
return -EINVAL;
} else {
err = nladdr->nl_pid ?
netlink_insert(sk, net, nladdr->nl_pid) :
netlink_autobind(sock);
if (err)
return err;
}
if (!nladdr->nl_groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
return 0;
netlink_table_grab();
netlink_update_subscriptions(sk, nlk->subscriptions +
hweight32(nladdr->nl_groups) -
hweight32(nlk->groups[0]));
nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | nladdr->nl_groups;
netlink_update_listeners(sk);
netlink_table_ungrab();
if (nlk->netlink_bind && nlk->groups[0]) {
int i;
for (i=0; i<nlk->ngroups; i++) {
if (test_bit(i, nlk->groups))
nlk->netlink_bind(i);
}
}
return 0;
}
| 0 |
[
"CWE-287",
"CWE-284"
] |
linux
|
e0e3cea46d31d23dc40df0a49a7a2c04fe8edfea
| 175,522,984,883,955,870,000,000,000,000,000,000,000 | 54 |
af_netlink: force credentials passing [CVE-2012-3520]
Pablo Neira Ayuso discovered that avahi and
potentially NetworkManager accept spoofed Netlink messages because of a
kernel bug. The kernel passes all-zero SCM_CREDENTIALS ancillary data
to the receiver if the sender did not provide such data, instead of not
including any such data at all or including the correct data from the
peer (as it is the case with AF_UNIX).
This bug was introduced in commit 16e572626961
(af_unix: dont send SCM_CREDENTIALS by default)
This patch forces passing credentials for netlink, as
before the regression.
Another fix would be to not add SCM_CREDENTIALS in
netlink messages if not provided by the sender, but it
might break some programs.
With help from Florian Weimer & Petr Matousek
This issue is designated as CVE-2012-3520
Signed-off-by: Eric Dumazet <[email protected]>
Cc: Petr Matousek <[email protected]>
Cc: Florian Weimer <[email protected]>
Cc: Pablo Neira Ayuso <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
local_exported_variables ()
{
return (vapply (local_and_exported));
}
| 0 |
[] |
bash
|
863d31ae775d56b785dc5b0105b6d251515d81d5
| 332,120,196,097,731,800,000,000,000,000,000,000,000 | 4 |
commit bash-20120224 snapshot
|
int __sched cond_resched_softirq(void)
{
BUG_ON(!in_softirq());
if (need_resched() && system_state == SYSTEM_RUNNING) {
local_bh_enable();
__cond_resched();
local_bh_disable();
return 1;
}
return 0;
}
| 0 |
[] |
linux-2.6
|
8f1bc385cfbab474db6c27b5af1e439614f3025c
| 214,123,582,902,181,770,000,000,000,000,000,000,000 | 12 |
sched: fair: weight calculations
In order to level the hierarchy, we need to calculate load based on the
root view. That is, each task's load is in the same unit.
Example hierarchy: A has children B and 1; B has children 2 and 3.
To compute 1's load we do:
weight(1) / rq_weight(A)
To compute 2's load we do:
(weight(2) / rq_weight(B)) * (weight(B) / rw_weight(A))
This yields load fractions in comparable units.
The consequence is that it changes virtual time. We used to have:
vtime_{i} = time_{i} / weight_{i}
vtime = \Sum vtime_{i} = time / rq_weight.
But with the new way of load calculation we get that vtime equals time.
Signed-off-by: Peter Zijlstra <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
|
void preempt_notifier_unregister(struct preempt_notifier *notifier)
{
hlist_del(¬ifier->link);
}
| 0 |
[] |
linux-2.6
|
8f1bc385cfbab474db6c27b5af1e439614f3025c
| 201,496,596,542,458,040,000,000,000,000,000,000,000 | 4 |
sched: fair: weight calculations
In order to level the hierarchy, we need to calculate load based on the
root view. That is, each task's load is in the same unit.
Example hierarchy: A has children B and 1; B has children 2 and 3.
To compute 1's load we do:
weight(1) / rq_weight(A)
To compute 2's load we do:
(weight(2) / rq_weight(B)) * (weight(B) / rw_weight(A))
This yields load fractions in comparable units.
The consequence is that it changes virtual time. We used to have:
vtime_{i} = time_{i} / weight_{i}
vtime = \Sum vtime_{i} = time / rq_weight.
But with the new way of load calculation we get that vtime equals time.
Signed-off-by: Peter Zijlstra <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
|
inline char *strdup(const char *str)
{ return strdup_root(mem_root,str); }
| 0 |
[
"CWE-416"
] |
server
|
4681b6f2d8c82b4ec5cf115e83698251963d80d5
| 320,502,890,347,947,830,000,000,000,000,000,000,000 | 2 |
MDEV-26281 ASAN use-after-poison when complex conversion is involved in blob
the bug was that in_vector array in Item_func_in was allocated in the
statement arena, not in the table->expr_arena.
revert part of the 5acd391e8b2d. Instead, change the arena correctly
in fix_all_session_vcol_exprs().
Remove TABLE_ARENA, that was introduced in 5acd391e8b2d to force
item tree changes to be rolled back (because they were allocated in the
wrong arena and didn't persist. now they do)
|
void ssix_del(GF_Box *s)
{
u32 i;
GF_SubsegmentIndexBox *ptr = (GF_SubsegmentIndexBox *)s;
if (ptr == NULL) return;
if (ptr->subsegments) {
for (i = 0; i < ptr->subsegment_count; i++) {
GF_Subsegment *subsegment = &ptr->subsegments[i];
if (subsegment->levels) gf_free(subsegment->levels);
if (subsegment->range_sizes) gf_free(subsegment->range_sizes);
}
gf_free(ptr->subsegments);
}
gf_free(ptr);
}
| 0 |
[
"CWE-125"
] |
gpac
|
bceb03fd2be95097a7b409ea59914f332fb6bc86
| 108,433,709,539,055,720,000,000,000,000,000,000,000 | 15 |
fixed 2 possible heap overflows (inc. #1088)
|
static int sse4_c(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h)
{
int s, i;
uint32_t *sq = ff_squareTbl + 256;
s = 0;
for (i = 0; i < h; i++) {
s += sq[pix1[0] - pix2[0]];
s += sq[pix1[1] - pix2[1]];
s += sq[pix1[2] - pix2[2]];
s += sq[pix1[3] - pix2[3]];
pix1 += line_size;
pix2 += line_size;
}
return s;
}
| 0 |
[
"CWE-703",
"CWE-189"
] |
FFmpeg
|
454a11a1c9c686c78aa97954306fb63453299760
| 158,006,189,390,508,340,000,000,000,000,000,000,000 | 16 |
avcodec/dsputil: fix signedness in sizeof() comparissions
Signed-off-by: Michael Niedermayer <[email protected]>
|
email_compare (EContact *contact1,
EContact *contact2)
{
const gchar *email1, *email2;
gint i;
/*
if (e_contact_get (contact1, E_CONTACT_IS_LIST))
return TRUE;
*/
for (i = 0; i < 4; i++) {
gboolean equal;
email1 = e_contact_get_const (contact1, email_ids[i]);
email2 = e_contact_get_const (contact2, email_ids[i]);
if (email1 && email2)
equal = !strcmp (email1, email2);
else
equal = (!!email1 == !!email2);
if (!equal)
return equal;
}
return TRUE;
}
| 0 |
[] |
evolution-data-server
|
34bad61738e2127736947ac50e0c7969cc944972
| 60,125,416,251,316,560,000,000,000,000,000,000,000 | 26 |
Bug 796174 - strcat() considered unsafe for buffer overflow
|
static void put_buffer(QEMUFile *f, void *pv, size_t size)
{
uint8_t *v = pv;
qemu_put_buffer(f, v, size);
}
| 0 |
[
"CWE-119"
] |
qemu
|
d2ef4b61fe6d33d2a5dcf100a9b9440de341ad62
| 171,085,521,149,850,900,000,000,000,000,000,000,000 | 5 |
vmstate: fix buffer overflow in target-arm/machine.c
CVE-2013-4531
cpreg_vmstate_indexes is a VARRAY_INT32. A negative value for
cpreg_vmstate_array_len will cause a buffer overflow.
VMSTATE_INT32_LE was supposed to protect against this
but doesn't because it doesn't validate that input is
non-negative.
Fix this macro to validate the value appropriately.
The only other user of VMSTATE_INT32_LE doesn't
ever use negative numbers so it doesn't care.
Reported-by: Anthony Liguori <[email protected]>
Signed-off-by: Michael S. Tsirkin <[email protected]>
Signed-off-by: Juan Quintela <[email protected]>
|
static int ldb_match_message(struct ldb_context *ldb,
const struct ldb_message *msg,
const struct ldb_parse_tree *tree,
enum ldb_scope scope, bool *matched)
{
unsigned int i;
int ret;
*matched = false;
if (scope != LDB_SCOPE_BASE && ldb_dn_is_special(msg->dn)) {
/* don't match special records except on base searches */
return LDB_SUCCESS;
}
switch (tree->operation) {
case LDB_OP_AND:
for (i=0;i<tree->u.list.num_elements;i++) {
ret = ldb_match_message(ldb, msg, tree->u.list.elements[i], scope, matched);
if (ret != LDB_SUCCESS) return ret;
if (!*matched) return LDB_SUCCESS;
}
*matched = true;
return LDB_SUCCESS;
case LDB_OP_OR:
for (i=0;i<tree->u.list.num_elements;i++) {
ret = ldb_match_message(ldb, msg, tree->u.list.elements[i], scope, matched);
if (ret != LDB_SUCCESS) return ret;
if (*matched) return LDB_SUCCESS;
}
*matched = false;
return LDB_SUCCESS;
case LDB_OP_NOT:
ret = ldb_match_message(ldb, msg, tree->u.isnot.child, scope, matched);
if (ret != LDB_SUCCESS) return ret;
*matched = ! *matched;
return LDB_SUCCESS;
case LDB_OP_EQUALITY:
return ldb_match_equality(ldb, msg, tree, scope, matched);
case LDB_OP_SUBSTRING:
return ldb_match_substring(ldb, msg, tree, scope, matched);
case LDB_OP_GREATER:
return ldb_match_comparison(ldb, msg, tree, scope, LDB_OP_GREATER, matched);
case LDB_OP_LESS:
return ldb_match_comparison(ldb, msg, tree, scope, LDB_OP_LESS, matched);
case LDB_OP_PRESENT:
return ldb_match_present(ldb, msg, tree, scope, matched);
case LDB_OP_APPROX:
return ldb_match_comparison(ldb, msg, tree, scope, LDB_OP_APPROX, matched);
case LDB_OP_EXTENDED:
return ldb_match_extended(ldb, msg, tree, scope, matched);
}
return LDB_ERR_INAPPROPRIATE_MATCHING;
}
| 0 |
[
"CWE-189"
] |
samba
|
ec504dbf69636a554add1f3d5703dd6c3ad450b8
| 251,419,080,054,077,470,000,000,000,000,000,000,000 | 64 |
CVE-2015-3223: lib: ldb: Cope with canonicalise_fn returning string "", length 0.
BUG: https://bugzilla.samba.org/show_bug.cgi?id=11325
Signed-off-by: Jeremy Allison <[email protected]>
Reviewed-by: Ralph Boehme <[email protected]>
|
TLabel * __fastcall TCustomDialog::CreateLabel(UnicodeString Label)
{
TLabel * Result = new TLabel(this);
Result->Caption = Label;
return Result;
}
| 0 |
[
"CWE-787"
] |
winscp
|
faa96e8144e6925a380f94a97aa382c9427f688d
| 50,186,331,323,588,140,000,000,000,000,000,000,000 | 6 |
Bug 1943: Prevent loading session settings that can lead to remote code execution from handled URLs
https://winscp.net/tracker/1943
(cherry picked from commit ec584f5189a856cd79509f754722a6898045c5e0)
Source commit: 0f4be408b3f01132b00682da72d925d6c4ee649b
|
rfbProcessClientMessage(rfbClientPtr cl)
{
switch (cl->state) {
case RFB_PROTOCOL_VERSION:
rfbProcessClientProtocolVersion(cl);
return;
case RFB_SECURITY_TYPE:
rfbAuthProcessSecurityTypeMessage(cl);
return;
#ifdef VINO_HAVE_GNUTLS
case RFB_TLS_HANDSHAKE:
rfbAuthProcessTLSHandshake(cl);
return;
#endif
case RFB_AUTH_TYPE:
rfbAuthProcessAuthTypeMessage(cl);
return;
case RFB_AUTHENTICATION:
rfbAuthProcessClientMessage(cl);
return;
case RFB_AUTH_DEFERRED:
rfbLog("Authentication deferred for this client - closing connection\n");
rfbCloseClient(cl);
return;
case RFB_INITIALISATION:
rfbProcessClientInitMessage(cl);
return;
default:
rfbProcessClientNormalMessage(cl);
return;
}
}
| 0 |
[
"CWE-20"
] |
vino
|
860337231eaccfeed4f857afd0579546a260c23f
| 36,230,913,661,126,873,000,000,000,000,000,000,000 | 32 |
Reject new clients if in the deferred state
As mentioned in bug 641811, Vino can get stuck trying to process the
same data in an infinite loop if an authentication request is received
from a client while that client is in the deferred state.
Avoid this situation by closing new connections from the same client
when it is in the deferred state.
|
int tls1_save_sigalgs(SSL *s, const unsigned char *data, int dsize)
{
CERT *c = s->cert;
/* Extension ignored for inappropriate versions */
if (!SSL_USE_SIGALGS(s))
return 1;
/* Should never happen */
if (!c)
return 0;
OPENSSL_free(s->s3->tmp.peer_sigalgs);
s->s3->tmp.peer_sigalgs = OPENSSL_malloc(dsize);
if (s->s3->tmp.peer_sigalgs == NULL)
return 0;
s->s3->tmp.peer_sigalgslen = dsize;
memcpy(s->s3->tmp.peer_sigalgs, data, dsize);
return 1;
}
| 0 |
[
"CWE-20"
] |
openssl
|
4ad93618d26a3ea23d36ad5498ff4f59eff3a4d2
| 89,663,039,547,468,710,000,000,000,000,000,000,000 | 18 |
Don't change the state of the ETM flags until CCS processing
Changing the ciphersuite during a renegotiation can result in a crash
leading to a DoS attack. ETM has not been implemented in 1.1.0 for DTLS
so this is TLS only.
The problem is caused by changing the flag indicating whether to use ETM
or not immediately on negotiation of ETM, rather than at CCS. Therefore,
during a renegotiation, if the ETM state is changing (usually due to a
change of ciphersuite), then an error/crash will occur.
Due to the fact that there are separate CCS messages for read and write
we actually now need two flags to determine whether to use ETM or not.
CVE-2017-3733
Reviewed-by: Richard Levitte <[email protected]>
|
int unit_name_mangle_with_suffix(const char *name, const char *operation, UnitNameMangle flags, const char *suffix, char **ret) {
_cleanup_free_ char *s = NULL;
bool mangled, suggest_escape = true;
int r;
assert(name);
assert(suffix);
assert(ret);
if (isempty(name)) /* We cannot mangle empty unit names to become valid, sorry. */
return -EINVAL;
if (!unit_suffix_is_valid(suffix))
return -EINVAL;
/* Already a fully valid unit name? If so, no mangling is necessary... */
if (unit_name_is_valid(name, UNIT_NAME_ANY))
goto good;
/* Already a fully valid globbing expression? If so, no mangling is necessary either... */
if (string_is_glob(name) && in_charset(name, VALID_CHARS_GLOB)) {
if (flags & UNIT_NAME_MANGLE_GLOB)
goto good;
log_full(flags & UNIT_NAME_MANGLE_WARN ? LOG_NOTICE : LOG_DEBUG,
"Glob pattern passed%s%s, but globs are not supported for this.",
operation ? " " : "", strempty(operation));
suggest_escape = false;
}
if (is_device_path(name)) {
r = unit_name_from_path(name, ".device", ret);
if (r >= 0)
return 1;
if (r != -EINVAL)
return r;
}
if (path_is_absolute(name)) {
r = unit_name_from_path(name, ".mount", ret);
if (r >= 0)
return 1;
if (r != -EINVAL)
return r;
}
s = new(char, strlen(name) * 4 + strlen(suffix) + 1);
if (!s)
return -ENOMEM;
mangled = do_escape_mangle(name, flags & UNIT_NAME_MANGLE_GLOB, s);
if (mangled)
log_full(flags & UNIT_NAME_MANGLE_WARN ? LOG_NOTICE : LOG_DEBUG,
"Invalid unit name \"%s\" escaped as \"%s\"%s.",
name, s,
suggest_escape ? " (maybe you should use systemd-escape?)" : "");
/* Append a suffix if it doesn't have any, but only if this is not a glob, so that we can allow
* "foo.*" as a valid glob. */
if ((!(flags & UNIT_NAME_MANGLE_GLOB) || !string_is_glob(s)) && unit_name_to_type(s) < 0)
strcat(s, suffix);
/* Make sure mangling didn't grow this too large (but don't do this check if globbing is allowed,
* since globs generally do not qualify as valid unit names) */
if (!FLAGS_SET(flags, UNIT_NAME_MANGLE_GLOB) && !unit_name_is_valid(s, UNIT_NAME_ANY))
return -EINVAL;
*ret = TAKE_PTR(s);
return 1;
good:
s = strdup(name);
if (!s)
return -ENOMEM;
*ret = TAKE_PTR(s);
return 0;
}
| 0 |
[
"CWE-703"
] |
systemd-stable
|
b00674347337b7531c92fdb65590ab253bb57538
| 283,603,495,163,723,930,000,000,000,000,000,000,000 | 77 |
basic/unit-name: do not use strdupa() on a path
The path may have unbounded length, for example through a fuse mount.
CVE-2021-33910: attacked controlled alloca() leads to crash in systemd and
ultimately a kernel panic. Systemd parses the content of /proc/self/mountinfo
and each mountpoint is passed to mount_setup_unit(), which calls
unit_name_path_escape() underneath. A local attacker who is able to mount a
filesystem with a very long path can crash systemd and the whole system.
https://bugzilla.redhat.com/show_bug.cgi?id=1970887
The resulting string length is bounded by UNIT_NAME_MAX, which is 256. But we
can't easily check the length after simplification before doing the
simplification, which in turns uses a copy of the string we can write to.
So we can't reject paths that are too long before doing the duplication.
Hence the most obvious solution is to switch back to strdup(), as before
7410616cd9dbbec97cf98d75324da5cda2b2f7a2.
(cherry picked from commit 441e0115646d54f080e5c3bb0ba477c892861ab9)
(cherry picked from commit 764b74113e36ac5219a4b82a05f311b5a92136ce)
(cherry picked from commit 4a1c5f34bd3e1daed4490e9d97918e504d19733b)
|
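The commit message above explains the rationale: strdupa()/alloca() copy onto the stack, so an attacker-controlled mount path of unbounded length can exhaust the stack, while strdup() moves the copy to the heap where allocation failure is a reportable error instead of a crash. A trivial sketch of the safer choice, with a generic function name:

#include <stdlib.h>
#include <string.h>

/* Heap-allocate the working copy of a possibly attacker-controlled path.
 * The caller must check for NULL and free() the result. */
static char *copy_path_for_escaping(const char *path)
{
    return strdup(path);
}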
isis_print_is_reach_subtlv(netdissect_options *ndo,
const uint8_t *tptr, u_int subt, u_int subl,
const char *ident)
{
u_int te_class,priority_level,gmpls_switch_cap;
union { /* int to float conversion buffer for several subTLVs */
float f;
uint32_t i;
} bw;
/* first lets see if we know the subTLVs name*/
ND_PRINT((ndo, "%s%s subTLV #%u, length: %u",
ident, tok2str(isis_ext_is_reach_subtlv_values, "unknown", subt),
subt, subl));
ND_TCHECK2(*tptr, subl);
switch(subt) {
case ISIS_SUBTLV_EXT_IS_REACH_ADMIN_GROUP:
case ISIS_SUBTLV_EXT_IS_REACH_LINK_LOCAL_REMOTE_ID:
case ISIS_SUBTLV_EXT_IS_REACH_LINK_REMOTE_ID:
if (subl >= 4) {
ND_PRINT((ndo, ", 0x%08x", EXTRACT_32BITS(tptr)));
if (subl == 8) /* rfc4205 */
ND_PRINT((ndo, ", 0x%08x", EXTRACT_32BITS(tptr+4)));
}
break;
case ISIS_SUBTLV_EXT_IS_REACH_IPV4_INTF_ADDR:
case ISIS_SUBTLV_EXT_IS_REACH_IPV4_NEIGHBOR_ADDR:
if (subl >= sizeof(struct in_addr))
ND_PRINT((ndo, ", %s", ipaddr_string(ndo, tptr)));
break;
case ISIS_SUBTLV_EXT_IS_REACH_MAX_LINK_BW :
case ISIS_SUBTLV_EXT_IS_REACH_RESERVABLE_BW:
if (subl >= 4) {
bw.i = EXTRACT_32BITS(tptr);
ND_PRINT((ndo, ", %.3f Mbps", bw.f * 8 / 1000000));
}
break;
case ISIS_SUBTLV_EXT_IS_REACH_UNRESERVED_BW :
if (subl >= 32) {
for (te_class = 0; te_class < 8; te_class++) {
bw.i = EXTRACT_32BITS(tptr);
ND_PRINT((ndo, "%s TE-Class %u: %.3f Mbps",
ident,
te_class,
bw.f * 8 / 1000000));
tptr+=4;
}
}
break;
case ISIS_SUBTLV_EXT_IS_REACH_BW_CONSTRAINTS: /* fall through */
case ISIS_SUBTLV_EXT_IS_REACH_BW_CONSTRAINTS_OLD:
if (subl == 0)
break;
ND_PRINT((ndo, "%sBandwidth Constraints Model ID: %s (%u)",
ident,
tok2str(diffserv_te_bc_values, "unknown", *tptr),
*tptr));
tptr++;
/* decode BCs until the subTLV ends */
for (te_class = 0; te_class < (subl-1)/4; te_class++) {
bw.i = EXTRACT_32BITS(tptr);
ND_PRINT((ndo, "%s Bandwidth constraint CT%u: %.3f Mbps",
ident,
te_class,
bw.f * 8 / 1000000));
tptr+=4;
}
break;
case ISIS_SUBTLV_EXT_IS_REACH_TE_METRIC:
if (subl >= 3)
ND_PRINT((ndo, ", %u", EXTRACT_24BITS(tptr)));
break;
case ISIS_SUBTLV_EXT_IS_REACH_LINK_ATTRIBUTE:
if (subl == 2) {
ND_PRINT((ndo, ", [ %s ] (0x%04x)",
bittok2str(isis_subtlv_link_attribute_values,
"Unknown",
EXTRACT_16BITS(tptr)),
EXTRACT_16BITS(tptr)));
}
break;
case ISIS_SUBTLV_EXT_IS_REACH_LINK_PROTECTION_TYPE:
if (subl >= 2) {
ND_PRINT((ndo, ", %s, Priority %u",
bittok2str(gmpls_link_prot_values, "none", *tptr),
*(tptr+1)));
}
break;
case ISIS_SUBTLV_SPB_METRIC:
if (subl >= 6) {
ND_PRINT((ndo, ", LM: %u", EXTRACT_24BITS(tptr)));
tptr=tptr+3;
ND_PRINT((ndo, ", P: %u", *(tptr)));
tptr++;
ND_PRINT((ndo, ", P-ID: %u", EXTRACT_16BITS(tptr)));
}
break;
case ISIS_SUBTLV_EXT_IS_REACH_INTF_SW_CAP_DESCR:
if (subl >= 36) {
gmpls_switch_cap = *tptr;
ND_PRINT((ndo, "%s Interface Switching Capability:%s",
ident,
tok2str(gmpls_switch_cap_values, "Unknown", gmpls_switch_cap)));
ND_PRINT((ndo, ", LSP Encoding: %s",
tok2str(gmpls_encoding_values, "Unknown", *(tptr + 1))));
tptr+=4;
ND_PRINT((ndo, "%s Max LSP Bandwidth:", ident));
for (priority_level = 0; priority_level < 8; priority_level++) {
bw.i = EXTRACT_32BITS(tptr);
ND_PRINT((ndo, "%s priority level %d: %.3f Mbps",
ident,
priority_level,
bw.f * 8 / 1000000));
tptr+=4;
}
subl-=36;
switch (gmpls_switch_cap) {
case GMPLS_PSC1:
case GMPLS_PSC2:
case GMPLS_PSC3:
case GMPLS_PSC4:
ND_TCHECK2(*tptr, 6);
bw.i = EXTRACT_32BITS(tptr);
ND_PRINT((ndo, "%s Min LSP Bandwidth: %.3f Mbps", ident, bw.f * 8 / 1000000));
ND_PRINT((ndo, "%s Interface MTU: %u", ident, EXTRACT_16BITS(tptr + 4)));
break;
case GMPLS_TSC:
ND_TCHECK2(*tptr, 8);
bw.i = EXTRACT_32BITS(tptr);
ND_PRINT((ndo, "%s Min LSP Bandwidth: %.3f Mbps", ident, bw.f * 8 / 1000000));
ND_PRINT((ndo, "%s Indication %s", ident,
tok2str(gmpls_switch_cap_tsc_indication_values, "Unknown (%u)", *(tptr + 4))));
break;
default:
/* there is some optional stuff left to decode but this is as of yet
not specified so just lets hexdump what is left */
if(subl>0){
if (!print_unknown_data(ndo, tptr, "\n\t\t ", subl))
return(0);
}
}
}
break;
default:
if (!print_unknown_data(ndo, tptr, "\n\t\t ", subl))
return(0);
break;
}
return(1);
trunc:
return(0);
}
| 0 |
[
"CWE-125",
"CWE-787"
] |
tcpdump
|
5d0d76e88ee2d3236d7e032589d6f1d4ec5f7b1e
| 242,488,695,567,581,100,000,000,000,000,000,000,000 | 155 |
CVE-2017-13055/IS-IS: fix an Extended IS Reachability sub-TLV
In isis_print_is_reach_subtlv() one of the case blocks did not check that
the sub-TLV "V" is actually present and could over-read the input buffer.
Add a length check to fix that and remove a useless boundary check from
a loop because the boundary is tested for the full length of "V" before
the switch block.
Update one of the prior test cases: it turns out it depended on this
previously incorrect code path to reach its own malformed structure
further down the buffer, so the bugfix has changed its output.
This fixes a buffer over-read discovered by Bhargava Shastry,
SecT/TU Berlin.
Add a test using the capture file supplied by the reporter(s).
|
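As a hedged illustration of the length-check pattern the message describes (names are invented; this is not the actual tcpdump patch), a sub-TLV decoder can verify that the advertised length covers the fields it is about to read before touching the buffer:
```
#include <stddef.h>
#include <stdint.h>

/* Hypothetical guard: only decode when the advertised sub-TLV length covers
 * the fields we intend to read. */
static int subtlv_len_ok(size_t subl, size_t needed) {
        return subl >= needed;
}

static void decode_example(const uint8_t *tptr, size_t subl) {
        if (!subtlv_len_ok(subl, 6))
                return;          /* too short: bail out instead of over-reading */
        /* safe to read the first 6 bytes of tptr here */
        (void)tptr;
}
```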
virtual void clearSoftMask(GfxState * /*state*/) {}
| 0 |
[] |
poppler
|
abf167af8b15e5f3b510275ce619e6fdb42edd40
| 337,298,870,340,196,700,000,000,000,000,000,000,000 | 1 |
Implement tiling/patterns in SplashOutputDev
Fixes bug 13518
|
TEST_F(QuotedString_ExtractFrom_Tests, EscapedFormfeed) {
whenInputIs("\"hello \\fworld\\f\"");
resultMustBe("hello \fworld\f");
}
| 0 |
[
"CWE-415",
"CWE-119"
] |
ArduinoJson
|
5e7b9ec688d79e7b16ec7064e1d37e8481a31e72
| 228,763,270,060,616,970,000,000,000,000,000,000,000 | 4 |
Fix buffer overflow (pull request #81)
|
_ppdCacheCreateWithFile(
const char *filename, /* I - File to read */
ipp_t **attrs) /* IO - IPP attributes, if any */
{
cups_file_t *fp; /* File */
_ppd_cache_t *pc; /* PWG mapping data */
pwg_size_t *size; /* Current size */
pwg_map_t *map; /* Current map */
_pwg_finishings_t *finishings; /* Current finishings option */
int linenum, /* Current line number */
num_bins, /* Number of bins in file */
num_sizes, /* Number of sizes in file */
num_sources, /* Number of sources in file */
num_types; /* Number of types in file */
char line[2048], /* Current line */
*value, /* Pointer to value in line */
*valueptr, /* Pointer into value */
pwg_keyword[128], /* PWG keyword */
ppd_keyword[PPD_MAX_NAME];
/* PPD keyword */
_pwg_print_color_mode_t print_color_mode;
/* Print color mode for preset */
_pwg_print_quality_t print_quality; /* Print quality for preset */
DEBUG_printf(("_ppdCacheCreateWithFile(filename=\"%s\")", filename));
/*
* Range check input...
*/
if (attrs)
*attrs = NULL;
if (!filename)
{
_cupsSetError(IPP_STATUS_ERROR_INTERNAL, strerror(EINVAL), 0);
return (NULL);
}
/*
* Open the file...
*/
if ((fp = cupsFileOpen(filename, "r")) == NULL)
{
_cupsSetError(IPP_STATUS_ERROR_INTERNAL, strerror(errno), 0);
return (NULL);
}
/*
* Read the first line and make sure it has "#CUPS-PPD-CACHE-version" in it...
*/
if (!cupsFileGets(fp, line, sizeof(line)))
{
_cupsSetError(IPP_STATUS_ERROR_INTERNAL, strerror(errno), 0);
DEBUG_puts("_ppdCacheCreateWithFile: Unable to read first line.");
cupsFileClose(fp);
return (NULL);
}
if (strncmp(line, "#CUPS-PPD-CACHE-", 16))
{
_cupsSetError(IPP_STATUS_ERROR_INTERNAL, _("Bad PPD cache file."), 1);
DEBUG_printf(("_ppdCacheCreateWithFile: Wrong first line \"%s\".", line));
cupsFileClose(fp);
return (NULL);
}
if (atoi(line + 16) != _PPD_CACHE_VERSION)
{
_cupsSetError(IPP_STATUS_ERROR_INTERNAL, _("Out of date PPD cache file."), 1);
DEBUG_printf(("_ppdCacheCreateWithFile: Cache file has version %s, "
"expected %d.", line + 16, _PPD_CACHE_VERSION));
cupsFileClose(fp);
return (NULL);
}
/*
* Allocate the mapping data structure...
*/
if ((pc = calloc(1, sizeof(_ppd_cache_t))) == NULL)
{
_cupsSetError(IPP_STATUS_ERROR_INTERNAL, strerror(errno), 0);
DEBUG_puts("_ppdCacheCreateWithFile: Unable to allocate _ppd_cache_t.");
goto create_error;
}
pc->max_copies = 9999;
/*
* Read the file...
*/
linenum = 0;
num_bins = 0;
num_sizes = 0;
num_sources = 0;
num_types = 0;
while (cupsFileGetConf(fp, line, sizeof(line), &value, &linenum))
{
DEBUG_printf(("_ppdCacheCreateWithFile: line=\"%s\", value=\"%s\", "
"linenum=%d", line, value, linenum));
if (!value)
{
DEBUG_printf(("_ppdCacheCreateWithFile: Missing value on line %d.",
linenum));
_cupsSetError(IPP_STATUS_ERROR_INTERNAL, _("Bad PPD cache file."), 1);
goto create_error;
}
else if (!_cups_strcasecmp(line, "3D"))
{
pc->cups_3d = _cupsStrAlloc(value);
}
else if (!_cups_strcasecmp(line, "LayerOrder"))
{
pc->cups_layer_order = _cupsStrAlloc(value);
}
else if (!_cups_strcasecmp(line, "Accuracy"))
{
sscanf(value, "%d%d%d", pc->cups_accuracy + 0, pc->cups_accuracy + 1, pc->cups_accuracy + 2);
}
else if (!_cups_strcasecmp(line, "Volume"))
{
sscanf(value, "%d%d%d", pc->cups_volume + 0, pc->cups_volume + 1, pc->cups_volume + 2);
}
else if (!_cups_strcasecmp(line, "Material"))
{
/*
* Material key "name" name=value ... name=value
*/
if ((valueptr = strchr(value, ' ')) != NULL)
{
_pwg_material_t *material = (_pwg_material_t *)calloc(1, sizeof(_pwg_material_t));
*valueptr++ = '\0';
material->key = _cupsStrAlloc(value);
if (*valueptr == '\"')
{
value = valueptr + 1;
if ((valueptr = strchr(value, '\"')) != NULL)
{
*valueptr++ = '\0';
material->name = _cupsStrAlloc(value);
material->num_props = cupsParseOptions(valueptr, 0, &material->props);
}
}
if (!pc->materials)
pc->materials = cupsArrayNew3(NULL, NULL, NULL, 0, NULL, (cups_afree_func_t)pwg_free_material);
cupsArrayAdd(pc->materials, material);
}
}
else if (!_cups_strcasecmp(line, "Filter"))
{
if (!pc->filters)
pc->filters = cupsArrayNew3(NULL, NULL, NULL, 0,
(cups_acopy_func_t)_cupsStrAlloc,
(cups_afree_func_t)_cupsStrFree);
cupsArrayAdd(pc->filters, value);
}
else if (!_cups_strcasecmp(line, "PreFilter"))
{
if (!pc->prefilters)
pc->prefilters = cupsArrayNew3(NULL, NULL, NULL, 0,
(cups_acopy_func_t)_cupsStrAlloc,
(cups_afree_func_t)_cupsStrFree);
cupsArrayAdd(pc->prefilters, value);
}
else if (!_cups_strcasecmp(line, "Product"))
{
pc->product = _cupsStrAlloc(value);
}
else if (!_cups_strcasecmp(line, "SingleFile"))
{
pc->single_file = !_cups_strcasecmp(value, "true");
}
else if (!_cups_strcasecmp(line, "IPP"))
{
off_t pos = cupsFileTell(fp), /* Position in file */
length = strtol(value, NULL, 10);
/* Length of IPP attributes */
if (attrs && *attrs)
{
DEBUG_puts("_ppdCacheCreateWithFile: IPP listed multiple times.");
_cupsSetError(IPP_STATUS_ERROR_INTERNAL, _("Bad PPD cache file."), 1);
goto create_error;
}
else if (length <= 0)
{
DEBUG_puts("_ppdCacheCreateWithFile: Bad IPP length.");
_cupsSetError(IPP_STATUS_ERROR_INTERNAL, _("Bad PPD cache file."), 1);
goto create_error;
}
if (attrs)
{
/*
* Read IPP attributes into the provided variable...
*/
*attrs = ippNew();
if (ippReadIO(fp, (ipp_iocb_t)cupsFileRead, 1, NULL,
*attrs) != IPP_STATE_DATA)
{
DEBUG_puts("_ppdCacheCreateWithFile: Bad IPP data.");
_cupsSetError(IPP_STATUS_ERROR_INTERNAL, _("Bad PPD cache file."), 1);
goto create_error;
}
}
else
{
/*
* Skip the IPP data entirely...
*/
cupsFileSeek(fp, pos + length);
}
if (cupsFileTell(fp) != (pos + length))
{
DEBUG_puts("_ppdCacheCreateWithFile: Bad IPP data.");
_cupsSetError(IPP_STATUS_ERROR_INTERNAL, _("Bad PPD cache file."), 1);
goto create_error;
}
}
else if (!_cups_strcasecmp(line, "NumBins"))
{
if (num_bins > 0)
{
DEBUG_puts("_ppdCacheCreateWithFile: NumBins listed multiple times.");
_cupsSetError(IPP_STATUS_ERROR_INTERNAL, _("Bad PPD cache file."), 1);
goto create_error;
}
if ((num_bins = atoi(value)) <= 0 || num_bins > 65536)
{
DEBUG_printf(("_ppdCacheCreateWithFile: Bad NumBins value %d on line "
"%d.", num_sizes, linenum));
_cupsSetError(IPP_STATUS_ERROR_INTERNAL, _("Bad PPD cache file."), 1);
goto create_error;
}
if ((pc->bins = calloc((size_t)num_bins, sizeof(pwg_map_t))) == NULL)
{
DEBUG_printf(("_ppdCacheCreateWithFile: Unable to allocate %d bins.",
num_sizes));
_cupsSetError(IPP_STATUS_ERROR_INTERNAL, strerror(errno), 0);
goto create_error;
}
}
else if (!_cups_strcasecmp(line, "Bin"))
{
if (sscanf(value, "%127s%40s", pwg_keyword, ppd_keyword) != 2)
{
DEBUG_printf(("_ppdCacheCreateWithFile: Bad Bin on line %d.", linenum));
_cupsSetError(IPP_STATUS_ERROR_INTERNAL, _("Bad PPD cache file."), 1);
goto create_error;
}
if (pc->num_bins >= num_bins)
{
DEBUG_printf(("_ppdCacheCreateWithFile: Too many Bin's on line %d.",
linenum));
_cupsSetError(IPP_STATUS_ERROR_INTERNAL, _("Bad PPD cache file."), 1);
goto create_error;
}
map = pc->bins + pc->num_bins;
map->pwg = _cupsStrAlloc(pwg_keyword);
map->ppd = _cupsStrAlloc(ppd_keyword);
pc->num_bins ++;
}
else if (!_cups_strcasecmp(line, "NumSizes"))
{
if (num_sizes > 0)
{
DEBUG_puts("_ppdCacheCreateWithFile: NumSizes listed multiple times.");
_cupsSetError(IPP_STATUS_ERROR_INTERNAL, _("Bad PPD cache file."), 1);
goto create_error;
}
if ((num_sizes = atoi(value)) < 0 || num_sizes > 65536)
{
DEBUG_printf(("_ppdCacheCreateWithFile: Bad NumSizes value %d on line "
"%d.", num_sizes, linenum));
_cupsSetError(IPP_STATUS_ERROR_INTERNAL, _("Bad PPD cache file."), 1);
goto create_error;
}
if (num_sizes > 0)
{
if ((pc->sizes = calloc((size_t)num_sizes, sizeof(pwg_size_t))) == NULL)
{
DEBUG_printf(("_ppdCacheCreateWithFile: Unable to allocate %d sizes.",
num_sizes));
_cupsSetError(IPP_STATUS_ERROR_INTERNAL, strerror(errno), 0);
goto create_error;
}
}
}
else if (!_cups_strcasecmp(line, "Size"))
{
if (pc->num_sizes >= num_sizes)
{
DEBUG_printf(("_ppdCacheCreateWithFile: Too many Size's on line %d.",
linenum));
_cupsSetError(IPP_STATUS_ERROR_INTERNAL, _("Bad PPD cache file."), 1);
goto create_error;
}
size = pc->sizes + pc->num_sizes;
if (sscanf(value, "%127s%40s%d%d%d%d%d%d", pwg_keyword, ppd_keyword,
&(size->width), &(size->length), &(size->left),
&(size->bottom), &(size->right), &(size->top)) != 8)
{
DEBUG_printf(("_ppdCacheCreateWithFile: Bad Size on line %d.",
linenum));
_cupsSetError(IPP_STATUS_ERROR_INTERNAL, _("Bad PPD cache file."), 1);
goto create_error;
}
size->map.pwg = _cupsStrAlloc(pwg_keyword);
size->map.ppd = _cupsStrAlloc(ppd_keyword);
pc->num_sizes ++;
}
else if (!_cups_strcasecmp(line, "CustomSize"))
{
if (pc->custom_max_width > 0)
{
DEBUG_printf(("_ppdCacheCreateWithFile: Too many CustomSize's on line "
"%d.", linenum));
_cupsSetError(IPP_STATUS_ERROR_INTERNAL, _("Bad PPD cache file."), 1);
goto create_error;
}
if (sscanf(value, "%d%d%d%d%d%d%d%d", &(pc->custom_max_width),
&(pc->custom_max_length), &(pc->custom_min_width),
&(pc->custom_min_length), &(pc->custom_size.left),
&(pc->custom_size.bottom), &(pc->custom_size.right),
&(pc->custom_size.top)) != 8)
{
DEBUG_printf(("_ppdCacheCreateWithFile: Bad CustomSize on line %d.",
linenum));
_cupsSetError(IPP_STATUS_ERROR_INTERNAL, _("Bad PPD cache file."), 1);
goto create_error;
}
pwgFormatSizeName(pwg_keyword, sizeof(pwg_keyword), "custom", "max",
pc->custom_max_width, pc->custom_max_length, NULL);
pc->custom_max_keyword = _cupsStrAlloc(pwg_keyword);
pwgFormatSizeName(pwg_keyword, sizeof(pwg_keyword), "custom", "min",
pc->custom_min_width, pc->custom_min_length, NULL);
pc->custom_min_keyword = _cupsStrAlloc(pwg_keyword);
}
else if (!_cups_strcasecmp(line, "SourceOption"))
{
pc->source_option = _cupsStrAlloc(value);
}
else if (!_cups_strcasecmp(line, "NumSources"))
{
if (num_sources > 0)
{
DEBUG_puts("_ppdCacheCreateWithFile: NumSources listed multiple "
"times.");
_cupsSetError(IPP_STATUS_ERROR_INTERNAL, _("Bad PPD cache file."), 1);
goto create_error;
}
if ((num_sources = atoi(value)) <= 0 || num_sources > 65536)
{
DEBUG_printf(("_ppdCacheCreateWithFile: Bad NumSources value %d on "
"line %d.", num_sources, linenum));
_cupsSetError(IPP_STATUS_ERROR_INTERNAL, _("Bad PPD cache file."), 1);
goto create_error;
}
if ((pc->sources = calloc((size_t)num_sources, sizeof(pwg_map_t))) == NULL)
{
DEBUG_printf(("_ppdCacheCreateWithFile: Unable to allocate %d sources.",
num_sources));
_cupsSetError(IPP_STATUS_ERROR_INTERNAL, strerror(errno), 0);
goto create_error;
}
}
else if (!_cups_strcasecmp(line, "Source"))
{
if (sscanf(value, "%127s%40s", pwg_keyword, ppd_keyword) != 2)
{
DEBUG_printf(("_ppdCacheCreateWithFile: Bad Source on line %d.",
linenum));
_cupsSetError(IPP_STATUS_ERROR_INTERNAL, _("Bad PPD cache file."), 1);
goto create_error;
}
if (pc->num_sources >= num_sources)
{
DEBUG_printf(("_ppdCacheCreateWithFile: Too many Source's on line %d.",
linenum));
_cupsSetError(IPP_STATUS_ERROR_INTERNAL, _("Bad PPD cache file."), 1);
goto create_error;
}
map = pc->sources + pc->num_sources;
map->pwg = _cupsStrAlloc(pwg_keyword);
map->ppd = _cupsStrAlloc(ppd_keyword);
pc->num_sources ++;
}
else if (!_cups_strcasecmp(line, "NumTypes"))
{
if (num_types > 0)
{
DEBUG_puts("_ppdCacheCreateWithFile: NumTypes listed multiple times.");
_cupsSetError(IPP_STATUS_ERROR_INTERNAL, _("Bad PPD cache file."), 1);
goto create_error;
}
if ((num_types = atoi(value)) <= 0 || num_types > 65536)
{
DEBUG_printf(("_ppdCacheCreateWithFile: Bad NumTypes value %d on "
"line %d.", num_types, linenum));
_cupsSetError(IPP_STATUS_ERROR_INTERNAL, _("Bad PPD cache file."), 1);
goto create_error;
}
if ((pc->types = calloc((size_t)num_types, sizeof(pwg_map_t))) == NULL)
{
DEBUG_printf(("_ppdCacheCreateWithFile: Unable to allocate %d types.",
num_types));
_cupsSetError(IPP_STATUS_ERROR_INTERNAL, strerror(errno), 0);
goto create_error;
}
}
else if (!_cups_strcasecmp(line, "Type"))
{
if (sscanf(value, "%127s%40s", pwg_keyword, ppd_keyword) != 2)
{
DEBUG_printf(("_ppdCacheCreateWithFile: Bad Type on line %d.",
linenum));
_cupsSetError(IPP_STATUS_ERROR_INTERNAL, _("Bad PPD cache file."), 1);
goto create_error;
}
if (pc->num_types >= num_types)
{
DEBUG_printf(("_ppdCacheCreateWithFile: Too many Type's on line %d.",
linenum));
_cupsSetError(IPP_STATUS_ERROR_INTERNAL, _("Bad PPD cache file."), 1);
goto create_error;
}
map = pc->types + pc->num_types;
map->pwg = _cupsStrAlloc(pwg_keyword);
map->ppd = _cupsStrAlloc(ppd_keyword);
pc->num_types ++;
}
else if (!_cups_strcasecmp(line, "Preset"))
{
/*
* Preset output-mode print-quality name=value ...
*/
print_color_mode = (_pwg_print_color_mode_t)strtol(value, &valueptr, 10);
print_quality = (_pwg_print_quality_t)strtol(valueptr, &valueptr, 10);
if (print_color_mode < _PWG_PRINT_COLOR_MODE_MONOCHROME ||
print_color_mode >= _PWG_PRINT_COLOR_MODE_MAX ||
print_quality < _PWG_PRINT_QUALITY_DRAFT ||
print_quality >= _PWG_PRINT_QUALITY_MAX ||
valueptr == value || !*valueptr)
{
DEBUG_printf(("_ppdCacheCreateWithFile: Bad Preset on line %d.",
linenum));
_cupsSetError(IPP_STATUS_ERROR_INTERNAL, _("Bad PPD cache file."), 1);
goto create_error;
}
pc->num_presets[print_color_mode][print_quality] =
cupsParseOptions(valueptr, 0,
pc->presets[print_color_mode] + print_quality);
}
else if (!_cups_strcasecmp(line, "SidesOption"))
pc->sides_option = _cupsStrAlloc(value);
else if (!_cups_strcasecmp(line, "Sides1Sided"))
pc->sides_1sided = _cupsStrAlloc(value);
else if (!_cups_strcasecmp(line, "Sides2SidedLong"))
pc->sides_2sided_long = _cupsStrAlloc(value);
else if (!_cups_strcasecmp(line, "Sides2SidedShort"))
pc->sides_2sided_short = _cupsStrAlloc(value);
else if (!_cups_strcasecmp(line, "Finishings"))
{
if (!pc->finishings)
pc->finishings =
cupsArrayNew3((cups_array_func_t)pwg_compare_finishings,
NULL, NULL, 0, NULL,
(cups_afree_func_t)pwg_free_finishings);
if ((finishings = calloc(1, sizeof(_pwg_finishings_t))) == NULL)
goto create_error;
finishings->value = (ipp_finishings_t)strtol(value, &valueptr, 10);
finishings->num_options = cupsParseOptions(valueptr, 0,
&(finishings->options));
cupsArrayAdd(pc->finishings, finishings);
}
else if (!_cups_strcasecmp(line, "MaxCopies"))
pc->max_copies = atoi(value);
else if (!_cups_strcasecmp(line, "ChargeInfoURI"))
pc->charge_info_uri = _cupsStrAlloc(value);
else if (!_cups_strcasecmp(line, "JobAccountId"))
pc->account_id = !_cups_strcasecmp(value, "true");
else if (!_cups_strcasecmp(line, "JobAccountingUserId"))
pc->accounting_user_id = !_cups_strcasecmp(value, "true");
else if (!_cups_strcasecmp(line, "JobPassword"))
pc->password = _cupsStrAlloc(value);
else if (!_cups_strcasecmp(line, "Mandatory"))
{
if (pc->mandatory)
_cupsArrayAddStrings(pc->mandatory, value, ' ');
else
pc->mandatory = _cupsArrayNewStrings(value, ' ');
}
else if (!_cups_strcasecmp(line, "SupportFile"))
{
if (!pc->support_files)
pc->support_files = cupsArrayNew3(NULL, NULL, NULL, 0,
(cups_acopy_func_t)_cupsStrAlloc,
(cups_afree_func_t)_cupsStrFree);
cupsArrayAdd(pc->support_files, value);
}
else
{
DEBUG_printf(("_ppdCacheCreateWithFile: Unknown %s on line %d.", line,
linenum));
}
}
if (pc->num_sizes < num_sizes)
{
DEBUG_printf(("_ppdCacheCreateWithFile: Not enough sizes (%d < %d).",
pc->num_sizes, num_sizes));
_cupsSetError(IPP_STATUS_ERROR_INTERNAL, _("Bad PPD cache file."), 1);
goto create_error;
}
if (pc->num_sources < num_sources)
{
DEBUG_printf(("_ppdCacheCreateWithFile: Not enough sources (%d < %d).",
pc->num_sources, num_sources));
_cupsSetError(IPP_STATUS_ERROR_INTERNAL, _("Bad PPD cache file."), 1);
goto create_error;
}
if (pc->num_types < num_types)
{
DEBUG_printf(("_ppdCacheCreateWithFile: Not enough types (%d < %d).",
pc->num_types, num_types));
_cupsSetError(IPP_STATUS_ERROR_INTERNAL, _("Bad PPD cache file."), 1);
goto create_error;
}
cupsFileClose(fp);
return (pc);
/*
* If we get here the file was bad - free any data and return...
*/
create_error:
cupsFileClose(fp);
_ppdCacheDestroy(pc);
if (attrs)
{
ippDelete(*attrs);
*attrs = NULL;
}
return (NULL);
}
| 0 |
[
"CWE-93"
] |
cups
|
07428f6a640ff93aa0b4cc69ca372e2cf8490e41
| 245,151,996,283,461,600,000,000,000,000,000,000,000 | 602 |
Only list supported PDLs (Issue #4923)
|
cmsToneCurve* CMSEXPORT cmsJoinToneCurve(cmsContext ContextID,
const cmsToneCurve* X,
const cmsToneCurve* Y, cmsUInt32Number nResultingPoints)
{
cmsToneCurve* out = NULL;
cmsToneCurve* Yreversed = NULL;
cmsFloat32Number t, x;
cmsFloat32Number* Res = NULL;
cmsUInt32Number i;
_cmsAssert(X != NULL);
_cmsAssert(Y != NULL);
Yreversed = cmsReverseToneCurveEx(nResultingPoints, Y);
if (Yreversed == NULL) goto Error;
Res = (cmsFloat32Number*) _cmsCalloc(ContextID, nResultingPoints, sizeof(cmsFloat32Number));
if (Res == NULL) goto Error;
//Iterate
for (i=0; i < nResultingPoints; i++) {
t = (cmsFloat32Number) i / (nResultingPoints-1);
x = cmsEvalToneCurveFloat(X, t);
Res[i] = cmsEvalToneCurveFloat(Yreversed, x);
}
// Allocate space for output
out = cmsBuildTabulatedToneCurveFloat(ContextID, nResultingPoints, Res);
Error:
if (Res != NULL) _cmsFree(ContextID, Res);
if (Yreversed != NULL) cmsFreeToneCurve(Yreversed);
return out;
}
| 0 |
[] |
Little-CMS
|
9cf2d61867375f867e6e80906a571d222bc2cbf3
| 234,598,155,013,167,420,000,000,000,000,000,000,000 | 38 |
Memory squeezing fix: LCMS2: AllocateToneCurveStruct
Simply add an extra check on the last allocation to avoid returning a
partially built struct.
|
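A minimal sketch, under assumed names (not the LCMS2 source), of the pattern the message describes: check the final member allocation as well, and unwind instead of returning a partially built struct.
```
#include <stdlib.h>

typedef struct {
        float  *table;   /* the last member allocation, which must also be checked */
        size_t  n;
} ToneCurveSketch;

/* Hypothetical constructor: every allocation, including the final one, is
 * checked; on failure everything allocated so far is freed and NULL is
 * returned rather than a half-initialized struct. */
static ToneCurveSketch *tone_curve_new(size_t n) {
        ToneCurveSketch *c = calloc(1, sizeof(*c));
        if (!c)
                return NULL;

        c->table = calloc(n, sizeof(float));
        if (!c->table) {         /* the extra check on the last allocation */
                free(c);
                return NULL;
        }
        c->n = n;
        return c;
}
```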
static void mod_wstunnel_patch_config(request_st * const r, plugin_data * const p) {
memcpy(&p->conf, &p->defaults, sizeof(plugin_config));
for (int i = 1, used = p->nconfig; i < used; ++i) {
if (config_check_cond(r, (uint32_t)p->cvlist[i].k_id))
mod_wstunnel_merge_config(&p->conf, p->cvlist+p->cvlist[i].v.u2[0]);
}
}
| 0 |
[
"CWE-476"
] |
lighttpd1.4
|
971773f1fae600074b46ef64f3ca1f76c227985f
| 306,561,465,553,595,000,000,000,000,000,000,000,000 | 7 |
[mod_wstunnel] fix crash with bad hybivers (fixes #3165)
(thx Michał Dardas)
x-ref:
"mod_wstunnel null pointer dereference"
https://redmine.lighttpd.net/issues/3165
|
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
int addr_len)
{
struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
struct inet_sock *inet = inet_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
struct tcp_sock *tp = tcp_sk(sk);
struct in6_addr *saddr = NULL, *final_p, final;
struct ipv6_txoptions *opt;
struct flowi6 fl6;
struct dst_entry *dst;
int addr_type;
int err;
if (addr_len < SIN6_LEN_RFC2133)
return -EINVAL;
if (usin->sin6_family != AF_INET6)
return -EAFNOSUPPORT;
memset(&fl6, 0, sizeof(fl6));
if (np->sndflow) {
fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
IP6_ECN_flow_init(fl6.flowlabel);
if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
struct ip6_flowlabel *flowlabel;
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
if (!flowlabel)
return -EINVAL;
fl6_sock_release(flowlabel);
}
}
/*
* connect() to INADDR_ANY means loopback (BSD'ism).
*/
if (ipv6_addr_any(&usin->sin6_addr))
usin->sin6_addr.s6_addr[15] = 0x1;
addr_type = ipv6_addr_type(&usin->sin6_addr);
if (addr_type & IPV6_ADDR_MULTICAST)
return -ENETUNREACH;
if (addr_type&IPV6_ADDR_LINKLOCAL) {
if (addr_len >= sizeof(struct sockaddr_in6) &&
usin->sin6_scope_id) {
/* If interface is set while binding, indices
* must coincide.
*/
if (sk->sk_bound_dev_if &&
sk->sk_bound_dev_if != usin->sin6_scope_id)
return -EINVAL;
sk->sk_bound_dev_if = usin->sin6_scope_id;
}
/* Connect to link-local address requires an interface */
if (!sk->sk_bound_dev_if)
return -EINVAL;
}
if (tp->rx_opt.ts_recent_stamp &&
!ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
tp->rx_opt.ts_recent = 0;
tp->rx_opt.ts_recent_stamp = 0;
tp->write_seq = 0;
}
sk->sk_v6_daddr = usin->sin6_addr;
np->flow_label = fl6.flowlabel;
/*
* TCP over IPv4
*/
if (addr_type == IPV6_ADDR_MAPPED) {
u32 exthdrlen = icsk->icsk_ext_hdr_len;
struct sockaddr_in sin;
SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
if (__ipv6_only_sock(sk))
return -ENETUNREACH;
sin.sin_family = AF_INET;
sin.sin_port = usin->sin6_port;
sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
icsk->icsk_af_ops = &ipv6_mapped;
sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif
err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
if (err) {
icsk->icsk_ext_hdr_len = exthdrlen;
icsk->icsk_af_ops = &ipv6_specific;
sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
tp->af_specific = &tcp_sock_ipv6_specific;
#endif
goto failure;
}
np->saddr = sk->sk_v6_rcv_saddr;
return err;
}
if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
saddr = &sk->sk_v6_rcv_saddr;
fl6.flowi6_proto = IPPROTO_TCP;
fl6.daddr = sk->sk_v6_daddr;
fl6.saddr = saddr ? *saddr : np->saddr;
fl6.flowi6_oif = sk->sk_bound_dev_if;
fl6.flowi6_mark = sk->sk_mark;
fl6.fl6_dport = usin->sin6_port;
fl6.fl6_sport = inet->inet_sport;
opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
final_p = fl6_update_dst(&fl6, opt, &final);
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
if (IS_ERR(dst)) {
err = PTR_ERR(dst);
goto failure;
}
if (!saddr) {
saddr = &fl6.saddr;
sk->sk_v6_rcv_saddr = *saddr;
}
/* set the source address */
np->saddr = *saddr;
inet->inet_rcv_saddr = LOOPBACK4_IPV6;
sk->sk_gso_type = SKB_GSO_TCPV6;
__ip6_dst_store(sk, dst, NULL, NULL);
if (tcp_death_row.sysctl_tw_recycle &&
!tp->rx_opt.ts_recent_stamp &&
ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
tcp_fetch_timewait_stamp(sk, dst);
icsk->icsk_ext_hdr_len = 0;
if (opt)
icsk->icsk_ext_hdr_len = opt->opt_flen +
opt->opt_nflen;
tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
inet->inet_dport = usin->sin6_port;
tcp_set_state(sk, TCP_SYN_SENT);
err = inet6_hash_connect(&tcp_death_row, sk);
if (err)
goto late_failure;
sk_set_txhash(sk);
if (!tp->write_seq && likely(!tp->repair))
tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
sk->sk_v6_daddr.s6_addr32,
inet->inet_sport,
inet->inet_dport);
err = tcp_connect(sk);
if (err)
goto late_failure;
return 0;
late_failure:
tcp_set_state(sk, TCP_CLOSE);
__sk_dst_reset(sk);
failure:
inet->inet_dport = 0;
sk->sk_route_caps = 0;
return err;
}
| 0 |
[
"CWE-416",
"CWE-284",
"CWE-264"
] |
linux
|
45f6fad84cc305103b28d73482b344d7f5b76f39
| 316,595,168,958,497,300,000,000,000,000,000,000,000 | 189 |
ipv6: add complete rcu protection around np->opt
This patch addresses multiple problems :
UDP/RAW sendmsg() needs to get a stable struct ipv6_txoptions
while the socket is not locked: other threads can change np->opt
concurrently. Dmitry posted a syzkaller
(http://github.com/google/syzkaller) program demonstrating the
use-after-free.
Starting with TCP/DCCP lockless listeners, tcp_v6_syn_recv_sock()
and dccp_v6_request_recv_sock() also need to use RCU protection
to dereference np->opt once (before calling ipv6_dup_options())
This patch adds full RCU protection to np->opt
Reported-by: Dmitry Vyukov <[email protected]>
Signed-off-by: Eric Dumazet <[email protected]>
Acked-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
switch (yych) {
case 'a': goto yy8;
default: goto yy5;
}
| 1 |
[
"CWE-787"
] |
re2c
|
039c18949190c5de5397eba504d2c75dad2ea9ca
| 126,089,511,431,492,390,000,000,000,000,000,000,000 | 4 |
Emit an error when repetition lower bound exceeds upper bound.
Historically this was allowed and re2c swapped the bounds. However, it
most likely indicates an error in user code and there is only a single
occurrence in the tests (and that test is an artificial one), so although
the change is backwards incompatible there is a low chance of breaking
real-world code.
This fixes the second test case in bug #394 "Stack overflow due to
recursion in src/dfa/dead_rules.cc" (the actual fix is to limit DFA size
but the test also has counted repetition with swapped bounds).
|
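A hedged sketch of the validation the message describes (illustrative names, not re2c's internals): reject a counted repetition whose lower bound exceeds its upper bound instead of silently swapping the bounds.
```
#include <errno.h>
#include <stdio.h>

/* Hypothetical check for a counted repetition {lo,hi}: report an error when
 * lo > hi instead of swapping the bounds behind the user's back. */
static int check_repetition_bounds(unsigned lo, unsigned hi) {
        if (lo > hi) {
                fprintf(stderr,
                        "error: repetition lower bound %u exceeds upper bound %u\n",
                        lo, hi);
                return -EINVAL;
        }
        return 0;
}
```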
TEST(MatchHeadersTest, MustMatchAllHeaderData) {
TestRequestHeaderMapImpl matching_headers_1{{"match-header-A", "1"}, {"match-header-B", "2"}};
TestRequestHeaderMapImpl matching_headers_2{
{"match-header-A", "3"}, {"match-header-B", "4"}, {"match-header-C", "5"}};
TestRequestHeaderMapImpl unmatching_headers_1{{"match-header-A", "6"}};
TestRequestHeaderMapImpl unmatching_headers_2{{"match-header-B", "7"}};
TestRequestHeaderMapImpl unmatching_headers_3{{"match-header-A", "8"}, {"match-header-C", "9"}};
TestRequestHeaderMapImpl unmatching_headers_4{{"match-header-C", "10"}, {"match-header-D", "11"}};
const std::string yamlA = R"EOF(
name: match-header-A
)EOF";
const std::string yamlB = R"EOF(
name: match-header-B
)EOF";
std::vector<HeaderUtility::HeaderDataPtr> header_data;
header_data.push_back(
std::make_unique<HeaderUtility::HeaderData>(parseHeaderMatcherFromYaml(yamlA)));
header_data.push_back(
std::make_unique<HeaderUtility::HeaderData>(parseHeaderMatcherFromYaml(yamlB)));
EXPECT_TRUE(HeaderUtility::matchHeaders(matching_headers_1, header_data));
EXPECT_TRUE(HeaderUtility::matchHeaders(matching_headers_2, header_data));
EXPECT_FALSE(HeaderUtility::matchHeaders(unmatching_headers_1, header_data));
EXPECT_FALSE(HeaderUtility::matchHeaders(unmatching_headers_2, header_data));
EXPECT_FALSE(HeaderUtility::matchHeaders(unmatching_headers_3, header_data));
EXPECT_FALSE(HeaderUtility::matchHeaders(unmatching_headers_4, header_data));
}
| 0 |
[] |
envoy
|
2c60632d41555ec8b3d9ef5246242be637a2db0f
| 116,969,746,691,909,880,000,000,000,000,000,000,000 | 29 |
http: header map security fixes for duplicate headers (#197)
Previously header matching did not match on all headers for
non-inline headers. This patch changes the default behavior to
always logically match on all headers. Multiple individual
headers will be logically concatenated with ',' similar to what
is done with inline headers. This makes the behavior effectively
consistent. This behavior can be temporarily reverted by setting
the runtime value "envoy.reloadable_features.header_match_on_all_headers"
to "false".
Targeted fixes have been additionally performed on the following
extensions which make them consider all duplicate headers by default as
a comma concatenated list:
1) Any extension using CEL matching on headers.
2) The header to metadata filter.
3) The JWT filter.
4) The Lua filter.
Like primary header matching used in routing, RBAC, etc. this behavior
can be disabled by setting the runtime value
"envoy.reloadable_features.header_match_on_all_headers" to false.
Finally, the setCopy() header map API previously only set the first
header in the case of duplicate non-inline headers. setCopy() now
behaves similarly to the other set*() APIs and replaces all found
headers with a single value. This may have had security implications
in the extauth filter which uses this API. This behavior can be disabled
by setting the runtime value
"envoy.reloadable_features.http_set_copy_replace_all_headers" to false.
Fixes https://github.com/envoyproxy/envoy-setec/issues/188
Signed-off-by: Matt Klein <[email protected]>
|
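A small hypothetical sketch of the matching semantics the message describes (this is not Envoy's HeaderMap API): duplicate non-inline header values are logically concatenated with ',' so a matcher sees one combined value.
```
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical helper: concatenate duplicate header values with ',' so a
 * matcher sees one logical value. Caller frees the result. */
static char *join_header_values(const char *a, const char *b) {
        size_t n = strlen(a) + 1 + strlen(b) + 1;   /* value ',' value '\0' */
        char *out = malloc(n);
        if (!out)
                return NULL;
        snprintf(out, n, "%s,%s", a, b);
        return out;
}

int main(void) {
        char *v = join_header_values("1", "2");
        if (v) {
                printf("match-header-A: %s\n", v);  /* prints "match-header-A: 1,2" */
                free(v);
        }
        return 0;
}
```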
int hidp_get_conninfo(struct hidp_conninfo *ci)
{
struct hidp_session *session;
int err = 0;
down_read(&hidp_session_sem);
session = __hidp_get_session(&ci->bdaddr);
if (session)
__hidp_copy_session(session, ci);
else
err = -ENOENT;
up_read(&hidp_session_sem);
return err;
}
| 0 |
[
"CWE-200"
] |
linux
|
0a9ab9bdb3e891762553f667066190c1d22ad62b
| 299,984,440,454,305,620,000,000,000,000,000,000,000 | 16 |
Bluetooth: Fix incorrect strncpy() in hidp_setup_hid()
The length parameter should be sizeof(req->name) - 1 because there is no
guarantee that the string provided by userspace will contain the trailing
'\0'.
Can be easily reproduced by manually setting req->name to 128 non-zero
bytes prior to ioctl(HIDPCONNADD) and checking the device name setup on
input subsystem:
$ cat /sys/devices/pnp0/00\:04/tty/ttyS0/hci0/hci0\:1/input8/name
AAAAAA[...]AAAAAAAAf0:af:f0:af:f0:af
("f0:af:f0:af:f0:af" is the device bluetooth address, taken from "phys"
field in struct hid_device due to overflow.)
Cc: [email protected]
Signed-off-by: Anderson Lizardo <[email protected]>
Acked-by: Marcel Holtmann <[email protected]>
Signed-off-by: Gustavo Padovan <[email protected]>
|
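A hedged sketch of the fix pattern described above (struct and field names are invented): bound the copy by sizeof(dest) - 1 and terminate explicitly, since strncpy() does not NUL-terminate when the source fills the buffer.
```
#include <string.h>

#define NAME_LEN 128

struct hid_info_sketch {
        char name[NAME_LEN];   /* stand-in for the device name field */
};

/* Hypothetical helper: copy an untrusted, possibly unterminated name.
 * Bounding by sizeof(dst->name) - 1 leaves room for the terminator that is
 * then written explicitly. */
static void copy_name(struct hid_info_sketch *dst, const char *src) {
        strncpy(dst->name, src, sizeof(dst->name) - 1);
        dst->name[sizeof(dst->name) - 1] = '\0';
}
```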
virtual void updateStrokeOpacity(GfxState *state) { }
| 0 |
[] |
poppler
|
abf167af8b15e5f3b510275ce619e6fdb42edd40
| 181,855,561,312,021,200,000,000,000,000,000,000,000 | 1 |
Implement tiling/patterns in SplashOutputDev
Fixes bug 13518
|
accountingReportStats(XML_Parser originParser, const char *epilog) {
const XML_Parser rootParser = getRootParserOf(originParser, NULL);
assert(! rootParser->m_parentParser);
if (rootParser->m_accounting.debugLevel < 1) {
return;
}
const float amplificationFactor
= accountingGetCurrentAmplification(rootParser);
fprintf(stderr,
"expat: Accounting(%p): Direct " EXPAT_FMT_ULL(
"10") ", indirect " EXPAT_FMT_ULL("10") ", amplification %8.2f%s",
(void *)rootParser, rootParser->m_accounting.countBytesDirect,
rootParser->m_accounting.countBytesIndirect,
(double)amplificationFactor, epilog);
}
| 0 |
[
"CWE-400",
"CWE-703"
] |
libexpat
|
9b4ce651b26557f16103c3a366c91934ecd439ab
| 260,247,334,999,923,460,000,000,000,000,000,000,000 | 17 |
Prevent stack exhaustion in build_model
It is possible to trigger stack exhaustion in the build_model function if
the depth of nested children in a DTD element is large enough. This happens
because build_node is a recursively called function within build_model.
The code has been adjusted to run iteratively. It uses the already
allocated heap space as temporary stack (growing from top to bottom).
Output is identical to recursive version. No new fields in data
structures were added, i.e. it keeps full API and ABI compatibility.
Instead the numchildren variable is used to temporarily keep the
index of items (uint vs int).
Documentation and readability improvements kindly added by Sebastian.
Proof of Concept:
1. Compile poc binary which parses XML file line by line
```
cat > poc.c << EOF
#include <err.h>
#include <expat.h>
#include <stdio.h>
XML_Parser parser;
static void XMLCALL
dummy_element_decl_handler(void *userData, const XML_Char *name,
XML_Content *model) {
XML_FreeContentModel(parser, model);
}
int main(int argc, char *argv[]) {
FILE *fp;
char *p = NULL;
size_t s = 0;
ssize_t l;
if (argc != 2)
errx(1, "usage: poc poc.xml");
if ((parser = XML_ParserCreate(NULL)) == NULL)
errx(1, "XML_ParserCreate");
XML_SetElementDeclHandler(parser, dummy_element_decl_handler);
if ((fp = fopen(argv[1], "r")) == NULL)
err(1, "fopen");
while ((l = getline(&p, &s, fp)) > 0)
if (XML_Parse(parser, p, (int)l, XML_FALSE) != XML_STATUS_OK)
errx(1, "XML_Parse");
XML_ParserFree(parser);
free(p);
fclose(fp);
return 0;
}
EOF
cc -std=c11 -D_POSIX_C_SOURCE=200809L -lexpat -o poc poc.c
```
2. Create XML file with a lot of nested groups in DTD element
```
cat > poc.xml.zst.b64 << EOF
KLUv/aQkACAAPAEA+DwhRE9DVFlQRSB1d3UgWwo8IUVMRU1FTlQgdXd1CigBAHv/58AJAgAQKAIA
ECgCABAoAgAQKAIAECgCABAoAgAQKHwAAChvd28KKQIA2/8gV24XBAIAECkCABApAgAQKQIAECkC
ABApAgAQKQIAEClVAAAgPl0+CgEA4A4I2VwwnQ==
EOF
base64 -d poc.xml.zst.b64 | zstd -d > poc.xml
```
3. Run Proof of Concept
```
./poc poc.xml
```
Co-authored-by: Sebastian Pipping <[email protected]>
|
static int php_array_element_export(zval **zv TSRMLS_DC, int num_args, va_list args, zend_hash_key *hash_key) /* {{{ */
{
int level;
smart_str *buf;
level = va_arg(args, int);
buf = va_arg(args, smart_str *);
if (hash_key->nKeyLength == 0) { /* numeric key */
buffer_append_spaces(buf, level+1);
smart_str_append_long(buf, (long) hash_key->h);
smart_str_appendl(buf, " => ", 4);
} else { /* string key */
char *key, *tmp_str;
int key_len, tmp_len;
key = php_addcslashes(hash_key->arKey, hash_key->nKeyLength - 1, &key_len, 0, "'\\", 2 TSRMLS_CC);
tmp_str = php_str_to_str_ex(key, key_len, "\0", 1, "' . \"\\0\" . '", 12, &tmp_len, 0, NULL);
buffer_append_spaces(buf, level + 1);
smart_str_appendc(buf, '\'');
smart_str_appendl(buf, tmp_str, tmp_len);
smart_str_appendl(buf, "' => ", 5);
efree(key);
efree(tmp_str);
}
php_var_export_ex(zv, level + 2, buf TSRMLS_CC);
smart_str_appendc(buf, ',');
smart_str_appendc(buf, '\n');
return 0;
}
| 1 |
[] |
php-src
|
e8429400d40e3c3aa4b22ba701991d698a2f3b2f
| 237,272,823,571,784,070,000,000,000,000,000,000,000 | 35 |
Fix bug #70172 - Use After Free Vulnerability in unserialize()
|
GF_Err Media_GetESD(GF_MediaBox *mdia, u32 sampleDescIndex, GF_ESD **out_esd, Bool true_desc_only)
{
u32 type;
GF_ESD *esd;
GF_MPEGSampleEntryBox *entry = NULL;
GF_ESDBox *ESDa;
GF_ProtectionSchemeInfoBox *sinf;
GF_SampleDescriptionBox *stsd = mdia->information->sampleTable->SampleDescription;
*out_esd = NULL;
if (!stsd || !stsd->child_boxes || !sampleDescIndex || (sampleDescIndex > gf_list_count(stsd->child_boxes)) )
return GF_BAD_PARAM;
esd = NULL;
entry = (GF_MPEGSampleEntryBox*)gf_list_get(stsd->child_boxes, sampleDescIndex - 1);
if (! entry) return GF_ISOM_INVALID_MEDIA;
*out_esd = NULL;
ESDa = NULL;
type = entry->type;
switch (type) {
case GF_ISOM_BOX_TYPE_ENCV:
case GF_ISOM_BOX_TYPE_ENCA:
case GF_ISOM_BOX_TYPE_ENCS:
case GF_ISOM_BOX_TYPE_ENCF:
case GF_ISOM_BOX_TYPE_ENCM:
case GF_ISOM_BOX_TYPE_ENCT:
sinf = (GF_ProtectionSchemeInfoBox *) gf_isom_box_find_child(entry->child_boxes, GF_ISOM_BOX_TYPE_SINF);
if (sinf && sinf->original_format) {
type = sinf->original_format->data_format;
}
break;
case GF_ISOM_BOX_TYPE_RESV:
sinf = (GF_ProtectionSchemeInfoBox *) gf_isom_box_find_child(entry->child_boxes, GF_ISOM_BOX_TYPE_RINF);
if (sinf && sinf->original_format) {
type = sinf->original_format->data_format;
}
break;
}
switch (type) {
case GF_ISOM_BOX_TYPE_MP4V:
ESDa = ((GF_MPEGVisualSampleEntryBox*)entry)->esd;
if (ESDa) esd = (GF_ESD *) ESDa->desc;
/*avc1 encrypted*/
else esd = ((GF_MPEGVisualSampleEntryBox*) entry)->emul_esd;
break;
case GF_ISOM_BOX_TYPE_AVC1:
case GF_ISOM_BOX_TYPE_AVC2:
case GF_ISOM_BOX_TYPE_AVC3:
case GF_ISOM_BOX_TYPE_AVC4:
case GF_ISOM_BOX_TYPE_HVC1:
case GF_ISOM_BOX_TYPE_HEV1:
case GF_ISOM_BOX_TYPE_HVC2:
case GF_ISOM_BOX_TYPE_HEV2:
case GF_ISOM_BOX_TYPE_HVT1:
case GF_ISOM_BOX_TYPE_264B:
case GF_ISOM_BOX_TYPE_265B:
case GF_ISOM_BOX_TYPE_DVHE:
case GF_ISOM_BOX_TYPE_VVC1:
case GF_ISOM_BOX_TYPE_VVI1:
esd = ((GF_MPEGVisualSampleEntryBox*) entry)->emul_esd;
break;
case GF_ISOM_BOX_TYPE_SVC1:
case GF_ISOM_BOX_TYPE_MVC1:
if ((mdia->mediaTrack->extractor_mode & 0x0000FFFF) != GF_ISOM_NALU_EXTRACT_INSPECT)
AVC_RewriteESDescriptorEx((GF_MPEGVisualSampleEntryBox*) entry, mdia);
else
AVC_RewriteESDescriptorEx((GF_MPEGVisualSampleEntryBox*) entry, NULL);
esd = ((GF_MPEGVisualSampleEntryBox*) entry)->emul_esd;
break;
case GF_ISOM_BOX_TYPE_LHE1:
case GF_ISOM_BOX_TYPE_LHV1:
if ((mdia->mediaTrack->extractor_mode & 0x0000FFFF) != GF_ISOM_NALU_EXTRACT_INSPECT)
HEVC_RewriteESDescriptorEx((GF_MPEGVisualSampleEntryBox*) entry, mdia);
else
HEVC_RewriteESDescriptorEx((GF_MPEGVisualSampleEntryBox*) entry, NULL);
esd = ((GF_MPEGVisualSampleEntryBox*) entry)->emul_esd;
break;
case GF_ISOM_BOX_TYPE_AV01:
AV1_RewriteESDescriptorEx((GF_MPEGVisualSampleEntryBox*)entry, mdia);
esd = ((GF_MPEGVisualSampleEntryBox*)entry)->emul_esd;
break;
case GF_ISOM_BOX_TYPE_VP08:
case GF_ISOM_BOX_TYPE_VP09:
VP9_RewriteESDescriptorEx((GF_MPEGVisualSampleEntryBox*)entry, mdia);
esd = ((GF_MPEGVisualSampleEntryBox*)entry)->emul_esd;
break;
case GF_ISOM_BOX_TYPE_MP4A:
{
GF_MPEGAudioSampleEntryBox *ase = (GF_MPEGAudioSampleEntryBox*)entry;
ESDa = ase->esd;
if (ESDa) {
esd = (GF_ESD *) ESDa->desc;
} else if (!true_desc_only) {
Bool make_mp4a = GF_FALSE;
sinf = (GF_ProtectionSchemeInfoBox *) gf_isom_box_find_child(entry->child_boxes, GF_ISOM_BOX_TYPE_SINF);
if (sinf && sinf->original_format) {
if (sinf->original_format->data_format==GF_ISOM_BOX_TYPE_MP4A) {
make_mp4a = GF_TRUE;
}
} else {
// Assuming that if no ESD is provided the stream is Basic MPEG-4 AAC LC
make_mp4a = GF_TRUE;
}
if (make_mp4a) {
GF_M4ADecSpecInfo aacinfo;
memset(&aacinfo, 0, sizeof(GF_M4ADecSpecInfo));
aacinfo.nb_chan = ase->channel_count;
aacinfo.base_object_type = GF_M4A_AAC_LC;
aacinfo.base_sr = ase->samplerate_hi;
*out_esd = gf_odf_desc_esd_new(0);
(*out_esd)->decoderConfig->streamType = GF_STREAM_AUDIO;
(*out_esd)->decoderConfig->objectTypeIndication = GF_CODECID_AAC_MPEG4;
gf_m4a_write_config(&aacinfo, &(*out_esd)->decoderConfig->decoderSpecificInfo->data, &(*out_esd)->decoderConfig->decoderSpecificInfo->dataLength);
}
}
}
break;
case GF_ISOM_BOX_TYPE_MP4S:
if (entry->internal_type==GF_ISOM_SAMPLE_ENTRY_MP4S) {
ESDa = entry->esd;
if (ESDa) esd = (GF_ESD *) ESDa->desc;
}
break;
#ifndef GPAC_DISABLE_TTXT
case GF_ISOM_BOX_TYPE_TX3G:
case GF_ISOM_BOX_TYPE_TEXT:
if (!true_desc_only && mdia->mediaTrack->moov->mov->convert_streaming_text) {
GF_Err e = gf_isom_get_ttxt_esd(mdia, out_esd);
if (e) return e;
break;
}
else
return GF_ISOM_INVALID_MEDIA;
#endif
#ifndef GPAC_DISABLE_VTT
case GF_ISOM_BOX_TYPE_WVTT:
{
GF_WebVTTSampleEntryBox*vtte = (GF_WebVTTSampleEntryBox*)entry;
esd = gf_odf_desc_esd_new(2);
*out_esd = esd;
esd->decoderConfig->streamType = GF_STREAM_TEXT;
esd->decoderConfig->objectTypeIndication = GF_CODECID_WEBVTT;
if (vtte->config) {
esd->decoderConfig->decoderSpecificInfo->dataLength = (u32) strlen(vtte->config->string);
esd->decoderConfig->decoderSpecificInfo->data = gf_malloc(sizeof(char)*esd->decoderConfig->decoderSpecificInfo->dataLength);
memcpy(esd->decoderConfig->decoderSpecificInfo->data, vtte->config->string, esd->decoderConfig->decoderSpecificInfo->dataLength);
}
}
break;
case GF_ISOM_BOX_TYPE_STPP:
case GF_ISOM_BOX_TYPE_SBTT:
case GF_ISOM_BOX_TYPE_STXT:
break;
#endif
case GF_ISOM_SUBTYPE_3GP_AMR:
case GF_ISOM_SUBTYPE_3GP_AMR_WB:
case GF_ISOM_SUBTYPE_3GP_EVRC:
case GF_ISOM_SUBTYPE_3GP_QCELP:
case GF_ISOM_SUBTYPE_3GP_SMV:
if (!true_desc_only) {
GF_Err e = gf_isom_get_3gpp_audio_esd(mdia->information->sampleTable, type, (GF_GenericAudioSampleEntryBox*)entry, out_esd);
if (e) return e;
break;
} else return GF_ISOM_INVALID_MEDIA;
case GF_ISOM_SUBTYPE_OPUS: {
GF_OpusSpecificBox *e = ((GF_MPEGAudioSampleEntryBox*)entry)->cfg_opus;
GF_BitStream *bs_out;
if (!e) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("ESD not found for Opus\n)"));
break;
}
*out_esd = gf_odf_desc_esd_new(2);
(*out_esd)->decoderConfig->streamType = GF_STREAM_AUDIO;
(*out_esd)->decoderConfig->objectTypeIndication = GF_CODECID_OPUS;
//serialize box with header - compatibility with ffmpeg
bs_out = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE);
gf_isom_box_size((GF_Box *) e);
gf_isom_box_write((GF_Box *) e, bs_out);
gf_bs_get_content(bs_out, & (*out_esd)->decoderConfig->decoderSpecificInfo->data, & (*out_esd)->decoderConfig->decoderSpecificInfo->dataLength);
gf_bs_del(bs_out);
break;
}
case GF_ISOM_SUBTYPE_3GP_H263:
if (true_desc_only) {
return GF_ISOM_INVALID_MEDIA;
} else {
esd = gf_odf_desc_esd_new(2);
*out_esd = esd;
esd->decoderConfig->streamType = GF_STREAM_VISUAL;
esd->decoderConfig->objectTypeIndication = GF_CODECID_H263;
break;
}
case GF_ISOM_SUBTYPE_MP3:
if (true_desc_only) {
return GF_ISOM_INVALID_MEDIA;
} else {
esd = gf_odf_desc_esd_new(2);
*out_esd = esd;
esd->decoderConfig->streamType = GF_STREAM_AUDIO;
esd->decoderConfig->objectTypeIndication = GF_CODECID_MPEG_AUDIO;
break;
}
case GF_ISOM_SUBTYPE_LSR1:
if (true_desc_only) {
return GF_ISOM_INVALID_MEDIA;
} else {
GF_LASeRSampleEntryBox*ptr = (GF_LASeRSampleEntryBox*)entry;
esd = gf_odf_desc_esd_new(2);
*out_esd = esd;
esd->decoderConfig->streamType = GF_STREAM_SCENE;
esd->decoderConfig->objectTypeIndication = GF_CODECID_LASER;
esd->decoderConfig->decoderSpecificInfo->dataLength = ptr->lsr_config->hdr_size;
esd->decoderConfig->decoderSpecificInfo->data = gf_malloc(sizeof(char)*ptr->lsr_config->hdr_size);
if (!esd->decoderConfig->decoderSpecificInfo->data) return GF_OUT_OF_MEM;
memcpy(esd->decoderConfig->decoderSpecificInfo->data, ptr->lsr_config->hdr, sizeof(char)*ptr->lsr_config->hdr_size);
break;
}
case GF_ISOM_SUBTYPE_MH3D_MHA1:
case GF_ISOM_SUBTYPE_MH3D_MHA2:
case GF_ISOM_SUBTYPE_MH3D_MHM1:
case GF_ISOM_SUBTYPE_MH3D_MHM2:
if (true_desc_only) {
return GF_ISOM_INVALID_MEDIA;
} else {
GF_MPEGAudioSampleEntryBox*ptr = (GF_MPEGAudioSampleEntryBox*)entry;
esd = gf_odf_desc_esd_new(2);
*out_esd = esd;
esd->decoderConfig->streamType = GF_STREAM_AUDIO;
if ((type==GF_ISOM_SUBTYPE_MH3D_MHA1) || (type==GF_ISOM_SUBTYPE_MH3D_MHA2))
esd->decoderConfig->objectTypeIndication = GF_CODECID_MPHA;
else
esd->decoderConfig->objectTypeIndication = GF_CODECID_MHAS;
if (ptr->cfg_mha) {
GF_BitStream *bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE);
gf_bs_write_u8(bs, ptr->cfg_mha->configuration_version);
gf_bs_write_u8(bs, ptr->cfg_mha->mha_pl_indication);
gf_bs_write_u8(bs, ptr->cfg_mha->reference_channel_layout);
gf_bs_write_u16(bs, ptr->cfg_mha->mha_config ? ptr->cfg_mha->mha_config_size : 0);
if (ptr->cfg_mha->mha_config && ptr->cfg_mha->mha_config_size)
gf_bs_write_data(bs, ptr->cfg_mha->mha_config, ptr->cfg_mha->mha_config_size);
gf_bs_get_content(bs, &esd->decoderConfig->decoderSpecificInfo->data, &esd->decoderConfig->decoderSpecificInfo->dataLength);
gf_bs_del(bs);
}
}
break;
default:
return GF_ISOM_INVALID_MEDIA;
}
if (true_desc_only) {
if (!esd) return GF_ISOM_INVALID_MEDIA;
*out_esd = esd;
return GF_OK;
} else {
if (!esd && !*out_esd) return GF_ISOM_INVALID_MEDIA;
if (*out_esd == NULL) return gf_odf_desc_copy((GF_Descriptor *)esd, (GF_Descriptor **)out_esd);
}
return GF_OK;
}
| 1 |
[
"CWE-476",
"CWE-401"
] |
gpac
|
328c6d682698fdb9878dbb4f282963d42c538c01
| 269,376,206,003,932,060,000,000,000,000,000,000,000 | 271 |
fixed #1756
|
static int replmd_process_la_group(struct ldb_module *module,
struct replmd_private *replmd_private,
struct la_group *la_group)
{
struct la_entry *la = NULL;
struct la_entry *prev = NULL;
int ret;
TALLOC_CTX *tmp_ctx = NULL;
struct la_entry *first_la = DLIST_TAIL(la_group->la_entries);
struct ldb_message *msg = NULL;
enum deletion_state deletion_state = OBJECT_NOT_DELETED;
struct ldb_context *ldb = ldb_module_get_ctx(module);
const struct dsdb_attribute *attr = NULL;
struct ldb_message_element *old_el = NULL;
struct parsed_dn *pdn_list = NULL;
replmd_link_changed change_type;
uint32_t num_changes = 0;
time_t t;
uint64_t seq_num = 0;
tmp_ctx = talloc_new(la_group);
if (tmp_ctx == NULL) {
return ldb_oom(ldb);
}
/*
* get the attribute being modified and the search result for the
* source object
*/
ret = replmd_get_la_entry_source(module, first_la, tmp_ctx, &attr,
&msg);
if (ret != LDB_SUCCESS) {
return ret;
}
/*
* Check for deleted objects per MS-DRSR 4.1.10.6.14
* ProcessLinkValue, because link updates are not applied to
* recycled and tombstone objects. We don't have to delete
* any existing link, that should have happened when the
* object deletion was replicated or initiated.
*
* This needs isDeleted and isRecycled to be included as
* attributes in the search and so in msg if set.
*/
replmd_deletion_state(module, msg, &deletion_state, NULL);
if (deletion_state >= OBJECT_RECYCLED) {
TALLOC_FREE(tmp_ctx);
return LDB_SUCCESS;
}
/*
* Now that we know the deletion_state, remove the extra
* attributes added for that purpose. We need to do this
* otherwise in the case of isDeleted: FALSE the modify will
* fail with:
*
* Failed to apply linked attribute change 'attribute 'isDeleted':
* invalid modify flags on
* 'CN=g1_1527570609273,CN=Users,DC=samba,DC=example,DC=com':
* 0x0'
*
	 * This is because isDeleted is a Boolean, so FALSE is a
* legitimate value (set by Samba's deletetest.py)
*/
ldb_msg_remove_attr(msg, "isDeleted");
ldb_msg_remove_attr(msg, "isRecycled");
/* get the msg->element[] for the link attribute being processed */
old_el = ldb_msg_find_element(msg, attr->lDAPDisplayName);
if (old_el == NULL) {
ret = ldb_msg_add_empty(msg, attr->lDAPDisplayName,
LDB_FLAG_MOD_REPLACE, &old_el);
if (ret != LDB_SUCCESS) {
ldb_module_oom(module);
return LDB_ERR_OPERATIONS_ERROR;
}
} else {
old_el->flags = LDB_FLAG_MOD_REPLACE;
}
/*
* go through and process the link target value(s) for this particular
* source object and attribute. For optimization, the same msg is used
* across multiple calls to replmd_process_linked_attribute().
* Note that we should not add or remove any msg attributes inside the
* loop (we should only add/modify *values* for the attribute being
* processed). Otherwise msg->elements is realloc'd and old_el/pdn_list
* pointers will be invalidated
*/
for (la = DLIST_TAIL(la_group->la_entries); la; la=prev) {
prev = DLIST_PREV(la);
DLIST_REMOVE(la_group->la_entries, la);
/*
* parse the existing links (this can be costly for a large
* group, so we try to minimize the times we do it)
*/
if (pdn_list == NULL) {
ret = get_parsed_dns_trusted_fallback(module,
replmd_private,
tmp_ctx, old_el,
&pdn_list,
attr->syntax->ldap_oid,
NULL);
if (ret != LDB_SUCCESS) {
return ret;
}
}
ret = replmd_process_linked_attribute(module, tmp_ctx,
replmd_private,
msg->dn, attr, la, NULL,
msg->elements, old_el,
pdn_list, &change_type);
if (ret != LDB_SUCCESS) {
replmd_txn_cleanup(replmd_private);
return ret;
}
/*
* Adding a link reallocs memory, and so invalidates all the
* pointers in pdn_list. Reparse the PDNs on the next loop
*/
if (change_type == LINK_CHANGE_ADDED) {
TALLOC_FREE(pdn_list);
}
if (change_type != LINK_CHANGE_NONE) {
num_changes++;
}
if ((++replmd_private->num_processed % 8192) == 0) {
DBG_NOTICE("Processed %u/%u linked attributes\n",
replmd_private->num_processed,
replmd_private->total_links);
}
}
/*
* it's possible we're already up-to-date and so don't need to modify
* the object at all (e.g. doing a 'drs replicate --full-sync')
*/
if (num_changes == 0) {
TALLOC_FREE(tmp_ctx);
return LDB_SUCCESS;
}
/*
* Note that adding the whenChanged/etc attributes below will realloc
* msg->elements, invalidating the existing element/parsed-DN pointers
*/
old_el = NULL;
TALLOC_FREE(pdn_list);
/* update whenChanged/uSNChanged as the object has changed */
t = time(NULL);
ret = ldb_sequence_number(ldb, LDB_SEQ_HIGHEST_SEQ,
&seq_num);
if (ret != LDB_SUCCESS) {
return ret;
}
ret = add_time_element(msg, "whenChanged", t);
if (ret != LDB_SUCCESS) {
ldb_operr(ldb);
return ret;
}
ret = add_uint64_element(ldb, msg, "uSNChanged", seq_num);
if (ret != LDB_SUCCESS) {
ldb_operr(ldb);
return ret;
}
/* apply the link changes to the source object */
ret = linked_attr_modify(module, msg, NULL);
if (ret != LDB_SUCCESS) {
ldb_debug(ldb, LDB_DEBUG_WARNING,
"Failed to apply linked attribute change "
"Error: '%s' DN: '%s' Attribute: '%s'\n",
ldb_errstring(ldb),
ldb_dn_get_linearized(msg->dn),
attr->lDAPDisplayName);
TALLOC_FREE(tmp_ctx);
return ret;
}
TALLOC_FREE(tmp_ctx);
return LDB_SUCCESS;
}
| 0 |
[
"CWE-200"
] |
samba
|
0a3aa5f908e351201dc9c4d4807b09ed9eedff77
| 243,534,986,210,832,750,000,000,000,000,000,000,000 | 193 |
CVE-2022-32746 ldb: Make use of functions for appending to an ldb_message
This aims to minimise usage of the error-prone pattern of searching for
a just-added message element in order to make modifications to it (and
potentially finding the wrong element).
BUG: https://bugzilla.samba.org/show_bug.cgi?id=15009
Signed-off-by: Joseph Sutton <[email protected]>
|
complete_job (GVfsJob *job,
GError *error)
{
if (error != NULL)
{
g_vfs_job_failed_from_error (job, error);
g_error_free (error);
return;
}
g_vfs_job_succeeded (job);
}
| 0 |
[] |
gvfs
|
d7d362995aa0cb8905c8d5c2a2a4c305d2ffff80
| 159,578,131,451,250,380,000,000,000,000,000,000,000 | 12 |
admin: Use fsuid to ensure correct file ownership
Files created over the admin backend should be owned by root, but they are
owned by the user instead. This is because the daemon drops the uid to
make the D-Bus connection work. Use fsuid and euid to fix this issue.
Closes: https://gitlab.gnome.org/GNOME/gvfs/issues/21
|
static int process_backtrace(int argc, char *argv[]) {
char *context[_CONTEXT_MAX] = {};
_cleanup_free_ char *message = NULL;
_cleanup_free_ struct iovec *iovec = NULL;
size_t n_iovec, n_allocated, n_to_free = 0, i;
int r;
JournalImporter importer = {
.fd = STDIN_FILENO,
};
log_debug("Processing backtrace on stdin...");
if (argc < CONTEXT_COMM + 1)
return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
"Not enough arguments passed (%i, expected %i).",
argc - 1, CONTEXT_COMM + 1 - 1);
context[CONTEXT_PID] = argv[2 + CONTEXT_PID];
context[CONTEXT_UID] = argv[2 + CONTEXT_UID];
context[CONTEXT_GID] = argv[2 + CONTEXT_GID];
context[CONTEXT_SIGNAL] = argv[2 + CONTEXT_SIGNAL];
context[CONTEXT_TIMESTAMP] = argv[2 + CONTEXT_TIMESTAMP];
context[CONTEXT_RLIMIT] = argv[2 + CONTEXT_RLIMIT];
context[CONTEXT_HOSTNAME] = argv[2 + CONTEXT_HOSTNAME];
n_allocated = 34 + COREDUMP_STORAGE_EXTERNAL;
/* 26 metadata, 2 static, +unknown input, 4 storage, rounded up */
iovec = new(struct iovec, n_allocated);
if (!iovec)
return log_oom();
r = gather_pid_metadata(context, argv + 2 + CONTEXT_COMM, iovec, &n_to_free);
if (r < 0)
goto finish;
if (r > 0) {
/* This was a special crash, and has already been processed. */
r = 0;
goto finish;
}
n_iovec = n_to_free;
for (;;) {
r = journal_importer_process_data(&importer);
if (r < 0) {
log_error_errno(r, "Failed to parse journal entry on stdin: %m");
goto finish;
}
if (r == 1 || /* complete entry */
journal_importer_eof(&importer)) /* end of data */
break;
}
if (!GREEDY_REALLOC(iovec, n_allocated, n_iovec + importer.iovw.count + 2))
return log_oom();
if (journal_importer_eof(&importer)) {
log_warning("Did not receive a full journal entry on stdin, ignoring message sent by reporter");
message = strjoin("MESSAGE=Process ", context[CONTEXT_PID],
" (", context[CONTEXT_COMM], ")"
" of user ", context[CONTEXT_UID],
" failed with ", context[CONTEXT_SIGNAL]);
if (!message) {
r = log_oom();
goto finish;
}
iovec[n_iovec++] = IOVEC_MAKE_STRING(message);
} else {
for (i = 0; i < importer.iovw.count; i++)
iovec[n_iovec++] = importer.iovw.iovec[i];
}
iovec[n_iovec++] = IOVEC_MAKE_STRING("MESSAGE_ID=" SD_MESSAGE_BACKTRACE_STR);
assert_cc(2 == LOG_CRIT);
iovec[n_iovec++] = IOVEC_MAKE_STRING("PRIORITY=2");
assert(n_iovec <= n_allocated);
r = sd_journal_sendv(iovec, n_iovec);
if (r < 0)
log_error_errno(r, "Failed to log backtrace: %m");
finish:
for (i = 0; i < n_to_free; i++)
free(iovec[i].iov_base);
/* Those fields are allocated by gather_pid_metadata */
free(context[CONTEXT_COMM]);
free(context[CONTEXT_EXE]);
free(context[CONTEXT_UNIT]);
return r;
}
| 0 |
[
"CWE-770"
] |
systemd
|
084eeb865ca63887098e0945fb4e93c852b91b0f
| 176,483,612,419,241,000,000,000,000,000,000,000,000 | 93 |
journald: do not store the iovec entry for process commandline on stack
This fixes a crash where we would read the command line, whose length is under
the control of the sending program, and then crash when trying to create a stack
allocation for it.
CVE-2018-16864
https://bugzilla.redhat.com/show_bug.cgi?id=1653855
The message actually doesn't get written to disk, because
journal_file_append_entry() returns -E2BIG.
|
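A minimal, hypothetical sketch (not the journald code; the helper name is invented) of the remedy the message implies: duplicate data whose length the sender controls on the heap before building the iovec, instead of using a stack allocation of attacker-chosen size.
```
#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>

/* Hypothetical helper: build an iovec entry for a field whose length is
 * controlled by the sending process. The copy lives on the heap, so an
 * oversized command line cannot exhaust the stack. Caller frees iov_base. */
static int iovec_from_field(struct iovec *iov, const char *data, size_t len) {
        char *copy = malloc(len);
        if (!copy)
                return -1;
        memcpy(copy, data, len);
        iov->iov_base = copy;
        iov->iov_len = len;
        return 0;
}
```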
static int ext4_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
handle_t *handle = NULL;
struct ext4_renament old = {
.dir = old_dir,
.dentry = old_dentry,
.inode = d_inode(old_dentry),
};
struct ext4_renament new = {
.dir = new_dir,
.dentry = new_dentry,
.inode = d_inode(new_dentry),
};
u8 new_file_type;
int retval;
struct timespec64 ctime;
if ((ext4_test_inode_flag(new_dir, EXT4_INODE_PROJINHERIT) &&
!projid_eq(EXT4_I(new_dir)->i_projid,
EXT4_I(old_dentry->d_inode)->i_projid)) ||
(ext4_test_inode_flag(old_dir, EXT4_INODE_PROJINHERIT) &&
!projid_eq(EXT4_I(old_dir)->i_projid,
EXT4_I(new_dentry->d_inode)->i_projid)))
return -EXDEV;
retval = dquot_initialize(old.dir);
if (retval)
return retval;
retval = dquot_initialize(new.dir);
if (retval)
return retval;
old.bh = ext4_find_entry(old.dir, &old.dentry->d_name,
&old.de, &old.inlined);
if (IS_ERR(old.bh))
return PTR_ERR(old.bh);
/*
* Check for inode number is _not_ due to possible IO errors.
* We might rmdir the source, keep it as pwd of some process
* and merrily kill the link to whatever was created under the
* same name. Goodbye sticky bit ;-<
*/
retval = -ENOENT;
if (!old.bh || le32_to_cpu(old.de->inode) != old.inode->i_ino)
goto end_rename;
new.bh = ext4_find_entry(new.dir, &new.dentry->d_name,
&new.de, &new.inlined);
if (IS_ERR(new.bh)) {
retval = PTR_ERR(new.bh);
new.bh = NULL;
goto end_rename;
}
/* RENAME_EXCHANGE case: old *and* new must both exist */
if (!new.bh || le32_to_cpu(new.de->inode) != new.inode->i_ino)
goto end_rename;
handle = ext4_journal_start(old.dir, EXT4_HT_DIR,
(2 * EXT4_DATA_TRANS_BLOCKS(old.dir->i_sb) +
2 * EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2));
if (IS_ERR(handle)) {
retval = PTR_ERR(handle);
handle = NULL;
goto end_rename;
}
if (IS_DIRSYNC(old.dir) || IS_DIRSYNC(new.dir))
ext4_handle_sync(handle);
if (S_ISDIR(old.inode->i_mode)) {
old.is_dir = true;
retval = ext4_rename_dir_prepare(handle, &old);
if (retval)
goto end_rename;
}
if (S_ISDIR(new.inode->i_mode)) {
new.is_dir = true;
retval = ext4_rename_dir_prepare(handle, &new);
if (retval)
goto end_rename;
}
/*
* Other than the special case of overwriting a directory, parents'
* nlink only needs to be modified if this is a cross directory rename.
*/
if (old.dir != new.dir && old.is_dir != new.is_dir) {
old.dir_nlink_delta = old.is_dir ? -1 : 1;
new.dir_nlink_delta = -old.dir_nlink_delta;
retval = -EMLINK;
if ((old.dir_nlink_delta > 0 && EXT4_DIR_LINK_MAX(old.dir)) ||
(new.dir_nlink_delta > 0 && EXT4_DIR_LINK_MAX(new.dir)))
goto end_rename;
}
new_file_type = new.de->file_type;
retval = ext4_setent(handle, &new, old.inode->i_ino, old.de->file_type);
if (retval)
goto end_rename;
retval = ext4_setent(handle, &old, new.inode->i_ino, new_file_type);
if (retval)
goto end_rename;
/*
* Like most other Unix systems, set the ctime for inodes on a
* rename.
*/
ctime = current_time(old.inode);
old.inode->i_ctime = ctime;
new.inode->i_ctime = ctime;
retval = ext4_mark_inode_dirty(handle, old.inode);
if (unlikely(retval))
goto end_rename;
retval = ext4_mark_inode_dirty(handle, new.inode);
if (unlikely(retval))
goto end_rename;
if (old.dir_bh) {
retval = ext4_rename_dir_finish(handle, &old, new.dir->i_ino);
if (retval)
goto end_rename;
}
if (new.dir_bh) {
retval = ext4_rename_dir_finish(handle, &new, old.dir->i_ino);
if (retval)
goto end_rename;
}
ext4_update_dir_count(handle, &old);
ext4_update_dir_count(handle, &new);
retval = 0;
end_rename:
brelse(old.dir_bh);
brelse(new.dir_bh);
brelse(old.bh);
brelse(new.bh);
if (handle)
ext4_journal_stop(handle);
return retval;
}
| 0 |
[
"CWE-125"
] |
linux
|
5872331b3d91820e14716632ebb56b1399b34fe1
| 220,851,723,496,720,350,000,000,000,000,000,000,000 | 143 |
ext4: fix potential negative array index in do_split()
If for any reason a directory passed to do_split() does not have enough
active entries to exceed half the size of the block, we can end up
iterating over all "count" entries without finding a split point.
In this case, count == move, and split will be zero, and we will
attempt a negative index into map[].
Guard against this by detecting this case, and falling back to
split-to-half-of-count instead; in this case we will still have
plenty of space (> half blocksize) in each split block.
Fixes: ef2b02d3e617 ("ext34: ensure do_split leaves enough free space in both blocks")
Signed-off-by: Eric Sandeen <[email protected]>
Reviewed-by: Andreas Dilger <[email protected]>
Reviewed-by: Jan Kara <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Theodore Ts'o <[email protected]>
|
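The commit message above describes falling back to a half-of-count split when the scan finds no split point, so map[] is never indexed at -1. A simplified sketch of that guard, with invented names rather than the real do_split() internals:

/* If every entry was counted as movable (count == move), split would be 0
 * and map[split - 1] would read before the array; fall back to count / 2. */
static int choose_split(int count, int move)
{
        int split = count - move;
        if (split <= 0)
                split = count / 2;
        return split;
}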
static void nfs4_opendata_free(struct kref *kref)
{
struct nfs4_opendata *p = container_of(kref,
struct nfs4_opendata, kref);
nfs_free_seqid(p->o_arg.seqid);
if (p->state != NULL)
nfs4_put_open_state(p->state);
nfs4_put_state_owner(p->owner);
dput(p->dir);
path_put(&p->path);
kfree(p);
}
| 0 |
[
"CWE-703"
] |
linux
|
dc0b027dfadfcb8a5504f7d8052754bf8d501ab9
| 161,449,160,720,260,840,000,000,000,000,000,000,000 | 13 |
NFSv4: Convert the open and close ops to use fmode
Signed-off-by: Trond Myklebust <[email protected]>
|
static int fat_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
struct inode *inode = filp->f_dentry->d_inode;
return __fat_readdir(inode, filp, dirent, filldir, 0, 0);
}
| 0 |
[] |
linux-2.6
|
188f83dfe0eeecd1427d0d255cc97dbf7ef6b4b7
| 169,818,358,877,457,570,000,000,000,000,000,000,000 | 5 |
[PATCH] BLOCK: Move the msdos device ioctl compat stuff to the msdos driver [try #6]
Move the msdos device ioctl compat stuff from fs/compat_ioctl.c to the msdos
driver so that the msdos header file doesn't need to be included.
Signed-Off-By: David Howells <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
|
static CURLcode imap_perform_fetch(struct connectdata *conn)
{
CURLcode result = CURLE_OK;
struct IMAP *imap = conn->data->req.protop;
/* Check we have a UID */
if(!imap->uid) {
failf(conn->data, "Cannot FETCH without a UID.");
return CURLE_URL_MALFORMAT;
}
/* Send the FETCH command */
if(imap->partial)
result = imap_sendf(conn, "FETCH %s BODY[%s]<%s>",
imap->uid,
imap->section ? imap->section : "",
imap->partial);
else
result = imap_sendf(conn, "FETCH %s BODY[%s]",
imap->uid,
imap->section ? imap->section : "");
if(!result)
state(conn, IMAP_FETCH);
return result;
}
| 0 |
[
"CWE-119"
] |
curl
|
13c9a9ded3ae744a1e11cbc14e9146d9fa427040
| 143,445,259,068,131,790,000,000,000,000,000,000,000 | 27 |
imap: if a FETCH response has no size, don't call write callback
CVE-2017-1000257
Reported-by: Brian Carpenter and 0xd34db347
Also detected by OSS-Fuzz: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=3586
|
static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
{
desc->g = (flags >> 23) & 1;
desc->d = (flags >> 22) & 1;
desc->l = (flags >> 21) & 1;
desc->avl = (flags >> 20) & 1;
desc->p = (flags >> 15) & 1;
desc->dpl = (flags >> 13) & 3;
desc->s = (flags >> 12) & 1;
desc->type = (flags >> 8) & 15;
}
| 0 |
[
"CWE-284"
] |
linux
|
33ab91103b3415e12457e3104f0e4517ce12d0f3
| 269,896,266,522,338,400,000,000,000,000,000,000,000 | 11 |
KVM: x86: fix emulation of "MOV SS, null selector"
This is CVE-2017-2583. On Intel this causes a failed vmentry because
SS's type is neither 3 nor 7 (even though the manual says this check is
only done for usable SS, and the dmesg splat says that SS is unusable!).
On AMD it's worse: svm.c is confused and sets CPL to 0 in the vmcb.
The fix fabricates a data segment descriptor when SS is set to a null
selector, so that CPL and SS.DPL are set correctly in the VMCS/vmcb.
Furthermore, only allow setting SS to a NULL selector if SS.RPL < 3;
this in turn ensures CPL < 3 because RPL must be equal to CPL.
Thanks to Andy Lutomirski and Willy Tarreau for help in analyzing
the bug and deciphering the manuals.
Reported-by: Xiaohan Zhang <[email protected]>
Fixes: 79d5b4c3cd809c770d4bf9812635647016c56011
Cc: [email protected]
Signed-off-by: Paolo Bonzini <[email protected]>
|
int uwsgi_build_cap(char *what, cap_value_t ** cap) {
int cap_id;
char *caps = uwsgi_str(what);
int pos = 0;
int count = 0;
char *p, *ctx = NULL;
uwsgi_foreach_token(caps, ",", p, ctx) {
if (is_a_number(p)) {
count++;
}
else {
cap_id = uwsgi_get_cap_id(p);
if (cap_id != -1) {
count++;
}
else {
uwsgi_log("[security] unknown capability: %s\n", p);
}
}
}
free(caps);
*cap = uwsgi_malloc(sizeof(cap_value_t) * count);
caps = uwsgi_str(what);
ctx = NULL;
uwsgi_foreach_token(caps, ",", p, ctx) {
if (is_a_number(p)) {
cap_id = atoi(p);
}
else {
cap_id = uwsgi_get_cap_id(p);
}
if (cap_id != -1) {
(*cap)[pos] = cap_id;
uwsgi_log("setting capability %s [%d]\n", p, cap_id);
pos++;
}
else {
uwsgi_log("[security] unknown capability: %s\n", p);
}
}
free(caps);
return count;
}
| 0 |
[
"CWE-119",
"CWE-703",
"CWE-787"
] |
uwsgi
|
cb4636f7c0af2e97a4eef7a3cdcbd85a71247bfe
| 197,711,871,651,465,500,000,000,000,000,000,000,000 | 48 |
improve uwsgi_expand_path() to sanitize input, avoiding stack corruption and potential security issue
|
static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct page *page = vmf->page;
struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
unsigned long last_index;
u64 pos = page->index << PAGE_CACHE_SHIFT;
unsigned int data_blocks, ind_blocks, rblocks;
struct gfs2_holder gh;
struct gfs2_alloc *al;
loff_t size;
int ret;
/* Wait if fs is frozen. This is racy so we check again later on
* and retry if the fs has been frozen after the page lock has
* been acquired
*/
vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
ret = gfs2_glock_nq(&gh);
if (ret)
goto out;
set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
set_bit(GIF_SW_PAGED, &ip->i_flags);
if (!gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE)) {
lock_page(page);
if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
ret = -EAGAIN;
unlock_page(page);
}
goto out_unlock;
}
ret = -ENOMEM;
al = gfs2_alloc_get(ip);
if (al == NULL)
goto out_unlock;
ret = gfs2_quota_lock_check(ip);
if (ret)
goto out_alloc_put;
gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);
al->al_requested = data_blocks + ind_blocks;
ret = gfs2_inplace_reserve(ip);
if (ret)
goto out_quota_unlock;
rblocks = RES_DINODE + ind_blocks;
if (gfs2_is_jdata(ip))
rblocks += data_blocks ? data_blocks : 1;
if (ind_blocks || data_blocks) {
rblocks += RES_STATFS + RES_QUOTA;
rblocks += gfs2_rg_blocks(ip);
}
ret = gfs2_trans_begin(sdp, rblocks, 0);
if (ret)
goto out_trans_fail;
lock_page(page);
ret = -EINVAL;
size = i_size_read(inode);
last_index = (size - 1) >> PAGE_CACHE_SHIFT;
/* Check page index against inode size */
if (size == 0 || (page->index > last_index))
goto out_trans_end;
ret = -EAGAIN;
/* If truncated, we must retry the operation, we may have raced
* with the glock demotion code.
*/
if (!PageUptodate(page) || page->mapping != inode->i_mapping)
goto out_trans_end;
/* Unstuff, if required, and allocate backing blocks for page */
ret = 0;
if (gfs2_is_stuffed(ip))
ret = gfs2_unstuff_dinode(ip, page);
if (ret == 0)
ret = gfs2_allocate_page_backing(page);
out_trans_end:
if (ret)
unlock_page(page);
gfs2_trans_end(sdp);
out_trans_fail:
gfs2_inplace_release(ip);
out_quota_unlock:
gfs2_quota_unlock(ip);
out_alloc_put:
gfs2_alloc_put(ip);
out_unlock:
gfs2_glock_dq(&gh);
out:
gfs2_holder_uninit(&gh);
if (ret == 0) {
set_page_dirty(page);
/* This check must be post dropping of transaction lock */
if (inode->i_sb->s_frozen == SB_UNFROZEN) {
wait_on_page_writeback(page);
} else {
ret = -EAGAIN;
unlock_page(page);
}
}
return block_page_mkwrite_return(ret);
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
linux
|
64dd153c83743af81f20924c6343652d731eeecb
| 79,520,893,353,447,100,000,000,000,000,000,000,000 | 110 |
GFS2: rewrite fallocate code to write blocks directly
GFS2's fallocate code currently goes through the page cache. Since it's only
writing to the end of the file or to holes in it, it doesn't need to, and it
was causing issues on low memory environments. This patch pulls in some of
Steve's block allocation work, and uses it to simply allocate the blocks for
the file, and zero them out at allocation time. It provides a slight
performance increase, and it dramatically simplifies the code.
Signed-off-by: Benjamin Marzinski <[email protected]>
Signed-off-by: Steven Whitehouse <[email protected]>
|
gx_default_fill_rectangle_hl_color(gx_device *pdev,
const gs_fixed_rect *rect,
const gs_gstate *pgs, const gx_drawing_color *pdcolor,
const gx_clip_path *pcpath)
{
return_error(gs_error_rangecheck);
}
| 0 |
[] |
ghostpdl
|
c9b362ba908ca4b1d7c72663a33229588012d7d9
| 23,044,792,503,989,180,000,000,000,000,000,000,000 | 7 |
Bug 699670: disallow copying of the epo device
The erasepage optimisation (epo) subclass device shouldn't be allowed to be
copied because the subclass private data, child and parent pointers end up
being shared between the original device and the copy.
Add an epo_finish_copydevice which NULLs the three offending pointers, and
then communicates to the caller that copying is not allowed.
This also exposed a separate issue with the stype for subclasses devices.
Devices are, I think, unique in having two stype objects associated with them:
the usual one in the memory manager header, and the other stored in the device
structere directly. In order for the stype to be correct, we have to use the
stype for the incoming device, with the ssize of the original device (ssize
should reflect the size of the memory allocation). We correctly did so with the
stype in the device structure, but then used the prototype device's stype to
patch the memory manager stype - meaning the ssize potentially no longer
matched the allocated memory. This caused problems in the garbager where there
is an implicit assumption that the size of a single object clump (c_alone == 1)
is also the size (+ memory manager overheads) of the single object it contains.
The solution is to use the same stype instance to patch the memory manager
data as we do in the device structure (with the correct ssize).
|
ins_left(void)
{
pos_T tpos;
int end_change = dont_sync_undo == FALSE; // end undoable change
#ifdef FEAT_FOLDING
if ((fdo_flags & FDO_HOR) && KeyTyped)
foldOpenCursor();
#endif
undisplay_dollar();
tpos = curwin->w_cursor;
if (oneleft() == OK)
{
#if defined(FEAT_XIM) && defined(FEAT_GUI_GTK)
// Only call start_arrow() when not busy with preediting, it will
// break undo. K_LEFT is inserted in im_correct_cursor().
if (p_imst == IM_OVER_THE_SPOT || !im_is_preediting())
#endif
{
start_arrow_with_change(&tpos, end_change);
if (!end_change)
AppendCharToRedobuff(K_LEFT);
}
#ifdef FEAT_RIGHTLEFT
// If exit reversed string, position is fixed
if (revins_scol != -1 && (int)curwin->w_cursor.col >= revins_scol)
revins_legal++;
revins_chars++;
#endif
}
/*
* if 'whichwrap' set for cursor in insert mode may go to
* previous line
*/
else if (vim_strchr(p_ww, '[') != NULL && curwin->w_cursor.lnum > 1)
{
// always break undo when moving upwards/downwards, else undo may break
start_arrow(&tpos);
--(curwin->w_cursor.lnum);
coladvance((colnr_T)MAXCOL);
curwin->w_set_curswant = TRUE; // so we stay at the end
}
else
vim_beep(BO_CRSR);
dont_sync_undo = FALSE;
}
| 0 |
[] |
vim
|
98a336dd497d3422e7efeef9f24cc9e25aeb8a49
| 18,084,795,655,761,365,000,000,000,000,000,000,000 | 47 |
patch 8.2.0133: invalid memory access with search command
Problem: Invalid memory access with search command.
Solution: When :normal runs out of characters in bracketed paste mode break
out of the loop.(closes #5511)
|
read_2007_section_objfreespace (Bit_Chain *restrict dat, Dwg_Data *restrict dwg,
r2007_section *restrict sections_map,
r2007_page *restrict pages_map)
{
Bit_Chain old_dat, sec_dat = { 0 };
//Bit_Chain *str_dat;
Dwg_ObjFreeSpace *_obj = &dwg->objfreespace;
Dwg_Object *obj = NULL;
int error = 0;
BITCODE_RL rcount1 = 0, rcount2 = 0;
// compressed, page size: 0x7400
error = read_data_section (&sec_dat, dat, sections_map, pages_map,
SECTION_OBJFREESPACE);
if (error >= DWG_ERR_CRITICAL || !sec_dat.chain)
{
LOG_INFO ("%s section not found\n", "ObjFreeSpace");
if (sec_dat.chain)
free (sec_dat.chain);
return error;
}
LOG_TRACE ("\nObjFreeSpace (%lu)\n-------------------\n", sec_dat.size)
old_dat = *dat;
dat = &sec_dat; // restrict in size
bit_chain_set_version (&old_dat, dat);
// clang-format off
#include "objfreespace.spec"
// clang-format on
LOG_TRACE ("\n")
if (sec_dat.chain)
free (sec_dat.chain);
*dat = old_dat; // unrestrict
return error;
}
| 0 |
[
"CWE-787"
] |
libredwg
|
45d2a290c65ed691be0901ba2b2ef51044e07a16
| 82,627,825,608,867,110,000,000,000,000,000,000,000 | 37 |
decode_r2007: fix for invalid section size
See GH #350. With fuzzing section->data_size might not fit
section_page->uncomp_size.
|
static void ExecNative(GGadget *g, GEvent *e) {
struct sd_data *sd = GDrawGetUserData(GGadgetGetWindow(g));
Context c;
Val args[1];
jmp_buf env;
memset( &c,0,sizeof(c));
memset( args,0,sizeof(args));
running_script = true;
c.a.argc = 1;
c.a.vals = args;
c.filename = args[0].u.sval = "ScriptDlg";
args[0].type = v_str;
c.return_val.type = v_void;
c.err_env = &env;
c.curfv = (FontViewBase *) sd->fv;
if ( setjmp(env)!=0 ) {
running_script = false;
return; /* Error return */
}
c.script = GFileTmpfile();
if ( c.script==NULL )
ScriptError(&c, "Can't create temporary file");
else {
const unichar_t *ret = _GGadgetGetTitle(GWidgetGetControl(sd->gw,CID_Script));
while ( *ret ) {
/* There's a bug here. Filenames need to be converted to the local charset !!!! */
putc(*ret,c.script);
++ret;
}
rewind(c.script);
ff_VerboseCheck();
c.lineno = 1;
while ( !c.returned && !c.broken && ff_NextToken(&c)!=tt_eof ) {
ff_backuptok(&c);
ff_statement(&c);
}
fclose(c.script);
sd->done = true;
}
running_script = false;
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
fontforge
|
626f751752875a0ddd74b9e217b6f4828713573c
| 87,597,859,574,253,080,000,000,000,000,000,000,000 | 43 |
Warn users before discarding their unsaved scripts (#3852)
* Warn users before discarding their unsaved scripts
This closes #3846.
|
DWORD FileIo::Impl::winNumberOfLinks() const
{
DWORD nlink = 1;
HANDLE hFd = (HANDLE)_get_osfhandle(fileno(fp_));
if (hFd != INVALID_HANDLE_VALUE) {
typedef BOOL (WINAPI * GetFileInformationByHandle_t)(HANDLE, LPBY_HANDLE_FILE_INFORMATION);
HMODULE hKernel = ::GetModuleHandleA("kernel32.dll");
if (hKernel) {
GetFileInformationByHandle_t pfcn_GetFileInformationByHandle = (GetFileInformationByHandle_t)GetProcAddress(hKernel, "GetFileInformationByHandle");
if (pfcn_GetFileInformationByHandle) {
BY_HANDLE_FILE_INFORMATION fi = {0,0,0,0,0,0,0,0,0,0,0,0,0};
if (pfcn_GetFileInformationByHandle(hFd, &fi)) {
nlink = fi.nNumberOfLinks;
}
#ifdef DEBUG
else EXV_DEBUG << "GetFileInformationByHandle failed\n";
#endif
}
#ifdef DEBUG
else EXV_DEBUG << "GetProcAddress(hKernel, \"GetFileInformationByHandle\") failed\n";
#endif
}
#ifdef DEBUG
else EXV_DEBUG << "GetModuleHandleA(\"kernel32.dll\") failed\n";
#endif
}
#ifdef DEBUG
else EXV_DEBUG << "_get_osfhandle failed: INVALID_HANDLE_VALUE\n";
#endif
return nlink;
} // FileIo::Impl::winNumberOfLinks
| 0 |
[
"CWE-125"
] |
exiv2
|
6e3855aed7ba8bb4731fc4087ca7f9078b2f3d97
| 105,191,246,800,927,800,000,000,000,000,000,000,000 | 33 |
Fix https://github.com/Exiv2/exiv2/issues/55
|
static inline void fallback_on_nodma_alloc(char **addr, size_t l)
{
#ifdef FLOPPY_CAN_FALLBACK_ON_NODMA
if (*addr)
return; /* we have the memory */
if (can_use_virtual_dma != 2)
return; /* no fallback allowed */
pr_info("DMA memory shortage. Temporarily falling back on virtual DMA\n");
*addr = (char *)nodma_mem_alloc(l);
#else
return;
#endif
}
| 0 |
[
"CWE-264",
"CWE-754"
] |
linux
|
ef87dbe7614341c2e7bfe8d32fcb7028cc97442c
| 323,277,031,523,228,170,000,000,000,000,000,000,000 | 13 |
floppy: ignore kernel-only members in FDRAWCMD ioctl input
Always clear out these floppy_raw_cmd struct members after copying the
entire structure from userspace so that the in-kernel version is always
valid and never left in an interdeterminate state.
Signed-off-by: Matthew Daley <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
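The floppy fix above follows the usual pattern of copying the whole structure from userspace and then scrubbing any kernel-only members so they never carry stale or attacker-chosen values. A rough user-space illustration with made-up field names:

#include <string.h>

struct raw_cmd {
        unsigned int flags;       /* user-controlled */
        unsigned char cmd[16];    /* user-controlled */
        void *kernel_data;        /* kernel-only; must never come from userspace */
};

/* stands in for the copy_from_user() path */
static void import_raw_cmd(struct raw_cmd *dst, const struct raw_cmd *src)
{
        memcpy(dst, src, sizeof(*dst));
        dst->kernel_data = NULL;   /* always reset kernel-only members */
}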
static GF_Filter *locate_alias_sink(GF_Filter *filter, const char *url, const char *mime_type)
{
u32 i;
for (i=0; i<filter->num_output_pids; i++) {
u32 j;
GF_FilterPid *pid = gf_list_get(filter->output_pids, i);
for (j=0; j<pid->num_destinations; j++) {
GF_Filter *f;
GF_FilterPidInst *pidi = gf_list_get(pid->destinations, j);
if (!pidi->filter) continue;
if (pidi->filter->act_as_sink && pidi->filter->freg->use_alias
&& pidi->filter->freg->use_alias(pidi->filter, url, mime_type)
) {
return pidi->filter;
}
//recursively walk towards the sink
f = locate_alias_sink(pidi->filter, url, mime_type);
if (f) return f;
}
}
return NULL;
}
| 0 |
[
"CWE-787"
] |
gpac
|
da37ec8582266983d0ec4b7550ec907401ec441e
| 74,031,167,641,275,860,000,000,000,000,000,000,000 | 22 |
fixed crashes for very long path - cf #1908
|
ByteVectorPrivate(TagLib::uint len, char value) : RefCounter(), data(len, value), size(len) {}
| 0 |
[
"CWE-189"
] |
taglib
|
dcdf4fd954e3213c355746fa15b7480461972308
| 135,871,179,654,744,960,000,000,000,000,000,000,000 | 1 |
Avoid uint overflow in case the length + index is over UINT_MAX
|
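The one-line TagLib fix above concerns an unsigned "length + index" bounds check that can wrap past UINT_MAX. A small standalone example of the overflow-safe form (names are illustrative, not TagLib's API):

#include <limits.h>
#include <stdio.h>

static int range_ok(unsigned int index, unsigned int length, unsigned int size)
{
        /* "index + length <= size" may wrap around and wrongly pass */
        return index <= size && length <= size - index;
}

int main(void)
{
        printf("%d\n", range_ok(10u, UINT_MAX - 5u, 100u));  /* 0: rejected */
        printf("%d\n", range_ok(10u, 80u, 100u));            /* 1: accepted */
        return 0;
}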
Config::Config()
: mime_types_file("/etc/mime.types"),
stream_read_timeout(1_min),
stream_write_timeout(1_min),
data_ptr(nullptr),
padding(0),
num_worker(1),
max_concurrent_streams(100),
header_table_size(-1),
encoder_header_table_size(-1),
window_bits(-1),
connection_window_bits(-1),
port(0),
verbose(false),
daemon(false),
verify_client(false),
no_tls(false),
error_gzip(false),
early_response(false),
hexdump(false),
echo_upload(false),
no_content_length(false) {}
| 0 |
[] |
nghttp2
|
95efb3e19d174354ca50c65d5d7227d92bcd60e1
| 36,817,048,895,633,865,000,000,000,000,000,000,000 | 22 |
Don't read too greedily
|
static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
{
struct dwc3_ep *dep = to_dwc3_ep(ep);
struct dwc3 *dwc = dep->dwc;
unsigned long flags;
int ret;
spin_lock_irqsave(&dwc->lock, flags);
ret = __dwc3_gadget_ep_set_halt(dep, value, false);
spin_unlock_irqrestore(&dwc->lock, flags);
return ret;
}
| 0 |
[
"CWE-703",
"CWE-667",
"CWE-189"
] |
linux
|
c91815b596245fd7da349ecc43c8def670d2269e
| 298,414,699,781,248,930,000,000,000,000,000,000,000 | 15 |
usb: dwc3: gadget: never call ->complete() from ->ep_queue()
This is a requirement which has always existed but, somehow, wasn't
reflected in the documentation and problems weren't found until now
when Tuba Yavuz found a possible deadlock happening between dwc3 and
f_hid. She described the situation as follows:
spin_lock_irqsave(&hidg->write_spinlock, flags); // first acquire
/* we our function has been disabled by host */
if (!hidg->req) {
free_ep_req(hidg->in_ep, hidg->req);
goto try_again;
}
[...]
status = usb_ep_queue(hidg->in_ep, hidg->req, GFP_ATOMIC);
=>
[...]
=> usb_gadget_giveback_request
=>
f_hidg_req_complete
=>
spin_lock_irqsave(&hidg->write_spinlock, flags); // second acquire
Note that this happens because dwc3 would call ->complete() on a
failed usb_ep_queue() due to failed Start Transfer command. This is,
anyway, a theoretical situation because dwc3 currently uses "No
Response Update Transfer" command for Bulk and Interrupt endpoints.
It's still good to make this case impossible to happen even if the "No
Reponse Update Transfer" command is changed.
Reported-by: Tuba Yavuz <[email protected]>
Signed-off-by: Felipe Balbi <[email protected]>
Cc: stable <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
|
do_uncompress( compress_filter_context_t *zfx, z_stream *zs,
IOBUF a, size_t *ret_len )
{
int zrc;
int rc=0;
size_t n;
int nread, count;
int refill = !zs->avail_in;
if( DBG_FILTER )
log_debug("begin inflate: avail_in=%u, avail_out=%u, inbuf=%u\n",
(unsigned)zs->avail_in, (unsigned)zs->avail_out,
(unsigned)zfx->inbufsize );
do {
if( zs->avail_in < zfx->inbufsize && refill ) {
n = zs->avail_in;
if( !n )
zs->next_in = BYTEF_CAST (zfx->inbuf);
count = zfx->inbufsize - n;
nread = iobuf_read( a, zfx->inbuf + n, count );
if( nread == -1 ) nread = 0;
n += nread;
/* If we use the undocumented feature to suppress
* the zlib header, we have to give inflate an
* extra dummy byte to read */
if( nread < count && zfx->algo == 1 ) {
*(zfx->inbuf + n) = 0xFF; /* is it really needed ? */
zfx->algo1hack = 1;
n++;
}
zs->avail_in = n;
}
refill = 1;
if( DBG_FILTER )
log_debug("enter inflate: avail_in=%u, avail_out=%u\n",
(unsigned)zs->avail_in, (unsigned)zs->avail_out);
zrc = inflate ( zs, Z_SYNC_FLUSH );
if( DBG_FILTER )
log_debug("leave inflate: avail_in=%u, avail_out=%u, zrc=%d\n",
(unsigned)zs->avail_in, (unsigned)zs->avail_out, zrc);
if( zrc == Z_STREAM_END )
rc = -1; /* eof */
else if( zrc != Z_OK && zrc != Z_BUF_ERROR ) {
if( zs->msg )
log_fatal("zlib inflate problem: %s\n", zs->msg );
else
log_fatal("zlib inflate problem: rc=%d\n", zrc );
}
} while( zs->avail_out && zrc != Z_STREAM_END && zrc != Z_BUF_ERROR );
*ret_len = zfx->outbufsize - zs->avail_out;
if( DBG_FILTER )
log_debug("do_uncompress: returning %u bytes (%u ignored)\n",
(unsigned int)*ret_len, (unsigned int)zs->avail_in );
return rc;
}
| 1 |
[
"CWE-20"
] |
gnupg
|
014b2103fcb12f261135e3954f26e9e07b39e342
| 335,397,701,699,140,800,000,000,000,000,000,000,000 | 56 |
gpg: Avoid infinite loop in uncompressing garbled packets.
* g10/compress.c (do_uncompress): Limit the number of extra FF bytes.
--
A packet like (a3 01 5b ff) leads to an infinite loop. Using
--max-output won't help if it is a partial packet. This patch
actually fixes a regression introduced on 1999-05-31 (c34c6769).
Actually it would be sufficient to stuff just one extra 0xff byte.
Given that this problem popped up only after 15 years, I feel safer to
allow for a very few FF bytes.
Thanks to Olivier Levillain and Florian Maury for their detailed
report.
|
TEST_P(ProxyProtocolTest, V1Minimal) {
connect();
write("PROXY UNKNOWN\r\nmore data");
expectData("more data");
if (GetParam() == Envoy::Network::Address::IpVersion::v4) {
EXPECT_EQ(server_connection_->remoteAddress()->ip()->addressAsString(), "127.0.0.1");
} else {
EXPECT_EQ(server_connection_->remoteAddress()->ip()->addressAsString(), "::1");
}
EXPECT_FALSE(server_connection_->localAddressRestored());
disconnect();
}
| 0 |
[
"CWE-400"
] |
envoy
|
dfddb529e914d794ac552e906b13d71233609bf7
| 75,234,057,132,852,860,000,000,000,000,000,000,000 | 15 |
listener: Add configurable accepted connection limits (#153)
Add support for per-listener limits on accepted connections.
Signed-off-by: Tony Allen <[email protected]>
|
static unsigned long cpu_avg_load_per_task(int cpu)
{
struct rq *rq = cpu_rq(cpu);
unsigned long nr_running = READ_ONCE(rq->cfs.h_nr_running);
unsigned long load_avg = weighted_cpuload(rq);
if (nr_running)
return load_avg / nr_running;
return 0;
}
| 0 |
[
"CWE-400",
"CWE-703",
"CWE-835"
] |
linux
|
c40f7d74c741a907cfaeb73a7697081881c497d0
| 299,738,900,454,987,180,000,000,000,000,000,000,000 | 11 |
sched/fair: Fix infinite loop in update_blocked_averages() by reverting a9e7f6544b9c
Zhipeng Xie, Xie XiuQi and Sargun Dhillon reported lockups in the
scheduler under high loads, starting at around the v4.18 time frame,
and Zhipeng Xie tracked it down to bugs in the rq->leaf_cfs_rq_list
manipulation.
Do a (manual) revert of:
a9e7f6544b9c ("sched/fair: Fix O(nr_cgroups) in load balance path")
It turns out that the list_del_leaf_cfs_rq() introduced by this commit
is a surprising property that was not considered in followup commits
such as:
9c2791f936ef ("sched/fair: Fix hierarchical order in rq->leaf_cfs_rq_list")
As Vincent Guittot explains:
"I think that there is a bigger problem with commit a9e7f6544b9c and
cfs_rq throttling:
Let take the example of the following topology TG2 --> TG1 --> root:
1) The 1st time a task is enqueued, we will add TG2 cfs_rq then TG1
cfs_rq to leaf_cfs_rq_list and we are sure to do the whole branch in
one path because it has never been used and can't be throttled so
tmp_alone_branch will point to leaf_cfs_rq_list at the end.
2) Then TG1 is throttled
3) and we add TG3 as a new child of TG1.
4) The 1st enqueue of a task on TG3 will add TG3 cfs_rq just before TG1
cfs_rq and tmp_alone_branch will stay on rq->leaf_cfs_rq_list.
With commit a9e7f6544b9c, we can del a cfs_rq from rq->leaf_cfs_rq_list.
So if the load of TG1 cfs_rq becomes NULL before step 2) above, TG1
cfs_rq is removed from the list.
Then at step 4), TG3 cfs_rq is added at the beginning of rq->leaf_cfs_rq_list
but tmp_alone_branch still points to TG3 cfs_rq because its throttled
parent can't be enqueued when the lock is released.
tmp_alone_branch doesn't point to rq->leaf_cfs_rq_list whereas it should.
So if TG3 cfs_rq is removed or destroyed before tmp_alone_branch
points on another TG cfs_rq, the next TG cfs_rq that will be added,
will be linked outside rq->leaf_cfs_rq_list - which is bad.
In addition, we can break the ordering of the cfs_rq in
rq->leaf_cfs_rq_list but this ordering is used to update and
propagate the update from leaf down to root."
Instead of trying to work through all these cases and trying to reproduce
the very high loads that produced the lockup to begin with, simplify
the code temporarily by reverting a9e7f6544b9c - which change was clearly
not thought through completely.
This (hopefully) gives us a kernel that doesn't lock up so people
can continue to enjoy their holidays without worrying about regressions. ;-)
[ mingo: Wrote changelog, fixed weird spelling in code comment while at it. ]
Analyzed-by: Xie XiuQi <[email protected]>
Analyzed-by: Vincent Guittot <[email protected]>
Reported-by: Zhipeng Xie <[email protected]>
Reported-by: Sargun Dhillon <[email protected]>
Reported-by: Xie XiuQi <[email protected]>
Tested-by: Zhipeng Xie <[email protected]>
Tested-by: Sargun Dhillon <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Acked-by: Vincent Guittot <[email protected]>
Cc: <[email protected]> # v4.13+
Cc: Bin Li <[email protected]>
Cc: Mike Galbraith <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Tejun Heo <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Fixes: a9e7f6544b9c ("sched/fair: Fix O(nr_cgroups) in load balance path")
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
|
static int __init init (void)
{
int status;
status = register_filesystem (&gadgetfs_type);
if (status == 0)
pr_info ("%s: %s, version " DRIVER_VERSION "\n",
shortname, driver_desc);
return status;
}
| 0 |
[
"CWE-763"
] |
linux
|
501e38a5531efbd77d5c73c0ba838a889bfc1d74
| 334,068,865,038,708,270,000,000,000,000,000,000,000 | 10 |
usb: gadget: clear related members when goto fail
dev->config and dev->hs_config and dev->dev need to be cleaned if
dev_config fails to avoid UAF.
Acked-by: Alan Stern <[email protected]>
Signed-off-by: Hangyu Hua <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Greg Kroah-Hartman <[email protected]>
|
int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port)
{
unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX);
int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt,
size, port, &val, 1);
/* do not return to emulator after return from userspace */
vcpu->arch.pio.count = 0;
return ret;
}
| 0 |
[] |
kvm
|
0769c5de24621141c953fbe1f943582d37cb4244
| 291,980,631,111,206,500,000,000,000,000,000,000,000 | 9 |
KVM: x86: extend "struct x86_emulate_ops" with "get_cpuid"
In order to be able to proceed checks on CPU-specific properties
within the emulator, function "get_cpuid" is introduced.
With "get_cpuid" it is possible to virtually call the guests
"cpuid"-opcode without changing the VM's context.
[mtosatti: cleanup/beautify code]
Signed-off-by: Stephan Baerwolf <[email protected]>
Signed-off-by: Marcelo Tosatti <[email protected]>
|
struct ssh_iterator *ssh_list_get_iterator(const struct ssh_list *list){
if(!list)
return NULL;
return list->root;
}
| 0 |
[] |
libssh
|
2ba1dea5493fb2f5a5be2dd263ce46ccb5f8ec76
| 300,701,374,571,100,060,000,000,000,000,000,000,000 | 5 |
CVE-2019-14889: misc: Add function to quote file names
The added function quote file names strings to be used in a shell.
Special cases are treated for the characters '\'' and '!'.
Fixes T181
Signed-off-by: Anderson Toshiyuki Sasaki <[email protected]>
Reviewed-by: Andreas Schneider <[email protected]>
(cherry picked from commit c4ad1aba9860e02fe03ef3f58a047964e9e765fc)
|
static int wsgi_hook_handler(request_rec *r)
{
int status;
apr_off_t limit = 0;
WSGIRequestConfig *config = NULL;
const char *value = NULL;
/* Filter out the obvious case of no handler defined. */
if (!r->handler)
return DECLINED;
/*
* Construct request configuration and cache it in the
* request object against this module so can access it later
* from handler code.
*/
config = wsgi_create_req_config(r->pool, r);
ap_set_module_config(r->request_config, &wsgi_module, config);
/*
* Only process requests for this module. First check for
* where target is the actual WSGI script. Then need to
* check for the case where handler name mapped to a handler
* script definition.
*/
if (!strcmp(r->handler, "wsgi-script") ||
!strcmp(r->handler, "application/x-httpd-wsgi")) {
/*
* Ensure that have adequate privileges to run the WSGI
* script. Require ExecCGI to be specified in Options for
* this. In doing this, using the wider interpretation that
* ExecCGI refers to any executable like script even though
* not a separate process execution.
*/
if (!(ap_allow_options(r) & OPT_EXECCGI) &&
!wsgi_is_script_aliased(r)) {
wsgi_log_script_error(r, "Options ExecCGI is off in this "
"directory", r->filename);
return HTTP_FORBIDDEN;
}
/* Ensure target script exists and is a file. */
if (r->finfo.filetype == 0) {
wsgi_log_script_error(r, "Target WSGI script not found or unable "
"to stat", r->filename);
return HTTP_NOT_FOUND;
}
if (r->finfo.filetype == APR_DIR) {
wsgi_log_script_error(r, "Attempt to invoke directory as WSGI "
"application", r->filename);
return HTTP_FORBIDDEN;
}
if (wsgi_is_script_aliased(r)) {
/*
* Allow any configuration supplied through request notes to
* override respective values. Request notes are used when
* configuration supplied with WSGIScriptAlias directives.
*/
if ((value = apr_table_get(r->notes, "mod_wsgi.process_group")))
config->process_group = wsgi_process_group(r, value);
if ((value = apr_table_get(r->notes, "mod_wsgi.application_group")))
config->application_group = wsgi_application_group(r, value);
if ((value = apr_table_get(r->notes, "mod_wsgi.callable_object")))
config->callable_object = value;
if ((value = apr_table_get(r->notes,
"mod_wsgi.pass_authorization"))) {
if (!strcmp(value, "1"))
config->pass_authorization = 1;
else
config->pass_authorization = 0;
}
}
}
#if 0
else if (strstr(r->handler, "wsgi-handler=") == r->handler) {
config->handler_script = apr_pstrcat(r->pool, r->handler+13, NULL);
config->callable_object = "handle_request";
}
#endif
else if (config->handler_scripts) {
WSGIScriptFile *entry;
entry = (WSGIScriptFile *)apr_hash_get(config->handler_scripts,
r->handler,
APR_HASH_KEY_STRING);
if (entry) {
config->handler_script = entry->handler_script;
config->callable_object = "handle_request";
if ((value = entry->process_group))
config->process_group = wsgi_process_group(r, value);
if ((value = entry->application_group))
config->application_group = wsgi_application_group(r, value);
if ((value = entry->pass_authorization)) {
if (!strcmp(value, "1"))
config->pass_authorization = 1;
else
config->pass_authorization = 0;
}
}
else
return DECLINED;
}
else
return DECLINED;
/*
* Honour AcceptPathInfo directive. Default behaviour is
* accept additional path information.
*/
#if AP_MODULE_MAGIC_AT_LEAST(20011212,0)
if ((r->used_path_info == AP_REQ_REJECT_PATH_INFO) &&
r->path_info && *r->path_info) {
wsgi_log_script_error(r, "AcceptPathInfo off disallows user's path",
r->filename);
return HTTP_NOT_FOUND;
}
#endif
/*
* Setup policy to apply if request contains a body. Note
* that WSGI specification doesn't strictly allow for chunked
* request content as CONTENT_LENGTH required when reading
* input and application isn't meant to read more than what
* is defined by CONTENT_LENGTH. To allow chunked request
* content tell Apache to dechunk it. For application to use
* the content, it has to ignore WSGI specification and use
* read() with no arguments to read all available input, or
* call read() with specific block size until read() returns
* an empty string.
*/
if (config->chunked_request)
status = ap_setup_client_block(r, REQUEST_CHUNKED_DECHUNK);
else
status = ap_setup_client_block(r, REQUEST_CHUNKED_ERROR);
if (status != OK)
return status;
/*
* Check to see if request content is too large and end
* request here. We do this as otherwise it will not be done
* until first time input data is read in application.
* Problem is that underlying HTTP output filter will
* also generate a 413 response and the error raised from
* the application will be appended to that. The call to
* ap_discard_request_body() is hopefully enough to trigger
* sending of the 413 response by the HTTP filter.
*/
limit = ap_get_limit_req_body(r);
if (limit && limit < r->remaining) {
ap_discard_request_body(r);
return OK;
}
/* Build the sub process environment. */
wsgi_build_environment(r);
/*
* If a dispatch script has been provided, as appropriate
* allow it to override any of the configuration related
* to what context the script will be executed in and what
* the target callable object for the application is.
*/
if (config->dispatch_script) {
status = wsgi_execute_dispatch(r);
if (status != OK)
return status;
}
/*
* Execute the target WSGI application script or proxy
* request to one of the daemon processes as appropriate.
*/
#if defined(MOD_WSGI_WITH_DAEMONS)
status = wsgi_execute_remote(r);
if (status != DECLINED)
return status;
#endif
#if defined(MOD_WSGI_DISABLE_EMBEDDED)
wsgi_log_script_error(r, "Embedded mode of mod_wsgi disabled at compile "
"time", r->filename);
return HTTP_INTERNAL_SERVER_ERROR;
#endif
if (wsgi_server_config->restrict_embedded == 1) {
wsgi_log_script_error(r, "Embedded mode of mod_wsgi disabled by "
"runtime configuration", r->filename);
return HTTP_INTERNAL_SERVER_ERROR;
}
return wsgi_execute_script(r);
}
| 0 |
[
"CWE-254"
] |
mod_wsgi
|
545354a80b9cc20d8b6916ca30542eab36c3b8bd
| 122,847,111,896,564,960,000,000,000,000,000,000,000 | 218 |
When there is any sort of error in setting up daemon process group, kill the process rather than risk running in an unexpected state.
|
dname_lab_startswith(uint8_t* label, char* prefix, char** endptr)
{
size_t plen = strlen(prefix);
size_t orig_plen = plen;
size_t lablen = (size_t)*label;
if(plen > lablen)
return 0;
label++;
while(plen--) {
if(*prefix != tolower((unsigned char)*label)) {
return 0;
}
prefix++; label++;
}
if(orig_plen < lablen)
*endptr = (char *)label;
else
/* prefix length == label length */
*endptr = NULL;
return 1;
}
| 0 |
[
"CWE-400"
] |
unbound
|
ba0f382eee814e56900a535778d13206b86b6d49
| 89,425,709,260,460,420,000,000,000,000,000,000,000 | 21 |
- CVE-2020-12662 Unbound can be tricked into amplifying an incoming
query into a large number of queries directed to a target.
- CVE-2020-12663 Malformed answers from upstream name servers can be
used to make Unbound unresponsive.
|
static void megasas_complete_outstanding_ioctls(struct megasas_instance *instance)
{
int i;
struct megasas_cmd *cmd_mfi;
struct megasas_cmd_fusion *cmd_fusion;
struct fusion_context *fusion = instance->ctrl_context;
/* Find all outstanding ioctls */
if (fusion) {
for (i = 0; i < instance->max_fw_cmds; i++) {
cmd_fusion = fusion->cmd_list[i];
if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) {
cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
if (cmd_mfi->sync_cmd &&
(cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)) {
cmd_mfi->frame->hdr.cmd_status =
MFI_STAT_WRONG_STATE;
megasas_complete_cmd(instance,
cmd_mfi, DID_OK);
}
}
}
} else {
for (i = 0; i < instance->max_fw_cmds; i++) {
cmd_mfi = instance->cmd_list[i];
if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd !=
MFI_CMD_ABORT)
megasas_complete_cmd(instance, cmd_mfi, DID_OK);
}
}
}
| 0 |
[
"CWE-476"
] |
linux
|
bcf3b67d16a4c8ffae0aa79de5853435e683945c
| 314,534,837,537,943,550,000,000,000,000,000,000,000 | 31 |
scsi: megaraid_sas: return error when create DMA pool failed
when create DMA pool for cmd frames failed, we should return -ENOMEM,
instead of 0.
In some case in:
megasas_init_adapter_fusion()
-->megasas_alloc_cmds()
-->megasas_create_frame_pool
create DMA pool failed,
--> megasas_free_cmds() [1]
-->megasas_alloc_cmds_fusion()
failed, then goto fail_alloc_cmds.
-->megasas_free_cmds() [2]
we will call megasas_free_cmds twice, [1] will kfree cmd_list,
[2] will use cmd_list.it will cause a problem:
Unable to handle kernel NULL pointer dereference at virtual address
00000000
pgd = ffffffc000f70000
[00000000] *pgd=0000001fbf893003, *pud=0000001fbf893003,
*pmd=0000001fbf894003, *pte=006000006d000707
Internal error: Oops: 96000005 [#1] SMP
Modules linked in:
CPU: 18 PID: 1 Comm: swapper/0 Not tainted
task: ffffffdfb9290000 ti: ffffffdfb923c000 task.ti: ffffffdfb923c000
PC is at megasas_free_cmds+0x30/0x70
LR is at megasas_free_cmds+0x24/0x70
...
Call trace:
[<ffffffc0005b779c>] megasas_free_cmds+0x30/0x70
[<ffffffc0005bca74>] megasas_init_adapter_fusion+0x2f4/0x4d8
[<ffffffc0005b926c>] megasas_init_fw+0x2dc/0x760
[<ffffffc0005b9ab0>] megasas_probe_one+0x3c0/0xcd8
[<ffffffc0004a5abc>] local_pci_probe+0x4c/0xb4
[<ffffffc0004a5c40>] pci_device_probe+0x11c/0x14c
[<ffffffc00053a5e4>] driver_probe_device+0x1ec/0x430
[<ffffffc00053a92c>] __driver_attach+0xa8/0xb0
[<ffffffc000538178>] bus_for_each_dev+0x74/0xc8
[<ffffffc000539e88>] driver_attach+0x28/0x34
[<ffffffc000539a18>] bus_add_driver+0x16c/0x248
[<ffffffc00053b234>] driver_register+0x6c/0x138
[<ffffffc0004a5350>] __pci_register_driver+0x5c/0x6c
[<ffffffc000ce3868>] megasas_init+0xc0/0x1a8
[<ffffffc000082a58>] do_one_initcall+0xe8/0x1ec
[<ffffffc000ca7be8>] kernel_init_freeable+0x1c8/0x284
[<ffffffc0008d90b8>] kernel_init+0x1c/0xe4
Signed-off-by: Jason Yan <[email protected]>
Acked-by: Sumit Saxena <[email protected]>
Signed-off-by: Martin K. Petersen <[email protected]>
|
inline void DepthToSpace(const T* input_data, const Dims<4>& input_dims,
int block_size, T* output_data,
const Dims<4>& output_dims) {
tflite::DepthToSpaceParams op_params;
op_params.block_size = block_size;
DepthToSpace(op_params, DimsToShape(input_dims), input_data,
DimsToShape(output_dims), output_data);
}
| 0 |
[
"CWE-703",
"CWE-835"
] |
tensorflow
|
dfa22b348b70bb89d6d6ec0ff53973bacb4f4695
| 6,247,818,787,428,127,000,000,000,000,000,000,000 | 9 |
Prevent a division by 0 in average ops.
PiperOrigin-RevId: 385184660
Change-Id: I7affd4554f9b336fca29ac68f633232c094d0bd3
|
InitializeTLS(void)
{
int i;
if (rfbTLSInitialized) return TRUE;
mutex_buf = malloc(CRYPTO_num_locks() * sizeof(MUTEX_TYPE));
if (mutex_buf == NULL) {
rfbClientLog("Failed to initialized OpenSSL: memory.\n");
return (-1);
}
for (i = 0; i < CRYPTO_num_locks(); i++)
MUTEX_INIT(mutex_buf[i]);
CRYPTO_set_locking_callback(locking_function);
CRYPTO_set_id_callback(id_function);
CRYPTO_set_dynlock_create_callback(dyn_create_function);
CRYPTO_set_dynlock_lock_callback(dyn_lock_function);
CRYPTO_set_dynlock_destroy_callback(dyn_destroy_function);
SSL_load_error_strings();
SSLeay_add_ssl_algorithms();
RAND_load_file("/dev/urandom", 1024);
rfbClientLog("OpenSSL version %s initialized.\n", SSLeay_version(SSLEAY_VERSION));
rfbTLSInitialized = TRUE;
return TRUE;
}
| 0 |
[
"CWE-476"
] |
libvncserver
|
33441d90a506d5f3ae9388f2752901227e430553
| 56,806,464,912,239,020,000,000,000,000,000,000,000 | 28 |
libvncclient/tls_openssl: do not deref a NULL pointer
Happens in anonTLS mode where cred is NULL.
re #347
|
void exec_context_free_log_extra_fields(ExecContext *c) {
size_t l;
assert(c);
for (l = 0; l < c->n_log_extra_fields; l++)
free(c->log_extra_fields[l].iov_base);
c->log_extra_fields = mfree(c->log_extra_fields);
c->n_log_extra_fields = 0;
}
| 0 |
[
"CWE-269"
] |
systemd
|
f69567cbe26d09eac9d387c0be0fc32c65a83ada
| 161,078,097,886,285,270,000,000,000,000,000,000,000 | 10 |
core: expose SUID/SGID restriction as new unit setting RestrictSUIDSGID=
|
_PUBLIC_ int strncasecmp_m_handle(struct smb_iconv_handle *iconv_handle,
const char *s1, const char *s2, size_t n)
{
codepoint_t c1=0, c2=0;
size_t size1, size2;
/* handle null ptr comparisons to simplify the use in qsort */
if (s1 == s2) return 0;
if (s1 == NULL) return -1;
if (s2 == NULL) return 1;
while (*s1 && *s2 && n) {
n--;
c1 = next_codepoint_handle(iconv_handle, s1, &size1);
c2 = next_codepoint_handle(iconv_handle, s2, &size2);
if (c1 == INVALID_CODEPOINT ||
c2 == INVALID_CODEPOINT) {
/*
* n was specified in characters,
* now we must convert it to bytes.
* As bytes are the smallest
* character unit, the following
* increment and strncasecmp is always
* safe.
*
* The source string was already known
* to be n characters long, so we are
* guaranteed to be able to look at the
* (n remaining + size1) bytes from the
* s1 position).
*/
n += size1;
return strncasecmp(s1, s2, n);
}
s1 += size1;
s2 += size2;
if (c1 == c2) {
continue;
}
if (toupper_m(c1) != toupper_m(c2)) {
return c1 - c2;
}
}
if (n == 0) {
return 0;
}
return *s1 - *s2;
}
| 0 |
[
"CWE-200"
] |
samba
|
ba5dbda6d0174a59d221c45cca52ecd232820d48
| 13,207,678,468,404,492,000,000,000,000,000,000,000 | 55 |
CVE-2015-5330: Fix handling of unicode near string endings
Until now next_codepoint_ext() and next_codepoint_handle_ext() were
using strnlen(str, 5) to determine how much string they should try to
decode. This ended up looking past the end of the string when it was not
null terminated and the final character looked like a multi-byte encoding.
The fix is to let the caller say how long the string can be.
Bug: https://bugzilla.samba.org/show_bug.cgi?id=11599
Signed-off-by: Douglas Bagnall <[email protected]>
Pair-programmed-with: Andrew Bartlett <[email protected]>
Reviewed-by: Ralph Boehme <[email protected]>
|
bool MemIo::isopen() const
{
return true;
}
| 0 |
[
"CWE-125"
] |
exiv2
|
6e3855aed7ba8bb4731fc4087ca7f9078b2f3d97
| 273,764,008,988,257,040,000,000,000,000,000,000,000 | 4 |
Fix https://github.com/Exiv2/exiv2/issues/55
|
find_inlist_lock_noload(struct list_head *head, const char *name, int *error,
struct mutex *mutex)
{
struct {
struct list_head list;
char name[EBT_FUNCTION_MAXNAMELEN];
} *e;
*error = mutex_lock_interruptible(mutex);
if (*error != 0)
return NULL;
list_for_each_entry(e, head, list) {
if (strcmp(e->name, name) == 0)
return e;
}
*error = -ENOENT;
mutex_unlock(mutex);
return NULL;
}
| 0 |
[
"CWE-20"
] |
linux
|
d846f71195d57b0bbb143382647c2c6638b04c5a
| 299,245,988,959,410,600,000,000,000,000,000,000,000 | 20 |
bridge: netfilter: fix information leak
Struct tmp is copied from userspace. It is not checked whether the "name"
field is NULL terminated. This may lead to buffer overflow and passing
contents of kernel stack as a module name to try_then_request_module() and,
consequently, to modprobe commandline. It would be seen by all userspace
processes.
Signed-off-by: Vasiliy Kulikov <[email protected]>
Signed-off-by: Patrick McHardy <[email protected]>
|
static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
struct nameidata *nd)
{
return proc_lookupfd_common(dir, dentry, proc_fd_instantiate);
}
| 0 |
[] |
linux
|
0499680a42141d86417a8fbaa8c8db806bea1201
| 91,788,375,767,981,670,000,000,000,000,000,000,000 | 5 |
procfs: add hidepid= and gid= mount options
Add support for mount options to restrict access to /proc/PID/
directories. The default backward-compatible "relaxed" behaviour is left
untouched.
The first mount option is called "hidepid" and its value defines how much
info about processes we want to be available for non-owners:
hidepid=0 (default) means the old behavior - anybody may read all
world-readable /proc/PID/* files.
hidepid=1 means users may not access any /proc/<pid>/ directories, but
their own. Sensitive files like cmdline, sched*, status are now protected
against other users. As permission checking done in proc_pid_permission()
and files' permissions are left untouched, programs expecting specific
files' modes are not confused.
hidepid=2 means hidepid=1 plus all /proc/PID/ will be invisible to other
users. It doesn't mean that it hides whether a process exists (it can be
learned by other means, e.g. by kill -0 $PID), but it hides process' euid
and egid. It complicates an intruder's task of gathering info about running
processes, whether some daemon runs with elevated privileges, whether
another user runs some sensitive program, whether other users run any
program at all, etc.
gid=XXX defines a group that will be able to gather all processes' info
(as in hidepid=0 mode). This group should be used instead of putting
nonroot user in sudoers file or something. However, untrusted users (like
daemons, etc.) which are not supposed to monitor the tasks in the whole
system should not be added to the group.
hidepid=1 or higher is designed to restrict access to procfs files, which
might reveal some sensitive private information like precise keystrokes
timings:
http://www.openwall.com/lists/oss-security/2011/11/05/3
hidepid=1/2 doesn't break monitoring userspace tools. ps, top, pgrep, and
conky gracefully handle EPERM/ENOENT and behave as if the current user is
the only user running processes. pstree shows the process subtree which
contains "pstree" process.
Note: the patch doesn't deal with setuid/setgid issues of keeping
preopened descriptors of procfs files (like
https://lkml.org/lkml/2011/2/7/368). We rely on that the leaked
information like the scheduling counters of setuid apps doesn't threaten
anybody's privacy - only the user started the setuid program may read the
counters.
Signed-off-by: Vasiliy Kulikov <[email protected]>
Cc: Alexey Dobriyan <[email protected]>
Cc: Al Viro <[email protected]>
Cc: Randy Dunlap <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
Cc: Greg KH <[email protected]>
Cc: Theodore Tso <[email protected]>
Cc: Alan Cox <[email protected]>
Cc: James Morris <[email protected]>
Cc: Oleg Nesterov <[email protected]>
Cc: Hugh Dickins <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
static void commitReservation(Buffer::RawSlice* iovecs, uint64_t num_iovecs, OwnedImpl& buffer) {
buffer.commit(iovecs, num_iovecs);
}
| 0 |
[
"CWE-401"
] |
envoy
|
5eba69a1f375413fb93fab4173f9c393ac8c2818
| 199,708,279,729,785,100,000,000,000,000,000,000,000 | 3 |
[buffer] Add on-drain hook to buffer API and use it to avoid fragmentation due to tracking of H2 data and control frames in the output buffer (#144)
Signed-off-by: antonio <[email protected]>
|
void InstanceKlass::set_shared_class_loader_type(s2 loader_type) {
switch (loader_type) {
case ClassLoader::BOOT_LOADER:
_misc_flags |= _misc_is_shared_boot_class;
break;
case ClassLoader::PLATFORM_LOADER:
_misc_flags |= _misc_is_shared_platform_class;
break;
case ClassLoader::APP_LOADER:
_misc_flags |= _misc_is_shared_app_class;
break;
default:
ShouldNotReachHere();
break;
}
}
| 0 |
[] |
jdk17u
|
f8eb9abe034f7c6bea4da05a9ea42017b3f80730
| 261,645,216,873,159,650,000,000,000,000,000,000,000 | 16 |
8270386: Better verification of scan methods
Reviewed-by: coleenp
Backport-of: ac329cef45979bd0159ecd1347e36f7129bb2ce4
|
opfunc_construct (vm_frame_ctx_t *frame_ctx_p) /**< frame context */
{
const uint8_t *byte_code_p = frame_ctx_p->byte_code_p + 1;
uint8_t opcode = byte_code_p[-1];
unsigned int arguments_list_len;
if (opcode >= CBC_NEW0)
{
arguments_list_len = (unsigned int) (opcode - CBC_NEW0);
}
else
{
arguments_list_len = *byte_code_p++;
}
ecma_value_t *stack_top_p = frame_ctx_p->stack_top_p - arguments_list_len;
ecma_value_t constructor_value = stack_top_p[-1];
ecma_value_t completion_value;
const char *constructor_message_p = ecma_check_constructor (constructor_value);
if (constructor_message_p != ECMA_IS_VALID_CONSTRUCTOR)
{
completion_value = ecma_raise_type_error (constructor_message_p);
}
else
{
ecma_object_t *constructor_obj_p = ecma_get_object_from_value (constructor_value);
completion_value = ecma_op_function_construct (constructor_obj_p,
constructor_obj_p,
stack_top_p,
arguments_list_len);
}
/* Free registers. */
for (uint32_t i = 0; i < arguments_list_len; i++)
{
ecma_fast_free_value (stack_top_p[i]);
}
if (JERRY_UNLIKELY (ECMA_IS_VALUE_ERROR (completion_value)))
{
#if JERRY_DEBUGGER
JERRY_CONTEXT (debugger_exception_byte_code_p) = frame_ctx_p->byte_code_p;
#endif /* JERRY_DEBUGGER */
frame_ctx_p->byte_code_p = (uint8_t *) vm_error_byte_code_p;
}
else
{
ecma_free_value (stack_top_p[-1]);
frame_ctx_p->byte_code_p = byte_code_p;
stack_top_p[-1] = completion_value;
}
frame_ctx_p->stack_top_p = stack_top_p;
} /* opfunc_construct */
| 0 |
[
"CWE-416"
] |
jerryscript
|
3ad76f932c8d2e3b9ba2d95e64848698ec7d7290
| 205,902,916,601,218,180,000,000,000,000,000,000,000 | 56 |
Fix for-in collection cleanup on abrupt 'has' result (#4807)
This patch fixes #4747
JerryScript-DCO-1.0-Signed-off-by: Robert Fancsik [email protected]
|
tables_term_destination(j_compress_ptr cinfo)
{
JPEGState* sp = (JPEGState*) cinfo;
/* set tables length to number of bytes actually emitted */
sp->jpegtables_length -= (uint32) sp->dest.free_in_buffer;
}
| 0 |
[
"CWE-369"
] |
libtiff
|
47f2fb61a3a64667bce1a8398a8fcb1b348ff122
| 153,500,438,587,748,740,000,000,000,000,000,000,000 | 7 |
* libtiff/tif_jpeg.c: avoid integer division by zero in
JPEGSetupEncode() when horizontal or vertical sampling is set to 0.
Fixes http://bugzilla.maptools.org/show_bug.cgi?id=2653
|
theme_adium_focus_toggled (EmpathyChatView *view,
gboolean has_focus)
{
EmpathyThemeAdiumPriv *priv = GET_PRIV (view);
priv->has_focus = has_focus;
if (!priv->has_focus) {
/* We've lost focus, so let's make sure all the acked
* messages have lost their unread marker. */
g_queue_foreach (&priv->acked_messages,
theme_adium_remove_acked_message_unread_mark_foreach,
view);
g_queue_clear (&priv->acked_messages);
priv->has_unread_message = FALSE;
}
}
| 0 |
[
"CWE-79"
] |
empathy
|
739aca418457de752be13721218aaebc74bd9d36
| 229,261,083,927,365,350,000,000,000,000,000,000,000 | 17 |
theme_adium_append_message: escape alias before displaying it
Not doing so can lead to nasty HTML injection from hostile users.
https://bugzilla.gnome.org/show_bug.cgi?id=662035
|
PackLinuxElf64::generateElfHdr(
OutputFile *fo,
void const *proto,
unsigned const brka
)
{
cprElfHdr2 *const h2 = (cprElfHdr2 *)(void *)&elfout;
cprElfHdr3 *const h3 = (cprElfHdr3 *)(void *)&elfout;
memcpy(h3, proto, sizeof(*h3)); // reads beyond, but OK
h3->ehdr.e_type = ehdri.e_type; // ET_EXEC vs ET_DYN (gcc -pie -fPIC)
h3->ehdr.e_ident[Elf64_Ehdr::EI_OSABI] = ei_osabi;
if (Elf64_Ehdr::ELFOSABI_LINUX == ei_osabi // proper
&& Elf64_Ehdr::ELFOSABI_NONE == ehdri.e_ident[Elf64_Ehdr::EI_OSABI] // sloppy
) { // propagate sloppiness so that decompression does not complain
h3->ehdr.e_ident[Elf64_Ehdr::EI_OSABI] = ehdri.e_ident[Elf64_Ehdr::EI_OSABI];
}
if (Elf64_Ehdr::EM_PPC64 == ehdri.e_machine) {
h3->ehdr.e_flags = ehdri.e_flags; // "0x1, abiv1" vs "0x2, abiv2"
}
unsigned phnum_o = get_te16(&h2->ehdr.e_phnum);
assert(get_te64(&h2->ehdr.e_phoff) == sizeof(Elf64_Ehdr));
h2->ehdr.e_shoff = 0;
assert(get_te16(&h2->ehdr.e_ehsize) == sizeof(Elf64_Ehdr));
assert(get_te16(&h2->ehdr.e_phentsize) == sizeof(Elf64_Phdr));
set_te16(&h2->ehdr.e_shentsize, sizeof(Elf64_Shdr));
if (o_elf_shnum) {
h2->ehdr.e_shnum = o_elf_shnum;
h2->ehdr.e_shstrndx = o_elf_shnum - 1;
}
else {
h2->ehdr.e_shnum = 0;
h2->ehdr.e_shstrndx = 0;
}
sz_elf_hdrs = sizeof(*h2) - sizeof(linfo); // default
if (gnu_stack) {
sz_elf_hdrs += sizeof(Elf64_Phdr);
memcpy(&h2->phdr[phnum_o++], gnu_stack, sizeof(*gnu_stack));
set_te16(&h2->ehdr.e_phnum, phnum_o);
}
o_binfo = sizeof(Elf64_Ehdr) + sizeof(Elf64_Phdr)*phnum_o + sizeof(l_info) + sizeof(p_info);
set_te64(&h2->phdr[0].p_filesz, sizeof(*h2)); // + identsize;
h2->phdr[0].p_memsz = h2->phdr[0].p_filesz;
for (unsigned j=0; j < 4; ++j) {
if (PT_LOAD64==get_te32(&h3->phdr[j].p_type)) {
set_te64(&h3->phdr[j].p_align, page_size);
}
}
// Info for OS kernel to set the brk()
if (brka) {
// linux-2.6.14 binfmt_elf.c: SIGKILL if (0==.p_memsz) on a page boundary
unsigned const brkb = brka | ((0==(~page_mask & brka)) ? 0x20 : 0);
set_te32(&h2->phdr[1].p_type, PT_LOAD64); // be sure
// Invoking by ld-linux-x86_64-2.21 complains if (filesize < .p_offset),
// which can happen with good compression of a stripped executable
// and large .p_align. However (0==.p_filesz) so ld-linux has a bug.
// Try to evade the bug by reducing .p_align. The alignment is forced
// anyway by phdr[0].p_align and constant offset from phdr[0].p_vaddr.
// However, somebody might complain because (.p_vaddr - .p_offset)
// is divisible only by phdr[1].p_align, and not by phdr[0].p_align.
if (get_te16(&elfout.ehdr.e_machine) != Elf64_Ehdr::EM_PPC64) {
set_te64(&h2->phdr[1].p_align, 0x1000);
}
set_te64(&h2->phdr[1].p_offset, (-1+ get_te64(&h2->phdr[1].p_align)) & brkb);
set_te64(&h2->phdr[1].p_vaddr, brkb);
set_te64(&h2->phdr[1].p_paddr, brkb);
h2->phdr[1].p_filesz = 0;
h2->phdr[1].p_memsz = 0;
set_te32(&h2->phdr[1].p_flags, Elf64_Phdr::PF_R | Elf64_Phdr::PF_W);
}
if (ph.format==getFormat()) {
assert((2u+ !!gnu_stack) == phnum_o);
set_te32(&h2->phdr[0].p_flags, ~Elf64_Phdr::PF_W & get_te32(&h2->phdr[0].p_flags));
if (!gnu_stack) {
memset(&h2->linfo, 0, sizeof(h2->linfo));
fo->write(h2, sizeof(*h2));
}
else {
memset(&h3->linfo, 0, sizeof(h3->linfo));
fo->write(h3, sizeof(*h3));
}
}
else {
assert(false); // unknown ph.format, PackLinuxElf64
}
}
| 0 |
[
"CWE-476"
] |
upx
|
ef336dbcc6dc8344482f8cf6c909ae96c3286317
| 259,161,205,340,723,300,000,000,000,000,000,000,000 | 92 |
Protect against bad crafted input.
https://github.com/upx/upx/issues/128
modified: p_lx_elf.cpp
|
~Generation() {
if (owner) {
removeDirTree(path);
}
}
| 0 |
[
"CWE-59",
"CWE-61"
] |
passenger
|
5483b3292cc2af1c83033eaaadec20dba4dcfd9b
| 156,995,830,128,883,940,000,000,000,000,000,000,000 | 5 |
If the server instance directory already exists, it is now removed first in order to get correct directory permissions.
If the directory still exists after removal, Phusion Passenger aborts to avoid writing to a directory with unexpected permissions.
Fixes issue #910.
|