func
stringlengths 0
484k
| target
int64 0
1
| cwe
sequencelengths 0
4
| project
stringclasses 799
values | commit_id
stringlengths 40
40
| hash
float64 1,215,700,430,453,689,100,000,000B
340,281,914,521,452,260,000,000,000,000B
| size
int64 1
24k
| message
stringlengths 0
13.3k
|
---|---|---|---|---|---|---|---|
// Verifies parsing of an HTTP/1.1 request whose request-target is in
// absolute-form ("GET http://host/path").  The :authority pseudo-header is
// expected to come from the URL ("www.somewhere.com") and take precedence
// over the Host header ("bah"), while :path carries only the path component.
TEST_F(Http1ServerConnectionImplTest, Http11AbsolutePath2) {
initialize();
TestRequestHeaderMapImpl expected_headers{
{":authority", "www.somewhere.com"}, {":path", "/foo/bar"}, {":method", "GET"}};
Buffer::OwnedImpl buffer("GET http://www.somewhere.com/foo/bar HTTP/1.1\r\nHost: bah\r\n\r\n");
expectHeadersTest(Protocol::Http11, true, buffer, expected_headers);
} | 0 | [
"CWE-770"
] | envoy | 7ca28ff7d46454ae930e193d97b7d08156b1ba59 | 271,592,981,722,863,750,000,000,000,000,000,000,000 | 8 | [http1] Include request URL in request header size computation, and reject partial headers that exceed configured limits (#145)
Signed-off-by: antonio <[email protected]> |
// Accessor: returns true when the enclosing fixture anticipates an error
// (simply reflects the _errorExpected flag, set elsewhere in the fixture).
bool errorIsExpected() {
return _errorExpected;
} | 0 | [
"CWE-755"
] | mongo | 75f7184eafa78006a698cda4c4adfb57f1290047 | 303,776,714,208,999,140,000,000,000,000,000,000,000 | 3 | SERVER-50170 fix max staleness read preference parameter for server selection |
/*
 * stonith command-line tool: drive a STONITH (node fencing) device plugin.
 *
 * After option parsing, the selected device plugin (-t type) is configured
 * from exactly one of: a config file (-F file), a parameter string (-p), or
 * name=value command-line arguments.  It then performs the requested
 * actions: list device types (-L), list parameter names (-n), dump plugin
 * metadata (-m), query device status (-S), list controlled hosts (-l),
 * and/or reset the named host, repeating -c count times.
 *
 * Returns the result of the last device operation (S_OK on success);
 * usage errors exit via usage().
 */
main(int argc, char** argv)
{
	char *		cmdname;
	int		rc;
	Stonith *	s;
	const char *	SwitchType = NULL;
	const char *	tmp;
	const char *	optfile = NULL;
	const char *	parameters = NULL;
	int		reset_type = ST_GENERIC_RESET;
	int		verbose = 0;
	int		status = 0;
	int		silent = 0;
	int		listhosts = 0;
	int		listtypes = 0;
	int		listparanames = 0;
	int		c;
	int		errors = 0;
	int		argcount;
	StonithNVpair	nvargs[MAXNVARG];
	int		nvcount = 0;
	int		j;
	int		count = 1;
	int		help = 0;
	int		metadata = 0;

	/* The bladehpi stonith plugin makes use of openhpi which is
	 * threaded. The mix of memory allocation without thread
	 * initialization followed by g_thread_init followed by
	 * deallocating that memory results in segfault. Hence the
	 * following G_SLICE setting; see
	 * http://library.gnome.org/devel/glib/stable/glib-Memory-Slices.html#g-slice-alloc
	 */
	setenv("G_SLICE", "always-malloc", 1);

	/* Use the basename of argv[0] in usage and syslog output. */
	if ((cmdname = strrchr(argv[0], '/')) == NULL) {
		cmdname = argv[0];
	}else{
		++cmdname;
	}

	while ((c = getopt(argc, argv, OPTIONS)) != -1) {
		switch(c) {
		case 'c':	count = atoi(optarg);
				if (count < 1) {
					fprintf(stderr
					,	"bad count [%s]\n"
					,	optarg);
					usage(cmdname, 1, NULL);
				}
				break;

		case 'd':	debug++;	/* 'debug' is file-global */
				break;

		case 'F':	optfile = optarg;
				break;

		case 'h':	help++;
				break;

		case 'm':	metadata++;
				break;

		case 'l':	++listhosts;
				break;

		case 'L':	++listtypes;
				break;

		case 'p':	parameters = optarg;
				break;

		case 's':	++silent;
				break;

		case 'S':	++status;
				break;

		case 't':	SwitchType = optarg;
				break;

		case 'T':	if (strcmp(optarg, "on") == 0) {
					reset_type = ST_POWERON;
				}else if (strcmp(optarg, "off") == 0) {
					reset_type = ST_POWEROFF;
				}else if (strcmp(optarg, "reset") == 0) {
					reset_type = ST_GENERIC_RESET;
				}else{
					fprintf(stderr
					,	"bad reset type [%s]\n"
					,	optarg);
					usage(cmdname, 1, NULL);
				}
				break;

		case 'n':	++listparanames;
				break;

		case 'v':	++verbose;
				break;

		default:	++errors;
				break;
		}
	}

	if (help && !errors) {
		usage(cmdname, 0, SwitchType);
	}
	if (debug) {
		PILpisysSetDebugLevel(debug);
		setenv("HA_debug","2",0);
	}
	/* -F and -p are mutually exclusive configuration sources. */
	if (optfile && parameters) {
		fprintf(stderr
		,	"Cannot include both -F and -p options\n");
		usage(cmdname, 1, NULL);
	}

	/*
	 * Process name=value arguments on command line...
	 * (mutually exclusive with both -F and -p)
	 */
	for (;optind < argc; ++optind) {
		char *	eqpos;
		if ((eqpos=strchr(argv[optind], EQUAL)) == NULL) {
			break;	/* first non name=value arg: the nodename */
		}
		if (parameters) {
			fprintf(stderr
			,	"Cannot include both -p and name=value "
				"style arguments\n");
			usage(cmdname, 1, NULL);
		}
		if (optfile) {
			fprintf(stderr
			,	"Cannot include both -F and name=value "
				"style arguments\n");
			usage(cmdname, 1, NULL);
		}
		if (nvcount >= MAXNVARG) {
			fprintf(stderr
			,	"Too many name=value style arguments\n");
			exit(1);
		}
		/* Split "name=value" in place by overwriting '='. */
		nvargs[nvcount].s_name = argv[optind];
		*eqpos = EOS;
		nvargs[nvcount].s_value = eqpos+1;
		nvcount++;
	}
	/* NULL-terminate the name/value pair list. */
	nvargs[nvcount].s_name = NULL;
	nvargs[nvcount].s_value = NULL;

	/* Exactly one nodename is required unless the requested actions
	 * need none (status / listings / metadata). */
	argcount = argc - optind;
	if (!(argcount == 1 || (argcount < 1
	&&	(status||listhosts||listtypes||listparanames||metadata)))) {
		++errors;
	}
	if (errors) {
		usage(cmdname, 1, NULL);
	}

	if (listtypes) {
		char **	typelist;
		typelist = stonith_types();
		if (typelist == NULL) {
			syslog(LOG_ERR, "Could not list Stonith types.");
		}else{
			char **	this;
			for(this=typelist; *this; ++this) {
				printf("%s\n", *this);
			}
		}
		exit(0);
	}

#ifndef LOG_PERROR
#	define LOG_PERROR 0
#endif
	openlog(cmdname, (LOG_CONS|(silent ? 0 : LOG_PERROR)), LOG_USER);

	if (SwitchType == NULL) {
		fprintf(stderr, "Must specify device type (-t option)\n");
		usage(cmdname, 1, NULL);
	}
	s = stonith_new(SwitchType);
	if (s == NULL) {
		syslog(LOG_ERR, "Invalid device type: '%s'", SwitchType);
		exit(S_OOPS);
	}
	if (debug) {
		stonith_set_debug(s, debug);
	}

	/* If the plugin declares configuration parameters, insist the user
	 * supplied some (unless only listing names or dumping metadata). */
	if (!listparanames && !metadata && optfile == NULL && parameters == NULL && nvcount == 0) {
		const char **	names;
		int		needs_parms = 1;
		if (s != NULL && (names = stonith_get_confignames(s)) != NULL && names[0] == NULL) {
			needs_parms = 0;
		}
		if (needs_parms) {
			fprintf(stderr
			,	"Must specify either -p option, -F option or "
				"name=value style arguments\n");
			if (s != NULL) {
				stonith_delete(s);
			}
			usage(cmdname, 1, NULL);
		}
	}

	if (listparanames) {
		const char **	names;
		int		i;
		names = stonith_get_confignames(s);
		if (names != NULL) {
			for (i=0; names[i]; ++i) {
				printf("%s ", names[i]);
			}
		}
		printf("\n");
		stonith_delete(s);
		s=NULL;
		exit(0);
	}

	if (metadata) {
		print_stonith_meta(s,SwitchType);
		stonith_delete(s);
		s=NULL;
		exit(0);
	}

	/* Old STONITH version 1 stuff... */
	if (optfile) {
		/* Configure the Stonith object from a file */
		if ((rc=stonith_set_config_file(s, optfile)) != S_OK) {
			syslog(LOG_ERR
			,	"Invalid config file for %s device."
			,	SwitchType);
#if 0
			syslog(LOG_INFO, "Config file syntax: %s"
			,	s->s_ops->getinfo(s, ST_CONF_FILE_SYNTAX));
#endif
			stonith_delete(s); s=NULL;
			exit(S_BADCONFIG);
		}
	}else if (parameters) {
		/* Configure Stonith object from the -p argument */
		StonithNVpair *	pairs;
		if ((pairs = stonith1_compat_string_to_NVpair
		(	s, parameters)) == NULL) {
			fprintf(stderr
			,	"Invalid STONITH -p parameter [%s]\n"
			,	parameters);
			stonith_delete(s); s=NULL;
			exit(1);
		}
		if ((rc = stonith_set_config(s, pairs)) != S_OK) {
			fprintf(stderr
			,	"Invalid config info for %s device"
			,	SwitchType);
		}
	}else{
		/*
		 * Configure STONITH device using cmdline arguments...
		 */
		if ((rc = stonith_set_config(s, nvargs)) != S_OK) {
			const char **	names;
			int		j;
			fprintf(stderr
			,	"Invalid config info for %s device\n"
			,	SwitchType);
			names = stonith_get_confignames(s);
			if (names != NULL) {
				fprintf(stderr
				,	"Valid config names are:\n");
				for (j=0; names[j]; ++j) {
					fprintf(stderr
					,	"\t%s\n", names[j]);
				}
			}
			stonith_delete(s); s=NULL;
			exit(rc);
		}
	}

	for (j=0; j < count; ++j) {
		rc = stonith_get_status(s);
		/* Prefer the device's own id string in subsequent messages,
		 * when the plugin provides one.
		 * BUGFIX: this test previously read "== NULL", which set
		 * SwitchType to NULL and then passed it to "%s" in syslog. */
		if ((tmp = stonith_get_info(s, ST_DEVICEID)) != NULL) {
			SwitchType = tmp;
		}
		if (status && !silent) {
			if (rc == S_OK) {
				syslog(LOG_ERR, "%s device OK.", SwitchType);
			}else{
				/* Uh-Oh */
				syslog(LOG_ERR, "%s device not accessible."
				,	SwitchType);
			}
		}
		if (listhosts) {
			char **	hostlist;
			hostlist = stonith_get_hostlist(s);
			if (hostlist == NULL) {
				syslog(LOG_ERR, "Could not list hosts for %s."
				,	SwitchType);
			}else{
				char **	this;
				for(this=hostlist; *this; ++this) {
					printf("%s\n", *this);
				}
				stonith_free_hostlist(hostlist);
			}
		}
		/* Remaining argument, if any, names the node to reset
		 * (lower-cased before the request). */
		if (optind < argc) {
			char *nodename;
			nodename = g_strdup(argv[optind]);
			g_strdown(nodename);
			rc = stonith_req_reset(s, reset_type, nodename);
			g_free(nodename);
		}
	}
	stonith_delete(s); s = NULL;
	return(rc);
} | 1 | [
"CWE-287"
] | cluster-glue | 3d7b464439ee0271da76e0ee9480f3dc14005879 | 57,286,746,842,451,480,000,000,000,000,000,000,000 | 346 | Medium: stonith: add -E option to get the configuration from the environment |
/* Destructor for the linked-list queue implementation.
 * Discards any messages still queued; the list variant keeps no additional
 * dynamic structures of its own, so draining is all that is required. */
static rsRetVal qDestructLinkedList(qqueue_t __attribute__((unused)) *pThis)
{
DEFiRet;
queueDrain(pThis); /* discard any remaining queue entries */
/* with the linked list type, there is nothing left to do here. The
* reason is that there are no dynamic elements for the list itself.
*/
RETiRet;
} | 0 | [
"CWE-772"
] | rsyslog | dfa88369d4ca4290db56b843f9eabdae1bfe0fd5 | 106,309,367,660,462,430,000,000,000,000,000,000,000 | 12 | bugfix: memory leak when $RepeatedMsgReduction on was used
bug tracker: http://bugzilla.adiscon.com/show_bug.cgi?id=225 |
/*
 * Mark the rectangle (x1,y1)-(x2,y2) of the framebuffer as modified so the
 * change is propagated to viewers.  With 'force', or when the RFB
 * framebuffer is the main framebuffer, the rect is marked directly;
 * otherwise it is routed through the 8bpp->24bpp and/or scaling transforms
 * first.  A nonzero damage_time suppresses marking for damage_delay
 * seconds (a testing hack, see comment below).
 */
void mark_rect_as_modified(int x1, int y1, int x2, int y2, int force) {
if (damage_time != 0) {
/*
* This is not XDAMAGE, rather a hack for testing
* where we allow the framebuffer to be corrupted for
* damage_delay seconds.
*/
int debug = 0;
if (time(NULL) > damage_time + damage_delay) {
/* damage window expired: disable the hack and fall through. */
if (! quiet) {
rfbLog("damaging turned off.\n");
}
damage_time = 0;
damage_delay = 0;
} else {
if (debug) {
rfbLog("damaging viewer fb by not marking "
"rect: %d,%d,%d,%d\n", x1, y1, x2, y2);
}
/* still inside the damage window: deliberately skip marking. */
return;
}
}
if (rfb_fb == main_fb || force) {
mark_wrapper(x1, y1, x2, y2);
return;
}
if (cmap8to24) {
bpp8to24(x1, y1, x2, y2);
}
if (scaling) {
/* scale_and_mark_rect also performs the marking (last arg = mark). */
scale_and_mark_rect(x1, y1, x2, y2, 1);
} else {
mark_wrapper(x1, y1, x2, y2);
}
} | 0 | [
"CWE-862",
"CWE-284",
"CWE-732"
] | x11vnc | 69eeb9f7baa14ca03b16c9de821f9876def7a36a | 104,053,314,330,558,570,000,000,000,000,000,000,000 | 40 | scan: limit access to shared memory segments to current user |
/*
 * Netdevice notifier for AF_PACKET sockets.  Walks every packet socket in
 * the device's network namespace (under rcu_read_lock) and reacts to the
 * event:
 *  - NETDEV_UNREGISTER: drop multicast memberships on the dying device,
 *    then fall through to the DOWN handling; additionally forget the
 *    cached device, clear the bound ifindex and release the device ref.
 *  - NETDEV_DOWN: if the socket is bound to this device, unregister its
 *    protocol hook and report ENETDOWN to the socket owner.
 *  - NETDEV_UP: re-register the protocol hook for sockets bound to the
 *    returning device (po->num != 0 means a protocol is bound).
 */
static int packet_notifier(struct notifier_block *this,
unsigned long msg, void *ptr)
{
struct sock *sk;
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct net *net = dev_net(dev);
rcu_read_lock();
sk_for_each_rcu(sk, &net->packet.sklist) {
struct packet_sock *po = pkt_sk(sk);
switch (msg) {
case NETDEV_UNREGISTER:
if (po->mclist)
packet_dev_mclist_delete(dev, &po->mclist);
/* fallthrough */
case NETDEV_DOWN:
if (dev->ifindex == po->ifindex) {
spin_lock(&po->bind_lock);
if (po->running) {
__unregister_prot_hook(sk, false);
sk->sk_err = ENETDOWN;
if (!sock_flag(sk, SOCK_DEAD))
sk->sk_error_report(sk);
}
if (msg == NETDEV_UNREGISTER) {
packet_cached_dev_reset(po);
po->ifindex = -1;
if (po->prot_hook.dev)
dev_put(po->prot_hook.dev);
po->prot_hook.dev = NULL;
}
spin_unlock(&po->bind_lock);
}
break;
case NETDEV_UP:
if (dev->ifindex == po->ifindex) {
spin_lock(&po->bind_lock);
if (po->num)
register_prot_hook(sk);
spin_unlock(&po->bind_lock);
}
break;
}
}
rcu_read_unlock();
return NOTIFY_DONE;
} | 0 | [
"CWE-119"
] | linux | 2b6867c2ce76c596676bec7d2d525af525fdc6e2 | 128,100,876,737,345,120,000,000,000,000,000,000,000 | 49 | net/packet: fix overflow in check for priv area size
Subtracting tp_sizeof_priv from tp_block_size and casting to int
to check whether one is less then the other doesn't always work
(both of them are unsigned ints).
Compare them as is instead.
Also cast tp_sizeof_priv to u64 before using BLK_PLUS_PRIV, as
it can overflow inside BLK_PLUS_PRIV otherwise.
Signed-off-by: Andrey Konovalov <[email protected]>
Acked-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
/*
 * Read raw PCM samples for one coding block (size 2^log2CbSize at x0,y0)
 * from the bitstream and store them into the image plane of component
 * cIdx.  For chroma (cIdx > 0), block size and position are scaled down by
 * the chroma subsampling factors.  Each sample is read with nPcmBits of
 * precision and left-shifted to the component's bit depth; a negative
 * shift (only possible with a broken SPS header) is clamped to 0.
 * pixel_t is presumably the plane's storage type (8- or 16-bit) — the
 * template parameter is declared outside this view.
 */
void read_pcm_samples_internal(thread_context* tctx, int x0, int y0, int log2CbSize,
int cIdx, bitreader& br)
{
const seq_parameter_set& sps = tctx->img->get_sps();
int nPcmBits;
int bitDepth;
int w = 1<<log2CbSize;
int h = 1<<log2CbSize;
if (cIdx>0) {
/* chroma: scale size/position and use chroma PCM depth. */
w /= sps.SubWidthC;
h /= sps.SubHeightC;
x0 /= sps.SubWidthC;
y0 /= sps.SubHeightC;
nPcmBits = sps.pcm_sample_bit_depth_chroma;
bitDepth = sps.BitDepth_C;
}
else {
nPcmBits = sps.pcm_sample_bit_depth_luma;
bitDepth = sps.BitDepth_Y;
}
pixel_t* ptr;
int stride;
ptr = tctx->img->get_image_plane_at_pos_NEW<pixel_t>(cIdx,x0,y0);
stride = tctx->img->get_image_stride(cIdx);
int shift = bitDepth - nPcmBits;
// a shift < 0 may result when the SPS sequence header is broken
if (shift < 0) {
shift=0;
}
for (int y=0;y<h;y++)
for (int x=0;x<w;x++)
{
int value = get_bits(&br, nPcmBits);
ptr[y*stride+x] = value << shift;
}
} | 0 | [
/* .prepare callback for plugin backends: performs no work and
 * unconditionally reports success (0).  All parameters are ignored. */
plugin_prepare (struct backend *b, struct connection *conn, int readonly)
{
return 0;
} | 0 | [
"CWE-406"
] | nbdkit | a6b88b195a959b17524d1c8353fd425d4891dc5f | 114,645,694,507,081,080,000,000,000,000,000,000,000 | 4 | server: Fix regression for NBD_OPT_INFO before NBD_OPT_GO
Most known NBD clients do not bother with NBD_OPT_INFO (except for
clients like 'qemu-nbd --list' that don't ever intend to connect), but
go straight to NBD_OPT_GO. However, it's not too hard to hack up qemu
to add in an extra client step (whether info on the same name, or more
interestingly, info on a different name), as a patch against qemu
commit 6f214b30445:
| diff --git i/nbd/client.c w/nbd/client.c
| index f6733962b49b..425292ac5ea9 100644
| --- i/nbd/client.c
| +++ w/nbd/client.c
| @@ -1038,6 +1038,14 @@ int nbd_receive_negotiate(AioContext *aio_context, QIOChannel *ioc,
| * TLS). If it is not available, fall back to
| * NBD_OPT_LIST for nicer error messages about a missing
| * export, then use NBD_OPT_EXPORT_NAME. */
| + if (getenv ("HACK"))
| + info->name[0]++;
| + result = nbd_opt_info_or_go(ioc, NBD_OPT_INFO, info, errp);
| + if (getenv ("HACK"))
| + info->name[0]--;
| + if (result < 0) {
| + return -EINVAL;
| + }
| result = nbd_opt_info_or_go(ioc, NBD_OPT_GO, info, errp);
| if (result < 0) {
| return -EINVAL;
This works just fine in 1.14.0, where we call .open only once (so the
INFO and GO repeat calls into the same plugin handle), but in 1.14.1
it regressed into causing an assertion failure: we are now calling
.open a second time on a connection that is already opened:
$ nbdkit -rfv null &
$ hacked-qemu-io -f raw -r nbd://localhost -c quit
...
nbdkit: null[1]: debug: null: open readonly=1
nbdkit: backend.c:179: backend_open: Assertion `h->handle == NULL' failed.
Worse, on the mainline development, we have recently made it possible
for plugins to actively report different information for different
export names; for example, a plugin may choose to report different
answers for .can_write on export A than for export B; but if we share
cached handles, then an NBD_OPT_INFO on one export prevents correct
answers for NBD_OPT_GO on the second export name. (The HACK envvar in
my qemu modifications can be used to demonstrate cross-name requests,
which are even less likely in a real client).
The solution is to call .close after NBD_OPT_INFO, coupled with enough
glue logic to reset cached connection handles back to the state
expected by .open. This in turn means factoring out another backend_*
function, but also gives us an opportunity to change
backend_set_handle to no longer accept NULL.
The assertion failure is, to some extent, a possible denial of service
attack (one client can force nbdkit to exit by merely sending OPT_INFO
before OPT_GO, preventing the next client from connecting), although
this is mitigated by using TLS to weed out untrusted clients. Still,
the fact that we introduced a potential DoS attack while trying to fix
a traffic amplification security bug is not very nice.
Sadly, as there are no known clients that easily trigger this mode of
operation (OPT_INFO before OPT_GO), there is no easy way to cover this
via a testsuite addition. I may end up hacking something into libnbd.
Fixes: c05686f957
Signed-off-by: Eric Blake <[email protected]> |
static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
return skb->end - skb->head; | 0 | [
"CWE-20"
] | linux | 2b16f048729bf35e6c28a40cbfad07239f9dcd90 | 181,428,882,865,438,330,000,000,000,000,000,000,000 | 4 | net: create skb_gso_validate_mac_len()
If you take a GSO skb, and split it into packets, will the MAC
length (L2 + L3 + L4 headers + payload) of those packets be small
enough to fit within a given length?
Move skb_gso_mac_seglen() to skbuff.h with other related functions
like skb_gso_network_seglen() so we can use it, and then create
skb_gso_validate_mac_len to do the full calculation.
Signed-off-by: Daniel Axtens <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
/**
 * Create a string builder pre-filled with a copy of the given raw bytes.
 * A single heap block holds the builder header followed by the data.
 *
 * NOTE(review): the result of jmem_heap_alloc_block is used unchecked;
 * presumably the allocator aborts rather than returning NULL on OOM —
 * confirm against jmem's contract.
 *
 * @return the initialized string builder (owns the heap block)
 */
ecma_stringbuilder_create_raw (const lit_utf8_byte_t *data_p, /**< pointer to data */
const lit_utf8_size_t data_size) /**< size of the data */
{
const lit_utf8_size_t initial_size = data_size + ECMA_ASCII_STRING_HEADER_SIZE;
ecma_stringbuilder_header_t *header_p = (ecma_stringbuilder_header_t *) jmem_heap_alloc_block (initial_size);
header_p->current_size = initial_size;
#if JERRY_MEM_STATS
jmem_stats_allocate_string_bytes (initial_size);
#endif /* JERRY_MEM_STATS */
memcpy (ECMA_STRINGBUILDER_STRING_PTR (header_p), data_p, data_size);
ecma_stringbuilder_t ret = {.header_p = header_p};
return ret;
} /* ecma_stringbuilder_create_raw */ | 0 | [
"CWE-416"
] | jerryscript | 3bcd48f72d4af01d1304b754ef19fe1a02c96049 | 70,775,172,430,250,640,000,000,000,000,000,000,000 | 16 | Improve parse_identifier (#4691)
Ascii string length is no longer computed during string allocation.
JerryScript-DCO-1.0-Signed-off-by: Daniel Batiz [email protected] |
/*
 * Deep-copy an AlterTSConfigurationStmt parse node (ALTER TEXT SEARCH
 * CONFIGURATION).  Node-valued fields are copied recursively via
 * COPY_NODE_FIELD; plain scalars via COPY_SCALAR_FIELD.  The macros rely
 * on the conventional 'from' / 'newnode' variable names.
 */
_copyAlterTSConfigurationStmt(const AlterTSConfigurationStmt *from)
{
AlterTSConfigurationStmt *newnode = makeNode(AlterTSConfigurationStmt);
COPY_NODE_FIELD(cfgname);
COPY_NODE_FIELD(tokentype);
COPY_NODE_FIELD(dicts);
COPY_SCALAR_FIELD(override);
COPY_SCALAR_FIELD(replace);
COPY_SCALAR_FIELD(missing_ok);
return newnode;
} | 0 | [
"CWE-362"
] | postgres | 5f173040e324f6c2eebb90d86cf1b0cdb5890f0a | 82,070,752,374,716,860,000,000,000,000,000,000,000 | 13 | Avoid repeated name lookups during table and index DDL.
If the name lookups come to different conclusions due to concurrent
activity, we might perform some parts of the DDL on a different table
than other parts. At least in the case of CREATE INDEX, this can be
used to cause the permissions checks to be performed against a
different table than the index creation, allowing for a privilege
escalation attack.
This changes the calling convention for DefineIndex, CreateTrigger,
transformIndexStmt, transformAlterTableStmt, CheckIndexCompatible
(in 9.2 and newer), and AlterTable (in 9.1 and older). In addition,
CheckRelationOwnership is removed in 9.2 and newer and the calling
convention is changed in older branches. A field has also been added
to the Constraint node (FkConstraint in 8.4). Third-party code calling
these functions or using the Constraint node will require updating.
Report by Andres Freund. Patch by Robert Haas and Andres Freund,
reviewed by Tom Lane.
Security: CVE-2014-0062 |
correct_range(exarg_T *eap)
{
    // Commands flagged EX_ZEROR accept line 0 in their range: nothing to do.
    if (eap->argt & EX_ZEROR)
	return;

    // Otherwise a zero line number is invalid; bump it up to line 1.
    if (eap->line1 == 0)
	eap->line1 = 1;
    if (eap->line2 == 0)
	eap->line2 = 1;
} | 0 | [
"CWE-122"
] | vim | 35a319b77f897744eec1155b736e9372c9c5575f | 183,218,429,796,493,670,000,000,000,000,000,000,000 | 10 | patch 8.2.3489: ml_get error after search with range
Problem: ml_get error after search with range.
Solution: Limit the line number to the buffer line count. |
/*
 * Issue an SMP REPORT PHY ERROR LOG for @phy and update its error
 * counters (invalid dwords, running disparity errors, loss of dword
 * sync, phy reset problems) from the response.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the status
 * from smp_execute_task().
 */
int sas_smp_get_phy_events(struct sas_phy *phy)
{
	int res;
	u8 *req;
	u8 *resp;
	struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
	struct domain_device *dev = sas_find_dev_by_rphy(rphy);

	req = alloc_smp_req(RPEL_REQ_SIZE);
	if (!req)
		return -ENOMEM;

	resp = alloc_smp_resp(RPEL_RESP_SIZE);
	if (!resp) {
		kfree(req);
		return -ENOMEM;
	}

	req[1] = SMP_REPORT_PHY_ERR_LOG;
	req[9] = phy->number;

	res = smp_execute_task(dev, req, RPEL_REQ_SIZE,
			       resp, RPEL_RESP_SIZE);
	/* Only parse the response on success; it was "if (!res)" before,
	 * which skipped parsing on success and read a stale buffer on
	 * failure. */
	if (res)
		goto out;

	phy->invalid_dword_count = scsi_to_u32(&resp[12]);
	phy->running_disparity_error_count = scsi_to_u32(&resp[16]);
	phy->loss_of_dword_sync_count = scsi_to_u32(&resp[20]);
	phy->phy_reset_problem_count = scsi_to_u32(&resp[24]);

 out:
	/* Free both buffers on every exit path; 'req' previously leaked
	 * here (CWE-772). */
	kfree(req);
	kfree(resp);
	return res;
} | 1 | [
"CWE-399",
"CWE-772"
] | linux | 4a491b1ab11ca0556d2fda1ff1301e862a2d44c4 | 86,447,086,696,164,020,000,000,000,000,000,000,000 | 37 | scsi: libsas: fix memory leak in sas_smp_get_phy_events()
We've got a memory leak with the following producer:
while true;
do cat /sys/class/sas_phy/phy-1:0:12/invalid_dword_count >/dev/null;
done
The buffer req is allocated and not freed after we return. Fix it.
Fixes: 2908d778ab3e ("[SCSI] aic94xx: new driver")
Signed-off-by: Jason Yan <[email protected]>
CC: John Garry <[email protected]>
CC: chenqilin <[email protected]>
CC: chenxiang <[email protected]>
Reviewed-by: Christoph Hellwig <[email protected]>
Reviewed-by: Hannes Reinecke <[email protected]>
Signed-off-by: Martin K. Petersen <[email protected]> |
/* CLI command handler: "no ip community-list standard WORD".
 * Deletes the entire named standard community-list by delegating to
 * community_list_unset_vty with COMMUNITY_LIST_STANDARD. */
DEFUN (no_ip_community_list_name_standard_all,
no_ip_community_list_name_standard_all_cmd,
"no ip community-list standard WORD",
NO_STR
IP_STR
COMMUNITY_LIST_STR
"Add a standard community-list entry\n"
"Community list name\n")
{
return community_list_unset_vty (vty, argc, argv, COMMUNITY_LIST_STANDARD);
} | 0 | [
"CWE-125"
] | frr | 6d58272b4cf96f0daa846210dd2104877900f921 | 206,913,479,490,887,540,000,000,000,000,000,000,000 | 11 | [bgpd] cleanup, compact and consolidate capability parsing code
2007-07-26 Paul Jakma <[email protected]>
* (general) Clean up and compact capability parsing slightly.
Consolidate validation of length and logging of generic TLV, and
memcpy of capability data, thus removing such from cap specifc
code (not always present or correct).
* bgp_open.h: Add structures for the generic capability TLV header
and for the data formats of the various specific capabilities we
support. Hence remove the badly named, or else misdefined, struct
capability.
* bgp_open.c: (bgp_capability_vty_out) Use struct capability_mp_data.
Do the length checks *before* memcpy()'ing based on that length
(stored capability - should have been validated anyway on input,
but..).
(bgp_afi_safi_valid_indices) new function to validate (afi,safi)
which is about to be used as index into arrays, consolidates
several instances of same, at least one of which appeared to be
incomplete..
(bgp_capability_mp) Much condensed.
(bgp_capability_orf_entry) New, process one ORF entry
(bgp_capability_orf) Condensed. Fixed to process all ORF entries.
(bgp_capability_restart) Condensed, and fixed to use a
cap-specific type, rather than abusing capability_mp.
(struct message capcode_str) added to aid generic logging.
(size_t cap_minsizes[]) added to aid generic validation of
capability length field.
(bgp_capability_parse) Generic logging and validation of TLV
consolidated here. Code compacted as much as possible.
* bgp_packet.c: (bgp_open_receive) Capability parsers now use
streams, so no more need here to manually fudge the input stream
getp.
(bgp_capability_msg_parse) use struct capability_mp_data. Validate
lengths /before/ memcpy. Use bgp_afi_safi_valid_indices.
(bgp_capability_receive) Exported for use by test harness.
* bgp_vty.c: (bgp_show_summary) fix conversion warning
(bgp_show_peer) ditto
* bgp_debug.h: Fix storage 'extern' after type 'const'.
* lib/log.c: (mes_lookup) warning about code not being in
same-number array slot should be debug, not warning. E.g. BGP
has several discontigious number spaces, allocating from
different parts of a space is not uncommon (e.g. IANA
assigned versus vendor-assigned code points in some number
space). |
/*
 * Scan the RX head/body ring for the next descriptor of type
 * VMXNET3_RXD_BTYPE_HEAD, consuming (skipping) descriptors of other types
 * along the way.  On success the descriptor is stored in *descr_buf, its
 * ring index in *descr_idx, and *ridx is set to RX_HEAD_BODY_RING.
 * Returns false when the ring is exhausted, i.e. the descriptor's
 * generation bit no longer matches the ring's generation.
 */
vmxnet3_get_next_head_rx_descr(VMXNET3State *s,
struct Vmxnet3_RxDesc *descr_buf,
uint32_t *descr_idx,
uint32_t *ridx)
{
for (;;) {
uint32_t ring_gen;
vmxnet3_read_next_rx_descr(s, RXQ_IDX, RX_HEAD_BODY_RING,
descr_buf, descr_idx);
/* If no more free descriptors - return */
ring_gen = vmxnet3_get_rx_ring_gen(s, RXQ_IDX, RX_HEAD_BODY_RING);
if (descr_buf->gen != ring_gen) {
return false;
}
/* Only read after generation field verification */
smp_rmb();
/* Re-read to be sure we got the latest version */
vmxnet3_read_next_rx_descr(s, RXQ_IDX, RX_HEAD_BODY_RING,
descr_buf, descr_idx);
/* Mark current descriptor as used/skipped */
vmxnet3_inc_rx_consumption_counter(s, RXQ_IDX, RX_HEAD_BODY_RING);
/* If this is what we are looking for - return */
if (descr_buf->btype == VMXNET3_RXD_BTYPE_HEAD) {
*ridx = RX_HEAD_BODY_RING;
return true;
}
}
} | 0 | [
"CWE-20"
] | qemu | a7278b36fcab9af469563bd7b9dadebe2ae25e48 | 298,458,184,930,055,130,000,000,000,000,000,000,000 | 32 | net/vmxnet3: Refine l2 header validation
Validation of l2 header length assumed minimal packet size as
eth_header + 2 * vlan_header regardless of the actual protocol.
This caused crash for valid non-IP packets shorter than 22 bytes, as
'tx_pkt->packet_type' hasn't been assigned for such packets, and
'vmxnet3_on_tx_done_update_stats()' expects it to be properly set.
Refine header length validation in 'vmxnet_tx_pkt_parse_headers'.
Check its return value during packet processing flow.
As a side effect, in case IPv4 and IPv6 header validation failure,
corrupt packets will be dropped.
Signed-off-by: Dana Rubin <[email protected]>
Signed-off-by: Shmulik Ladkani <[email protected]>
Signed-off-by: Jason Wang <[email protected]> |
/*
 * Write 'image' to a blob in Adobe Photoshop PSD (or PSB for very large
 * images) format: file header, optional colormap, image resource blocks
 * (resolution, 8BIM, ICC profile), the layer section, and finally the
 * flattened composite image data.
 * Returns MagickTrue on success.
 */
static MagickBooleanType WritePSDImage(const ImageInfo *image_info,
Image *image,ExceptionInfo *exception)
{
const StringInfo
*icc_profile;
MagickBooleanType
status;
PSDInfo
psd_info;
register ssize_t
i;
size_t
length,
num_channels,
packet_size;
StringInfo
*bim_profile;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
if (status == MagickFalse)
return(status);
packet_size=(size_t) (image->depth > 8 ? 6 : 3);
if (image->alpha_trait != UndefinedPixelTrait)
packet_size+=image->depth > 8 ? 2 : 1;
/* PSB (version 2) is required for dimensions beyond 30000 pixels. */
psd_info.version=1;
if ((LocaleCompare(image_info->magick,"PSB") == 0) ||
(image->columns > 30000) || (image->rows > 30000))
psd_info.version=2;
(void) WriteBlob(image,4,(const unsigned char *) "8BPS");
(void) WriteBlobMSBShort(image,psd_info.version); /* version */
for (i=1; i <= 6; i++)
(void) WriteBlobByte(image, 0); /* 6 bytes of reserved */
/* When the image has a color profile it won't be converted to gray scale */
if ((GetImageProfile(image,"icc") == (StringInfo *) NULL) &&
(SetImageGray(image,exception) != MagickFalse))
num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
else
if ((image_info->type != TrueColorType) && (image_info->type !=
TrueColorAlphaType) && (image->storage_class == PseudoClass))
num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
else
{
if (image->storage_class == PseudoClass)
(void) SetImageStorageClass(image,DirectClass,exception);
if (image->colorspace != CMYKColorspace)
num_channels=(image->alpha_trait != UndefinedPixelTrait ? 4UL : 3UL);
else
num_channels=(image->alpha_trait != UndefinedPixelTrait ? 5UL : 4UL);
}
(void) WriteBlobMSBShort(image,(unsigned short) num_channels);
(void) WriteBlobMSBLong(image,(unsigned int) image->rows);
(void) WriteBlobMSBLong(image,(unsigned int) image->columns);
if (IsImageGray(image) != MagickFalse)
{
MagickBooleanType
monochrome;
/*
Write depth & mode.
*/
monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
MagickTrue : MagickFalse;
(void) WriteBlobMSBShort(image,(unsigned short)
(monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8));
(void) WriteBlobMSBShort(image,(unsigned short)
(monochrome != MagickFalse ? BitmapMode : GrayscaleMode));
}
else
{
(void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class ==
PseudoClass ? 8 : image->depth > 8 ? 16 : 8));
if (((image_info->colorspace != UndefinedColorspace) ||
(image->colorspace != CMYKColorspace)) &&
(image_info->colorspace != CMYKColorspace))
{
(void) TransformImageColorspace(image,sRGBColorspace,exception);
(void) WriteBlobMSBShort(image,(unsigned short)
(image->storage_class == PseudoClass ? IndexedMode : RGBMode));
}
else
{
if (image->colorspace != CMYKColorspace)
(void) TransformImageColorspace(image,CMYKColorspace,exception);
(void) WriteBlobMSBShort(image,CMYKMode);
}
}
if ((IsImageGray(image) != MagickFalse) ||
(image->storage_class == DirectClass) || (image->colors > 256))
(void) WriteBlobMSBLong(image,0);
else
{
/*
Write PSD raster colormap.
(Always 768 bytes: 256 entries each of R, G, B, zero-padded.)
*/
(void) WriteBlobMSBLong(image,768);
for (i=0; i < (ssize_t) image->colors; i++)
(void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
image->colormap[i].red)));
for ( ; i < 256; i++)
(void) WriteBlobByte(image,0);
for (i=0; i < (ssize_t) image->colors; i++)
(void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
image->colormap[i].green)));
for ( ; i < 256; i++)
(void) WriteBlobByte(image,0);
for (i=0; i < (ssize_t) image->colors; i++)
(void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
image->colormap[i].blue)));
for ( ; i < 256; i++)
(void) WriteBlobByte(image,0);
}
/*
Image resource block.
(length starts at 28 bytes for the resolution resource, 0x03EB.)
*/
length=28; /* 0x03EB */
bim_profile=(StringInfo *) GetImageProfile(image,"8bim");
icc_profile=GetImageProfile(image,"icc");
if (bim_profile != (StringInfo *) NULL)
{
/* Work on a clone: the stored ICC/resolution resources are replaced
by the ones written explicitly below. */
bim_profile=CloneStringInfo(bim_profile);
if (icc_profile != (StringInfo *) NULL)
RemoveICCProfileFromResourceBlock(bim_profile);
RemoveResolutionFromResourceBlock(bim_profile);
length+=PSDQuantum(GetStringInfoLength(bim_profile));
}
if (icc_profile != (const StringInfo *) NULL)
length+=PSDQuantum(GetStringInfoLength(icc_profile))+12;
(void) WriteBlobMSBLong(image,(unsigned int) length);
WriteResolutionResourceBlock(image);
if (bim_profile != (StringInfo *) NULL)
{
(void) WriteBlob(image,GetStringInfoLength(bim_profile),
GetStringInfoDatum(bim_profile));
bim_profile=DestroyStringInfo(bim_profile);
}
if (icc_profile != (StringInfo *) NULL)
{
(void) WriteBlob(image,4,(const unsigned char *) "8BIM");
(void) WriteBlobMSBShort(image,0x0000040F);
(void) WriteBlobMSBShort(image,0);
(void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength(
icc_profile));
(void) WriteBlob(image,GetStringInfoLength(icc_profile),
GetStringInfoDatum(icc_profile));
/* pad to even length */
if ((ssize_t) GetStringInfoLength(icc_profile) != PSDQuantum(GetStringInfoLength(icc_profile)))
(void) WriteBlobByte(image,0);
}
if (status != MagickFalse)
{
MagickOffsetType
size_offset;
size_t
size;
/* Write a placeholder layer-section size, then patch it in place
once the layers have been written. */
size_offset=TellBlob(image);
(void) SetPSDSize(&psd_info,image,0);
status=WritePSDLayersInternal(image,image_info,&psd_info,&size,
exception);
size_offset+=WritePSDSize(&psd_info,image,size+
(psd_info.version == 1 ? 8 : 12),size_offset);
}
(void) WriteBlobMSBLong(image,0); /* user mask data */
/*
Write composite image.
*/
if (status != MagickFalse)
{
CompressionType
compression;
/* ZIP is not valid for the composite; fall back to RLE, but let an
explicit image_info compression override. Restored afterwards. */
compression=image->compression;
if (image->compression == ZipCompression)
image->compression=RLECompression;
if (image_info->compression != UndefinedCompression)
image->compression=image_info->compression;
if (WritePSDChannels(&psd_info,image_info,image,image,0,MagickFalse,
exception) == 0)
status=MagickFalse;
image->compression=compression;
}
(void) CloseBlob(image);
return(status);
} | 0 | [
"CWE-399",
"CWE-401"
] | ImageMagick | 8a43abefb38c5e29138e1c9c515b313363541c06 | 77,340,019,313,878,970,000,000,000,000,000,000,000 | 201 | https://github.com/ImageMagick/ImageMagick/issues/1451 |
static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
{
kvm_lapic_set_reg(apic, APIC_LDR, id);
atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
} | 0 | [
"CWE-703",
"CWE-459"
] | linux | f7d8a19f9a056a05c5c509fa65af472a322abfee | 120,656,151,573,630,760,000,000,000,000,000,000,000 | 5 | Revert "KVM: x86: Open code necessary bits of kvm_lapic_set_base() at vCPU RESET"
Revert a change to open code bits of kvm_lapic_set_base() when emulating
APIC RESET to fix an apic_hw_disabled underflow bug due to arch.apic_base
and apic_hw_disabled being unsyncrhonized when the APIC is created. If
kvm_arch_vcpu_create() fails after creating the APIC, kvm_free_lapic()
will see the initialized-to-zero vcpu->arch.apic_base and decrement
apic_hw_disabled without KVM ever having incremented apic_hw_disabled.
Using kvm_lapic_set_base() in kvm_lapic_reset() is also desirable for a
potential future where KVM supports RESET outside of vCPU creation, in
which case all the side effects of kvm_lapic_set_base() are needed, e.g.
to handle the transition from x2APIC => xAPIC.
Alternatively, KVM could temporarily increment apic_hw_disabled (and call
kvm_lapic_set_base() at RESET), but that's a waste of cycles and would
impact the performance of other vCPUs and VMs. The other subtle side
effect is that updating the xAPIC ID needs to be done at RESET regardless
of whether the APIC was previously enabled, i.e. kvm_lapic_reset() needs
an explicit call to kvm_apic_set_xapic_id() regardless of whether or not
kvm_lapic_set_base() also performs the update. That makes stuffing the
enable bit at vCPU creation slightly more palatable, as doing so affects
only the apic_hw_disabled key.
Opportunistically tweak the comment to explicitly call out the connection
between vcpu->arch.apic_base and apic_hw_disabled, and add a comment to
call out the need to always do kvm_apic_set_xapic_id() at RESET.
Underflow scenario:
kvm_vm_ioctl() {
kvm_vm_ioctl_create_vcpu() {
kvm_arch_vcpu_create() {
if (something_went_wrong)
goto fail_free_lapic;
/* vcpu->arch.apic_base is initialized when something_went_wrong is false. */
kvm_vcpu_reset() {
kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event) {
vcpu->arch.apic_base = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE;
}
}
return 0;
fail_free_lapic:
kvm_free_lapic() {
/* vcpu->arch.apic_base is not yet initialized when something_went_wrong is true. */
if (!(vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE))
static_branch_slow_dec_deferred(&apic_hw_disabled); // <= underflow bug.
}
return r;
}
}
}
This (mostly) reverts commit 421221234ada41b4a9f0beeb08e30b07388bd4bd.
Fixes: 421221234ada ("KVM: x86: Open code necessary bits of kvm_lapic_set_base() at vCPU RESET")
Reported-by: [email protected]
Debugged-by: Tetsuo Handa <[email protected]>
Signed-off-by: Sean Christopherson <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
chdlc_if_print(netdissect_options *ndo, const struct pcap_pkthdr *h, register const u_char *p)
{
register u_int length = h->len;
register u_int caplen = h->caplen;
if (caplen < CHDLC_HDRLEN) {
ND_PRINT((ndo, "[|chdlc]"));
return (caplen);
}
return (chdlc_print(ndo, p,length));
} | 1 | [
"CWE-125"
] | tcpdump | a1eefe986065846b6c69dbc09afd9fa1a02c4a3d | 324,855,004,130,558,770,000,000,000,000,000,000,000 | 11 | CVE-2017-13687/CHDLC: Improve bounds and length checks.
Prevent a possible buffer overread in chdlc_print() and replace the
custom check in chdlc_if_print() with a standard check in chdlc_print()
so that the latter certainly does not over-read even when reached via
juniper_chdlc_print(). Add length checks. |
static void vnc_led_state_change(VncState *vs)
{
int ledstate = 0;
if (!vnc_has_feature(vs, VNC_FEATURE_LED_STATE)) {
return;
}
ledstate = current_led_state(vs);
vnc_lock_output(vs);
vnc_write_u8(vs, VNC_MSG_SERVER_FRAMEBUFFER_UPDATE);
vnc_write_u8(vs, 0);
vnc_write_u16(vs, 1);
vnc_framebuffer_update(vs, 0, 0, 1, 1, VNC_ENCODING_LED_STATE);
vnc_write_u8(vs, ledstate);
vnc_unlock_output(vs);
vnc_flush(vs);
} | 0 | [
"CWE-125"
] | qemu | bea60dd7679364493a0d7f5b54316c767cf894ef | 135,058,461,606,436,680,000,000,000,000,000,000,000 | 18 | ui/vnc: fix potential memory corruption issues
this patch makes the VNC server work correctly if the
server surface and the guest surface have different sizes.
Basically the server surface is adjusted to not exceed VNC_MAX_WIDTH
x VNC_MAX_HEIGHT and additionally the width is rounded up to multiple of
VNC_DIRTY_PIXELS_PER_BIT.
If we have a resolution whose width is not dividable by VNC_DIRTY_PIXELS_PER_BIT
we now get a small black bar on the right of the screen.
If the surface is too big to fit the limits only the upper left area is shown.
On top of that this fixes 2 memory corruption issues:
The first was actually discovered during playing
around with a Windows 7 vServer. During resolution
change in Windows 7 it happens sometimes that Windows
changes to an intermediate resolution where
server_stride % cmp_bytes != 0 (in vnc_refresh_server_surface).
This happens only if width % VNC_DIRTY_PIXELS_PER_BIT != 0.
The second is a theoretical issue, but is maybe exploitable
by the guest. If for some reason the guest surface size is bigger
than VNC_MAX_WIDTH x VNC_MAX_HEIGHT we end up in severe corruption since
this limit is nowhere enforced.
Signed-off-by: Peter Lieven <[email protected]>
Signed-off-by: Gerd Hoffmann <[email protected]> |
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
nodemask_t *nmask, gfp_t gfp_mask)
{
spin_lock_irq(&hugetlb_lock);
if (h->free_huge_pages - h->resv_huge_pages > 0) {
struct page *page;
page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask);
if (page) {
spin_unlock_irq(&hugetlb_lock);
return page;
}
}
spin_unlock_irq(&hugetlb_lock);
return alloc_migrate_huge_page(h, gfp_mask, preferred_nid, nmask);
} | 0 | [] | linux | a4a118f2eead1d6c49e00765de89878288d4b890 | 16,661,989,160,713,528,000,000,000,000,000,000,000 | 17 | hugetlbfs: flush TLBs correctly after huge_pmd_unshare
When __unmap_hugepage_range() calls to huge_pmd_unshare() succeed, a TLB
flush is missing. This TLB flush must be performed before releasing the
i_mmap_rwsem, in order to prevent an unshared PMDs page from being
released and reused before the TLB flush took place.
Arguably, a comprehensive solution would use mmu_gather interface to
batch the TLB flushes and the PMDs page release, however it is not an
easy solution: (1) try_to_unmap_one() and try_to_migrate_one() also call
huge_pmd_unshare() and they cannot use the mmu_gather interface; and (2)
deferring the release of the page reference for the PMDs page until
after i_mmap_rwsem is dropeed can confuse huge_pmd_unshare() into
thinking PMDs are shared when they are not.
Fix __unmap_hugepage_range() by adding the missing TLB flush, and
forcing a flush when unshare is successful.
Fixes: 24669e58477e ("hugetlb: use mmu_gather instead of a temporary linked list for accumulating pages)" # 3.6
Signed-off-by: Nadav Amit <[email protected]>
Reviewed-by: Mike Kravetz <[email protected]>
Cc: Aneesh Kumar K.V <[email protected]>
Cc: KAMEZAWA Hiroyuki <[email protected]>
Cc: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
static void vnc_queue_clear(VncJobQueue *q)
{
qemu_cond_destroy(&queue->cond);
qemu_mutex_destroy(&queue->mutex);
buffer_free(&queue->buffer);
g_free(q);
queue = NULL; /* Unset global queue */
} | 0 | [
"CWE-125"
] | qemu | 9f64916da20eea67121d544698676295bbb105a7 | 199,025,915,693,247,960,000,000,000,000,000,000,000 | 8 | pixman/vnc: use pixman images in vnc.
The vnc code uses *three* DisplaySurfaces:
First is the surface of the actual QemuConsole, usually the guest
screen, but could also be a text console (monitor/serial reachable via
Ctrl-Alt-<nr> keys). This is left as-is.
Second is the current server's view of the screen content. The vnc code
uses this to figure which parts of the guest screen did _really_ change
to reduce the amount of updates sent to the vnc clients. It is also
used as data source when sending out the updates to the clients. This
surface gets replaced by a pixman image. The format changes too,
instead of using the guest screen format we'll use fixed 32bit rgb
framebuffer and convert the pixels on the fly when comparing and
updating the server framebuffer.
Third surface carries the format expected by the vnc client. That isn't
used to store image data. This surface is switched to PixelFormat and a
boolean for bigendian byte order.
Signed-off-by: Gerd Hoffmann <[email protected]> |
static int acp_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->acp.cgs_device)
amdgpu_cgs_destroy_device(adev->acp.cgs_device);
return 0;
} | 0 | [
"CWE-400",
"CWE-401"
] | linux | 57be09c6e8747bf48704136d9e3f92bfb93f5725 | 233,089,144,374,117,900,000,000,000,000,000,000,000 | 9 | drm/amdgpu: fix multiple memory leaks in acp_hw_init
In acp_hw_init there are some allocations that needs to be released in
case of failure:
1- adev->acp.acp_genpd should be released if any allocation attemp for
adev->acp.acp_cell, adev->acp.acp_res or i2s_pdata fails.
2- all of those allocations should be released if
mfd_add_hotplug_devices or pm_genpd_add_device fail.
3- Release is needed in case of time out values expire.
Reviewed-by: Christian König <[email protected]>
Signed-off-by: Navid Emamdoost <[email protected]>
Signed-off-by: Alex Deucher <[email protected]> |
xmlSAXParseDTD(xmlSAXHandlerPtr sax, const xmlChar *ExternalID,
const xmlChar *SystemID) {
xmlDtdPtr ret = NULL;
xmlParserCtxtPtr ctxt;
xmlParserInputPtr input = NULL;
xmlCharEncoding enc;
xmlChar* systemIdCanonic;
if ((ExternalID == NULL) && (SystemID == NULL)) return(NULL);
ctxt = xmlNewParserCtxt();
if (ctxt == NULL) {
return(NULL);
}
/*
* Set-up the SAX context
*/
if (sax != NULL) {
if (ctxt->sax != NULL)
xmlFree(ctxt->sax);
ctxt->sax = sax;
ctxt->userData = ctxt;
}
/*
* Canonicalise the system ID
*/
systemIdCanonic = xmlCanonicPath(SystemID);
if ((SystemID != NULL) && (systemIdCanonic == NULL)) {
xmlFreeParserCtxt(ctxt);
return(NULL);
}
/*
* Ask the Entity resolver to load the damn thing
*/
if ((ctxt->sax != NULL) && (ctxt->sax->resolveEntity != NULL))
input = ctxt->sax->resolveEntity(ctxt->userData, ExternalID,
systemIdCanonic);
if (input == NULL) {
if (sax != NULL) ctxt->sax = NULL;
xmlFreeParserCtxt(ctxt);
if (systemIdCanonic != NULL)
xmlFree(systemIdCanonic);
return(NULL);
}
/*
* plug some encoding conversion routines here.
*/
if (xmlPushInput(ctxt, input) < 0) {
if (sax != NULL) ctxt->sax = NULL;
xmlFreeParserCtxt(ctxt);
if (systemIdCanonic != NULL)
xmlFree(systemIdCanonic);
return(NULL);
}
if ((ctxt->input->end - ctxt->input->cur) >= 4) {
enc = xmlDetectCharEncoding(ctxt->input->cur, 4);
xmlSwitchEncoding(ctxt, enc);
}
if (input->filename == NULL)
input->filename = (char *) systemIdCanonic;
else
xmlFree(systemIdCanonic);
input->line = 1;
input->col = 1;
input->base = ctxt->input->cur;
input->cur = ctxt->input->cur;
input->free = NULL;
/*
* let's parse that entity knowing it's an external subset.
*/
ctxt->inSubset = 2;
ctxt->myDoc = xmlNewDoc(BAD_CAST "1.0");
if (ctxt->myDoc == NULL) {
xmlErrMemory(ctxt, "New Doc failed");
if (sax != NULL) ctxt->sax = NULL;
xmlFreeParserCtxt(ctxt);
return(NULL);
}
ctxt->myDoc->properties = XML_DOC_INTERNAL;
ctxt->myDoc->extSubset = xmlNewDtd(ctxt->myDoc, BAD_CAST "none",
ExternalID, SystemID);
xmlParseExternalSubset(ctxt, ExternalID, SystemID);
if (ctxt->myDoc != NULL) {
if (ctxt->wellFormed) {
ret = ctxt->myDoc->extSubset;
ctxt->myDoc->extSubset = NULL;
if (ret != NULL) {
xmlNodePtr tmp;
ret->doc = NULL;
tmp = ret->children;
while (tmp != NULL) {
tmp->doc = NULL;
tmp = tmp->next;
}
}
} else {
ret = NULL;
}
xmlFreeDoc(ctxt->myDoc);
ctxt->myDoc = NULL;
}
if (sax != NULL) ctxt->sax = NULL;
xmlFreeParserCtxt(ctxt);
return(ret);
} | 0 | [
"CWE-119"
] | libxml2 | 6a36fbe3b3e001a8a840b5c1fdd81cefc9947f0d | 101,965,409,257,857,640,000,000,000,000,000,000,000 | 115 | Fix potential out of bound access |
connect_to_server_process (CamelIMAPXServer *is,
const gchar *cmd,
GError **error)
{
GSubprocessLauncher *launcher;
GSubprocess *subprocess = NULL;
CamelNetworkSettings *network_settings;
CamelProvider *provider;
CamelSettings *settings;
CamelIMAPXStore *store;
CamelURL url;
gchar **argv = NULL;
gchar *buf;
gchar *cmd_copy;
gchar *full_cmd;
const gchar *password;
gchar *host;
gchar *user;
guint16 port;
memset (&url, 0, sizeof (CamelURL));
launcher = g_subprocess_launcher_new (
G_SUBPROCESS_FLAGS_STDIN_PIPE |
G_SUBPROCESS_FLAGS_STDOUT_PIPE |
G_SUBPROCESS_FLAGS_STDERR_SILENCE);
#ifdef G_OS_UNIX
g_subprocess_launcher_set_child_setup (
launcher, imapx_server_child_process_setup,
NULL, (GDestroyNotify) NULL);
#endif
store = camel_imapx_server_ref_store (is);
password = camel_service_get_password (CAMEL_SERVICE (store));
provider = camel_service_get_provider (CAMEL_SERVICE (store));
settings = camel_service_ref_settings (CAMEL_SERVICE (store));
network_settings = CAMEL_NETWORK_SETTINGS (settings);
host = camel_network_settings_dup_host (network_settings);
port = camel_network_settings_get_port (network_settings);
user = camel_network_settings_dup_user (network_settings);
/* Put full details in the environment, in case the connection
* program needs them */
camel_url_set_protocol (&url, provider->protocol);
camel_url_set_host (&url, host);
camel_url_set_port (&url, port);
camel_url_set_user (&url, user);
buf = camel_url_to_string (&url, 0);
g_subprocess_launcher_setenv (launcher, "URL", buf, TRUE);
g_subprocess_launcher_setenv (launcher, "URLHOST", host, TRUE);
if (port > 0) {
gchar *port_string;
port_string = g_strdup_printf ("%u", port);
g_subprocess_launcher_setenv (
launcher, "URLPORT", port_string, TRUE);
g_free (port_string);
}
if (user != NULL) {
g_subprocess_launcher_setenv (
launcher, "URLPORT", user, TRUE);
}
if (password != NULL) {
g_subprocess_launcher_setenv (
launcher, "URLPASSWD", password, TRUE);
}
g_free (buf);
g_object_unref (settings);
g_object_unref (store);
/* Now do %h, %u, etc. substitution in cmd */
buf = cmd_copy = g_strdup (cmd);
full_cmd = g_strdup ("");
for (;;) {
gchar *pc;
gchar *tmp;
const gchar *var;
gint len;
pc = strchr (buf, '%');
ignore:
if (!pc) {
tmp = g_strdup_printf ("%s%s", full_cmd, buf);
g_free (full_cmd);
full_cmd = tmp;
break;
}
len = pc - buf;
var = NULL;
switch (pc[1]) {
case 'h':
var = host;
break;
case 'u':
var = user;
break;
}
if (!var) {
/* If there wasn't a valid %-code, with an actual
* variable to insert, pretend we didn't see the % */
pc = strchr (pc + 1, '%');
goto ignore;
}
tmp = g_strdup_printf ("%s%.*s%s", full_cmd, len, buf, var);
g_free (full_cmd);
full_cmd = tmp;
buf = pc + 2;
}
g_free (cmd_copy);
g_free (host);
g_free (user);
if (g_shell_parse_argv (full_cmd, NULL, &argv, error)) {
subprocess = g_subprocess_launcher_spawnv (
launcher, (const gchar * const *) argv, error);
g_strfreev (argv);
}
g_free (full_cmd);
g_object_unref (launcher);
if (subprocess != NULL) {
GInputStream *input_stream;
GOutputStream *output_stream;
g_mutex_lock (&is->priv->stream_lock);
g_warn_if_fail (is->priv->subprocess == NULL);
is->priv->subprocess = g_object_ref (subprocess);
g_mutex_unlock (&is->priv->stream_lock);
input_stream = g_subprocess_get_stdout_pipe (subprocess);
output_stream = g_subprocess_get_stdin_pipe (subprocess);
imapx_server_set_streams (is, input_stream, output_stream);
g_object_unref (subprocess);
}
return TRUE;
} | 0 | [] | evolution-data-server | f26a6f672096790d0bbd76903db4c9a2e44f116b | 69,323,131,382,849,290,000,000,000,000,000,000,000 | 156 | [IMAPx] 'STARTTLS not supported' error ignored
When a user has setup the STARTTLS encryption method, but the server doesn't
support it, then an error should be shown to the user, instead of using
unsecure connection. There had been two bugs in the existing code which
prevented this error from being used and the failure properly reported.
This had been filled at:
https://bugzilla.redhat.com/show_bug.cgi?id=1334842 |
const char *get_ptr()
{
return m_ptr;
} | 0 | [
"CWE-703"
] | server | 39feab3cd31b5414aa9b428eaba915c251ac34a2 | 202,953,518,916,590,000,000,000,000,000,000,000,000 | 4 | MDEV-26412 Server crash in Item_field::fix_outer_field for INSERT SELECT
IF an INSERT/REPLACE SELECT statement contained an ON expression in the top
level select and this expression used a subquery with a column reference
that could not be resolved then an attempt to resolve this reference as
an outer reference caused a crash of the server. This happened because the
outer context field in the Name_resolution_context structure was not set
to NULL for such references. Rather it pointed to the first element in
the select_stack.
Note that starting from 10.4 we cannot use the SELECT_LEX::outer_select()
method when parsing a SELECT construct.
Approved by Oleksandr Byelkin <[email protected]> |
int CMS_RecipientInfo_ktri_cert_cmp(CMS_RecipientInfo *ri, X509 *cert)
{
if (ri->type != CMS_RECIPINFO_TRANS) {
CMSerr(CMS_F_CMS_RECIPIENTINFO_KTRI_CERT_CMP,
CMS_R_NOT_KEY_TRANSPORT);
return -2;
}
return cms_SignerIdentifier_cert_cmp(ri->d.ktri->rid, cert);
} | 0 | [
"CWE-311",
"CWE-327"
] | openssl | 08229ad838c50f644d7e928e2eef147b4308ad64 | 264,985,807,931,027,000,000,000,000,000,000,000,000 | 9 | Fix a padding oracle in PKCS7_dataDecode and CMS_decrypt_set1_pkey
An attack is simple, if the first CMS_recipientInfo is valid but the
second CMS_recipientInfo is chosen ciphertext. If the second
recipientInfo decodes to PKCS #1 v1.5 form plaintext, the correct
encryption key will be replaced by garbage, and the message cannot be
decoded, but if the RSA decryption fails, the correct encryption key is
used and the recipient will not notice the attack.
As a work around for this potential attack the length of the decrypted
key must be equal to the cipher default key length, in case the
certifiate is not given and all recipientInfo are tried out.
The old behaviour can be re-enabled in the CMS code by setting the
CMS_DEBUG_DECRYPT flag.
Reviewed-by: Matt Caswell <[email protected]>
(Merged from https://github.com/openssl/openssl/pull/9777)
(cherry picked from commit 5840ed0cd1e6487d247efbc1a04136a41d7b3a37) |
_set_source_rsvg_linear_gradient (RsvgDrawingCtx * ctx,
RsvgLinearGradient * linear,
guint32 current_color_rgb, guint8 opacity, RsvgBbox bbox)
{
RsvgCairoRender *render = RSVG_CAIRO_RENDER (ctx->render);
cairo_t *cr = render->cr;
cairo_pattern_t *pattern;
cairo_matrix_t matrix;
RsvgLinearGradient statlinear;
statlinear = *linear;
linear = &statlinear;
rsvg_linear_gradient_fix_fallback (ctx, linear);
if (linear->has_current_color)
current_color_rgb = linear->current_color;
if (linear->obj_bbox)
_rsvg_push_view_box (ctx, 1., 1.);
pattern = cairo_pattern_create_linear (_rsvg_css_normalize_length (&linear->x1, ctx, 'h'),
_rsvg_css_normalize_length (&linear->y1, ctx, 'v'),
_rsvg_css_normalize_length (&linear->x2, ctx, 'h'),
_rsvg_css_normalize_length (&linear->y2, ctx, 'v'));
if (linear->obj_bbox)
_rsvg_pop_view_box (ctx);
matrix = linear->affine;
if (linear->obj_bbox) {
cairo_matrix_t bboxmatrix;
cairo_matrix_init (&bboxmatrix, bbox.rect.width, 0, 0, bbox.rect.height,
bbox.rect.x, bbox.rect.y);
cairo_matrix_multiply (&matrix, &matrix, &bboxmatrix);
}
cairo_matrix_invert (&matrix);
cairo_pattern_set_matrix (pattern, &matrix);
cairo_pattern_set_extend (pattern, linear->spread);
_pattern_add_rsvg_color_stops (pattern, linear->super.children, current_color_rgb, opacity);
cairo_set_source (cr, pattern);
cairo_pattern_destroy (pattern);
} | 0 | [] | librsvg | a51919f7e1ca9c535390a746fbf6e28c8402dc61 | 110,436,950,179,307,580,000,000,000,000,000,000,000 | 42 | rsvg: Add rsvg_acquire_node()
This function does proper recursion checks when looking up resources
from URLs and thereby helps avoiding infinite loops when cyclic
references span multiple types of elements. |
static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
{
switch (arg) {
case KVM_CAP_USER_MEMORY:
case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
case KVM_CAP_INTERNAL_ERROR_DATA:
#ifdef CONFIG_HAVE_KVM_MSI
case KVM_CAP_SIGNAL_MSI:
#endif
#ifdef CONFIG_HAVE_KVM_IRQFD
case KVM_CAP_IRQFD:
case KVM_CAP_IRQFD_RESAMPLE:
#endif
case KVM_CAP_IOEVENTFD_ANY_LENGTH:
case KVM_CAP_CHECK_EXTENSION_VM:
case KVM_CAP_ENABLE_CAP_VM:
return 1;
#ifdef CONFIG_KVM_MMIO
case KVM_CAP_COALESCED_MMIO:
return KVM_COALESCED_MMIO_PAGE_OFFSET;
case KVM_CAP_COALESCED_PIO:
return 1;
#endif
#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2:
return KVM_DIRTY_LOG_MANUAL_CAPS;
#endif
#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
case KVM_CAP_IRQ_ROUTING:
return KVM_MAX_IRQ_ROUTES;
#endif
#if KVM_ADDRESS_SPACE_NUM > 1
case KVM_CAP_MULTI_ADDRESS_SPACE:
return KVM_ADDRESS_SPACE_NUM;
#endif
case KVM_CAP_NR_MEMSLOTS:
return KVM_USER_MEM_SLOTS;
default:
break;
}
return kvm_vm_ioctl_check_extension(kvm, arg);
} | 0 | [
"CWE-416"
] | linux | 0774a964ef561b7170d8d1b1bfe6f88002b6d219 | 300,347,577,846,432,700,000,000,000,000,000,000,000 | 43 | KVM: Fix out of range accesses to memslots
Reset the LRU slot if it becomes invalid when deleting a memslot to fix
an out-of-bounds/use-after-free access when searching through memslots.
Explicitly check for there being no used slots in search_memslots(), and
in the caller of s390's approximation variant.
Fixes: 36947254e5f9 ("KVM: Dynamically size memslot array based on number of used slots")
Reported-by: Qian Cai <[email protected]>
Cc: Peter Xu <[email protected]>
Signed-off-by: Sean Christopherson <[email protected]>
Message-Id: <[email protected]>
Acked-by: Christian Borntraeger <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
_outFieldStore(StringInfo str, const FieldStore *node)
{
WRITE_NODE_TYPE("FIELDSTORE");
WRITE_NODE_FIELD(arg);
WRITE_NODE_FIELD(newvals);
WRITE_NODE_FIELD(fieldnums);
WRITE_OID_FIELD(resulttype);
} | 0 | [
"CWE-362"
] | postgres | 5f173040e324f6c2eebb90d86cf1b0cdb5890f0a | 249,649,693,376,432,100,000,000,000,000,000,000,000 | 9 | Avoid repeated name lookups during table and index DDL.
If the name lookups come to different conclusions due to concurrent
activity, we might perform some parts of the DDL on a different table
than other parts. At least in the case of CREATE INDEX, this can be
used to cause the permissions checks to be performed against a
different table than the index creation, allowing for a privilege
escalation attack.
This changes the calling convention for DefineIndex, CreateTrigger,
transformIndexStmt, transformAlterTableStmt, CheckIndexCompatible
(in 9.2 and newer), and AlterTable (in 9.1 and older). In addition,
CheckRelationOwnership is removed in 9.2 and newer and the calling
convention is changed in older branches. A field has also been added
to the Constraint node (FkConstraint in 8.4). Third-party code calling
these functions or using the Constraint node will require updating.
Report by Andres Freund. Patch by Robert Haas and Andres Freund,
reviewed by Tom Lane.
Security: CVE-2014-0062 |
text_fixup_invalid_utf8 (const gchar* text, gssize len, gsize *len_out)
{
static GIConv utf8_fixup_converter = NULL;
if (utf8_fixup_converter == NULL)
{
utf8_fixup_converter = g_iconv_open ("UTF-8", "UTF-8");
}
return text_convert_invalid (text, len, utf8_fixup_converter, unicode_fallback_string, len_out);
} | 0 | [
"CWE-22"
] | hexchat | 15600f405f2d5bda6ccf0dd73957395716e0d4d3 | 168,668,268,692,881,420,000,000,000,000,000,000,000 | 10 | Sanitize network name for scrollback files
This prevents using invalid chars on Windows or creating directories |
static inline int get_amv(Mpeg4DecContext *ctx, int n)
{
MpegEncContext *s = &ctx->m;
int x, y, mb_v, sum, dx, dy, shift;
int len = 1 << (s->f_code + 4);
const int a = s->sprite_warping_accuracy;
if (s->workaround_bugs & FF_BUG_AMV)
len >>= s->quarter_sample;
if (s->real_sprite_warping_points == 1) {
if (ctx->divx_version == 500 && ctx->divx_build == 413)
sum = s->sprite_offset[0][n] / (1 << (a - s->quarter_sample));
else
sum = RSHIFT(s->sprite_offset[0][n] * (1 << s->quarter_sample), a);
} else {
dx = s->sprite_delta[n][0];
dy = s->sprite_delta[n][1];
shift = ctx->sprite_shift[0];
if (n)
dy -= 1 << (shift + a + 1);
else
dx -= 1 << (shift + a + 1);
mb_v = s->sprite_offset[0][n] + dx * s->mb_x * 16 + dy * s->mb_y * 16;
sum = 0;
for (y = 0; y < 16; y++) {
int v;
v = mb_v + dy * y;
// FIXME optimize
for (x = 0; x < 16; x++) {
sum += v >> shift;
v += dx;
}
}
sum = RSHIFT(sum, a + 8 - s->quarter_sample);
}
if (sum < -len)
sum = -len;
else if (sum >= len)
sum = len - 1;
return sum;
} | 0 | [
"CWE-476"
] | FFmpeg | 2aa9047486dbff12d9e040f917e5f799ed2fd78b | 79,206,692,571,573,210,000,000,000,000,000,000,000 | 46 | avcodec/mpeg4videodec: Check read profile before setting it
Fixes: null pointer dereference
Fixes: ffmpeg_crash_7.avi
Found-by: Thuan Pham, Marcel Böhme, Andrew Santosa and Alexandru Razvan Caciulescu with AFLSmart
Signed-off-by: Michael Niedermayer <[email protected]> |
void json_object_seed(size_t seed) {
uint32_t new_seed = (uint32_t)seed;
if (hashtable_seed == 0) {
if (__atomic_test_and_set(&seed_initialized, __ATOMIC_RELAXED) == 0) {
/* Do the seeding ourselves */
if (new_seed == 0)
new_seed = generate_seed();
__atomic_store_n(&hashtable_seed, new_seed, __ATOMIC_RELEASE);
} else {
/* Wait for another thread to do the seeding */
do {
#ifdef HAVE_SCHED_YIELD
sched_yield();
#endif
} while(__atomic_load_n(&hashtable_seed, __ATOMIC_ACQUIRE) == 0);
}
}
} | 0 | [
"CWE-310"
] | jansson | 42016a35c8907e477be73b0b5d06cc09af231ee4 | 222,471,687,701,434,730,000,000,000,000,000,000,000 | 20 | Oops, ATOMIC_ACQ_REL is not a correct memmodel for __atomic_store_n |
const char * util_acl_to_str(const sc_acl_entry_t *e)
{
static char line[80], buf[20];
unsigned int acl;
if (e == NULL)
return "N/A";
line[0] = 0;
while (e != NULL) {
acl = e->method;
switch (acl) {
case SC_AC_UNKNOWN:
return "N/A";
case SC_AC_NEVER:
return "NEVR";
case SC_AC_NONE:
return "NONE";
case SC_AC_CHV:
strcpy(buf, "CHV");
if (e->key_ref != SC_AC_KEY_REF_NONE)
sprintf(buf + 3, "%d", e->key_ref);
break;
case SC_AC_TERM:
strcpy(buf, "TERM");
break;
case SC_AC_PRO:
strcpy(buf, "PROT");
break;
case SC_AC_AUT:
strcpy(buf, "AUTH");
if (e->key_ref != SC_AC_KEY_REF_NONE)
sprintf(buf + 4, "%d", e->key_ref);
break;
case SC_AC_SEN:
strcpy(buf, "Sec.Env. ");
if (e->key_ref != SC_AC_KEY_REF_NONE)
sprintf(buf + 3, "#%d", e->key_ref);
break;
case SC_AC_SCB:
strcpy(buf, "Sec.ControlByte ");
if (e->key_ref != SC_AC_KEY_REF_NONE)
sprintf(buf + 3, "Ox%X", e->key_ref);
break;
case SC_AC_IDA:
strcpy(buf, "PKCS#15 AuthID ");
if (e->key_ref != SC_AC_KEY_REF_NONE)
sprintf(buf + 3, "#%d", e->key_ref);
break;
default:
strcpy(buf, "????");
break;
}
strncat(line, buf, sizeof line);
strncat(line, " ", sizeof line);
e = e->next;
}
line[(sizeof line)-1] = '\0'; /* make sure it's NUL terminated */
line[strlen(line)-1] = 0; /* get rid of trailing space */
return line;
} | 0 | [
"CWE-415",
"CWE-119"
] | OpenSC | 360e95d45ac4123255a4c796db96337f332160ad | 183,348,996,025,025,830,000,000,000,000,000,000,000 | 61 | fixed out of bounds writes
Thanks to Eric Sesterhenn from X41 D-SEC GmbH
for reporting the problems. |
QueueSetHandle_t xQueueCreateSet( const UBaseType_t uxEventQueueLength )
{
QueueSetHandle_t pxQueue;
pxQueue = xQueueGenericCreate( uxEventQueueLength, ( UBaseType_t ) sizeof( Queue_t * ), queueQUEUE_TYPE_SET );
return pxQueue;
}
| 0 | [
"CWE-200",
"CWE-190"
] | FreeRTOS-Kernel | 47338393f1f79558f6144213409f09f81d7c4837 | 199,240,148,745,815,560,000,000,000,000,000,000,000 | 8 | add assert for addition overflow on queue creation (#225) |
int get_ack(int fd, unsigned char *result_sha1)
{
static char line[1000];
int len = packet_read_line(fd, line, sizeof(line));
if (!len)
die("git fetch-pack: expected ACK/NAK, got EOF");
if (line[len-1] == '\n')
line[--len] = 0;
if (!strcmp(line, "NAK"))
return 0;
if (!prefixcmp(line, "ACK ")) {
if (!get_sha1_hex(line+4, result_sha1)) {
if (strstr(line+45, "continue"))
return 2;
return 1;
}
}
die("git fetch_pack: expected ACK/NAK, got '%s'", line);
} | 0 | [] | git | 73bb33a94ec67a53e7d805b12ad9264fa25f4f8d | 301,191,694,673,456,000,000,000,000,000,000,000,000 | 20 | daemon: Strictly parse the "extra arg" part of the command
Since 1.4.4.5 (49ba83fb67 "Add virtualization support to git-daemon")
git daemon enters an infinite loop and never terminates if a client
hides any extra arguments in the initial request line which is not
exactly "\0host=blah\0".
Since that change, a client must never insert additional extra
arguments, or attempt to use any argument other than "host=", as
any daemon will get stuck parsing the request line and will never
complete the request.
Since the client can't tell if the daemon is patched or not, it
is not possible to know if additional extra args might actually be
able to be safely requested.
If we ever need to extend the git daemon protocol to support a new
feature, we may have to do something like this to the exchange:
# If both support git:// v2
#
C: 000cgit://v2
S: 0010ok host user
C: 0018host git.kernel.org
C: 0027git-upload-pack /pub/linux-2.6.git
S: ...git-upload-pack header...
# If client supports git:// v2, server does not:
#
C: 000cgit://v2
S: <EOF>
C: 003bgit-upload-pack /pub/linux-2.6.git\0host=git.kernel.org\0
S: ...git-upload-pack header...
This requires the client to create two TCP connections to talk to
an older git daemon, however all daemons since the introduction of
daemon.c will safely reject the unknown "git://v2" command request,
so the client can quite easily determine the server supports an
older protocol.
Signed-off-by: Shawn O. Pearce <[email protected]>
Signed-off-by: Junio C Hamano <[email protected]> |
void co64_del(GF_Box *s)
{
GF_ChunkLargeOffsetBox *ptr;
ptr = (GF_ChunkLargeOffsetBox *) s;
if (ptr == NULL) return;
if (ptr->offsets) gf_free(ptr->offsets);
gf_free(ptr);
} | 0 | [
"CWE-400",
"CWE-401"
] | gpac | d2371b4b204f0a3c0af51ad4e9b491144dd1225c | 5,458,304,369,642,211,000,000,000,000,000,000,000 | 8 | prevent dref memleak on invalid input (#1183) |
void CMSEXPORT cmsStageFree(cmsStage* mpe)
{
if (mpe ->FreePtr)
mpe ->FreePtr(mpe);
_cmsFree(mpe ->ContextID, mpe);
} | 0 | [] | Little-CMS | b0d5ffd4ad91cf8683ee106f13742db3dc66599a | 78,341,597,295,107,240,000,000,000,000,000,000,000 | 7 | Memory Squeezing: LCMS2: CLUTElemDup
Check for allocation failures and tidy up if found. |
bool walk(Item_processor processor, bool walk_subquery, void *arg)
{
if (ref && *ref)
return (*ref)->walk(processor, walk_subquery, arg) ||
(this->*processor)(arg);
else
return FALSE;
} | 0 | [
"CWE-617"
] | server | 2e7891080667c59ac80f788eef4d59d447595772 | 330,453,402,430,927,500,000,000,000,000,000,000,000 | 8 | MDEV-25635 Assertion failure when pushing from HAVING into WHERE of view
This bug could manifest itself after pushing a where condition over a
mergeable derived table / view / CTE DT into a grouping view / derived
table / CTE V whose item list contained set functions with constant
arguments such as MIN(2), SUM(1) etc. In such cases the field references
used in the condition pushed into the view V that correspond set functions
are wrapped into Item_direct_view_ref wrappers. Due to a wrong implementation
of the virtual method const_item() for the class Item_direct_view_ref the
wrapped set functions with constant arguments could be erroneously taken
for constant items. This could lead to a wrong result set returned by the
main select query in 10.2. In 10.4 where a possibility of pushing condition
from HAVING into WHERE had been added this could cause a crash.
Approved by Sergey Petrunya <[email protected]> |
static uint dump_events_for_db(char *db)
{
char query_buff[QUERY_LENGTH];
char db_name_buff[NAME_LEN*2+3], name_buff[NAME_LEN*2+3];
char *event_name;
char delimiter[QUERY_LENGTH];
FILE *sql_file= md_result_file;
MYSQL_RES *event_res, *event_list_res;
MYSQL_ROW row, event_list_row;
char db_cl_name[MY_CS_NAME_SIZE];
int db_cl_altered= FALSE;
DBUG_ENTER("dump_events_for_db");
DBUG_PRINT("enter", ("db: '%s'", db));
mysql_real_escape_string(mysql, db_name_buff, db, (ulong)strlen(db));
/* nice comments */
print_comment(sql_file, 0,
"\n--\n-- Dumping events for database '%s'\n--\n", db);
/*
not using "mysql_query_with_error_report" because we may have not
enough privileges to lock mysql.events.
*/
if (lock_tables)
mysql_query(mysql, "LOCK TABLES mysql.event READ");
if (mysql_query_with_error_report(mysql, &event_list_res, "show events"))
DBUG_RETURN(0);
strcpy(delimiter, ";");
if (mysql_num_rows(event_list_res) > 0)
{
if (opt_xml)
fputs("\t<events>\n", sql_file);
else
{
fprintf(sql_file, "/*!50106 SET @save_time_zone= @@TIME_ZONE */ ;\n");
/* Get database collation. */
if (fetch_db_collation(db_name_buff, db_cl_name, sizeof (db_cl_name)))
DBUG_RETURN(1);
}
if (switch_character_set_results(mysql, "binary"))
DBUG_RETURN(1);
while ((event_list_row= mysql_fetch_row(event_list_res)) != NULL)
{
event_name= quote_name(event_list_row[1], name_buff, 0);
DBUG_PRINT("info", ("retrieving CREATE EVENT for %s", name_buff));
my_snprintf(query_buff, sizeof(query_buff), "SHOW CREATE EVENT %s",
event_name);
if (mysql_query_with_error_report(mysql, &event_res, query_buff))
DBUG_RETURN(1);
while ((row= mysql_fetch_row(event_res)) != NULL)
{
if (opt_xml)
{
print_xml_row(sql_file, "event", event_res, &row,
"Create Event");
continue;
}
/*
if the user has EXECUTE privilege he can see event names, but not the
event body!
*/
if (strlen(row[3]) != 0)
{
char *query_str;
if (opt_drop)
fprintf(sql_file, "/*!50106 DROP EVENT IF EXISTS %s */%s\n",
event_name, delimiter);
if (create_delimiter(row[3], delimiter, sizeof(delimiter)) == NULL)
{
fprintf(stderr, "%s: Warning: Can't create delimiter for event '%s'\n",
my_progname, event_name);
DBUG_RETURN(1);
}
fprintf(sql_file, "DELIMITER %s\n", delimiter);
if (mysql_num_fields(event_res) >= 7)
{
if (switch_db_collation(sql_file, db_name_buff, delimiter,
db_cl_name, row[6], &db_cl_altered))
{
DBUG_RETURN(1);
}
switch_cs_variables(sql_file, delimiter,
row[4], /* character_set_client */
row[4], /* character_set_results */
row[5]); /* collation_connection */
}
else
{
/*
mysqldump is being run against the server, that does not
provide character set information in SHOW CREATE
statements.
NOTE: the dump may be incorrect, since character set
information is required in order to restore event properly.
*/
fprintf(sql_file,
"--\n"
"-- WARNING: old server version. "
"The following dump may be incomplete.\n"
"--\n");
}
switch_sql_mode(sql_file, delimiter, row[1]);
switch_time_zone(sql_file, delimiter, row[2]);
query_str= cover_definer_clause(row[3], strlen(row[3]),
C_STRING_WITH_LEN("50117"),
C_STRING_WITH_LEN("50106"),
C_STRING_WITH_LEN(" EVENT"));
fprintf(sql_file,
"/*!50106 %s */ %s\n",
(const char *) (query_str != NULL ? query_str : row[3]),
(const char *) delimiter);
if(query_str)
{
my_free(query_str);
query_str= NULL;
}
restore_time_zone(sql_file, delimiter);
restore_sql_mode(sql_file, delimiter);
if (mysql_num_fields(event_res) >= 7)
{
restore_cs_variables(sql_file, delimiter);
if (db_cl_altered)
{
if (restore_db_collation(sql_file, db_name_buff, delimiter,
db_cl_name))
DBUG_RETURN(1);
}
}
}
} /* end of event printing */
mysql_free_result(event_res);
} /* end of list of events */
if (opt_xml)
{
fputs("\t</events>\n", sql_file);
check_io(sql_file);
}
else
{
fprintf(sql_file, "DELIMITER ;\n");
fprintf(sql_file, "/*!50106 SET TIME_ZONE= @save_time_zone */ ;\n");
}
if (switch_character_set_results(mysql, default_charset))
DBUG_RETURN(1);
}
mysql_free_result(event_list_res);
if (lock_tables)
(void) mysql_query_with_error_report(mysql, 0, "UNLOCK TABLES");
DBUG_RETURN(0);
} | 1 | [] | mysql-server | 6fa5e0814662d691be1a29bf88332348ec7c50c9 | 230,314,986,634,583,640,000,000,000,000,000,000,000 | 179 | Bug #25717383: MYSQLDUMP MAY EXECUTE ANY ARBITRARY QUERY
While writing comments if database object names has a new
line character, then next line is considered a command, rather
than a comment.
This patch fixes the way comments are constructed in mysqldump.
(cherry picked from commit 1099f9d17b1c697c2760f86556f5bae7d202b444) |
static inline Quantum GetPixelY(const Image *restrict image,
const Quantum *restrict pixel)
{
return(pixel[image->channel_map[YPixelChannel].offset]);
} | 0 | [
"CWE-119",
"CWE-787"
] | ImageMagick | 450bd716ed3b9186dd10f9e60f630a3d9eeea2a4 | 252,126,731,833,723,400,000,000,000,000,000,000,000 | 5 | |
int ssl3_send_next_proto(SSL *s)
{
unsigned int len, padding_len;
unsigned char *d;
if (s->state == SSL3_ST_CW_NEXT_PROTO_A)
{
len = s->next_proto_negotiated_len;
padding_len = 32 - ((len + 2) % 32);
d = (unsigned char *)s->init_buf->data;
d[4] = len;
memcpy(d + 5, s->next_proto_negotiated, len);
d[5 + len] = padding_len;
memset(d + 6 + len, 0, padding_len);
*(d++)=SSL3_MT_NEXT_PROTO;
l2n3(2 + len + padding_len, d);
s->state = SSL3_ST_CW_NEXT_PROTO_B;
s->init_num = 4 + 2 + len + padding_len;
s->init_off = 0;
}
return ssl3_do_write(s, SSL3_RT_HANDSHAKE);
} | 0 | [
"CWE-326",
"CWE-310"
] | openssl | bc8923b1ec9c467755cd86f7848c50ee8812e441 | 251,731,246,859,340,150,000,000,000,000,000,000,000 | 23 | Fix for CVE-2014-0224
Only accept change cipher spec when it is expected instead of at any
time. This prevents premature setting of session keys before the master
secret is determined which an attacker could use as a MITM attack.
Thanks to KIKUCHI Masashi (Lepidum Co. Ltd.) for reporting this issue
and providing the initial fix this patch is based on. |
void StreamListener::OnStreamAfterShutdown(ShutdownWrap* w, int status) {
CHECK_NOT_NULL(previous_listener_);
previous_listener_->OnStreamAfterShutdown(w, status);
} | 0 | [
"CWE-416"
] | node | 4f8772f9b731118628256189b73cd202149bbd97 | 256,683,240,211,235,570,000,000,000,000,000,000,000 | 4 | src: retain pointers to WriteWrap/ShutdownWrap
Avoids potential use-after-free when wrap req's are synchronously
destroyed.
CVE-ID: CVE-2020-8265
Fixes: https://github.com/nodejs-private/node-private/issues/227
Refs: https://hackerone.com/bugs?subject=nodejs&report_id=988103
PR-URL: https://github.com/nodejs-private/node-private/pull/23
Reviewed-By: Anna Henningsen <[email protected]>
Reviewed-By: Matteo Collina <[email protected]>
Reviewed-By: Rich Trott <[email protected]> |
re_compile_fastmap (bufp)
struct re_pattern_buffer *bufp;
{
re_dfa_t *dfa = bufp->buffer;
char *fastmap = bufp->fastmap;
memset (fastmap, '\0', sizeof (char) * SBC_MAX);
re_compile_fastmap_iter (bufp, dfa->init_state, fastmap);
if (dfa->init_state != dfa->init_state_word)
re_compile_fastmap_iter (bufp, dfa->init_state_word, fastmap);
if (dfa->init_state != dfa->init_state_nl)
re_compile_fastmap_iter (bufp, dfa->init_state_nl, fastmap);
if (dfa->init_state != dfa->init_state_begbuf)
re_compile_fastmap_iter (bufp, dfa->init_state_begbuf, fastmap);
bufp->fastmap_accurate = 1;
return 0;
} | 0 | [
"CWE-19"
] | gnulib | 5513b40999149090987a0341c018d05d3eea1272 | 183,881,159,499,837,950,000,000,000,000,000,000,000 | 17 | Diagnose ERE '()|\1'
Problem reported by Hanno Böck in: http://bugs.gnu.org/21513
* lib/regcomp.c (parse_reg_exp): While parsing alternatives, keep
track of the set of previously-completed subexpressions available
before the first alternative, and restore this set just before
parsing each subsequent alternative. This lets us diagnose the
invalid back-reference in the ERE '()|\1'. |
qf_mark_adjust(
win_T *wp,
linenr_T line1,
linenr_T line2,
long amount,
long amount_after)
{
int i;
qfline_T *qfp;
int idx;
qf_info_T *qi = &ql_info;
int found_one = FALSE;
int buf_has_flag = wp == NULL ? BUF_HAS_QF_ENTRY : BUF_HAS_LL_ENTRY;
if (!(curbuf->b_has_qf_entry & buf_has_flag))
return;
if (wp != NULL)
{
if (wp->w_llist == NULL)
return;
qi = wp->w_llist;
}
for (idx = 0; idx < qi->qf_listcount; ++idx)
{
qf_list_T *qfl = qf_get_list(qi, idx);
if (!qf_list_empty(qfl))
FOR_ALL_QFL_ITEMS(qfl, qfp, i)
if (qfp->qf_fnum == curbuf->b_fnum)
{
found_one = TRUE;
if (qfp->qf_lnum >= line1 && qfp->qf_lnum <= line2)
{
if (amount == MAXLNUM)
qfp->qf_cleared = TRUE;
else
qfp->qf_lnum += amount;
}
else if (amount_after && qfp->qf_lnum > line2)
qfp->qf_lnum += amount_after;
}
}
if (!found_one)
curbuf->b_has_qf_entry &= ~buf_has_flag;
} | 0 | [
"CWE-416"
] | vim | 4f1b083be43f351bc107541e7b0c9655a5d2c0bb | 132,733,310,155,558,960,000,000,000,000,000,000,000 | 47 | patch 9.0.0322: crash when no errors and 'quickfixtextfunc' is set
Problem: Crash when no errors and 'quickfixtextfunc' is set.
Solution: Do not handle errors if there aren't any. |
void Magick::Image::reduceNoise(void)
{
reduceNoise(3);
} | 0 | [
"CWE-416"
] | ImageMagick | 8c35502217c1879cb8257c617007282eee3fe1cc | 232,286,646,830,838,500,000,000,000,000,000,000,000 | 4 | Added missing return to avoid use after free. |
PJ_DEF(void) pj_scan_get_until( pj_scanner *scanner,
const pj_cis_t *spec, pj_str_t *out)
{
register char *s = scanner->curptr;
if (s >= scanner->end) {
pj_scan_syntax_err(scanner);
return;
}
while (PJ_SCAN_CHECK_EOF(s) && !pj_cis_match(spec, *s)) {
++s;
}
pj_strset3(out, scanner->curptr, s);
scanner->curptr = s;
if (PJ_SCAN_IS_PROBABLY_SPACE(*s) && scanner->skip_ws) {
pj_scan_skip_whitespace(scanner);
}
} | 0 | [
"CWE-125"
] | pjproject | 077b465c33f0aec05a49cd2ca456f9a1b112e896 | 156,869,147,566,820,350,000,000,000,000,000,000,000 | 22 | Merge pull request from GHSA-7fw8-54cv-r7pm |
and_cclass(CClassNode* dest, CClassNode* cc, OnigEncoding enc)
{
int r, not1, not2;
BBuf *buf1, *buf2, *pbuf;
BitSetRef bsr1, bsr2;
BitSet bs1, bs2;
not1 = IS_NCCLASS_NOT(dest);
bsr1 = dest->bs;
buf1 = dest->mbuf;
not2 = IS_NCCLASS_NOT(cc);
bsr2 = cc->bs;
buf2 = cc->mbuf;
if (not1 != 0) {
bitset_invert_to(bsr1, bs1);
bsr1 = bs1;
}
if (not2 != 0) {
bitset_invert_to(bsr2, bs2);
bsr2 = bs2;
}
bitset_and(bsr1, bsr2);
if (bsr1 != dest->bs) {
bitset_copy(dest->bs, bsr1);
}
if (not1 != 0) {
bitset_invert(dest->bs);
}
if (! ONIGENC_IS_SINGLEBYTE(enc)) {
if (not1 != 0 && not2 != 0) {
r = or_code_range_buf(enc, buf1, 0, buf2, 0, &pbuf);
}
else {
r = and_code_range_buf(buf1, not1, buf2, not2, &pbuf);
if (r == 0 && not1 != 0) {
BBuf *tbuf;
r = not_code_range_buf(enc, pbuf, &tbuf);
if (r != 0) {
bbuf_free(pbuf);
return r;
}
bbuf_free(pbuf);
pbuf = tbuf;
}
}
if (r != 0) return r;
dest->mbuf = pbuf;
bbuf_free(buf1);
return r;
}
return 0;
} | 0 | [
"CWE-400",
"CWE-399",
"CWE-674"
] | oniguruma | 4097828d7cc87589864fecf452f2cd46c5f37180 | 100,026,486,373,091,090,000,000,000,000,000,000,000 | 55 | fix #147: Stack Exhaustion Problem caused by some parsing functions in regcomp.c making recursive calls to themselves. |
append_numopt(char *s, const char *opt, unsigned int num)
{
char buf[32];
snprintf(buf, sizeof(buf), "%u", num);
return append_opt(s, opt, buf);
} | 0 | [
"CWE-200"
] | util-linux | 0377ef91270d06592a0d4dd009c29e7b1ff9c9b8 | 175,511,931,940,484,930,000,000,000,000,000,000,000 | 7 | mount: (deprecated) drop --guess-fstype
The option is undocumented and unnecessary.
Signed-off-by: Karel Zak <[email protected]> |
bool test_r_str_str_xy(void) {
char *canvas = "Hello World\n"
"This World is World\n"
"World is Hello\n";
int x = 0, y = 0;
const char *next = r_str_str_xy (canvas, "World", NULL, &x, &y);
mu_assert_eq (x, 6, "x of first occurrence");
mu_assert_eq (y, 0, "y of first occurrence");
next = r_str_str_xy (canvas, "World", next, &x, &y);
mu_assert_eq (x, 5, "x of second occurrence");
mu_assert_eq (y, 1, "y of second occurrence");
next = r_str_str_xy (canvas, "World", next, &x, &y);
mu_assert_eq (x, 14, "x of third occurrence");
mu_assert_eq (y, 1, "y of third occurrence");
next = r_str_str_xy (canvas, "World", next, &x, &y);
mu_assert_eq (x, 0, "x of fourth occurrence");
mu_assert_eq (y, 2, "y of fourth occurrence");
next = r_str_str_xy (canvas, "World", next, &x, &y);
mu_assert_null (next, "no more occurences");
mu_end;
} | 0 | [
"CWE-78"
] | radare2 | 04edfa82c1f3fa2bc3621ccdad2f93bdbf00e4f9 | 44,713,157,025,423,490,000,000,000,000,000,000,000 | 21 | Fix command injection on PDB download (#16966)
* Fix r_sys_mkdirp with absolute path on Windows
* Fix build with --with-openssl
* Use RBuffer in r_socket_http_answer()
* r_socket_http_answer: Fix read for big responses
* Implement r_str_escape_sh()
* Cleanup r_socket_connect() on Windows
* Fix socket being created without a protocol
* Fix socket connect with SSL ##socket
* Use select() in r_socket_ready()
* Fix read failing if received only protocol answer
* Fix double-free
* r_socket_http_get: Fail if req. SSL with no support
* Follow redirects in r_socket_http_answer()
* Fix r_socket_http_get result length with R2_CURL=1
* Also follow redirects
* Avoid using curl for downloading PDBs
* Use r_socket_http_get() on UNIXs
* Use WinINet API on Windows for r_socket_http_get()
* Fix command injection
* Fix r_sys_cmd_str_full output for binary data
* Validate GUID on PDB download
* Pass depth to socket_http_get_recursive()
* Remove 'r_' and '__' from static function names
* Fix is_valid_guid
* Fix for comments |
skip_ipv6_digits(char *str)
{
while (isxdigit(*str) || *str == ':' || *str == '.') {
str++;
}
return str;
} | 0 | [
"CWE-400"
] | ovs | 79349cbab0b2a755140eedb91833ad2760520a83 | 47,091,385,733,910,520,000,000,000,000,000,000,000 | 7 | flow: Support extra padding length.
Although not required, padding can be optionally added until
the packet length is MTU bytes. A packet with extra padding
currently fails sanity checks.
Vulnerability: CVE-2020-35498
Fixes: fa8d9001a624 ("miniflow_extract: Properly handle small IP packets.")
Reported-by: Joakim Hindersson <[email protected]>
Acked-by: Ilya Maximets <[email protected]>
Signed-off-by: Flavio Leitner <[email protected]>
Signed-off-by: Ilya Maximets <[email protected]> |
int __kvm_set_memory_region(struct kvm *kvm,
const struct kvm_userspace_memory_region *mem)
{
struct kvm_memory_slot old, new;
struct kvm_memory_slot *tmp;
enum kvm_mr_change change;
int as_id, id;
int r;
r = check_memory_region_flags(mem);
if (r)
return r;
as_id = mem->slot >> 16;
id = (u16)mem->slot;
/* General sanity checks */
if (mem->memory_size & (PAGE_SIZE - 1))
return -EINVAL;
if (mem->guest_phys_addr & (PAGE_SIZE - 1))
return -EINVAL;
/* We can read the guest memory with __xxx_user() later on. */
if ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
(mem->userspace_addr != untagged_addr(mem->userspace_addr)) ||
!access_ok((void __user *)(unsigned long)mem->userspace_addr,
mem->memory_size))
return -EINVAL;
if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM)
return -EINVAL;
if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
return -EINVAL;
/*
* Make a full copy of the old memslot, the pointer will become stale
* when the memslots are re-sorted by update_memslots(), and the old
* memslot needs to be referenced after calling update_memslots(), e.g.
* to free its resources and for arch specific behavior.
*/
tmp = id_to_memslot(__kvm_memslots(kvm, as_id), id);
if (tmp) {
old = *tmp;
tmp = NULL;
} else {
memset(&old, 0, sizeof(old));
old.id = id;
}
if (!mem->memory_size)
return kvm_delete_memslot(kvm, mem, &old, as_id);
new.as_id = as_id;
new.id = id;
new.base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
new.npages = mem->memory_size >> PAGE_SHIFT;
new.flags = mem->flags;
new.userspace_addr = mem->userspace_addr;
if (new.npages > KVM_MEM_MAX_NR_PAGES)
return -EINVAL;
if (!old.npages) {
change = KVM_MR_CREATE;
new.dirty_bitmap = NULL;
memset(&new.arch, 0, sizeof(new.arch));
} else { /* Modify an existing slot. */
if ((new.userspace_addr != old.userspace_addr) ||
(new.npages != old.npages) ||
((new.flags ^ old.flags) & KVM_MEM_READONLY))
return -EINVAL;
if (new.base_gfn != old.base_gfn)
change = KVM_MR_MOVE;
else if (new.flags != old.flags)
change = KVM_MR_FLAGS_ONLY;
else /* Nothing to change. */
return 0;
/* Copy dirty_bitmap and arch from the current memslot. */
new.dirty_bitmap = old.dirty_bitmap;
memcpy(&new.arch, &old.arch, sizeof(new.arch));
}
if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
/* Check for overlaps */
kvm_for_each_memslot(tmp, __kvm_memslots(kvm, as_id)) {
if (tmp->id == id)
continue;
if (!((new.base_gfn + new.npages <= tmp->base_gfn) ||
(new.base_gfn >= tmp->base_gfn + tmp->npages)))
return -EEXIST;
}
}
/* Allocate/free page dirty bitmap as needed */
if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
new.dirty_bitmap = NULL;
else if (!new.dirty_bitmap && !kvm->dirty_ring_size) {
r = kvm_alloc_dirty_bitmap(&new);
if (r)
return r;
if (kvm_dirty_log_manual_protect_and_init_set(kvm))
bitmap_set(new.dirty_bitmap, 0, new.npages);
}
r = kvm_set_memslot(kvm, mem, &old, &new, as_id, change);
if (r)
goto out_bitmap;
if (old.dirty_bitmap && !new.dirty_bitmap)
kvm_destroy_dirty_bitmap(&old);
return 0;
out_bitmap:
if (new.dirty_bitmap && !old.dirty_bitmap)
kvm_destroy_dirty_bitmap(&new);
return r;
} | 0 | [
"CWE-119"
] | linux | f8be156be163a052a067306417cd0ff679068c97 | 250,594,492,873,924,680,000,000,000,000,000,000,000 | 118 | KVM: do not allow mapping valid but non-reference-counted pages
It's possible to create a region which maps valid but non-refcounted
pages (e.g., tail pages of non-compound higher order allocations). These
host pages can then be returned by gfn_to_page, gfn_to_pfn, etc., family
of APIs, which take a reference to the page, which takes it from 0 to 1.
When the reference is dropped, this will free the page incorrectly.
Fix this by only taking a reference on valid pages if it was non-zero,
which indicates it is participating in normal refcounting (and can be
released with put_page).
This addresses CVE-2021-22543.
Signed-off-by: Nicholas Piggin <[email protected]>
Tested-by: Paolo Bonzini <[email protected]>
Cc: [email protected]
Signed-off-by: Paolo Bonzini <[email protected]> |
dp_packet_get_rss_hash(const struct dp_packet *p)
{
return p->rss_hash;
} | 0 | [
"CWE-400"
] | ovs | 3512fb512c76a1f08eba4005aa2eb69160d0840e | 325,213,012,721,883,100,000,000,000,000,000,000,000 | 4 | flow: Support extra padding length.
Although not required, padding can be optionally added until
the packet length is MTU bytes. A packet with extra padding
currently fails sanity checks.
Vulnerability: CVE-2020-35498
Fixes: fa8d9001a624 ("miniflow_extract: Properly handle small IP packets.")
Reported-by: Joakim Hindersson <[email protected]>
Acked-by: Ilya Maximets <[email protected]>
Signed-off-by: Flavio Leitner <[email protected]>
Signed-off-by: Ilya Maximets <[email protected]> |
lyp_get_yang_data_template_name(const struct lyd_node *node)
{
struct lys_node *snode;
snode = lys_parent(node->schema);
while (snode && snode->nodetype & (LYS_USES | LYS_CASE | LYS_CHOICE)) {
snode = lys_parent(snode);
}
if (snode && snode->nodetype == LYS_EXT && strcmp(((struct lys_ext_instance_complex *)snode)->def->name, "yang-data") == 0) {
return ((struct lys_ext_instance_complex *)snode)->arg_value;
} else {
return NULL;
}
} | 0 | [
"CWE-787"
] | libyang | f6d684ade99dd37b21babaa8a856f64faa1e2e0d | 297,331,909,841,149,970,000,000,000,000,000,000,000 | 15 | parser BUGFIX long identity name buffer overflow
STRING_OVERFLOW (CWE-120) |
//! Display 1D graph in an interactive window \overloading.
const CImg<T>& display_graph(const char *const title=0,
const unsigned int plot_type=1, const unsigned int vertex_type=1,
const char *const labelx=0, const double xmin=0, const double xmax=0,
const char *const labely=0, const double ymin=0, const double ymax=0,
const bool exit_on_anykey=false) const {
CImgDisplay disp;
return _display_graph(disp,title,plot_type,vertex_type,labelx,xmin,xmax,labely,ymin,ymax,exit_on_anykey); | 0 | [
"CWE-119",
"CWE-787"
] | CImg | ac8003393569aba51048c9d67e1491559877b1d1 | 324,260,540,851,776,500,000,000,000,000,000,000,000 | 8 | . |
static NTSTATUS smb_set_file_unix_link(connection_struct *conn,
struct smb_request *req,
const char *pdata,
int total_data,
const struct smb_filename *smb_fname)
{
char *link_target = NULL;
const char *newname = smb_fname->base_name;
NTSTATUS status = NT_STATUS_OK;
TALLOC_CTX *ctx = talloc_tos();
/* Set a symbolic link. */
/* Don't allow this if follow links is false. */
if (total_data == 0) {
return NT_STATUS_INVALID_PARAMETER;
}
if (!lp_symlinks(SNUM(conn))) {
return NT_STATUS_ACCESS_DENIED;
}
srvstr_pull_talloc(ctx, pdata, req->flags2, &link_target, pdata,
total_data, STR_TERMINATE);
if (!link_target) {
return NT_STATUS_INVALID_PARAMETER;
}
/* !widelinks forces the target path to be within the share. */
/* This means we can interpret the target as a pathname. */
if (!lp_widelinks(SNUM(conn))) {
char *rel_name = NULL;
char *last_dirp = NULL;
if (*link_target == '/') {
/* No absolute paths allowed. */
return NT_STATUS_ACCESS_DENIED;
}
rel_name = talloc_strdup(ctx,newname);
if (!rel_name) {
return NT_STATUS_NO_MEMORY;
}
last_dirp = strrchr_m(rel_name, '/');
if (last_dirp) {
last_dirp[1] = '\0';
} else {
rel_name = talloc_strdup(ctx,"./");
if (!rel_name) {
return NT_STATUS_NO_MEMORY;
}
}
rel_name = talloc_asprintf_append(rel_name,
"%s",
link_target);
if (!rel_name) {
return NT_STATUS_NO_MEMORY;
}
status = check_name(conn, rel_name);
if (!NT_STATUS_IS_OK(status)) {
return status;
}
}
DEBUG(10,("smb_set_file_unix_link: SMB_SET_FILE_UNIX_LINK doing symlink %s -> %s\n",
newname, link_target ));
if (SMB_VFS_SYMLINK(conn,link_target,newname) != 0) {
return map_nt_error_from_unix(errno);
}
return NT_STATUS_OK;
} | 1 | [
"CWE-22"
] | samba | bd269443e311d96ef495a9db47d1b95eb83bb8f4 | 50,610,553,973,009,060,000,000,000,000,000,000,000 | 74 | Fix bug 7104 - "wide links" and "unix extensions" are incompatible.
Change parameter "wide links" to default to "no".
Ensure "wide links = no" if "unix extensions = yes" on a share.
Fix man pages to refect this.
Remove "within share" checks for a UNIX symlink set - even if
widelinks = no. The server will not follow that link anyway.
Correct DEBUG message in check_reduced_name() to add missing "\n"
so it's really clear when a path is being denied as it's outside
the enclosing share path.
Jeremy. |
void operator()(input_buffer* data) { total_ += data->get_remaining(); } | 0 | [
"CWE-254"
] | mysql-server | e7061f7e5a96c66cb2e0bf46bec7f6ff35801a69 | 144,328,360,860,441,800,000,000,000,000,000,000,000 | 1 | Bug #22738607: YASSL FUNCTION X509_NAME_GET_INDEX_BY_NID IS NOT WORKING AS EXPECTED. |
void handle_usb_rx(const void *msg, size_t len)
{
if (msg_tiny_flag) {
uint8_t buf[64];
memcpy(buf, msg, sizeof(buf));
uint16_t msgId = buf[4] | ((uint16_t)buf[3]) << 8;
uint32_t msgSize = buf[8] |
((uint32_t)buf[7]) << 8 |
((uint32_t)buf[6]) << 16 |
((uint32_t)buf[5]) << 24;
if (msgSize > 64 - 9) {
(*msg_failure)(FailureType_Failure_UnexpectedMessage, "Malformed tiny packet");
return;
}
// Determine callback handler and message map type.
const MessagesMap_t *entry = message_map_entry(NORMAL_MSG, msgId, IN_MSG);
if (!entry) {
(*msg_failure)(FailureType_Failure_UnexpectedMessage, "Unknown message");
return;
}
tiny_dispatch(entry, buf + 9, msgSize);
} else {
usb_rx_helper(msg, len, NORMAL_MSG);
}
} | 1 | [
"CWE-787"
] | keepkey-firmware | b222c66cdd7c3203d917c80ba615082d309d80c3 | 84,678,919,006,948,860,000,000,000,000,000,000,000 | 29 | board: factor out tiny_dispatch
And add stronger checks on what tiny_msg's are allowed to be decoded. |
guint32 menu_cache_app_get_show_flags( MenuCacheApp* app )
{
return app->show_in_flags;
} | 0 | [
"CWE-20"
] | menu-cache | 56f66684592abf257c4004e6e1fff041c64a12ce | 89,720,652,023,697,440,000,000,000,000,000,000,000 | 4 | Fix potential access violation, use runtime user dir instead of tmp dir.
Note: it limits libmenu-cache compatibility to menu-cached >= 0.7.0. |
mono_image_fill_export_table_from_module (MonoDomain *domain, MonoReflectionModule *module,
guint32 module_index, MonoDynamicImage *assembly)
{
MonoImage *image = module->image;
MonoTableInfo *t;
guint32 i;
t = &image->tables [MONO_TABLE_TYPEDEF];
for (i = 0; i < t->rows; ++i) {
MonoClass *klass = mono_class_get (image, mono_metadata_make_token (MONO_TABLE_TYPEDEF, i + 1));
if (klass->flags & TYPE_ATTRIBUTE_PUBLIC)
mono_image_fill_export_table_from_class (domain, klass, module_index, 0, assembly);
}
} | 0 | [
"CWE-20"
] | mono | 4905ef1130feb26c3150b28b97e4a96752e0d399 | 165,524,815,172,040,380,000,000,000,000,000,000,000 | 16 | Handle invalid instantiation of generic methods.
* verify.c: Add new function to internal verifier API to check
method instantiations.
* reflection.c (mono_reflection_bind_generic_method_parameters):
Check the instantiation before returning it.
Fixes #655847 |
void Monitor::apply_quorum_to_compatset_features()
{
CompatSet new_features(features);
if (quorum_con_features & CEPH_FEATURE_OSD_ERASURE_CODES) {
new_features.incompat.insert(CEPH_MON_FEATURE_INCOMPAT_OSD_ERASURE_CODES);
}
if (quorum_con_features & CEPH_FEATURE_OSDMAP_ENC) {
new_features.incompat.insert(CEPH_MON_FEATURE_INCOMPAT_OSDMAP_ENC);
}
if (quorum_con_features & CEPH_FEATURE_ERASURE_CODE_PLUGINS_V2) {
new_features.incompat.insert(CEPH_MON_FEATURE_INCOMPAT_ERASURE_CODE_PLUGINS_V2);
}
if (quorum_con_features & CEPH_FEATURE_ERASURE_CODE_PLUGINS_V3) {
new_features.incompat.insert(CEPH_MON_FEATURE_INCOMPAT_ERASURE_CODE_PLUGINS_V3);
}
dout(5) << __func__ << dendl;
_apply_compatset_features(new_features);
} | 0 | [
"CWE-287",
"CWE-284"
] | ceph | 5ead97120e07054d80623dada90a5cc764c28468 | 154,484,222,361,706,780,000,000,000,000,000,000,000 | 18 | auth/cephx: add authorizer challenge
Allow the accepting side of a connection to reject an initial authorizer
with a random challenge. The connecting side then has to respond with an
updated authorizer proving they are able to decrypt the service's challenge
and that the new authorizer was produced for this specific connection
instance.
The accepting side requires this challenge and response unconditionally
if the client side advertises they have the feature bit. Servers wishing
to require this improved level of authentication simply have to require
the appropriate feature.
Signed-off-by: Sage Weil <[email protected]>
(cherry picked from commit f80b848d3f830eb6dba50123e04385173fa4540b)
# Conflicts:
# src/auth/Auth.h
# src/auth/cephx/CephxProtocol.cc
# src/auth/cephx/CephxProtocol.h
# src/auth/none/AuthNoneProtocol.h
# src/msg/Dispatcher.h
# src/msg/async/AsyncConnection.cc
- const_iterator
- ::decode vs decode
- AsyncConnection ctor arg noise
- get_random_bytes(), not cct->random() |
static void charstring_end(void)
{
byte *bp;
sprintf(line, "%d ", (int) (charstring_bp - charstring_buf));
eexec_string(line);
sprintf(line, "%s ", cs_start);
eexec_string(line);
for (bp = charstring_buf; bp < charstring_bp; bp++)
eexec_byte(*bp);
} | 0 | [
"CWE-119",
"CWE-787"
] | t1utils | 6b9d1aafcb61a3663c883663eb19ccdbfcde8d33 | 294,487,965,846,999,700,000,000,000,000,000,000,000 | 11 | Security fixes.
- Don't overflow the small cs_start buffer (reported by Niels
Thykier via the debian tracker (Jakub Wilk), found with a
fuzzer ("American fuzzy lop")).
- Cast arguments to <ctype.h> functions to unsigned char. |
void tmx_pretran_link_safe(int slotid)
{
if(_tmx_proc_ptran==NULL)
return;
if(_tmx_ptran_table[slotid].plist==NULL) {
_tmx_ptran_table[slotid].plist = _tmx_proc_ptran;
_tmx_proc_ptran->linked = 1;
return;
}
_tmx_proc_ptran->next = _tmx_ptran_table[slotid].plist;
_tmx_ptran_table[slotid].plist->prev = _tmx_proc_ptran;
_tmx_ptran_table[slotid].plist = _tmx_proc_ptran;
_tmx_proc_ptran->linked = 1;
return;
} | 0 | [
"CWE-119",
"CWE-79",
"CWE-787"
] | kamailio | e1d8008a09d9390ebaf698abe8909e10dfec4097 | 271,774,639,952,276,430,000,000,000,000,000,000,000 | 16 | tmx: allocate space to store ending 0 for branch value
- reported by Alfred Farrugia and Sandro Gauci |
static int dlpar_parse_id_type(char **cmd, struct pseries_hp_errorlog *hp_elog)
{
char *arg;
u32 count, index;
arg = strsep(cmd, " ");
if (!arg)
return -EINVAL;
if (sysfs_streq(arg, "indexed-count")) {
hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_IC;
arg = strsep(cmd, " ");
if (!arg) {
pr_err("No DRC count specified.\n");
return -EINVAL;
}
if (kstrtou32(arg, 0, &count)) {
pr_err("Invalid DRC count specified.\n");
return -EINVAL;
}
arg = strsep(cmd, " ");
if (!arg) {
pr_err("No DRC Index specified.\n");
return -EINVAL;
}
if (kstrtou32(arg, 0, &index)) {
pr_err("Invalid DRC Index specified.\n");
return -EINVAL;
}
hp_elog->_drc_u.ic.count = cpu_to_be32(count);
hp_elog->_drc_u.ic.index = cpu_to_be32(index);
} else if (sysfs_streq(arg, "index")) {
hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_INDEX;
arg = strsep(cmd, " ");
if (!arg) {
pr_err("No DRC Index specified.\n");
return -EINVAL;
}
if (kstrtou32(arg, 0, &index)) {
pr_err("Invalid DRC Index specified.\n");
return -EINVAL;
}
hp_elog->_drc_u.drc_index = cpu_to_be32(index);
} else if (sysfs_streq(arg, "count")) {
hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_COUNT;
arg = strsep(cmd, " ");
if (!arg) {
pr_err("No DRC count specified.\n");
return -EINVAL;
}
if (kstrtou32(arg, 0, &count)) {
pr_err("Invalid DRC count specified.\n");
return -EINVAL;
}
hp_elog->_drc_u.drc_count = cpu_to_be32(count);
} else {
pr_err("Invalid id_type specified.\n");
return -EINVAL;
}
return 0;
} | 0 | [
"CWE-476"
] | linux | efa9ace68e487ddd29c2b4d6dd23242158f1f607 | 319,664,379,625,194,240,000,000,000,000,000,000,000 | 70 | powerpc/pseries/dlpar: Fix a missing check in dlpar_parse_cc_property()
In dlpar_parse_cc_property(), 'prop->name' is allocated by kstrdup().
kstrdup() may return NULL, so it should be checked and handle error.
And prop should be freed if 'prop->name' is NULL.
Signed-off-by: Gen Zhang <[email protected]>
Signed-off-by: Michael Ellerman <[email protected]> |
static void opj_j2k_write_coc_in_memory(opj_j2k_t *p_j2k,
OPJ_UINT32 p_comp_no,
OPJ_BYTE * p_data,
OPJ_UINT32 * p_data_written,
opj_event_mgr_t * p_manager
)
{
opj_cp_t *l_cp = 00;
opj_tcp_t *l_tcp = 00;
OPJ_UINT32 l_coc_size, l_remaining_size;
OPJ_BYTE * l_current_data = 00;
opj_image_t *l_image = 00;
OPJ_UINT32 l_comp_room;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
l_cp = &(p_j2k->m_cp);
l_tcp = &l_cp->tcps[p_j2k->m_current_tile_number];
l_image = p_j2k->m_private_image;
l_comp_room = (l_image->numcomps <= 256) ? 1 : 2;
l_coc_size = 5 + l_comp_room + opj_j2k_get_SPCod_SPCoc_size(p_j2k,
p_j2k->m_current_tile_number, p_comp_no);
l_remaining_size = l_coc_size;
l_current_data = p_data;
opj_write_bytes(l_current_data, J2K_MS_COC,
2); /* COC */
l_current_data += 2;
opj_write_bytes(l_current_data, l_coc_size - 2,
2); /* L_COC */
l_current_data += 2;
opj_write_bytes(l_current_data, p_comp_no, l_comp_room); /* Ccoc */
l_current_data += l_comp_room;
opj_write_bytes(l_current_data, l_tcp->tccps[p_comp_no].csty,
1); /* Scoc */
++l_current_data;
l_remaining_size -= (5 + l_comp_room);
opj_j2k_write_SPCod_SPCoc(p_j2k, p_j2k->m_current_tile_number, 0,
l_current_data, &l_remaining_size, p_manager);
* p_data_written = l_coc_size;
} | 0 | [
"CWE-416",
"CWE-787"
] | openjpeg | 4241ae6fbbf1de9658764a80944dc8108f2b4154 | 193,581,908,205,945,500,000,000,000,000,000,000,000 | 49 | Fix assertion in debug mode / heap-based buffer overflow in opj_write_bytes_LE for Cinema profiles with numresolutions = 1 (#985) |
/*
 * Release every buffer head referenced along an extent-tree path and
 * clear the pointers so the path can be safely reused or freed.
 * A NULL path is tolerated and ignored.
 */
void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
	int i, depth;

	if (!path)
		return;
	depth = path->p_depth;
	for (i = 0; i <= depth; i++) {
		brelse(path[i].p_bh);
		path[i].p_bh = NULL;
	}
} | 0 | [
"CWE-703"
] | linux | ce9f24cccdc019229b70a5c15e2b09ad9c0ab5d1 | 78,830,027,666,588,520,000,000,000,000,000,000,000 | 12 | ext4: check journal inode extents more carefully
Currently, system zones just track ranges of block, that are "important"
fs metadata (bitmaps, group descriptors, journal blocks, etc.). This
however complicates how extent tree (or indirect blocks) can be checked
for inodes that actually track such metadata - currently the journal
inode but arguably we should be treating quota files or resize inode
similarly. We cannot run __ext4_ext_check() on such metadata inodes when
loading their extents as that would immediately trigger the validity
checks and so we just hack around that and special-case the journal
inode. This however leads to a situation that a journal inode which has
extent tree of depth at least one can have invalid extent tree that gets
unnoticed until ext4_cache_extents() crashes.
To overcome this limitation, track inode number each system zone belongs
to (0 is used for zones not belonging to any inode). We can then verify
inode number matches the expected one when verifying extent tree and
thus avoid the false errors. With this there's no need to to
special-case journal inode during extent tree checking anymore so remove
it.
Fixes: 0a944e8a6c66 ("ext4: don't perform block validity checks on the journal inode")
Reported-by: Wolfgang Frisch <[email protected]>
Reviewed-by: Lukas Czerner <[email protected]>
Signed-off-by: Jan Kara <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Theodore Ts'o <[email protected]> |
xstrpisotime(const char *s, char **endptr)
{
/* strptime()-like parser restricted to ISO 8601 Zulu strings of the
 * exact form YYYY-MM-DDTHH:MM:SSZ; returns (time_t)-1 on any failure.
 * On return, *endptr (if non-NULL) points past the last consumed char. */
	struct tm tm;
	time_t res = (time_t)-1;

	/* start from a zeroed-out tm */
	memset(&tm, 0, sizeof(tm));

	/* as a courtesy to our callers, and since this is a non-standard
	 * routine, skip leading blanks */
	for (; *s == ' ' || *s == '\t'; s++) {
		continue;
	}

	/* year, followed by '-' */
	tm.tm_year = strtoi_lim(s, &s, 1583, 4095);
	if (tm.tm_year < 0 || *s++ != '-') {
		goto out;
	}
	/* month, followed by '-' */
	tm.tm_mon = strtoi_lim(s, &s, 1, 12);
	if (tm.tm_mon < 0 || *s++ != '-') {
		goto out;
	}
	/* day of month, followed by 'T' */
	tm.tm_mday = strtoi_lim(s, &s, 1, 31);
	if (tm.tm_mday < 0 || *s++ != 'T') {
		goto out;
	}
	/* hour, followed by ':' */
	tm.tm_hour = strtoi_lim(s, &s, 0, 23);
	if (tm.tm_hour < 0 || *s++ != ':') {
		goto out;
	}
	/* minute, followed by ':' */
	tm.tm_min = strtoi_lim(s, &s, 0, 59);
	if (tm.tm_min < 0 || *s++ != ':') {
		goto out;
	}
	/* second (60 permitted, covering the leap-second case), then 'Z' */
	tm.tm_sec = strtoi_lim(s, &s, 0, 60);
	if (tm.tm_sec < 0 || *s++ != 'Z') {
		goto out;
	}

	/* massage TM to fulfill some of POSIX' constraints */
	tm.tm_year -= 1900;
	tm.tm_mon--;

	/* convert our custom tm struct to a unix stamp using UTC */
	res = time_from_tm(&tm);

out:
	if (endptr != NULL) {
		*endptr = deconst(s);
	}
	return res;
} | 0 | [
"CWE-415",
"CWE-787"
] | libarchive | 9c84b7426660c09c18cc349f6d70b5f8168b5680 | 312,316,112,934,855,400,000,000,000,000,000,000,000 | 52 | warc: consume data once read
The warc decoder only used read ahead, it wouldn't actually consume
data that had previously been printed. This means that if you specify
an invalid content length, it will just reprint the same data over
and over and over again until it hits the desired length.
This means that a WARC resource with e.g.
Content-Length: 666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666665
but only a few hundred bytes of data, causes a quasi-infinite loop.
Consume data in subsequent calls to _warc_read.
Found with an AFL + afl-rb + qsym setup. |
// Fetch the description document for the given user.
// Pure delegation to the external state (the user-catalog backend);
// the result is written into *result on success.
Status AuthorizationManager::getUserDescription(OperationContext* txn,
                                                const UserName& userName,
                                                BSONObj* result) {
    return _externalState->getUserDescription(txn, userName, result);
} | 0 | [
"CWE-613"
] | mongo | 64d8e9e1b12d16b54d6a592bae8110226c491b4e | 291,526,900,102,777,780,000,000,000,000,000,000,000 | 5 | SERVER-38984 Validate unique User ID on UserCache hit
(cherry picked from commit e55d6e2292e5dbe2f97153251d8193d1cc89f5d7) |
/*
 * Look up the per-ref sparse-cache metadata for @ref in the remote's
 * summary.  Returns TRUE and fills @out_metadata on success; otherwise
 * returns FALSE with @error set (ref not found, summary unavailable,
 * or unsupported summary version).
 */
flatpak_remote_state_lookup_sparse_cache (FlatpakRemoteState *self,
                                          const char         *ref,
                                          VarMetadataRef     *out_metadata,
                                          GError            **error)
{
  VarSummaryRef summary;
  VarMetadataRef meta;
  VarVariantRef sparse_cache_v;
  guint32 summary_version;
  GVariant *summary_v;
  if (!flatpak_remote_state_ensure_summary (self, error))
    return FALSE;
  summary_v = get_summary_for_ref (self, ref);
  if (summary_v == NULL)
    return flatpak_fail_error (error, FLATPAK_ERROR_REF_NOT_FOUND,
                               _("No entry for %s in remote summary flatpak sparse cache "), ref);
  summary = var_summary_from_gvariant (summary_v);
  meta = var_summary_get_metadata (summary);
  summary_version = GUINT32_FROM_LE (var_metadata_lookup_uint32 (meta, "xa.summary-version", 0));
  if (summary_version == 0)
    {
      /* Legacy (version 0) summaries keep sparse data in xa.sparse-cache. */
      if (var_metadata_lookup (meta, "xa.sparse-cache", NULL, &sparse_cache_v))
        {
          VarSparseCacheRef sparse_cache = var_sparse_cache_from_variant (sparse_cache_v);
          if (var_sparse_cache_lookup (sparse_cache, ref, NULL, out_metadata))
            return TRUE;
        }
    }
  else if (summary_version == 1)
    {
      /* Version 1 summaries store the metadata directly in the ref map. */
      VarRefMapRef ref_map = var_summary_get_ref_map (summary);
      VarRefInfoRef info;
      if (flatpak_var_ref_map_lookup_ref (ref_map, ref, &info))
        {
          *out_metadata = var_ref_info_get_metadata (info);
          return TRUE;
        }
    }
  else
    {
      flatpak_fail_error (error, FLATPAK_ERROR_INVALID_DATA, _("Unsupported summary version %d for remote %s"),
                          summary_version, self->remote_name);
      return FALSE;
    }
  /* Summary parsed fine but the ref was not present. */
  return flatpak_fail_error (error, FLATPAK_ERROR_REF_NOT_FOUND,
                             _("No entry for %s in remote summary flatpak sparse cache "), ref);
} | 0 | [
"CWE-74"
] | flatpak | fb473cad801c6b61706353256cab32330557374a | 142,759,479,324,691,050,000,000,000,000,000,000,000 | 54 | dir: Pass environment via bwrap --setenv when running apply_extra
This means we can systematically pass the environment variables
through bwrap(1), even if it is setuid and thus is filtering out
security-sensitive environment variables. bwrap ends up being
run with an empty environment instead.
As with the previous commit, this regressed while fixing CVE-2021-21261.
Fixes: 6d1773d2 "run: Convert all environment variables into bwrap arguments"
Signed-off-by: Simon McVittie <[email protected]> |
/*
 * Save the task's live floating-point and/or Altivec state when the
 * corresponding MSR bit indicates the unit is in use by the thread.
 */
static void save_vsx(struct task_struct *tsk)
{
	if (tsk->thread.regs->msr & MSR_FP)
		save_fpu(tsk);
	if (tsk->thread.regs->msr & MSR_VEC)
		save_altivec(tsk);
} | 0 | [] | linux | 5d176f751ee3c6eededd984ad409bff201f436a7 | 1,201,862,858,879,878,500,000,000,000,000,000,000 | 7 | powerpc: tm: Enable transactional memory (TM) lazily for userspace
Currently the MSR TM bit is always set if the hardware is TM capable.
This adds extra overhead as it means the TM SPRS (TFHAR, TEXASR and
TFAIR) must be swapped for each process regardless of if they use TM.
For processes that don't use TM the TM MSR bit can be turned off
allowing the kernel to avoid the expensive swap of the TM registers.
A TM unavailable exception will occur if a thread does use TM and the
kernel will enable MSR_TM and leave it so for some time afterwards.
Signed-off-by: Cyril Bur <[email protected]>
Signed-off-by: Michael Ellerman <[email protected]> |
/*
 * Drop ref_nr references on the adjacency between dev and adj_dev on the
 * given adjacency list.  When the reference count drops to zero (or
 * below), the sysfs links are removed and the entry is unlinked and freed
 * via RCU.
 */
static void __netdev_adjacent_dev_remove(struct net_device *dev,
					 struct net_device *adj_dev,
					 u16 ref_nr,
					 struct list_head *dev_list)
{
	struct netdev_adjacent *adj;
	pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n",
		 dev->name, adj_dev->name, ref_nr);
	adj = __netdev_find_adj(adj_dev, dev_list);
	if (!adj) {
		/* Removing a non-existent adjacency indicates a caller bug. */
		pr_err("Adjacency does not exist for device %s from %s\n",
		       dev->name, adj_dev->name);
		WARN_ON(1);
		return;
	}
	if (adj->ref_nr > ref_nr) {
		/* Still referenced elsewhere: just drop the requested refs. */
		pr_debug("adjacency: %s to %s ref_nr - %d = %d\n",
			 dev->name, adj_dev->name, ref_nr,
			 adj->ref_nr - ref_nr);
		adj->ref_nr -= ref_nr;
		return;
	}
	/* Last reference gone: tear down sysfs links and free the entry. */
	if (adj->master)
		sysfs_remove_link(&(dev->dev.kobj), "master");
	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
		netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
	list_del_rcu(&adj->list);
	pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n",
		 adj_dev->name, dev->name, adj_dev->name);
	dev_put(adj_dev);
kfree_rcu(adj, rcu); | 0 | [
"CWE-476"
] | linux | 0ad646c81b2182f7fa67ec0c8c825e0ee165696d | 224,901,797,072,329,620,000,000,000,000,000,000,000 | 39 | tun: call dev_get_valid_name() before register_netdevice()
register_netdevice() could fail early when we have an invalid
dev name, in which case ->ndo_uninit() is not called. For tun
device, this is a problem because a timer etc. are already
initialized and it expects ->ndo_uninit() to clean them up.
We could move these initializations into a ->ndo_init() so
that register_netdevice() knows better, however this is still
complicated due to the logic in tun_detach().
Therefore, I choose to just call dev_get_valid_name() before
register_netdevice(), which is quicker and much easier to audit.
And for this specific case, it is already enough.
Fixes: 96442e42429e ("tuntap: choose the txq based on rxq")
Reported-by: Dmitry Alexeev <[email protected]>
Cc: Jason Wang <[email protected]>
Cc: "Michael S. Tsirkin" <[email protected]>
Signed-off-by: Cong Wang <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
/*
 * Prefix-match uri against alias_fakename using the same rules as the
 * Apache mod_alias module: literal characters must match exactly, while
 * a run of '/' in the alias matches any non-empty run of '/' in the URI.
 *
 * Returns the number of URI characters consumed (which may exceed the
 * alias length when the URI doubles slashes), or 0 when the alias does
 * not match.
 */
static long wsgi_alias_matches(const char *uri, const char *alias_fakename)
{
    const char *a = alias_fakename;
    const char *u = uri;

    while (*a != '\0') {
        if (*a == '/') {
            /* The URI must supply at least one '/' here as well. */
            if (*u != '/')
                return 0;
            /* Collapse consecutive slashes on both sides. */
            while (*++a == '/')
                ;
            while (*++u == '/')
                ;
        }
        else {
            /* Anything else compares literally. */
            if (*u++ != *a++)
                return 0;
        }
    }

    /* The last alias component must have matched completely: either the
     * alias ended in '/', or the URI ends or continues with '/'. */
    if (a[-1] != '/' && *u != '\0' && *u != '/')
        return 0;

    return u - uri;
}
"CWE-254"
] | mod_wsgi | 545354a80b9cc20d8b6916ca30542eab36c3b8bd | 108,854,656,849,225,170,000,000,000,000,000,000,000 | 40 | When there is any sort of error in setting up daemon process group, kill the process rather than risk running in an unexpected state. |
/*
 * Allocate and initialize the disk_events structure for a disk whose
 * driver supports media-event polling.  Allocation failure is non-fatal:
 * the disk simply operates without event support.
 */
static void disk_alloc_events(struct gendisk *disk)
{
	struct disk_events *ev;
	/* Nothing to do for drivers that cannot report media events. */
	if (!disk->fops->check_events)
		return;
	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev) {
		pr_warn("%s: failed to initialize events\n", disk->disk_name);
		return;
	}
	INIT_LIST_HEAD(&ev->node);
	ev->disk = disk;
	spin_lock_init(&ev->lock);
	mutex_init(&ev->block_mutex);
	ev->block = 1;		/* start with event checking blocked */
	ev->poll_msecs = -1;	/* -1: presumably "use global default" -- TODO confirm */
	INIT_DELAYED_WORK(&ev->dwork, disk_events_workfn);
	disk->ev = ev;
} | 0 | [
"CWE-416"
] | linux-stable | 77da160530dd1dc94f6ae15a981f24e5f0021e84 | 277,017,168,485,834,300,000,000,000,000,000,000,000 | 23 | block: fix use-after-free in seq file
I got a KASAN report of use-after-free:
==================================================================
BUG: KASAN: use-after-free in klist_iter_exit+0x61/0x70 at addr ffff8800b6581508
Read of size 8 by task trinity-c1/315
=============================================================================
BUG kmalloc-32 (Not tainted): kasan: bad access detected
-----------------------------------------------------------------------------
Disabling lock debugging due to kernel taint
INFO: Allocated in disk_seqf_start+0x66/0x110 age=144 cpu=1 pid=315
___slab_alloc+0x4f1/0x520
__slab_alloc.isra.58+0x56/0x80
kmem_cache_alloc_trace+0x260/0x2a0
disk_seqf_start+0x66/0x110
traverse+0x176/0x860
seq_read+0x7e3/0x11a0
proc_reg_read+0xbc/0x180
do_loop_readv_writev+0x134/0x210
do_readv_writev+0x565/0x660
vfs_readv+0x67/0xa0
do_preadv+0x126/0x170
SyS_preadv+0xc/0x10
do_syscall_64+0x1a1/0x460
return_from_SYSCALL_64+0x0/0x6a
INFO: Freed in disk_seqf_stop+0x42/0x50 age=160 cpu=1 pid=315
__slab_free+0x17a/0x2c0
kfree+0x20a/0x220
disk_seqf_stop+0x42/0x50
traverse+0x3b5/0x860
seq_read+0x7e3/0x11a0
proc_reg_read+0xbc/0x180
do_loop_readv_writev+0x134/0x210
do_readv_writev+0x565/0x660
vfs_readv+0x67/0xa0
do_preadv+0x126/0x170
SyS_preadv+0xc/0x10
do_syscall_64+0x1a1/0x460
return_from_SYSCALL_64+0x0/0x6a
CPU: 1 PID: 315 Comm: trinity-c1 Tainted: G B 4.7.0+ #62
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Ubuntu-1.8.2-1ubuntu1 04/01/2014
ffffea0002d96000 ffff880119b9f918 ffffffff81d6ce81 ffff88011a804480
ffff8800b6581500 ffff880119b9f948 ffffffff8146c7bd ffff88011a804480
ffffea0002d96000 ffff8800b6581500 fffffffffffffff4 ffff880119b9f970
Call Trace:
[<ffffffff81d6ce81>] dump_stack+0x65/0x84
[<ffffffff8146c7bd>] print_trailer+0x10d/0x1a0
[<ffffffff814704ff>] object_err+0x2f/0x40
[<ffffffff814754d1>] kasan_report_error+0x221/0x520
[<ffffffff8147590e>] __asan_report_load8_noabort+0x3e/0x40
[<ffffffff83888161>] klist_iter_exit+0x61/0x70
[<ffffffff82404389>] class_dev_iter_exit+0x9/0x10
[<ffffffff81d2e8ea>] disk_seqf_stop+0x3a/0x50
[<ffffffff8151f812>] seq_read+0x4b2/0x11a0
[<ffffffff815f8fdc>] proc_reg_read+0xbc/0x180
[<ffffffff814b24e4>] do_loop_readv_writev+0x134/0x210
[<ffffffff814b4c45>] do_readv_writev+0x565/0x660
[<ffffffff814b8a17>] vfs_readv+0x67/0xa0
[<ffffffff814b8de6>] do_preadv+0x126/0x170
[<ffffffff814b92ec>] SyS_preadv+0xc/0x10
This problem can occur in the following situation:
open()
- pread()
- .seq_start()
- iter = kmalloc() // succeeds
- seqf->private = iter
- .seq_stop()
- kfree(seqf->private)
- pread()
- .seq_start()
- iter = kmalloc() // fails
- .seq_stop()
- class_dev_iter_exit(seqf->private) // boom! old pointer
As the comment in disk_seqf_stop() says, stop is called even if start
failed, so we need to reinitialise the private pointer to NULL when seq
iteration stops.
An alternative would be to set the private pointer to NULL when the
kmalloc() in disk_seqf_start() fails.
Cc: [email protected]
Signed-off-by: Vegard Nossum <[email protected]>
Acked-by: Tejun Heo <[email protected]>
Signed-off-by: Jens Axboe <[email protected]> |
// Set the page (canvas) geometry, recording it both in the image options
// and on the live image structure.  modifyImage() ensures we own a
// private copy before mutating.
void Magick::Image::page(const Magick::Geometry &pageSize_)
{
  modifyImage();
  options()->page(pageSize_);
  image()->page=pageSize_;
} | 0 | [
"CWE-416"
] | ImageMagick | 8c35502217c1879cb8257c617007282eee3fe1cc | 23,599,945,420,553,326,000,000,000,000,000,000,000 | 6 | Added missing return to avoid use after free. |
/*
 * Return 1 when the journal handle has at least `needed` buffer credits.
 * An invalid handle means journalling is disabled, in which case credits
 * are irrelevant and we always report "enough".
 */
static inline int ext4_handle_has_enough_credits(handle_t *handle, int needed)
{
	return !(ext4_handle_valid(handle) && handle->h_buffer_credits < needed);
} | 0 | [
"CWE-703"
] | linux | 744692dc059845b2a3022119871846e74d4f6e11 | 234,153,898,656,423,370,000,000,000,000,000,000,000 | 6 | ext4: use ext4_get_block_write in buffer write
Allocate uninitialized extent before ext4 buffer write and
convert the extent to initialized after io completes.
The purpose is to make sure an extent can only be marked
initialized after it has been written with new data so
we can safely drop the i_mutex lock in ext4 DIO read without
exposing stale data. This helps to improve multi-thread DIO
read performance on high-speed disks.
Skip the nobh and data=journal mount cases to make things simple for now.
Signed-off-by: Jiaying Zhang <[email protected]>
Signed-off-by: "Theodore Ts'o" <[email protected]> |
// Broadcast the OnUserMsg hook to all loaded modules.  MODHALTCHK is a
// macro defined elsewhere; presumably it iterates the modules and stops
// (returning true) once one halts further processing -- confirm in the
// macro definition.
bool CModules::OnUserMsg(CString& sTarget, CString& sMessage) {
	MODHALTCHK(OnUserMsg(sTarget, sMessage));
} | 0 | [
"CWE-20",
"CWE-264"
] | znc | 8de9e376ce531fe7f3c8b0aa4876d15b479b7311 | 52,042,245,441,397,870,000,000,000,000,000,000,000 | 3 | Fix remote code execution and privilege escalation vulnerability.
To trigger this, need to have a user already.
Thanks for Jeriko One <[email protected]> for finding and reporting this.
CVE-2019-12816 |
_cdf_tole2(uint16_t sv)
{
	/* Swap the two bytes of a 16-bit value, converting between host
	 * and little-endian representations. */
	uint16_t rv;
	uint8_t *src = RCAST(uint8_t *, RCAST(void *, &sv));
	uint8_t *dst = RCAST(uint8_t *, RCAST(void *, &rv));

	dst[0] = src[1];
	dst[1] = src[0];
	return rv;
} | 0 | [
"CWE-787"
] | file | 46a8443f76cec4b41ec736eca396984c74664f84 | 213,332,044,962,741,900,000,000,000,000,000,000,000 | 9 | Limit the number of elements in a vector (found by oss-fuzz) |
/*
 * Test whether `line` carries the given FILTER value.
 * Returns 1 if present, 0 if absent, -1 if the filter name is not
 * defined in the header at all.  A filter of "." is treated as "PASS".
 */
int bcf_has_filter(const bcf_hdr_t *hdr, bcf1_t *line, char *filter)
{
    if ( filter[0]=='.' && !filter[1] ) filter = "PASS";
    int id = bcf_hdr_id2int(hdr, BCF_DT_ID, filter);
    if ( !bcf_hdr_idinfo_exists(hdr,BCF_HL_FLT,id) ) return -1;  // not defined in the header
    if ( !(line->unpacked & BCF_UN_FLT) ) bcf_unpack(line, BCF_UN_FLT);
    /* An empty filter list is implicitly PASS. */
    if ( id==0 && !line->d.n_flt) return 1; // PASS
    int i;
    for (i=0; i<line->d.n_flt; i++)
        if ( line->d.flt[i]==id ) return 1;
    return 0;
} | 0 | [
"CWE-787"
] | htslib | dcd4b7304941a8832fba2d0fc4c1e716e7a4e72c | 193,820,774,462,553,230,000,000,000,000,000,000,000 | 14 | Fix check for VCF record size
The check for excessive record size in vcf_parse_format() only
looked at individual fields. It was therefore possible to
exceed the limit and overflow fmt_aux_t::offset by having
multiple fields with a combined size that went over INT_MAX.
Fix by including the amount of memory used so far in the check.
Credit to OSS-Fuzz
Fixes oss-fuzz 24097 |
/*
 * Decode a lossy-compressed (JPEG-tiled) DNG raw image into `image`.
 *
 * If an opcode list is present at meta_offset, a per-channel tone curve
 * of degree <= 8 is read from it (opcode 8); otherwise a standard sRGB
 * gamma curve is used.  Each JPEG tile is then decompressed and mapped
 * through the curve into the output image.
 */
void CLASS lossy_dng_load_raw()
{
#ifdef LIBRAW_LIBRARY_BUILD
  if (!image)
    throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
  struct jpeg_decompress_struct cinfo;
  struct jpeg_error_mgr jerr;
  JSAMPARRAY buf;
  JSAMPLE(*pixel)[3];
  unsigned sorder = order, ntags, opcode, deg, i, j, c;
  unsigned save = data_offset - 4, trow = 0, tcol = 0, row, col;
  ushort cur[3][256];
  double coeff[9], tot;

  if (meta_offset)
  {
    /* Parse the DNG opcode list; opcode 8 carries a polynomial tone
     * curve per channel (coefficients in big-endian order). */
    fseek(ifp, meta_offset, SEEK_SET);
    order = 0x4d4d;
    ntags = get4();
    while (ntags--)
    {
      opcode = get4();
      get4();
      get4();
      if (opcode != 8)
      {
        /* Skip opcodes we don't interpret. */
        fseek(ifp, get4(), SEEK_CUR);
        continue;
      }
      fseek(ifp, 20, SEEK_CUR);
      if ((c = get4()) > 2)
        break;
      fseek(ifp, 12, SEEK_CUR);
      if ((deg = get4()) > 8)
        break;
      for (i = 0; i <= deg && i < 9; i++)
        coeff[i] = getreal(12);
      /* Evaluate the polynomial into a 256-entry lookup table. */
      for (i = 0; i < 256; i++)
      {
        for (tot = j = 0; j <= deg; j++)
          tot += coeff[j] * pow(i / 255.0, (int)j);
        cur[c][i] = tot * 0xffff;
      }
    }
    order = sorder;
  }
  else
  {
    /* No opcode list: fall back to the standard sRGB transfer curve. */
    gamma_curve(1 / 2.4, 12.92, 1, 255);
    FORC3 memcpy(cur[c], curve, sizeof cur[0]);
  }
  cinfo.err = jpeg_std_error(&jerr);
  jpeg_create_decompress(&cinfo);
  while (trow < raw_height)
  {
    /* Each entry in the tile offset table is 4 bytes. */
    fseek(ifp, save += 4, SEEK_SET);
    if (tile_length < INT_MAX)
      fseek(ifp, get4(), SEEK_SET);
#ifdef LIBRAW_LIBRARY_BUILD
    if (libraw_internal_data.internal_data.input->jpeg_src(&cinfo) == -1)
    {
      jpeg_destroy_decompress(&cinfo);
      throw LIBRAW_EXCEPTION_DECODE_JPEG;
    }
#else
    jpeg_stdio_src(&cinfo, ifp);
#endif
    jpeg_read_header(&cinfo, TRUE);
    jpeg_start_decompress(&cinfo);
    buf = (*cinfo.mem->alloc_sarray)((j_common_ptr)&cinfo, JPOOL_IMAGE, cinfo.output_width * 3, 1);
#ifdef LIBRAW_LIBRARY_BUILD
    try
    {
#endif
      /* Copy each decoded scanline into the output, clipping to the
       * image bounds and applying the per-channel tone curve. */
      while (cinfo.output_scanline < cinfo.output_height && (row = trow + cinfo.output_scanline) < height)
      {
#ifdef LIBRAW_LIBRARY_BUILD
        checkCancel();
#endif
        jpeg_read_scanlines(&cinfo, buf, 1);
        pixel = (JSAMPLE(*)[3])buf[0];
        for (col = 0; col < cinfo.output_width && tcol + col < width; col++)
        {
          FORC3 image[row * width + tcol + col][c] = cur[c][pixel[col][c]];
        }
      }
#ifdef LIBRAW_LIBRARY_BUILD
    }
    catch (...)
    {
      /* Release libjpeg state before propagating cancellation/errors. */
      jpeg_destroy_decompress(&cinfo);
      throw;
    }
#endif
    jpeg_abort_decompress(&cinfo);
    /* Advance to the next tile, wrapping to the next tile row. */
    if ((tcol += tile_width) >= raw_width)
      trow += tile_length + (tcol = 0);
  }
  jpeg_destroy_decompress(&cinfo);
  maximum = 0xffff;
} | 0 | [
"CWE-190"
] | LibRaw | 4554e24ce24beaef5d0ef48372801cfd91039076 | 21,554,022,973,510,725,000,000,000,000,000,000,000 | 102 | parse_qt: possible integer overflow |
/*
 * Return the value of the first environment entry in the session whose
 * name equals var, or NULL when no such entry exists (the scan also
 * stops at the first entry whose name cannot be resolved).
 */
const char *am_cache_env_fetch_first(am_cache_entry_t *t,
                                     const char *var)
{
    int idx;

    for (idx = 0; idx < t->size; idx++) {
        const char *name = am_cache_entry_get_string(t, &t->env[idx].varname);

        if (name == NULL)
            return NULL;
        if (strcmp(name, var) == 0)
            return am_cache_entry_get_string(t, &t->env[idx].value);
    }

    return NULL;
} | 0 | [
"CWE-79"
] | mod_auth_mellon | 7af21c53da7bb1de024274ee6da30bc22316a079 | 156,306,037,451,717,880,000,000,000,000,000,000,000 | 16 | Fix Cross-Site Session Transfer vulnerability
mod_auth_mellon did not verify that the site the session was created
for was the same site as the site the user accessed. This allows an
attacker with access to one web site on a server to use the same
session to get access to a different site running on the same server.
This patch fixes this vulnerability by storing the cookie parameters
used when creating the session in the session, and verifying those
parameters when the session is loaded.
Thanks to François Kooman for reporting this vulnerability.
This vulnerability has been assigned CVE-2017-6807. |
/*
 * Copy the general-purpose registers from pt_regs into a 32-bit signal
 * frame, truncating each 64-bit value to 32 bits.
 * Returns 0 on success or -EFAULT on a failed userspace write.
 */
static inline int save_general_regs(struct pt_regs *regs,
		struct mcontext __user *frame)
{
	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
	int i;
	WARN_ON(!FULL_REGS(regs));
	for (i = 0; i <= PT_RESULT; i ++) {
		/* If the regs are not fully saved, skip the non-volatile
		 * GPR slots 14-31 (their contents would be stale). */
		if (i == 14 && !FULL_REGS(regs))
			i = 32;
		if (__put_user((unsigned int)gregs[i], &frame->mc_gregs[i]))
			return -EFAULT;
	}
	return 0;
} | 0 | [
"CWE-20",
"CWE-284",
"CWE-369"
] | linux | d2b9d2a5ad5ef04ff978c9923d19730cb05efd55 | 210,907,316,244,657,600,000,000,000,000,000,000,000 | 16 | powerpc/tm: Block signal return setting invalid MSR state
Currently we allow both the MSR T and S bits to be set by userspace on
a signal return. Unfortunately this is a reserved configuration and
will cause a TM Bad Thing exception if attempted (via rfid).
This patch checks for this case in both the 32 and 64 bit signals
code. If both T and S are set, we mark the context as invalid.
Found using a syscall fuzzer.
Fixes: 2b0a576d15e0 ("powerpc: Add new transactional memory state to the signal context")
Cc: [email protected] # v3.9+
Signed-off-by: Michael Neuling <[email protected]>
Signed-off-by: Michael Ellerman <[email protected]> |
/* Print the help text for the live scene encoder: the option table
 * followed by the interactive runtime key bindings. */
void PrintLiveUsage()
{
	u32 idx;

	gf_sys_format_help(helpout, help_flags, "# Live Scene Encoder Options\n"
		"The options shall be specified as `opt_name=opt_val.\n"
		"Options:\n"
		"\n"
	);

	/* Dump every registered live-encoder argument. */
	for (idx = 0; m4b_liveenc_args[idx].name; idx++) {
		GF_GPACArg *arg = (GF_GPACArg *) &m4b_liveenc_args[idx];
		gf_sys_print_arg(helpout, help_flags, arg, "mp4box-extract");
	}

	gf_sys_format_help(helpout, help_flags, " \n"
		"Runtime options:\n"
		"- q: quits\n"
		"- u: inputs some commands to be sent\n"
		"- U: same as u but signals the updates as critical\n"
		"- e: inputs some commands to be sent without being aggregated\n"
		"- E: same as e but signals the updates as critical\n"
		"- f: forces RAP sending\n"
		"- F: forces RAP regeneration and sending\n"
		"- p: dumps current scene\n"
	);
} | 0 | [
"CWE-787"
] | gpac | 4e56ad72ac1afb4e049a10f2d99e7512d7141f9d | 15,527,147,465,274,680,000,000,000,000,000,000,000 | 26 | fixed #2216 |
/*
 * Compare two SCTP addresses for exact equality using the address
 * family's comparison hook.  Returns 0 (not equal) when the family of
 * ss1 is unknown.
 */
int sctp_cmp_addr_exact(const union sctp_addr *ss1,
			const union sctp_addr *ss2)
{
	struct sctp_af *af = sctp_get_af_specific(ss1->sa.sa_family);

	return af ? af->cmp_addr(ss1, ss2) : 0;
} | 0 | [
"CWE-287"
] | linux-2.6 | add52379dde2e5300e2d574b172e62c6cf43b3d3 | 209,342,827,272,940,240,000,000,000,000,000,000,000 | 11 | sctp: Fix oops when INIT-ACK indicates that peer doesn't support AUTH
If INIT-ACK is received with SupportedExtensions parameter which
indicates that the peer does not support AUTH, the packet will be
silently ignore, and sctp_process_init() do cleanup all of the
transports in the association.
When T1-Init timer is expires, OOPS happen while we try to choose
a different init transport.
The solution is to only clean up the non-active transports, i.e
the ones that the peer added. However, that introduces a problem
with sctp_connectx(), because we don't mark the proper state for
the transports provided by the user. So, we'll simply mark
user-provided transports as ACTIVE. That will allow INIT
retransmissions to work properly in the sctp_connectx() context
and prevent the crash.
Signed-off-by: Vlad Yasevich <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
/* Re-apply the cached background surface as the GDK window's background
 * pattern and queue a full redraw of the widget. */
gs_window_reset_background_surface (GSWindow *window)
{
        cairo_pattern_t *pattern;
        pattern = cairo_pattern_create_for_surface (window->priv->background_surface);
        gdk_window_set_background_pattern (gtk_widget_get_window (GTK_WIDGET (window)),
                                           pattern);
        /* The GDK window holds its own reference; drop ours. */
        cairo_pattern_destroy (pattern);
        gtk_widget_queue_draw (GTK_WIDGET (window));
} | 0 | [
"CWE-284"
] | cinnamon-screensaver | da7af55f1fa966c52e15cc288d4f8928eca8cc9f | 93,941,940,567,230,820,000,000,000,000,000,000,000 | 9 | Workaround gtk3 bug, don't allow GtkWindow to handle popup_menu. |
  /* Forward the geometry type to the Type_geometry_attributes base. */
  void set_geometry_type(uint type)
{ Type_geometry_attributes::set_geometry_type(type); } | 0 | [
"CWE-617"
] | server | 807945f2eb5fa22e6f233cc17b85a2e141efe2c8 | 146,796,006,521,446,200,000,000,000,000,000,000,000 | 2 | MDEV-26402: A SEGV in Item_field::used_tables/update_depend_map_for_order...
When doing condition pushdown from HAVING into WHERE,
Item_equal::create_pushable_equalities() calls
item->set_extraction_flag(IMMUTABLE_FL) for constant items.
Then, Item::cleanup_excluding_immutables_processor() checks for this flag
to see if it should call item->cleanup() or leave the item as-is.
The failure happens when a constant item has a non-constant one inside it,
like:
(tbl.col=0 AND impossible_cond)
item->walk(cleanup_excluding_immutables_processor) works in a bottom-up
way so it
1. will call Item_func_eq(tbl.col=0)->cleanup()
2. will not call Item_cond_and->cleanup (as the AND is constant)
This creates an item tree where a fixed Item has an un-fixed Item inside
it which eventually causes an assertion failure.
Fixed by introducing this rule: instead of just calling
item->set_extraction_flag(IMMUTABLE_FL);
we call Item::walk() to set the flag for all sub-items of the item. |
/*
 * KVM_SET_SREGS ioctl handler: install the access registers (acrs) and
 * control registers (crs) supplied by userspace into the vCPU state.
 * Always succeeds.
 */
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	vcpu_put(vcpu);
	return 0;
} | 0 | [
"CWE-416"
] | linux | 0774a964ef561b7170d8d1b1bfe6f88002b6d219 | 229,940,534,478,517,700,000,000,000,000,000,000,000 | 11 | KVM: Fix out of range accesses to memslots
Reset the LRU slot if it becomes invalid when deleting a memslot to fix
an out-of-bounds/use-after-free access when searching through memslots.
Explicitly check for there being no used slots in search_memslots(), and
in the caller of s390's approximation variant.
Fixes: 36947254e5f9 ("KVM: Dynamically size memslot array based on number of used slots")
Reported-by: Qian Cai <[email protected]>
Cc: Peter Xu <[email protected]>
Signed-off-by: Sean Christopherson <[email protected]>
Message-Id: <[email protected]>
Acked-by: Christian Borntraeger <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
/*
 * Dispatch a payload to the per-protocol printer selected by its
 * Ethernet type value.  Returns 1 when a printer was found and invoked,
 * 0 when the type is unhandled (the caller then falls back to a default
 * hex/raw print).  src/dst carry link-layer address info for the few
 * printers that need it.
 */
ethertype_print(netdissect_options *ndo,
                u_short ether_type, const u_char *p,
                u_int length, u_int caplen,
                const struct lladdr_info *src, const struct lladdr_info *dst)
{
	switch (ether_type) {
	case ETHERTYPE_IP:
	        ip_print(ndo, p, length);
		return (1);
	case ETHERTYPE_IPV6:
		ip6_print(ndo, p, length);
		return (1);
	case ETHERTYPE_ARP:
	case ETHERTYPE_REVARP:
	        arp_print(ndo, p, length, caplen);
		return (1);
	case ETHERTYPE_DN:
		decnet_print(ndo, p, length, caplen);
		return (1);
	case ETHERTYPE_ATALK:
		if (ndo->ndo_vflag)
			ND_PRINT((ndo, "et1 "));
		atalk_print(ndo, p, length);
		return (1);
	case ETHERTYPE_AARP:
		aarp_print(ndo, p, length);
		return (1);
	case ETHERTYPE_IPX:
		ND_PRINT((ndo, "(NOV-ETHII) "));
		ipx_print(ndo, p, length);
		return (1);
	case ETHERTYPE_ISO:
		/* NOTE(review): only length/caplen == 0 is guarded before the
		 * one-byte skip; verify the callee bounds-checks the rest of
		 * the buffer rather than trusting caplen - 1. */
		if (length == 0 || caplen == 0) {
			ND_PRINT((ndo, " [|osi]"));
			return (1);
		}
		isoclns_print(ndo, p + 1, length - 1, caplen - 1);
		return(1);
	case ETHERTYPE_PPPOED:
	case ETHERTYPE_PPPOES:
	case ETHERTYPE_PPPOED2:
	case ETHERTYPE_PPPOES2:
		pppoe_print(ndo, p, length);
		return (1);
	case ETHERTYPE_EAPOL:
	        eap_print(ndo, p, length);
		return (1);
	case ETHERTYPE_RRCP:
	        rrcp_print(ndo, p, length, src, dst);
		return (1);
	case ETHERTYPE_PPP:
		if (length) {
			ND_PRINT((ndo, ": "));
			ppp_print(ndo, p, length);
		}
		return (1);
	case ETHERTYPE_MPCP:
	        mpcp_print(ndo, p, length);
		return (1);
	case ETHERTYPE_SLOW:
	        slow_print(ndo, p, length);
		return (1);
	case ETHERTYPE_CFM:
	case ETHERTYPE_CFM_OLD:
		cfm_print(ndo, p, length);
		return (1);
	case ETHERTYPE_LLDP:
		lldp_print(ndo, p, length);
		return (1);
	case ETHERTYPE_NSH:
		nsh_print(ndo, p, length);
		return (1);
        case ETHERTYPE_LOOPBACK:
		loopback_print(ndo, p, length);
                return (1);
	case ETHERTYPE_MPLS:
	case ETHERTYPE_MPLS_MULTI:
		mpls_print(ndo, p, length);
		return (1);
	case ETHERTYPE_TIPC:
		tipc_print(ndo, p, length, caplen);
		return (1);
	case ETHERTYPE_MS_NLB_HB:
		msnlb_print(ndo, p);
		return (1);
        case ETHERTYPE_GEONET_OLD:
        case ETHERTYPE_GEONET:
                geonet_print(ndo, p, length, src);
                return (1);
        case ETHERTYPE_CALM_FAST:
                calm_fast_print(ndo, p, length, src);
                return (1);
	case ETHERTYPE_AOE:
		aoe_print(ndo, p, length);
		return (1);
	case ETHERTYPE_MEDSA:
		medsa_print(ndo, p, length, caplen, src, dst);
		return (1);
	/* Types with no dedicated printer yet fall through to the caller's
	 * default output. */
	case ETHERTYPE_LAT:
	case ETHERTYPE_SCA:
	case ETHERTYPE_MOPRC:
	case ETHERTYPE_MOPDL:
	case ETHERTYPE_IEEE1905_1:
		/* default_print for now */
	default:
		return (0);
	}
"CWE-125",
"CWE-787"
] | tcpdump | 1dcd10aceabbc03bf571ea32b892c522cbe923de | 258,122,773,114,790,850,000,000,000,000,000,000,000 | 134 | CVE-2017-12897/ISO CLNS: Use ND_TTEST() for the bounds checks in isoclns_print().
This fixes a buffer over-read discovered by Kamil Frankowicz.
Don't pass the remaining caplen - that's too hard to get right, and we
were getting it wrong in at least one case; just use ND_TTEST().
Add a test using the capture file supplied by the reporter(s). |
/* Accessor for the server's SMB1 write-unlock capability flag
 * (presumably recorded during protocol negotiation -- confirm). */
bool smb1cli_conn_server_writeunlock(struct smbXcli_conn *conn)
{
	return conn->smb1.server.writeunlock;
} | 0 | [
"CWE-20"
] | samba | a819d2b440aafa3138d95ff6e8b824da885a70e9 | 229,208,515,914,505,170,000,000,000,000,000,000,000 | 4 | CVE-2015-5296: libcli/smb: make sure we require signing when we demand encryption on a session
BUG: https://bugzilla.samba.org/show_bug.cgi?id=11536
Signed-off-by: Stefan Metzmacher <[email protected]>
Reviewed-by: Jeremy Allison <[email protected]> |
  /* Default: this dispatcher fast-dispatches no message types; subclasses
   * override to opt in to fast dispatch. */
  virtual bool ms_can_fast_dispatch_any() const { return false; } | 0 | [
"CWE-287",
"CWE-284"
] | ceph | 5ead97120e07054d80623dada90a5cc764c28468 | 160,164,959,684,919,040,000,000,000,000,000,000,000 | 1 | auth/cephx: add authorizer challenge
Allow the accepting side of a connection to reject an initial authorizer
with a random challenge. The connecting side then has to respond with an
updated authorizer proving they are able to decrypt the service's challenge
and that the new authorizer was produced for this specific connection
instance.
The accepting side requires this challenge and response unconditionally
if the client side advertises they have the feature bit. Servers wishing
to require this improved level of authentication simply have to require
the appropriate feature.
Signed-off-by: Sage Weil <[email protected]>
(cherry picked from commit f80b848d3f830eb6dba50123e04385173fa4540b)
# Conflicts:
# src/auth/Auth.h
# src/auth/cephx/CephxProtocol.cc
# src/auth/cephx/CephxProtocol.h
# src/auth/none/AuthNoneProtocol.h
# src/msg/Dispatcher.h
# src/msg/async/AsyncConnection.cc
- const_iterator
- ::decode vs decode
- AsyncConnection ctor arg noise
- get_random_bytes(), not cct->random() |
ar6000_configure_target(struct ar6_softc *ar)
{
u32 param;
if (enableuartprint) {
param = 1;
if (BMIWriteMemory(ar->arHifDevice,
HOST_INTEREST_ITEM_ADDRESS(ar, hi_serial_enable),
(u8 *)¶m,
4)!= 0)
{
AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BMIWriteMemory for enableuartprint failed \n"));
return A_ERROR;
}
AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("Serial console prints enabled\n"));
}
/* Tell target which HTC version it is used*/
param = HTC_PROTOCOL_VERSION;
if (BMIWriteMemory(ar->arHifDevice,
HOST_INTEREST_ITEM_ADDRESS(ar, hi_app_host_interest),
(u8 *)¶m,
4)!= 0)
{
AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BMIWriteMemory for htc version failed \n"));
return A_ERROR;
}
#ifdef CONFIG_HOST_TCMD_SUPPORT
if(testmode) {
ar->arTargetMode = AR6000_TCMD_MODE;
}else {
ar->arTargetMode = AR6000_WLAN_MODE;
}
#endif
if (enabletimerwar) {
u32 param;
if (BMIReadMemory(ar->arHifDevice,
HOST_INTEREST_ITEM_ADDRESS(ar, hi_option_flag),
(u8 *)¶m,
4)!= 0)
{
AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BMIReadMemory for enabletimerwar failed \n"));
return A_ERROR;
}
param |= HI_OPTION_TIMER_WAR;
if (BMIWriteMemory(ar->arHifDevice,
HOST_INTEREST_ITEM_ADDRESS(ar, hi_option_flag),
(u8 *)¶m,
4) != 0)
{
AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BMIWriteMemory for enabletimerwar failed \n"));
return A_ERROR;
}
AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("Timer WAR enabled\n"));
}
/* set the firmware mode to STA/IBSS/AP */
{
u32 param;
if (BMIReadMemory(ar->arHifDevice,
HOST_INTEREST_ITEM_ADDRESS(ar, hi_option_flag),
(u8 *)¶m,
4)!= 0)
{
AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BMIReadMemory for setting fwmode failed \n"));
return A_ERROR;
}
param |= (num_device << HI_OPTION_NUM_DEV_SHIFT);
param |= (fwmode << HI_OPTION_FW_MODE_SHIFT);
param |= (mac_addr_method << HI_OPTION_MAC_ADDR_METHOD_SHIFT);
param |= (firmware_bridge << HI_OPTION_FW_BRIDGE_SHIFT);
if (BMIWriteMemory(ar->arHifDevice,
HOST_INTEREST_ITEM_ADDRESS(ar, hi_option_flag),
(u8 *)¶m,
4) != 0)
{
AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BMIWriteMemory for setting fwmode failed \n"));
return A_ERROR;
}
AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("Firmware mode set\n"));
}
#ifdef ATH6KL_DISABLE_TARGET_DBGLOGS
{
u32 param;
if (BMIReadMemory(ar->arHifDevice,
HOST_INTEREST_ITEM_ADDRESS(ar, hi_option_flag),
(u8 *)¶m,
4)!= 0)
{
AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BMIReadMemory for disabling debug logs failed\n"));
return A_ERROR;
}
param |= HI_OPTION_DISABLE_DBGLOG;
if (BMIWriteMemory(ar->arHifDevice,
HOST_INTEREST_ITEM_ADDRESS(ar, hi_option_flag),
(u8 *)¶m,
4) != 0)
{
AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BMIWriteMemory for HI_OPTION_DISABLE_DBGLOG\n"));
return A_ERROR;
}
AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("Firmware mode set\n"));
}
#endif /* ATH6KL_DISABLE_TARGET_DBGLOGS */
/*
* Hardcode the address use for the extended board data
* Ideally this should be pre-allocate by the OS at boot time
* But since it is a new feature and board data is loaded
* at init time, we have to workaround this from host.
* It is difficult to patch the firmware boot code,
* but possible in theory.
*/
if (ar->arTargetType == TARGET_TYPE_AR6003) {
u32 ramReservedSz;
if (ar->arVersion.target_ver == AR6003_REV2_VERSION) {
param = AR6003_REV2_BOARD_EXT_DATA_ADDRESS;
ramReservedSz = AR6003_REV2_RAM_RESERVE_SIZE;
} else {
param = AR6003_REV3_BOARD_EXT_DATA_ADDRESS;
ramReservedSz = AR6003_REV3_RAM_RESERVE_SIZE;
}
if (BMIWriteMemory(ar->arHifDevice,
HOST_INTEREST_ITEM_ADDRESS(ar, hi_board_ext_data),
(u8 *)¶m, 4) != 0) {
AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
("BMIWriteMemory for "
"hi_board_ext_data failed\n"));
return A_ERROR;
}
if (BMIWriteMemory(ar->arHifDevice,
HOST_INTEREST_ITEM_ADDRESS(ar,
hi_end_RAM_reserve_sz),
(u8 *)&ramReservedSz, 4) != 0) {
AR_DEBUG_PRINTF(ATH_DEBUG_ERR ,
("BMIWriteMemory for "
"hi_end_RAM_reserve_sz failed\n"));
return A_ERROR;
}
}
/* since BMIInit is called in the driver layer, we have to set the block
* size here for the target */
if (ar6000_set_htc_params(ar->arHifDevice, ar->arTargetType,
mbox_yield_limit, 0)) {
/* use default number of control buffers */
return A_ERROR;
}
if (setupbtdev != 0) {
if (ar6000_set_hci_bridge_flags(ar->arHifDevice,
ar->arTargetType,
setupbtdev)) {
return A_ERROR;
}
}
return 0;
} | 0 | [
"CWE-703",
"CWE-264"
] | linux | 550fd08c2cebad61c548def135f67aba284c6162 | 251,153,215,017,353,630,000,000,000,000,000,000,000 | 171 | net: Audit drivers to identify those needing IFF_TX_SKB_SHARING cleared
After the last patch, We are left in a state in which only drivers calling
ether_setup have IFF_TX_SKB_SHARING set (we assume that drivers touching real
hardware call ether_setup for their net_devices and don't hold any state in
their skbs. There are a handful of drivers that violate this assumption of
course, and need to be fixed up. This patch identifies those drivers, and marks
them as not being able to support the safe transmission of skbs by clearning the
IFF_TX_SKB_SHARING flag in priv_flags
Signed-off-by: Neil Horman <[email protected]>
CC: Karsten Keil <[email protected]>
CC: "David S. Miller" <[email protected]>
CC: Jay Vosburgh <[email protected]>
CC: Andy Gospodarek <[email protected]>
CC: Patrick McHardy <[email protected]>
CC: Krzysztof Halasa <[email protected]>
CC: "John W. Linville" <[email protected]>
CC: Greg Kroah-Hartman <[email protected]>
CC: Marcel Holtmann <[email protected]>
CC: Johannes Berg <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
static void svm_set_vintr(struct vcpu_svm *svm)
{
struct vmcb_control_area *control;
/*
* The following fields are ignored when AVIC is enabled
*/
WARN_ON(kvm_vcpu_apicv_activated(&svm->vcpu));
svm_set_intercept(svm, INTERCEPT_VINTR);
/*
* This is just a dummy VINTR to actually cause a vmexit to happen.
* Actual injection of virtual interrupts happens through EVENTINJ.
*/
control = &svm->vmcb->control;
control->int_vector = 0x0;
control->int_ctl &= ~V_INTR_PRIO_MASK;
control->int_ctl |= V_IRQ_MASK |
((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
} | 0 | [
"CWE-703"
] | linux | 6cd88243c7e03845a450795e134b488fc2afb736 | 23,183,531,718,485,100,000,000,000,000,000,000,000 | 22 | KVM: x86: do not report a vCPU as preempted outside instruction boundaries
If a vCPU is outside guest mode and is scheduled out, it might be in the
process of making a memory access. A problem occurs if another vCPU uses
the PV TLB flush feature during the period when the vCPU is scheduled
out, and a virtual address has already been translated but has not yet
been accessed, because this is equivalent to using a stale TLB entry.
To avoid this, only report a vCPU as preempted if sure that the guest
is at an instruction boundary. A rescheduling request will be delivered
to the host physical CPU as an external interrupt, so for simplicity
consider any vmexit *not* instruction boundary except for external
interrupts.
It would in principle be okay to report the vCPU as preempted also
if it is sleeping in kvm_vcpu_block(): a TLB flush IPI will incur the
vmentry/vmexit overhead unnecessarily, and optimistic spinning is
also unlikely to succeed. However, leave it for later because right
now kvm_vcpu_check_block() is doing memory accesses. Even
though the TLB flush issue only applies to virtual memory address,
it's very much preferrable to be conservative.
Reported-by: Jann Horn <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
PJ_DEF(pj_status_t) pjmedia_rtcp_get_ntp_time(const pjmedia_rtcp_session *sess,
pjmedia_rtcp_ntp_rec *ntp)
{
/* Seconds between 1900-01-01 to 1970-01-01 */
#define JAN_1970 (2208988800UL)
pj_timestamp ts;
pj_status_t status;
status = pj_get_timestamp(&ts);
/* Fill up the high 32bit part */
ntp->hi = (pj_uint32_t)((ts.u64 - sess->ts_base.u64) / sess->ts_freq.u64)
+ sess->tv_base.sec + JAN_1970;
/* Calculate seconds fractions */
ts.u64 = (ts.u64 - sess->ts_base.u64) % sess->ts_freq.u64;
pj_assert(ts.u64 < sess->ts_freq.u64);
ts.u64 = (ts.u64 << 32) / sess->ts_freq.u64;
/* Fill up the low 32bit part */
ntp->lo = ts.u32.lo;
#if (defined(PJ_WIN32) && PJ_WIN32!=0) || \
(defined(PJ_WIN64) && PJ_WIN64!=0) || \
(defined(PJ_WIN32_WINCE) && PJ_WIN32_WINCE!=0)
/* On Win32, since we use QueryPerformanceCounter() as the backend
* timestamp API, we need to protect against this bug:
* Performance counter value may unexpectedly leap forward
* http://support.microsoft.com/default.aspx?scid=KB;EN-US;Q274323
*/
{
/*
* Compare elapsed time reported by timestamp with actual elapsed
* time. If the difference is too excessive, then we use system
* time instead.
*/
/* MIN_DIFF needs to be large enough so that "normal" diff caused
* by system activity or context switch doesn't trigger the time
* correction.
*/
enum { MIN_DIFF = 400 };
pj_time_val ts_time, elapsed, diff;
pj_gettimeofday(&elapsed);
ts_time.sec = ntp->hi - sess->tv_base.sec - JAN_1970;
ts_time.msec = (long)(ntp->lo * 1000.0 / 0xFFFFFFFF);
PJ_TIME_VAL_SUB(elapsed, sess->tv_base);
if (PJ_TIME_VAL_LT(ts_time, elapsed)) {
diff = elapsed;
PJ_TIME_VAL_SUB(diff, ts_time);
} else {
diff = ts_time;
PJ_TIME_VAL_SUB(diff, elapsed);
}
if (PJ_TIME_VAL_MSEC(diff) >= MIN_DIFF) {
TRACE_((sess->name, "RTCP NTP timestamp corrected by %d ms",
PJ_TIME_VAL_MSEC(diff)));
ntp->hi = elapsed.sec + sess->tv_base.sec + JAN_1970;
ntp->lo = (elapsed.msec * 65536 / 1000) << 16;
}
}
#endif
return status;
} | 0 | [
"CWE-125"
] | pjproject | 8b621f192cae14456ee0b0ade52ce6c6f258af1e | 165,833,343,828,424,630,000,000,000,000,000,000,000 | 77 | Merge pull request from GHSA-3qx3-cg72-wrh9 |
int perf_event_init_context(struct task_struct *child, int ctxn)
{
struct perf_event_context *child_ctx, *parent_ctx;
struct perf_event_context *cloned_ctx;
struct perf_event *event;
struct task_struct *parent = current;
int inherited_all = 1;
unsigned long flags;
int ret = 0;
if (likely(!parent->perf_event_ctxp[ctxn]))
return 0;
/*
* If the parent's context is a clone, pin it so it won't get
* swapped under us.
*/
parent_ctx = perf_pin_task_context(parent, ctxn);
/*
* No need to check if parent_ctx != NULL here; since we saw
* it non-NULL earlier, the only reason for it to become NULL
* is if we exit, and since we're currently in the middle of
* a fork we can't be exiting at the same time.
*/
/*
* Lock the parent list. No need to lock the child - not PID
* hashed yet and not running, so nobody can access it.
*/
mutex_lock(&parent_ctx->mutex);
/*
* We dont have to disable NMIs - we are only looking at
* the list, not manipulating it:
*/
list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
ret = inherit_task_group(event, parent, parent_ctx,
child, ctxn, &inherited_all);
if (ret)
break;
}
/*
* We can't hold ctx->lock when iterating the ->flexible_group list due
* to allocations, but we need to prevent rotation because
* rotate_ctx() will change the list from interrupt context.
*/
raw_spin_lock_irqsave(&parent_ctx->lock, flags);
parent_ctx->rotate_disable = 1;
raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
ret = inherit_task_group(event, parent, parent_ctx,
child, ctxn, &inherited_all);
if (ret)
break;
}
raw_spin_lock_irqsave(&parent_ctx->lock, flags);
parent_ctx->rotate_disable = 0;
child_ctx = child->perf_event_ctxp[ctxn];
if (child_ctx && inherited_all) {
/*
* Mark the child context as a clone of the parent
* context, or of whatever the parent is a clone of.
*
* Note that if the parent is a clone, the holding of
* parent_ctx->lock avoids it from being uncloned.
*/
cloned_ctx = parent_ctx->parent_ctx;
if (cloned_ctx) {
child_ctx->parent_ctx = cloned_ctx;
child_ctx->parent_gen = parent_ctx->parent_gen;
} else {
child_ctx->parent_ctx = parent_ctx;
child_ctx->parent_gen = parent_ctx->generation;
}
get_ctx(child_ctx->parent_ctx);
}
raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
mutex_unlock(&parent_ctx->mutex);
perf_unpin_context(parent_ctx);
put_ctx(parent_ctx);
return ret;
} | 0 | [
"CWE-703",
"CWE-189"
] | linux | 8176cced706b5e5d15887584150764894e94e02f | 224,850,689,789,694,440,000,000,000,000,000,000,000 | 91 | perf: Treat attr.config as u64 in perf_swevent_init()
Trinity discovered that we fail to check all 64 bits of
attr.config passed by user space, resulting to out-of-bounds
access of the perf_swevent_enabled array in
sw_perf_event_destroy().
Introduced in commit b0a873ebb ("perf: Register PMU
implementations").
Signed-off-by: Tommi Rantala <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: [email protected]
Cc: Paul Mackerras <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]> |
static struct rmap_item *scan_get_next_rmap_item(struct page **page)
{
struct mm_struct *mm;
struct mm_slot *slot;
struct vm_area_struct *vma;
struct rmap_item *rmap_item;
if (list_empty(&ksm_mm_head.mm_list))
return NULL;
slot = ksm_scan.mm_slot;
if (slot == &ksm_mm_head) {
/*
* A number of pages can hang around indefinitely on per-cpu
* pagevecs, raised page count preventing write_protect_page
* from merging them. Though it doesn't really matter much,
* it is puzzling to see some stuck in pages_volatile until
* other activity jostles them out, and they also prevented
* LTP's KSM test from succeeding deterministically; so drain
* them here (here rather than on entry to ksm_do_scan(),
* so we don't IPI too often when pages_to_scan is set low).
*/
lru_add_drain_all();
root_unstable_tree = RB_ROOT;
spin_lock(&ksm_mmlist_lock);
slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list);
ksm_scan.mm_slot = slot;
spin_unlock(&ksm_mmlist_lock);
/*
* Although we tested list_empty() above, a racing __ksm_exit
* of the last mm on the list may have removed it since then.
*/
if (slot == &ksm_mm_head)
return NULL;
next_mm:
ksm_scan.address = 0;
ksm_scan.rmap_list = &slot->rmap_list;
}
mm = slot->mm;
down_read(&mm->mmap_sem);
if (ksm_test_exit(mm))
vma = NULL;
else
vma = find_vma(mm, ksm_scan.address);
for (; vma; vma = vma->vm_next) {
if (!(vma->vm_flags & VM_MERGEABLE))
continue;
if (ksm_scan.address < vma->vm_start)
ksm_scan.address = vma->vm_start;
if (!vma->anon_vma)
ksm_scan.address = vma->vm_end;
while (ksm_scan.address < vma->vm_end) {
if (ksm_test_exit(mm))
break;
*page = follow_page(vma, ksm_scan.address, FOLL_GET);
if (IS_ERR_OR_NULL(*page)) {
ksm_scan.address += PAGE_SIZE;
cond_resched();
continue;
}
if (PageAnon(*page) ||
page_trans_compound_anon(*page)) {
flush_anon_page(vma, *page, ksm_scan.address);
flush_dcache_page(*page);
rmap_item = get_next_rmap_item(slot,
ksm_scan.rmap_list, ksm_scan.address);
if (rmap_item) {
ksm_scan.rmap_list =
&rmap_item->rmap_list;
ksm_scan.address += PAGE_SIZE;
} else
put_page(*page);
up_read(&mm->mmap_sem);
return rmap_item;
}
put_page(*page);
ksm_scan.address += PAGE_SIZE;
cond_resched();
}
}
if (ksm_test_exit(mm)) {
ksm_scan.address = 0;
ksm_scan.rmap_list = &slot->rmap_list;
}
/*
* Nuke all the rmap_items that are above this current rmap:
* because there were no VM_MERGEABLE vmas with such addresses.
*/
remove_trailing_rmap_items(slot, ksm_scan.rmap_list);
spin_lock(&ksm_mmlist_lock);
ksm_scan.mm_slot = list_entry(slot->mm_list.next,
struct mm_slot, mm_list);
if (ksm_scan.address == 0) {
/*
* We've completed a full scan of all vmas, holding mmap_sem
* throughout, and found no VM_MERGEABLE: so do the same as
* __ksm_exit does to remove this mm from all our lists now.
* This applies either when cleaning up after __ksm_exit
* (but beware: we can reach here even before __ksm_exit),
* or when all VM_MERGEABLE areas have been unmapped (and
* mmap_sem then protects against race with MADV_MERGEABLE).
*/
hlist_del(&slot->link);
list_del(&slot->mm_list);
spin_unlock(&ksm_mmlist_lock);
free_mm_slot(slot);
clear_bit(MMF_VM_MERGEABLE, &mm->flags);
up_read(&mm->mmap_sem);
mmdrop(mm);
} else {
spin_unlock(&ksm_mmlist_lock);
up_read(&mm->mmap_sem);
}
/* Repeat until we've completed scanning the whole list */
slot = ksm_scan.mm_slot;
if (slot != &ksm_mm_head)
goto next_mm;
ksm_scan.seqnr++;
return NULL;
} | 0 | [
"CWE-362",
"CWE-125"
] | linux | 2b472611a32a72f4a118c069c2d62a1a3f087afd | 139,760,727,682,557,810,000,000,000,000,000,000,000 | 130 | ksm: fix NULL pointer dereference in scan_get_next_rmap_item()
Andrea Righi reported a case where an exiting task can race against
ksmd::scan_get_next_rmap_item (http://lkml.org/lkml/2011/6/1/742) easily
triggering a NULL pointer dereference in ksmd.
ksm_scan.mm_slot == &ksm_mm_head with only one registered mm
CPU 1 (__ksm_exit) CPU 2 (scan_get_next_rmap_item)
list_empty() is false
lock slot == &ksm_mm_head
list_del(slot->mm_list)
(list now empty)
unlock
lock
slot = list_entry(slot->mm_list.next)
(list is empty, so slot is still ksm_mm_head)
unlock
slot->mm == NULL ... Oops
Close this race by revalidating that the new slot is not simply the list
head again.
Andrea's test case:
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/mman.h>
#define BUFSIZE getpagesize()
int main(int argc, char **argv)
{
void *ptr;
if (posix_memalign(&ptr, getpagesize(), BUFSIZE) < 0) {
perror("posix_memalign");
exit(1);
}
if (madvise(ptr, BUFSIZE, MADV_MERGEABLE) < 0) {
perror("madvise");
exit(1);
}
*(char *)NULL = 0;
return 0;
}
Reported-by: Andrea Righi <[email protected]>
Tested-by: Andrea Righi <[email protected]>
Cc: Andrea Arcangeli <[email protected]>
Signed-off-by: Hugh Dickins <[email protected]>
Signed-off-by: Chris Wright <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
int node, int page_node)
{
struct kmem_cache_node *n;
struct alien_cache *alien = NULL;
struct array_cache *ac;
LIST_HEAD(list);
n = get_node(cachep, node);
STATS_INC_NODEFREES(cachep);
if (n->alien && n->alien[page_node]) {
alien = n->alien[page_node];
ac = &alien->ac;
spin_lock(&alien->lock);
if (unlikely(ac->avail == ac->limit)) {
STATS_INC_ACOVERFLOW(cachep);
__drain_alien_cache(cachep, ac, page_node, &list);
}
ac->entry[ac->avail++] = objp;
spin_unlock(&alien->lock);
slabs_destroy(cachep, &list);
} else {
n = get_node(cachep, page_node);
spin_lock(&n->list_lock);
free_block(cachep, &objp, 1, page_node, &list);
spin_unlock(&n->list_lock);
slabs_destroy(cachep, &list);
}
return 1;
} | 0 | [
"CWE-703"
] | linux | c4e490cf148e85ead0d1b1c2caaba833f1d5b29f | 320,388,991,733,717,860,000,000,000,000,000,000,000 | 30 | mm/slab.c: fix SLAB freelist randomization duplicate entries
This patch fixes a bug in the freelist randomization code. When a high
random number is used, the freelist will contain duplicate entries. It
will result in different allocations sharing the same chunk.
It will result in odd behaviours and crashes. It should be uncommon but
it depends on the machines. We saw it happening more often on some
machines (every few hours of running tests).
Fixes: c7ce4f60ac19 ("mm: SLAB freelist randomization")
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: John Sperbeck <[email protected]>
Signed-off-by: Thomas Garnier <[email protected]>
Cc: Christoph Lameter <[email protected]>
Cc: Pekka Enberg <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Joonsoo Kim <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
char *converse(pam_handle_t *pamh, int echocode, const char *prompt) {
const struct pam_message msg = {.msg_style = echocode, .msg = (char *)prompt};
const struct pam_message *msgs = &msg;
struct pam_response *resp = NULL;
int retval = _converse(pamh, 1, &msgs, &resp);
char *ret = NULL;
if (retval != PAM_SUCCESS || resp == NULL || resp->resp == NULL ||
*resp->resp == '\000') {
if (retval == PAM_SUCCESS && resp && resp->resp) {
ret = resp->resp;
}
} else {
ret = resp->resp;
}
// Deallocate temporary storage.
if (resp) {
if (!ret) {
free(resp->resp);
}
free(resp);
}
return ret;
} | 0 | [
"CWE-200"
] | pam-u2f | 18b1914e32b74ff52000f10e97067e841e5fff62 | 62,816,199,872,691,690,000,000,000,000,000,000,000 | 27 | Do not leak file descriptor when doing exec
When opening a custom debug file, the descriptor would stay
open when calling exec and leak to the child process.
Make sure all files are opened with close-on-exec.
This fixes CVE-2019-12210.
Thanks to Matthias Gerstner of the SUSE Security Team for reporting
the issue. |
_pango_ot_info_get_layout (PangoOTInfo *info)
{
return info->layout;
} | 0 | [] | pango | 336bb3201096bdd0494d29926dd44e8cca8bed26 | 266,471,568,233,486,320,000,000,000,000,000,000,000 | 4 | [HB] Remove all references to the old code! |
ExecutionStatus Interpreter::caseDirectEval(
Runtime *runtime,
PinnedHermesValue *frameRegs,
const Inst *ip) {
auto *result = &O1REG(DirectEval);
auto *input = &O2REG(DirectEval);
GCScopeMarkerRAII gcMarker{runtime};
// Check to see if global eval() has been overriden, in which case call it as
// as normal function.
auto global = runtime->getGlobal();
auto existingEval = global->getNamed_RJS(
global, runtime, Predefined::getSymbolID(Predefined::eval));
if (LLVM_UNLIKELY(existingEval == ExecutionStatus::EXCEPTION)) {
return ExecutionStatus::EXCEPTION;
}
auto *nativeExistingEval = dyn_vmcast<NativeFunction>(existingEval->get());
if (LLVM_UNLIKELY(
!nativeExistingEval ||
nativeExistingEval->getFunctionPtr() != hermes::vm::eval)) {
if (auto *existingEvalCallable =
dyn_vmcast<Callable>(existingEval->get())) {
auto evalRes = existingEvalCallable->executeCall1(
runtime->makeHandle<Callable>(existingEvalCallable),
runtime,
Runtime::getUndefinedValue(),
*input);
if (LLVM_UNLIKELY(evalRes == ExecutionStatus::EXCEPTION)) {
return ExecutionStatus::EXCEPTION;
}
*result = evalRes->get();
evalRes->invalidate();
return ExecutionStatus::RETURNED;
}
return runtime->raiseTypeErrorForValue(
runtime->makeHandle(std::move(*existingEval)), " is not a function");
}
if (!input->isString()) {
*result = *input;
return ExecutionStatus::RETURNED;
}
// Create a dummy scope, so that the local eval executes in its own scope
// (as per the spec for strict callers, which is the only thing we support).
ScopeChain scopeChain{};
scopeChain.functions.emplace_back();
auto cr = vm::directEval(
runtime, Handle<StringPrimitive>::vmcast(input), scopeChain, false);
if (cr == ExecutionStatus::EXCEPTION)
return ExecutionStatus::EXCEPTION;
*result = *cr;
return ExecutionStatus::RETURNED;
} | 0 | [
"CWE-670",
"CWE-703"
] | hermes | b2021df620824627f5a8c96615edbd1eb7fdddfc | 251,940,655,603,174,900,000,000,000,000,000,000,000 | 58 | Fix CVE-2020-1914 by using NEXTINST for SaveGeneratorLong
Summary:
If `SaveGeneratorLong` was emitted, it would accidentally jump to the
wrong next instruction, based on how long SaveGenerator was.
Make a callout function to handle the common case, and handle the dispatch
within each case of the interpreter loop.
Fixes CVE-2020-1914
Reviewed By: neildhar
Differential Revision: D24024242
fbshipit-source-id: 3bcb88daa740f0d50e91771a49eb212551ce8bd8 |
static unsigned fill_bitbuffer(STATE_PARAM unsigned bitbuffer, unsigned *current, const unsigned required)
{
while (*current < required) {
if (bytebuffer_offset >= bytebuffer_size) {
unsigned sz = bytebuffer_max - 4;
if (to_read >= 0 && to_read < sz) /* unzip only */
sz = to_read;
/* Leave the first 4 bytes empty so we can always unwind the bitbuffer
* to the front of the bytebuffer */
bytebuffer_size = safe_read(gunzip_src_fd, &bytebuffer[4], sz);
if ((int)bytebuffer_size < 1) {
error_msg = "unexpected end of file";
abort_unzip(PASS_STATE_ONLY);
}
if (to_read >= 0) /* unzip only */
to_read -= bytebuffer_size;
bytebuffer_size += 4;
bytebuffer_offset = 4;
}
bitbuffer |= ((unsigned) bytebuffer[bytebuffer_offset]) << *current;
bytebuffer_offset++;
*current += 8;
}
return bitbuffer;
} | 0 | [
"CWE-476"
] | busybox | 1de25a6e87e0e627aa34298105a3d17c60a1f44e | 117,145,974,569,850,240,000,000,000,000,000,000,000 | 25 | unzip: test for bad archive SEGVing
function old new delta
huft_build 1296 1300 +4
Signed-off-by: Denys Vlasenko <[email protected]> |
static void get_pi_state(struct futex_pi_state *pi_state)
{
WARN_ON_ONCE(!refcount_inc_not_zero(&pi_state->refcount));
} | 0 | [
"CWE-416"
] | tip | 8019ad13ef7f64be44d4f892af9c840179009254 | 291,042,825,374,034,900,000,000,000,000,000,000,000 | 4 | futex: Fix inode life-time issue
As reported by Jann, ihold() does not in fact guarantee inode
persistence. And instead of making it so, replace the usage of inode
pointers with a per boot, machine wide, unique inode identifier.
This sequence number is global, but shared (file backed) futexes are
rare enough that this should not become a performance issue.
Reported-by: Jann Horn <[email protected]>
Suggested-by: Linus Torvalds <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]> |
static enum b43_dmatype dma_mask_to_engine_type(u64 dmamask)
{
if (dmamask == DMA_BIT_MASK(30))
return B43_DMA_30BIT;
if (dmamask == DMA_BIT_MASK(32))
return B43_DMA_32BIT;
if (dmamask == DMA_BIT_MASK(64))
return B43_DMA_64BIT;
B43_WARN_ON(1);
return B43_DMA_30BIT;
} | 0 | [
"CWE-119",
"CWE-787"
] | linux | c85ce65ecac078ab1a1835c87c4a6319cf74660a | 159,306,838,054,264,590,000,000,000,000,000,000,000 | 11 | b43: allocate receive buffers big enough for max frame len + offset
Otherwise, skb_put inside of dma_rx can fail...
https://bugzilla.kernel.org/show_bug.cgi?id=32042
Signed-off-by: John W. Linville <[email protected]>
Acked-by: Larry Finger <[email protected]>
Cc: [email protected] |
Subsets and Splits