| func (string, len 0-484k) | target (int64, 0-1) | cwe (sequence, len 0-4) | project (string, 799 classes) | commit_id (string, len 40) | hash (float64, ~1.2157e24-3.4028e29) | size (int64, 1-24k) | message (string, len 0-13.3k) |
---|---|---|---|---|---|---|---|
compute_box_blur_width (double radius)
{
double width;
width = radius * 3 * sqrt (2 * G_PI) / 4;
return (gint) (width + 0.5);
} | 0 | [] | librsvg | a51919f7e1ca9c535390a746fbf6e28c8402dc61 | 277,570,363,736,699,600,000,000,000,000,000,000,000 | 7 | rsvg: Add rsvg_acquire_node()
This function does proper recursion checks when looking up resources
from URLs and thereby helps avoiding infinite loops when cyclic
references span multiple types of elements. |
static void print_qualifiers(BIO *out, STACK_OF(POLICYQUALINFO) *quals,
int indent)
{
POLICYQUALINFO *qualinfo;
int i;
for (i = 0; i < sk_POLICYQUALINFO_num(quals); i++) {
qualinfo = sk_POLICYQUALINFO_value(quals, i);
switch (OBJ_obj2nid(qualinfo->pqualid)) {
case NID_id_qt_cps:
BIO_printf(out, "%*sCPS: %s\n", indent, "",
qualinfo->d.cpsuri->data);
break;
case NID_id_qt_unotice:
BIO_printf(out, "%*sUser Notice:\n", indent, "");
print_notice(out, qualinfo->d.usernotice, indent + 2);
break;
default:
BIO_printf(out, "%*sUnknown Qualifier: ", indent + 2, "");
i2a_ASN1_OBJECT(out, qualinfo->pqualid);
BIO_puts(out, "\n");
break;
}
}
} | 1 | [
"CWE-125"
] | openssl | 5f54e57406ca17731b9ade3afd561d3c652e07f2 | 162,380,754,794,248,300,000,000,000,000,000,000,000 | 27 | Fix POLICYINFO printing to not assume NUL terminated strings
ASN.1 strings may not be NUL terminated. Don't assume they are.
CVE-2021-3712
Reviewed-by: Viktor Dukhovni <[email protected]>
Reviewed-by: Paul Dale <[email protected]> |
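The fix described above amounts to printing the ASN.1 string by its explicit length instead of treating it as a NUL-terminated C string. A rough sketch of that idea for the CPS case (not the upstream diff; it reuses the variables of the function above together with OpenSSL's ASN1_STRING_length()/ASN1_STRING_get0_data() accessors):

```c
case NID_id_qt_cps:
    /* Bound the print by the ASN.1 string's length field rather than
     * assuming a trailing NUL (sketch of the described fix). */
    BIO_printf(out, "%*sCPS: %.*s\n", indent, "",
               ASN1_STRING_length(qualinfo->d.cpsuri),
               (const char *)ASN1_STRING_get0_data(qualinfo->d.cpsuri));
    break;
```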
io_thread_init_command(Master_info *mi, const char *query, int allowed_error,
MYSQL_RES **master_res= NULL,
MYSQL_ROW *master_row= NULL)
{
DBUG_ENTER("io_thread_init_command");
DBUG_PRINT("info", ("IO thread initialization command: '%s'", query));
MYSQL *mysql= mi->mysql;
int ret= mysql_real_query(mysql, query, strlen(query));
if (io_slave_killed(mi->info_thd, mi))
{
sql_print_information("The slave IO thread was killed while executing "
"initialization query '%s'", query);
mysql_free_result(mysql_store_result(mysql));
DBUG_RETURN(COMMAND_STATUS_ERROR);
}
if (ret != 0)
{
int err= mysql_errno(mysql);
mysql_free_result(mysql_store_result(mysql));
if (!err || err != allowed_error)
{
mi->report(is_network_error(err) ? WARNING_LEVEL : ERROR_LEVEL, err,
"The slave IO thread stops because the initialization query "
"'%s' failed with error '%s'.",
query, mysql_error(mysql));
DBUG_RETURN(COMMAND_STATUS_ERROR);
}
DBUG_RETURN(COMMAND_STATUS_ALLOWED_ERROR);
}
if (master_res != NULL)
{
if ((*master_res= mysql_store_result(mysql)) == NULL)
{
mi->report(WARNING_LEVEL, mysql_errno(mysql),
"The slave IO thread stops because the initialization query "
"'%s' did not return any result.",
query);
DBUG_RETURN(COMMAND_STATUS_ERROR);
}
if (master_row != NULL)
{
if ((*master_row= mysql_fetch_row(*master_res)) == NULL)
{
mysql_free_result(*master_res);
mi->report(WARNING_LEVEL, mysql_errno(mysql),
"The slave IO thread stops because the initialization query "
"'%s' did not return any row.",
query);
DBUG_RETURN(COMMAND_STATUS_ERROR);
}
}
}
else
DBUG_ASSERT(master_row == NULL);
DBUG_RETURN(COMMAND_STATUS_OK);
} | 0 | [
"CWE-284",
"CWE-295"
] | mysql-server | 3bd5589e1a5a93f9c224badf983cd65c45215390 | 214,534,887,980,314,650,000,000,000,000,000,000,000 | 56 | WL#6791 : Redefine client --ssl option to imply enforced encryption
# Changed the meaning of the --ssl=1 option of all client binaries
to mean force ssl, not try ssl and fail over to unencrypted
# Added a new MYSQL_OPT_SSL_ENFORCE mysql_options()
option to specify that an ssl connection is required.
# Added a new macro SSL_SET_OPTIONS() to the client
SSL handling headers that sets all the relevant SSL options at
once.
# Revamped all of the current native clients to use the new macro
# Removed some Windows line endings.
# Added proper handling of the new option into the ssl helper
headers.
# If SSL is mandatory assume that the media is secure enough
for the sha256 plugin to do unencrypted password exchange even
before establishing a connection.
# Set the default ssl cipher to DHE-RSA-AES256-SHA if none is
specified.
# updated test cases that require a non-default cipher to spawn
a mysql command line tool binary since mysqltest has no support
for specifying ciphers.
# updated the replication slave connection code to always enforce
SSL if any of the SSL config options is present.
# test cases added and updated.
# added a mysql_get_option() API to return mysql_options()
values. Used the new API inside the sha256 plugin.
# Fixed compilation warnings because of unused variables.
# Fixed test failures (mysql_ssl and bug13115401)
# Fixed whitespace issues.
# Fully implemented the mysql_get_option() function.
# Added a test case for mysql_get_option()
# fixed some trailing whitespace issues
# fixed some uint/int warnings in mysql_client_test.c
# removed shared memory option from non-windows get_options
tests
# moved MYSQL_OPT_LOCAL_INFILE to the uint options |
iperf_set_test_socket_bufsize(struct iperf_test *ipt, int socket_bufsize)
{
ipt->settings->socket_bufsize = socket_bufsize;
} | 0 | [
"CWE-120",
"CWE-119",
"CWE-787"
] | iperf | 91f2fa59e8ed80dfbf400add0164ee0e508e412a | 168,626,348,094,723,020,000,000,000,000,000,000,000 | 4 | Fix a buffer overflow / heap corruption issue that could occur if a
malformed JSON string was passed on the control channel. This issue,
present in the cJSON library, was already fixed upstream, so was
addressed here in iperf3 by importing a newer version of cJSON (plus
local ESnet modifications).
Discovered and reported by Dave McDaniel, Cisco Talos.
Based on a patch by @dopheide-esnet, with input from @DaveGamble.
Cross-references: TALOS-CAN-0164, ESNET-SECADV-2016-0001,
CVE-2016-4303
(cherry picked from commit ed94082be27d971a5e1b08b666e2c217cf470a40)
Signed-off-by: Bruce A. Mah <[email protected]> |
static ssize_t shadow_copy2_getxattr(vfs_handle_struct *handle,
const char *fname, const char *aname,
void *value, size_t size)
{
time_t timestamp;
char *stripped;
ssize_t ret;
int saved_errno;
char *conv;
if (!shadow_copy2_strip_snapshot(talloc_tos(), handle, fname,
&timestamp, &stripped)) {
return -1;
}
if (timestamp == 0) {
return SMB_VFS_NEXT_GETXATTR(handle, fname, aname, value,
size);
}
conv = shadow_copy2_convert(talloc_tos(), handle, stripped, timestamp);
TALLOC_FREE(stripped);
if (conv == NULL) {
return -1;
}
ret = SMB_VFS_NEXT_GETXATTR(handle, conv, aname, value, size);
saved_errno = errno;
TALLOC_FREE(conv);
errno = saved_errno;
return ret;
} | 0 | [
"CWE-200"
] | samba | 675fd8d771f9d43e354dba53ddd9b5483ae0a1d7 | 208,619,974,894,165,940,000,000,000,000,000,000,000 | 29 | CVE-2015-5299: s3-shadow-copy2: fix missing access check on snapdir
Fix originally from <[email protected]>
https://bugzilla.samba.org/show_bug.cgi?id=11529
Signed-off-by: Jeremy Allison <[email protected]>
Reviewed-by: David Disseldorp <[email protected]> |
static void handle_rx(struct vhost_net *net)
{
struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_RX];
struct vhost_virtqueue *vq = &nvq->vq;
unsigned uninitialized_var(in), log;
struct vhost_log *vq_log;
struct msghdr msg = {
.msg_name = NULL,
.msg_namelen = 0,
.msg_control = NULL, /* FIXME: get and handle RX aux data. */
.msg_controllen = 0,
.msg_iov = vq->iov,
.msg_flags = MSG_DONTWAIT,
};
struct virtio_net_hdr_mrg_rxbuf hdr = {
.hdr.flags = 0,
.hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE
};
size_t total_len = 0;
int err, mergeable;
s16 headcount;
size_t vhost_hlen, sock_hlen;
size_t vhost_len, sock_len;
struct socket *sock;
mutex_lock(&vq->mutex);
sock = vq->private_data;
if (!sock)
goto out;
vhost_disable_notify(&net->dev, vq);
vhost_hlen = nvq->vhost_hlen;
sock_hlen = nvq->sock_hlen;
vq_log = unlikely(vhost_has_feature(&net->dev, VHOST_F_LOG_ALL)) ?
vq->log : NULL;
mergeable = vhost_has_feature(&net->dev, VIRTIO_NET_F_MRG_RXBUF);
while ((sock_len = peek_head_len(sock->sk))) {
sock_len += sock_hlen;
vhost_len = sock_len + vhost_hlen;
headcount = get_rx_bufs(vq, vq->heads, vhost_len,
&in, vq_log, &log,
likely(mergeable) ? UIO_MAXIOV : 1);
/* On error, stop handling until the next kick. */
if (unlikely(headcount < 0))
break;
/* OK, now we need to know about added descriptors. */
if (!headcount) {
if (unlikely(vhost_enable_notify(&net->dev, vq))) {
/* They have slipped one in as we were
* doing that: check again. */
vhost_disable_notify(&net->dev, vq);
continue;
}
/* Nothing new? Wait for eventfd to tell us
* they refilled. */
break;
}
/* We don't need to be notified again. */
if (unlikely((vhost_hlen)))
/* Skip header. TODO: support TSO. */
move_iovec_hdr(vq->iov, nvq->hdr, vhost_hlen, in);
else
/* Copy the header for use in VIRTIO_NET_F_MRG_RXBUF:
* needed because recvmsg can modify msg_iov. */
copy_iovec_hdr(vq->iov, nvq->hdr, sock_hlen, in);
msg.msg_iovlen = in;
err = sock->ops->recvmsg(NULL, sock, &msg,
sock_len, MSG_DONTWAIT | MSG_TRUNC);
/* Userspace might have consumed the packet meanwhile:
* it's not supposed to do this usually, but might be hard
* to prevent. Discard data we got (if any) and keep going. */
if (unlikely(err != sock_len)) {
pr_debug("Discarded rx packet: "
" len %d, expected %zd\n", err, sock_len);
vhost_discard_vq_desc(vq, headcount);
continue;
}
if (unlikely(vhost_hlen) &&
memcpy_toiovecend(nvq->hdr, (unsigned char *)&hdr, 0,
vhost_hlen)) {
vq_err(vq, "Unable to write vnet_hdr at addr %p\n",
vq->iov->iov_base);
break;
}
/* TODO: Should check and handle checksum. */
if (likely(mergeable) &&
memcpy_toiovecend(nvq->hdr, (unsigned char *)&headcount,
offsetof(typeof(hdr), num_buffers),
sizeof hdr.num_buffers)) {
vq_err(vq, "Failed num_buffers write");
vhost_discard_vq_desc(vq, headcount);
break;
}
vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
headcount);
if (unlikely(vq_log))
vhost_log_write(vq, vq_log, log, vhost_len);
total_len += vhost_len;
if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
vhost_poll_queue(&vq->poll);
break;
}
}
out:
mutex_unlock(&vq->mutex);
} | 1 | [
"CWE-20",
"CWE-787"
] | linux | d8316f3991d207fe32881a9ac20241be8fa2bad0 | 116,987,039,205,117,420,000,000,000,000,000,000,000 | 108 | vhost: fix total length when packets are too short
When mergeable buffers are disabled, and the
incoming packet is too large for the rx buffer,
get_rx_bufs returns success.
This was intentional in order for make recvmsg
truncate the packet and then handle_rx would
detect err != sock_len and drop it.
Unfortunately we pass the original sock_len to
recvmsg - which means we use parts of iov not fully
validated.
Fix this up by detecting this overrun and doing packet drop
immediately.
CVE-2014-0077
Signed-off-by: Michael S. Tsirkin <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
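Per the description, the danger is calling recvmsg() with the original sock_len while the gathered iov covers less than that. A hedged sketch of the drop-on-overrun idea inside the receive loop (assuming get_rx_bufs() is changed to signal the overrun, e.g. by returning a count above UIO_MAXIOV; not the exact upstream diff):

```c
/* Sketch only: if the gathered buffers cannot hold the whole datagram,
 * consume and discard it instead of reading past the validated iov. */
if (unlikely(headcount > UIO_MAXIOV)) {
        msg.msg_iovlen = 1;
        err = sock->ops->recvmsg(NULL, sock, &msg, 1,
                                 MSG_DONTWAIT | MSG_TRUNC);
        pr_debug("Discarded rx packet: len %zu\n", sock_len);
        continue;
}
```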
quit_response_cancel (void)
{
response_cancel ();
gtk_main_quit ();
return FALSE;
} | 0 | [
"CWE-362"
] | gnome-screensaver | ab08cc93f2dc6223c8c00bfa1ca4f2d89069dbe0 | 335,359,259,850,611,330,000,000,000,000,000,000,000 | 6 | Work around x errors by asking dialog to die on cancel
Basically, what is happening is that gnome-screensaver-dialog exits after the
5th failed attempt at unlocking the screen, but before the shake animation
finishes. If the timing is slightly unlucky, this results in gnome-screensaver
accessing X resources that have already been destroyed (I ran it through
xtrace, and that showed this happening)
My patch fixes this by making gnome-screensaver-dialog request to
gnome-screensaver that it be terminated after the 5th failed attempt (rather
than exiting straight away, although there is a fallback timeout too).
gnome-screensaver then terminates the dialog after it is finished with the
shake animation, to avoid the race condition that is currently making it crash. |
int _yr_re_fiber_split(
RE_FIBER_LIST* fiber_list,
RE_FIBER_POOL* fiber_pool,
RE_FIBER* fiber,
RE_FIBER** new_fiber)
{
int32_t i;
FAIL_ON_ERROR(_yr_re_fiber_create(fiber_pool, new_fiber));
(*new_fiber)->sp = fiber->sp;
(*new_fiber)->ip = fiber->ip;
(*new_fiber)->rc = fiber->rc;
for (i = 0; i <= fiber->sp; i++)
(*new_fiber)->stack[i] = fiber->stack[i];
(*new_fiber)->next = fiber->next;
(*new_fiber)->prev = fiber;
if (fiber->next != NULL)
fiber->next->prev = *new_fiber;
fiber->next = *new_fiber;
if (fiber_list->tail == fiber)
fiber_list->tail = *new_fiber;
assert(fiber_list->tail->next == NULL);
assert(fiber_list->head->prev == NULL);
return ERROR_SUCCESS;
} | 0 | [
"CWE-125"
] | yara | 83d799804648c2a0895d40a19835d9b757c6fa4e | 154,839,914,799,944,700,000,000,000,000,000,000,000 | 33 | Fix issue #646 (#648)
* Fix issue #646 and some edge cases with wide regexps using \b and \B
* Rename function IS_WORD_CHAR to _yr_re_is_word_char |
static int remove_recursively(const char * path) {
DIR *d = opendir(path);
size_t path_len = strlen(path);
int r = -1;
size_t len;
char *buf;
if (d) {
struct dirent *p;
r = 0;
while ((r==0) && (p=readdir(d))) {
/* Skip the names "." and ".." as we don't want to recurse on them. */
if (!strcmp(p->d_name, ".") || !strcmp(p->d_name, ".."))
continue;
len = path_len + strlen(p->d_name) + 2;
buf = (char *) malloc(len);
if (buf) {
struct stat statbuf;
snprintf(buf, len, "%s/%s", path, p->d_name);
if (stat(buf, &statbuf) == 0) {
if (S_ISDIR(statbuf.st_mode))
r = remove_recursively(buf);
else
r = unlink(buf);
}
free(buf);
}
}
closedir(d);
}
if (r == 0)
r = rmdir(path);
return r;
} | 0 | [
"CWE-476"
] | ntopng | 01f47e04fd7c8d54399c9e465f823f0017069f8f | 38,859,911,846,200,020,000,000,000,000,000,000,000 | 44 | Security fix: prevents empty host from being used |
static void exif_iif_add_buffer(image_info_type *image_info, int section_index, char *name, int length, char *value TSRMLS_DC)
{
image_info_data *info_data;
image_info_data *list;
if (value) {
list = safe_erealloc(image_info->info_list[section_index].list, (image_info->info_list[section_index].count+1), sizeof(image_info_data), 0);
image_info->info_list[section_index].list = list;
info_data = &image_info->info_list[section_index].list[image_info->info_list[section_index].count];
info_data->tag = TAG_NONE;
info_data->format = TAG_FMT_UNDEFINED;
info_data->length = length;
info_data->name = estrdup(name);
info_data->value.s = safe_emalloc(length, 1, 1);
memcpy(info_data->value.s, value, length);
info_data->value.s[length] = 0;
image_info->sections_found |= 1<<section_index;
image_info->info_list[section_index].count++;
}
} | 0 | [
"CWE-119"
] | php-src | ddb207e7fa2e9adeba021a1303c3781efda5409b | 73,691,749,757,688,280,000,000,000,000,000,000,000 | 20 | Fix bug #68113 (Heap corruption in exif_thumbnail()) |
RPVector *r_bin_wasm_get_types(RBinWasmObj *bin) {
r_return_val_if_fail (bin && bin->g_sections, NULL);
return bin->g_types? bin->g_types: parse_unique_subsec_vec_by_id (bin, R_BIN_WASM_SECTION_TYPE);
} | 0 | [
"CWE-787"
] | radare2 | b4ca66f5d4363d68a6379e5706353b3bde5104a4 | 122,206,294,775,900,500,000,000,000,000,000,000,000 | 4 | Fix #20336 - wasm bin parser ##crash |
brcmf_cfg80211_sched_scan_start(struct wiphy *wiphy,
struct net_device *ndev,
struct cfg80211_sched_scan_request *request)
{
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy);
struct brcmf_pno_net_param_le pfn;
int i;
int ret = 0;
brcmf_dbg(SCAN, "Enter n_match_sets:%d n_ssids:%d\n",
request->n_match_sets, request->n_ssids);
if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) {
brcmf_err("Scanning already: status (%lu)\n", cfg->scan_status);
return -EAGAIN;
}
if (test_bit(BRCMF_SCAN_STATUS_SUPPRESS, &cfg->scan_status)) {
brcmf_err("Scanning suppressed: status (%lu)\n",
cfg->scan_status);
return -EAGAIN;
}
if (!request->n_ssids || !request->n_match_sets) {
brcmf_dbg(SCAN, "Invalid sched scan req!! n_ssids:%d\n",
request->n_ssids);
return -EINVAL;
}
if (request->n_ssids > 0) {
for (i = 0; i < request->n_ssids; i++) {
/* Active scan req for ssids */
brcmf_dbg(SCAN, ">>> Active scan req for ssid (%s)\n",
request->ssids[i].ssid);
/* match_set ssids is a supert set of n_ssid list,
* so we need not add these set separately.
*/
}
}
if (request->n_match_sets > 0) {
/* clean up everything */
ret = brcmf_dev_pno_clean(ndev);
if (ret < 0) {
brcmf_err("failed error=%d\n", ret);
return ret;
}
/* configure pno */
if (brcmf_dev_pno_config(ifp, request))
return -EINVAL;
/* configure each match set */
for (i = 0; i < request->n_match_sets; i++) {
struct cfg80211_ssid *ssid;
u32 ssid_len;
ssid = &request->match_sets[i].ssid;
ssid_len = ssid->ssid_len;
if (!ssid_len) {
brcmf_err("skip broadcast ssid\n");
continue;
}
pfn.auth = cpu_to_le32(WLAN_AUTH_OPEN);
pfn.wpa_auth = cpu_to_le32(BRCMF_PNO_WPA_AUTH_ANY);
pfn.wsec = cpu_to_le32(0);
pfn.infra = cpu_to_le32(1);
pfn.flags = cpu_to_le32(1 << BRCMF_PNO_HIDDEN_BIT);
pfn.ssid.SSID_len = cpu_to_le32(ssid_len);
memcpy(pfn.ssid.SSID, ssid->ssid, ssid_len);
ret = brcmf_fil_iovar_data_set(ifp, "pfn_add", &pfn,
sizeof(pfn));
brcmf_dbg(SCAN, ">>> PNO filter %s for ssid (%s)\n",
ret == 0 ? "set" : "failed", ssid->ssid);
}
/* Enable the PNO */
if (brcmf_fil_iovar_int_set(ifp, "pfn", 1) < 0) {
brcmf_err("PNO enable failed!! ret=%d\n", ret);
return -EINVAL;
}
} else {
return -EINVAL;
}
return 0;
} | 0 | [
"CWE-119",
"CWE-703"
] | linux | ded89912156b1a47d940a0c954c43afbabd0c42c | 330,415,683,553,765,040,000,000,000,000,000,000,000 | 87 | brcmfmac: avoid potential stack overflow in brcmf_cfg80211_start_ap()
User-space can choose to omit NL80211_ATTR_SSID and only provide raw
IE TLV data. When doing so it can provide SSID IE with length exceeding
the allowed size. The driver further processes this IE copying it
into a local variable without checking the length. Hence stack can be
corrupted and used as exploit.
Cc: [email protected] # v4.7
Reported-by: Daxing Guo <[email protected]>
Reviewed-by: Hante Meuleman <[email protected]>
Reviewed-by: Pieter-Paul Giesberts <[email protected]>
Reviewed-by: Franky Lin <[email protected]>
Signed-off-by: Arend van Spriel <[email protected]>
Signed-off-by: Kalle Valo <[email protected]> |
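The commit message concerns brcmf_cfg80211_start_ap(), not the scheduled-scan function shown in this row: a user-supplied SSID IE is copied into a fixed-size field without checking its length. A generic illustration of the missing guard (hypothetical variable names, not the upstream patch):

```c
/* Illustration with hypothetical names: validate the IE length against the
 * destination field before copying, whatever length the caller claims. */
if (ie_len > sizeof(ssid_le.SSID))
        return -EINVAL;
memcpy(ssid_le.SSID, ie_data, ie_len);
ssid_le.SSID_len = cpu_to_le32(ie_len);
```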
static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
u32 exit_reason, u32 exit_intr_info,
unsigned long exit_qualification)
{
/* update guest state fields: */
vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12);
vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12);
vmcs12->guest_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
vmcs12->guest_rip = kvm_register_read(vcpu, VCPU_REGS_RIP);
vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS);
vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR);
vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR);
vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR);
vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR);
vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR);
vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR);
vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR);
vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR);
vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT);
vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT);
vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT);
vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT);
vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT);
vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT);
vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT);
vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT);
vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT);
vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT);
vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES);
vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES);
vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES);
vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES);
vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES);
vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES);
vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES);
vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES);
vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE);
vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE);
vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE);
vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE);
vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE);
vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE);
vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE);
vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE);
vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE);
vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE);
vmcs12->guest_interruptibility_info =
vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
vmcs12->guest_pending_dbg_exceptions =
vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS);
if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
vmcs12->guest_activity_state = GUEST_ACTIVITY_HLT;
else
vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE;
if (nested_cpu_has_preemption_timer(vmcs12)) {
if (vmcs12->vm_exit_controls &
VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)
vmcs12->vmx_preemption_timer_value =
vmx_get_preemption_timer_value(vcpu);
hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer);
}
/*
* In some cases (usually, nested EPT), L2 is allowed to change its
* own CR3 without exiting. If it has changed it, we must keep it.
* Of course, if L0 is using shadow page tables, GUEST_CR3 was defined
* by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12.
*
* Additionally, restore L2's PDPTR to vmcs12.
*/
if (enable_ept) {
vmcs12->guest_cr3 = vmcs_read64(GUEST_CR3);
vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0);
vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1);
vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2);
vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3);
}
vmcs12->vm_entry_controls =
(vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
(vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE);
if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS) {
kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7);
vmcs12->guest_ia32_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
}
/* TODO: These cannot have changed unless we have MSR bitmaps and
* the relevant bit asks not to trap the change */
if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT)
vmcs12->guest_ia32_pat = vmcs_read64(GUEST_IA32_PAT);
if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER)
vmcs12->guest_ia32_efer = vcpu->arch.efer;
vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS);
vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP);
vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP);
if (vmx_mpx_supported())
vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
/* update exit information fields: */
vmcs12->vm_exit_reason = exit_reason;
vmcs12->exit_qualification = exit_qualification;
vmcs12->vm_exit_intr_info = exit_intr_info;
if ((vmcs12->vm_exit_intr_info &
(INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) ==
(INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK))
vmcs12->vm_exit_intr_error_code =
vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
vmcs12->idt_vectoring_info_field = 0;
vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) {
/* vm_entry_intr_info_field is cleared on exit. Emulate this
* instead of reading the real value. */
vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK;
/*
* Transfer the event that L0 or L1 may wanted to inject into
* L2 to IDT_VECTORING_INFO_FIELD.
*/
vmcs12_save_pending_event(vcpu, vmcs12);
}
/*
* Drop what we picked up for L2 via vmx_complete_interrupts. It is
* preserved above and would only end up incorrectly in L1.
*/
vcpu->arch.nmi_injected = false;
kvm_clear_exception_queue(vcpu);
kvm_clear_interrupt_queue(vcpu);
} | 0 | [] | kvm | a642fc305053cc1c6e47e4f4df327895747ab485 | 22,371,835,413,611,370,000,000,000,000,000,000,000 | 138 | kvm: vmx: handle invvpid vm exit gracefully
On systems with invvpid instruction support (corresponding bit in
IA32_VMX_EPT_VPID_CAP MSR is set) guest invocation of invvpid
causes vm exit, which is currently not handled and results in
propagation of unknown exit to userspace.
Fix this by installing an invvpid vm exit handler.
This is CVE-2014-3646.
Cc: [email protected]
Signed-off-by: Petr Matousek <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
static inline void native_store_gdt(struct desc_ptr *dtr)
{
asm volatile("sgdt %0":"=m" (*dtr));
} | 0 | [
"CWE-119"
] | linux-2.6 | 5ac37f87ff18843aabab84cf75b2f8504c2d81fe | 248,974,016,667,785,650,000,000,000,000,000,000,000 | 4 | x86: fix ldt limit for 64 bit
Fix size of LDT entries. On x86-64, ldt_desc is a double-sized descriptor.
Signed-off-by: Michael Karcher <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]> |
static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
{
struct dma_iommu_mapping *mapping = dev->archdata.mapping;
/*
* add optional in-page offset from iova to size and align
* result to page size
*/
size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
iova &= PAGE_MASK;
iommu_unmap(mapping->domain, iova, size);
__free_iova(mapping, iova, size);
return 0;
} | 0 | [
"CWE-284",
"CWE-264"
] | linux | 0ea1ec713f04bdfac343c9702b21cd3a7c711826 | 257,991,369,449,313,670,000,000,000,000,000,000,000 | 15 | ARM: dma-mapping: don't allow DMA mappings to be marked executable
DMA mapping permissions were being derived from pgprot_kernel directly
without using PAGE_KERNEL. This causes them to be marked with executable
permission, which is not what we want. Fix this.
Signed-off-by: Russell King <[email protected]> |
static void _perf_event_enable(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
struct task_struct *task = ctx->task;
if (!task) {
/*
* Enable the event on the cpu that it's on
*/
cpu_function_call(event->cpu, __perf_event_enable, event);
return;
}
raw_spin_lock_irq(&ctx->lock);
if (event->state >= PERF_EVENT_STATE_INACTIVE)
goto out;
/*
* If the event is in error state, clear that first.
* That way, if we see the event in error state below, we
* know that it has gone back into error state, as distinct
* from the task having been scheduled away before the
* cross-call arrived.
*/
if (event->state == PERF_EVENT_STATE_ERROR)
event->state = PERF_EVENT_STATE_OFF;
retry:
if (!ctx->is_active) {
__perf_event_mark_enabled(event);
goto out;
}
raw_spin_unlock_irq(&ctx->lock);
if (!task_function_call(task, __perf_event_enable, event))
return;
raw_spin_lock_irq(&ctx->lock);
/*
* If the context is active and the event is still off,
* we need to retry the cross-call.
*/
if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) {
/*
* task could have been flipped by a concurrent
* perf_event_context_sched_out()
*/
task = ctx->task;
goto retry;
}
out:
raw_spin_unlock_irq(&ctx->lock);
} | 0 | [
"CWE-284",
"CWE-264"
] | linux | f63a8daa5812afef4f06c962351687e1ff9ccb2b | 150,569,732,910,092,760,000,000,000,000,000,000,000 | 56 | perf: Fix event->ctx locking
There have been a few reported issues wrt. the lack of locking around
changing event->ctx. This patch tries to address those.
It avoids the whole rwsem thing; and while it appears to work, please
give it some thought in review.
What I did fail at is sensible runtime checks on the use of
event->ctx, the RCU use makes it very hard.
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Paul E. McKenney <[email protected]>
Cc: Jiri Olsa <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
Cc: Linus Torvalds <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]> |
static void _GTextFieldReplace(GTextField *gt, const unichar_t *str) {
unichar_t *old = gt->oldtext;
unichar_t *new = malloc((u_strlen(gt->text)-(gt->sel_end-gt->sel_start) + u_strlen(str)+1)*sizeof(unichar_t));
gt->oldtext = gt->text;
gt->sel_oldstart = gt->sel_start;
gt->sel_oldend = gt->sel_end;
gt->sel_oldbase = gt->sel_base;
u_strncpy(new,gt->text,gt->sel_start);
u_strcpy(new+gt->sel_start,str);
gt->sel_start = u_strlen(new);
u_strcpy(new+gt->sel_start,gt->text+gt->sel_end);
gt->text = new;
gt->sel_end = gt->sel_base = gt->sel_start;
free(old);
GTextFieldRefigureLines(gt,gt->sel_oldstart);
} | 0 | [
"CWE-119",
"CWE-787"
] | fontforge | 626f751752875a0ddd74b9e217b6f4828713573c | 105,390,430,836,590,200,000,000,000,000,000,000,000 | 19 | Warn users before discarding their unsaved scripts (#3852)
* Warn users before discarding their unsaved scripts
This closes #3846. |
void * pvPortMalloc( size_t xWantedSize )
{
void * pvReturn = NULL;
static uint8_t * pucAlignedHeap = NULL;
/* Ensure that blocks are always aligned to the required number of bytes. */
#if ( portBYTE_ALIGNMENT != 1 )
{
if( xWantedSize & portBYTE_ALIGNMENT_MASK )
{
/* Byte alignment required. */
xWantedSize += ( portBYTE_ALIGNMENT - ( xWantedSize & portBYTE_ALIGNMENT_MASK ) );
}
}
#endif
vTaskSuspendAll();
{
if( pucAlignedHeap == NULL )
{
/* Ensure the heap starts on a correctly aligned boundary. */
pucAlignedHeap = ( uint8_t * ) ( ( ( portPOINTER_SIZE_TYPE ) & ucHeap[ portBYTE_ALIGNMENT ] ) & ( ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) ) );
}
/* Check there is enough room left for the allocation. */
if( ( ( xNextFreeByte + xWantedSize ) < configADJUSTED_HEAP_SIZE ) &&
( ( xNextFreeByte + xWantedSize ) > xNextFreeByte ) ) /* Check for overflow. */
{
/* Return the next free byte then increment the index past this
* block. */
pvReturn = pucAlignedHeap + xNextFreeByte;
xNextFreeByte += xWantedSize;
}
traceMALLOC( pvReturn, xWantedSize );
}
( void ) xTaskResumeAll();
#if ( configUSE_MALLOC_FAILED_HOOK == 1 )
{
if( pvReturn == NULL )
{
extern void vApplicationMallocFailedHook( void );
vApplicationMallocFailedHook();
}
}
#endif
return pvReturn;
}
| 1 | [
"CWE-200",
"CWE-119"
] | FreeRTOS-Kernel | c7a9a01c94987082b223d3e59969ede64363da63 | 267,641,896,482,212,160,000,000,000,000,000,000,000 | 50 | Improve heap2 bounds checking (#224)
* Improve heap bounds checking in pvPortMalloc |
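In the function above, the byte-alignment adjustment adds to xWantedSize before any range check, so a request close to SIZE_MAX can wrap to a small value and slip past the configADJUSTED_HEAP_SIZE test. A small self-contained sketch of overflow-safe rounding (the actual heap_2 patch differs in detail):

```c
#include <stdbool.h>
#include <stddef.h>

#define BYTE_ALIGNMENT      8u
#define BYTE_ALIGNMENT_MASK (BYTE_ALIGNMENT - 1u)

/* Round `wanted` up to the alignment boundary, refusing sizes whose
 * adjustment would wrap around SIZE_MAX. */
static bool align_request(size_t wanted, size_t *aligned)
{
    size_t pad = 0;

    if ((wanted & BYTE_ALIGNMENT_MASK) != 0u)
        pad = BYTE_ALIGNMENT - (wanted & BYTE_ALIGNMENT_MASK);

    if (wanted + pad < wanted)      /* the addition overflowed */
        return false;

    *aligned = wanted + pad;
    return true;
}
```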
ZEND_API zval* ZEND_FASTCALL zend_hash_minmax(const HashTable *ht, compare_func_t compar, uint32_t flag)
{
uint32_t idx;
Bucket *p, *res;
IS_CONSISTENT(ht);
if (ht->nNumOfElements == 0 ) {
return NULL;
}
idx = 0;
while (1) {
if (idx == ht->nNumUsed) {
return NULL;
}
if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) break;
idx++;
}
res = ht->arData + idx;
for (; idx < ht->nNumUsed; idx++) {
p = ht->arData + idx;
if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
if (flag) {
if (compar(res, p) < 0) { /* max */
res = p;
}
} else {
if (compar(res, p) > 0) { /* min */
res = p;
}
}
}
return &res->val;
} | 0 | [
"CWE-190"
] | php-src | 4cc0286f2f3780abc6084bcdae5dce595daa3c12 | 55,825,852,574,900,600,000,000,000,000,000,000,000 | 36 | Fix #73832 - leave the table in a safe state if the size is too big. |
void nntp_bcache_update(struct NntpData *nntp_data)
{
mutt_bcache_list(nntp_data->bcache, nntp_bcache_delete, nntp_data);
} | 0 | [
"CWE-119",
"CWE-787"
] | neomutt | 6296f7153f0c9d5e5cd3aaf08f9731e56621bdd3 | 160,606,818,737,100,390,000,000,000,000,000,000,000 | 4 | Set length modifiers for group and desc
nntp_add_group parses a line controlled by the connected nntp server.
Restrict the maximum lengths read into the stack buffers group, and
desc. |
quit(
struct parse *pcmd,
FILE *fp
)
{
if (havehost)
closesocket(sockfd); /* cleanliness next to godliness */
exit(0);
} | 0 | [
"CWE-20"
] | ntp | 07a5b8141e354a998a52994c3c9cd547927e56ce | 131,300,589,830,956,660,000,000,000,000,000,000,000 | 9 | [TALOS-CAN-0063] avoid buffer overrun in ntpq |
void fastd_receive_unknown_free(void) {
size_t i;
for (i = 0; i < UNKNOWN_TABLES; i++)
free(ctx.unknown_handshakes[i]);
} | 0 | [
"CWE-617",
"CWE-119",
"CWE-284"
] | fastd | 737925113363b6130879729cdff9ccc46c33eaea | 155,689,497,157,253,260,000,000,000,000,000,000,000 | 5 | receive: fix buffer leak when receiving invalid packets
For fastd versions before v20, this was just a memory leak (which could
still be used for DoS, as it's remotely triggerable). With the new
buffer management of fastd v20, this will trigger an assertion failure
instead as soon as the buffer pool is empty. |
static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
{
struct page *page = NULL;
int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
nd_set_link(nd, res ? ERR_PTR(res) : kmap(page));
if (page)
unlock_page(page);
return page;
} | 0 | [
"CWE-400"
] | linux-2.6 | 14fcc23fdc78e9d32372553ccf21758a9bd56fa1 | 114,492,484,713,598,650,000,000,000,000,000,000,000 | 9 | tmpfs: fix kernel BUG in shmem_delete_inode
SuSE's insserve initscript ordering program hits kernel BUG at mm/shmem.c:814
on 2.6.26. It's using posix_fadvise on directories, and the shmem_readpage
method added in 2.6.23 is letting POSIX_FADV_WILLNEED allocate useless pages
to a tmpfs directory, incrementing i_blocks count but never decrementing it.
Fix this by assigning shmem_aops (pointing to readpage and writepage and
set_page_dirty) only when it's needed, on a regular file or a long symlink.
Many thanks to Kel for outstanding bugreport and steps to reproduce it.
Reported-by: Kel Modderman <[email protected]>
Tested-by: Kel Modderman <[email protected]>
Signed-off-by: Hugh Dickins <[email protected]>
Cc: <[email protected]> [2.6.25.x, 2.6.26.x]
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
itc_type_find (GstTypeFind * tf, gpointer unused)
{
DataScanCtx c = { 0, NULL, 0 };
guint8 magic[8] = { 0x00, 0x00, 0x01, 0x1C, 0x69, 0x74, 0x63, 0x68 };
guint8 preamble[4] = { 0x00, 0x00, 0x00, 0x02 };
guint8 artwork_marker[8] = { 0x00, 0x00, 0x00, 0x00, 0x61, 0x72, 0x74, 0x77 };
GstTypeFindProbability itc_prob = GST_TYPE_FIND_NONE;
int i;
if (G_UNLIKELY (!data_scan_ctx_ensure_data (tf, &c, 8)))
return;
if (memcmp (c.data, magic, 8))
return;
/* At least we found the right magic */
itc_prob = GST_TYPE_FIND_MINIMUM;
data_scan_ctx_advance (tf, &c, 8);
if (G_UNLIKELY (!data_scan_ctx_ensure_data (tf, &c, 12)))
goto done;
/* Check preamble 3 consecutive times */
for (i = 0; i < 3; i++) {
if (memcmp (c.data, preamble, 4))
goto done;
data_scan_ctx_advance (tf, &c, 4);
}
itc_prob = GST_TYPE_FIND_POSSIBLE;
if (G_UNLIKELY (!data_scan_ctx_ensure_data (tf, &c, 8)))
goto done;
/* Look for "artw" marker */
if (memcmp (c.data, artwork_marker, 8))
goto done;
itc_prob = GST_TYPE_FIND_LIKELY;
data_scan_ctx_advance (tf, &c, 8);
if (G_UNLIKELY (!data_scan_ctx_ensure_data (tf, &c, 256)))
goto done;
/* ...and 256 0x00 padding bytes on what looks like the header's end */
for (i = 0; i < 256; i++) {
if (c.data[i])
goto done;
}
itc_prob = GST_TYPE_FIND_NEARLY_CERTAIN;
done:
gst_type_find_suggest (tf, itc_prob, ITC_CAPS);
} | 0 | [
"CWE-125"
] | gst-plugins-base | 2fdccfd64fc609e44e9c4b8eed5bfdc0ab9c9095 | 86,239,662,756,234,600,000,000,000,000,000,000,000 | 55 | typefind: bounds check windows ico detection
Fixes out of bounds read
https://bugzilla.gnome.org/show_bug.cgi?id=774902 |
void PSOutputDev::writePSName(char *s) {
char *p;
char c;
p = s;
while ((c = *p++)) {
if (c <= (char)0x20 || c >= (char)0x7f ||
c == '(' || c == ')' || c == '<' || c == '>' ||
c == '[' || c == ']' || c == '{' || c == '}' ||
c == '/' || c == '%' || c == '\\') {
writePSFmt("#{0:02x}", c & 0xff);
} else {
writePSChar(c);
}
}
} | 0 | [] | poppler | abf167af8b15e5f3b510275ce619e6fdb42edd40 | 216,109,906,098,973,570,000,000,000,000,000,000,000 | 16 | Implement tiling/patterns in SplashOutputDev
Fixes bug 13518 |
int MMDB_aget_value(MMDB_entry_s *const start,
MMDB_entry_data_s *const entry_data,
const char *const *const path)
{
MMDB_s *mmdb = start->mmdb;
uint32_t offset = start->offset;
memset(entry_data, 0, sizeof(MMDB_entry_data_s));
DEBUG_NL;
DEBUG_MSG("looking up value by path");
CHECKED_DECODE_ONE_FOLLOW(mmdb, offset, entry_data);
DEBUG_NL;
DEBUG_MSGF("top level element is a %s", type_num_to_name(entry_data->type));
/* Can this happen? It'd probably represent a pathological case under
* normal use, but there's nothing preventing someone from passing an
* invalid MMDB_entry_s struct to this function */
if (!entry_data->has_data) {
return MMDB_INVALID_LOOKUP_PATH_ERROR;
}
const char *path_elem;
int i = 0;
while (NULL != (path_elem = path[i++])) {
DEBUG_NL;
DEBUG_MSGF("path elem = %s", path_elem);
/* XXX - it'd be good to find a quicker way to skip through these
entries that doesn't involve decoding them
completely. Basically we need to just use the size from the
control byte to advance our pointer rather than calling
decode_one(). */
if (entry_data->type == MMDB_DATA_TYPE_ARRAY) {
int status = lookup_path_in_array(path_elem, mmdb, entry_data);
if (MMDB_SUCCESS != status) {
memset(entry_data, 0, sizeof(MMDB_entry_data_s));
return status;
}
} else if (entry_data->type == MMDB_DATA_TYPE_MAP) {
int status = lookup_path_in_map(path_elem, mmdb, entry_data);
if (MMDB_SUCCESS != status) {
memset(entry_data, 0, sizeof(MMDB_entry_data_s));
return status;
}
} else {
/* Once we make the code traverse maps & arrays without calling
* decode_one() we can get rid of this. */
memset(entry_data, 0, sizeof(MMDB_entry_data_s));
return MMDB_LOOKUP_PATH_DOES_NOT_MATCH_DATA_ERROR;
}
}
return MMDB_SUCCESS;
} | 0 | [] | libmaxminddb | 51255f113fe3c7b63ffe957636a7656a3ff9d1ff | 64,600,433,368,027,090,000,000,000,000,000,000,000 | 56 | Fix several segfaults from missing bounds checks
These were found using afl-fuzz. There are several more similar failure
that warrant investigation. If we want to add tests, I have several
corrupt databases we could add, but these are trivially reproducible
when using the existing test databases as input to afl-fuzz. |
static inline void clear_bss_descriptor(struct bss_descriptor *bss)
{
/* Don't blow away ->list, just BSS data */
memset(bss, 0, offsetof(struct bss_descriptor, list));
} | 0 | [
"CWE-119"
] | linux-2.6 | 48735d8d8bd701b1e0cd3d49c21e5e385ddcb077 | 266,694,280,160,941,470,000,000,000,000,000,000,000 | 5 | libertas: fix buffer overrun
If somebody sends an invalid beacon/probe response, that can trash the
whole BSS descriptor. The descriptor is, luckily, large enough so that
it cannot scribble past the end of it; it's well above 400 bytes long.
Signed-off-by: Johannes Berg <[email protected]>
Cc: [email protected] [2.6.24-2.6.27, bug present in some form since driver was added (2.6.22)]
Signed-off-by: John W. Linville <[email protected]> |
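The message above says an oversized IE in a forged beacon/probe response can overwrite most of struct bss_descriptor. A generic illustration of the clamp that prevents this (hypothetical names, not the libertas patch):

```c
/* Illustration with hypothetical names: copy at most what the destination
 * field can hold, regardless of the length the frame advertises. */
size_t n = ie_len;
if (n > sizeof(bss->ssid))
        n = sizeof(bss->ssid);
memcpy(bss->ssid, ie_data, n);
bss->ssid_len = n;
```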
static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
unsigned long len,
struct slice_mask available,
int psize, unsigned long high_limit)
{
int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
unsigned long addr, found, next_end;
struct vm_unmapped_area_info info;
info.flags = 0;
info.length = len;
info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
info.align_offset = 0;
addr = TASK_UNMAPPED_BASE;
/*
* Check till the allow max value for this mmap request
*/
while (addr < high_limit) {
info.low_limit = addr;
if (!slice_scan_available(addr, available, 1, &addr))
continue;
next_slice:
/*
* At this point [info.low_limit; addr) covers
* available slices only and ends at a slice boundary.
* Check if we need to reduce the range, or if we can
* extend it to cover the next available slice.
*/
if (addr >= high_limit)
addr = high_limit;
else if (slice_scan_available(addr, available, 1, &next_end)) {
addr = next_end;
goto next_slice;
}
info.high_limit = addr;
found = vm_unmapped_area(&info);
if (!(found & ~PAGE_MASK))
return found;
}
return -ENOMEM;
} | 0 | [
"CWE-119"
] | linux | 1be7107fbe18eed3e319a6c3e83c78254b693acb | 339,590,158,781,797,800,000,000,000,000,000,000,000 | 45 | mm: larger stack guard gap, between vmas
Stack guard page is a useful feature to reduce a risk of stack smashing
into a different mapping. We have been using a single page gap which
is sufficient to prevent having stack adjacent to a different mapping.
But this seems to be insufficient in the light of the stack usage in
userspace. E.g. glibc uses as large as 64kB alloca() in many commonly
used functions. Others use constructs liks gid_t buffer[NGROUPS_MAX]
which is 256kB or stack strings with MAX_ARG_STRLEN.
This will become especially dangerous for suid binaries and the default
no limit for the stack size limit because those applications can be
tricked to consume a large portion of the stack and a single glibc call
could jump over the guard page. These attacks are not theoretical,
unfortunately.
Make those attacks less probable by increasing the stack guard gap
to 1MB (on systems with 4k pages; but make it depend on the page size
because systems with larger base pages might cap stack allocations in
the PAGE_SIZE units) which should cover larger alloca() and VLA stack
allocations. It is obviously not a full fix because the problem is
somehow inherent, but it should reduce attack space a lot.
One could argue that the gap size should be configurable from userspace,
but that can be done later when somebody finds that the new 1MB is wrong
for some special case applications. For now, add a kernel command line
option (stack_guard_gap) to specify the stack gap size (in page units).
Implementation wise, first delete all the old code for stack guard page:
because although we could get away with accounting one extra page in a
stack vma, accounting a larger gap can break userspace - case in point,
a program run with "ulimit -S -v 20000" failed when the 1MB gap was
counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK
and strict non-overcommit mode.
Instead of keeping gap inside the stack vma, maintain the stack guard
gap as a gap between vmas: using vm_start_gap() in place of vm_start
(or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few
places which need to respect the gap - mainly arch_get_unmapped_area(),
and and the vma tree's subtree_gap support for that.
Original-patch-by: Oleg Nesterov <[email protected]>
Original-patch-by: Michal Hocko <[email protected]>
Signed-off-by: Hugh Dickins <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Tested-by: Helge Deller <[email protected]> # parisc
Signed-off-by: Linus Torvalds <[email protected]> |
static void free_smi_msg_list(struct list_head *q)
{
struct ipmi_smi_msg *msg, *msg2;
list_for_each_entry_safe(msg, msg2, q, link) {
list_del(&msg->link);
ipmi_free_smi_msg(msg);
}
} | 0 | [
"CWE-416",
"CWE-284"
] | linux | 77f8269606bf95fcb232ee86f6da80886f1dfae8 | 191,055,239,268,844,430,000,000,000,000,000,000,000 | 9 | ipmi: fix use-after-free of user->release_barrier.rda
When we do the following test, we got oops in ipmi_msghandler driver
while((1))
do
service ipmievd restart & service ipmievd restart
done
---------------------------------------------------------------
[ 294.230186] Unable to handle kernel paging request at virtual address 0000803fea6ea008
[ 294.230188] Mem abort info:
[ 294.230190] ESR = 0x96000004
[ 294.230191] Exception class = DABT (current EL), IL = 32 bits
[ 294.230193] SET = 0, FnV = 0
[ 294.230194] EA = 0, S1PTW = 0
[ 294.230195] Data abort info:
[ 294.230196] ISV = 0, ISS = 0x00000004
[ 294.230197] CM = 0, WnR = 0
[ 294.230199] user pgtable: 4k pages, 48-bit VAs, pgdp = 00000000a1c1b75a
[ 294.230201] [0000803fea6ea008] pgd=0000000000000000
[ 294.230204] Internal error: Oops: 96000004 [#1] SMP
[ 294.235211] Modules linked in: nls_utf8 isofs rpcrdma ib_iser ib_srpt target_core_mod ib_srp scsi_transport_srp ib_ipoib rdma_ucm ib_umad rdma_cm ib_cm iw_cm dm_mirror dm_region_hash dm_log dm_mod aes_ce_blk crypto_simd cryptd aes_ce_cipher ghash_ce sha2_ce ses sha256_arm64 sha1_ce hibmc_drm hisi_sas_v2_hw enclosure sg hisi_sas_main sbsa_gwdt ip_tables mlx5_ib ib_uverbs marvell ib_core mlx5_core ixgbe ipmi_si mdio hns_dsaf ipmi_devintf ipmi_msghandler hns_enet_drv hns_mdio
[ 294.277745] CPU: 3 PID: 0 Comm: swapper/3 Kdump: loaded Not tainted 5.0.0-rc2+ #113
[ 294.285511] Hardware name: Huawei TaiShan 2280 /BC11SPCD, BIOS 1.37 11/21/2017
[ 294.292835] pstate: 80000005 (Nzcv daif -PAN -UAO)
[ 294.297695] pc : __srcu_read_lock+0x38/0x58
[ 294.301940] lr : acquire_ipmi_user+0x2c/0x70 [ipmi_msghandler]
[ 294.307853] sp : ffff00001001bc80
[ 294.311208] x29: ffff00001001bc80 x28: ffff0000117e5000
[ 294.316594] x27: 0000000000000000 x26: dead000000000100
[ 294.321980] x25: dead000000000200 x24: ffff803f6bd06800
[ 294.327366] x23: 0000000000000000 x22: 0000000000000000
[ 294.332752] x21: ffff00001001bd04 x20: ffff80df33d19018
[ 294.338137] x19: ffff80df33d19018 x18: 0000000000000000
[ 294.343523] x17: 0000000000000000 x16: 0000000000000000
[ 294.348908] x15: 0000000000000000 x14: 0000000000000002
[ 294.354293] x13: 0000000000000000 x12: 0000000000000000
[ 294.359679] x11: 0000000000000000 x10: 0000000000100000
[ 294.365065] x9 : 0000000000000000 x8 : 0000000000000004
[ 294.370451] x7 : 0000000000000000 x6 : ffff80df34558678
[ 294.375836] x5 : 000000000000000c x4 : 0000000000000000
[ 294.381221] x3 : 0000000000000001 x2 : 0000803fea6ea000
[ 294.386607] x1 : 0000803fea6ea008 x0 : 0000000000000001
[ 294.391994] Process swapper/3 (pid: 0, stack limit = 0x0000000083087293)
[ 294.398791] Call trace:
[ 294.401266] __srcu_read_lock+0x38/0x58
[ 294.405154] acquire_ipmi_user+0x2c/0x70 [ipmi_msghandler]
[ 294.410716] deliver_response+0x80/0xf8 [ipmi_msghandler]
[ 294.416189] deliver_local_response+0x28/0x68 [ipmi_msghandler]
[ 294.422193] handle_one_recv_msg+0x158/0xcf8 [ipmi_msghandler]
[ 294.432050] handle_new_recv_msgs+0xc0/0x210 [ipmi_msghandler]
[ 294.441984] smi_recv_tasklet+0x8c/0x158 [ipmi_msghandler]
[ 294.451618] tasklet_action_common.isra.5+0x88/0x138
[ 294.460661] tasklet_action+0x2c/0x38
[ 294.468191] __do_softirq+0x120/0x2f8
[ 294.475561] irq_exit+0x134/0x140
[ 294.482445] __handle_domain_irq+0x6c/0xc0
[ 294.489954] gic_handle_irq+0xb8/0x178
[ 294.497037] el1_irq+0xb0/0x140
[ 294.503381] arch_cpu_idle+0x34/0x1a8
[ 294.510096] do_idle+0x1d4/0x290
[ 294.516322] cpu_startup_entry+0x28/0x30
[ 294.523230] secondary_start_kernel+0x184/0x1d0
[ 294.530657] Code: d538d082 d2800023 8b010c81 8b020021 (c85f7c25)
[ 294.539746] ---[ end trace 8a7a880dee570b29 ]---
[ 294.547341] Kernel panic - not syncing: Fatal exception in interrupt
[ 294.556837] SMP: stopping secondary CPUs
[ 294.563996] Kernel Offset: disabled
[ 294.570515] CPU features: 0x002,21006008
[ 294.577638] Memory Limit: none
[ 294.587178] Starting crashdump kernel...
[ 294.594314] Bye!
Because the user->release_barrier.rda is freed in ipmi_destroy_user(), but
the refcount is not zero, when acquire_ipmi_user() uses user->release_barrier.rda
in __srcu_read_lock(), it causes oops.
Fix this by calling cleanup_srcu_struct() when the refcount is zero.
Fixes: e86ee2d44b44 ("ipmi: Rework locking and shutdown for hot remove")
Cc: [email protected] # 4.18
Signed-off-by: Yang Yingliang <[email protected]>
Signed-off-by: Corey Minyard <[email protected]> |
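The fix described above amounts to deferring cleanup_srcu_struct() until the last reference to the user is dropped, so readers can no longer touch freed SRCU state. Roughly (field and function names assumed from the message, not the exact diff):

```c
/* Sketch: tear down the SRCU barrier only in the kref release callback,
 * i.e. once the refcount has actually reached zero. */
static void free_user(struct kref *ref)
{
        struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);

        cleanup_srcu_struct(&user->release_barrier);
        kfree(user);
}
```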
GF_Err gf_isom_hevc_set_inband_config(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex, Bool keep_xps)
{
return gf_isom_hevc_config_update_ex(the_file, trackNumber, DescriptionIndex, NULL, GF_ISOM_HVCC_SET_INBAND, keep_xps);
} | 0 | [
"CWE-401"
] | gpac | 0a85029d694f992f3631e2f249e4999daee15cbf | 72,803,011,321,351,800,000,000,000,000,000,000,000 | 4 | fixed #1785 (fuzz) |
static int sctp_getsockopt_default_prinfo(struct sock *sk, int len,
char __user *optval,
int __user *optlen)
{
struct sctp_default_prinfo info;
struct sctp_association *asoc;
int retval = -EFAULT;
if (len < sizeof(info)) {
retval = -EINVAL;
goto out;
}
len = sizeof(info);
if (copy_from_user(&info, optval, len))
goto out;
asoc = sctp_id2assoc(sk, info.pr_assoc_id);
if (asoc) {
info.pr_policy = SCTP_PR_POLICY(asoc->default_flags);
info.pr_value = asoc->default_timetolive;
} else if (!info.pr_assoc_id) {
struct sctp_sock *sp = sctp_sk(sk);
info.pr_policy = SCTP_PR_POLICY(sp->default_flags);
info.pr_value = sp->default_timetolive;
} else {
retval = -EINVAL;
goto out;
}
if (put_user(len, optlen))
goto out;
if (copy_to_user(optval, &info, len))
goto out;
retval = 0;
out:
return retval;
} | 0 | [
"CWE-617",
"CWE-362"
] | linux | 2dcab598484185dea7ec22219c76dcdd59e3cb90 | 53,702,284,275,865,370,000,000,000,000,000,000,000 | 42 | sctp: avoid BUG_ON on sctp_wait_for_sndbuf
Alexander Popov reported that an application may trigger a BUG_ON in
sctp_wait_for_sndbuf if the socket tx buffer is full, a thread is
waiting on it to queue more data and meanwhile another thread peels off
the association being used by the first thread.
This patch replaces the BUG_ON call with a proper error handling. It
will return -EPIPE to the original sendmsg call, similarly to what would
have been done if the association wasn't found in the first place.
Acked-by: Alexander Popov <[email protected]>
Signed-off-by: Marcelo Ricardo Leitner <[email protected]>
Reviewed-by: Xin Long <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
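As the message explains, the BUG_ON() in sctp_wait_for_sndbuf() is replaced by an error return when the association has been peeled off while the sender slept. A paraphrased fragment of that idea (not the exact diff):

```c
/* Sketch: bail out instead of panicking when the association no longer
 * belongs to this socket; per the message, the caller sees -EPIPE. */
if (sk != asoc->base.sk)
        goto do_error;
```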
int mp_unpack_full(lua_State *L, int limit, int offset) {
size_t len;
const char *s;
mp_cur c;
int cnt; /* Number of objects unpacked */
int decode_all = (!limit && !offset);
s = luaL_checklstring(L,1,&len); /* if no match, exits */
if (offset < 0 || limit < 0) /* requesting negative off or lim is invalid */
return luaL_error(L,
"Invalid request to unpack with offset of %d and limit of %d.",
offset, len);
else if (offset > len)
return luaL_error(L,
"Start offset %d greater than input length %d.", offset, len);
if (decode_all) limit = INT_MAX;
mp_cur_init(&c,(const unsigned char *)s+offset,len-offset);
/* We loop over the decode because this could be a stream
* of multiple top-level values serialized together */
for(cnt = 0; c.left > 0 && cnt < limit; cnt++) {
mp_decode_to_lua_type(L,&c);
if (c.err == MP_CUR_ERROR_EOF) {
return luaL_error(L,"Missing bytes in input.");
} else if (c.err == MP_CUR_ERROR_BADFMT) {
return luaL_error(L,"Bad data format in input.");
}
}
if (!decode_all) {
/* c->left is the remaining size of the input buffer.
* subtract the entire buffer size from the unprocessed size
* to get our next start offset */
int offset = len - c.left;
/* Return offset -1 when we have have processed the entire buffer. */
lua_pushinteger(L, c.left == 0 ? -1 : offset);
/* Results are returned with the arg elements still
* in place. Lua takes care of only returning
* elements above the args for us.
* In this case, we have one arg on the stack
* for this function, so we insert our first return
* value at position 2. */
lua_insert(L, 2);
cnt += 1; /* increase return count by one to make room for offset */
}
return cnt;
} | 1 | [
"CWE-119",
"CWE-787"
] | redis | 5ccb6f7a791bf3490357b00a898885759d98bab0 | 226,390,211,612,254,000,000,000,000,000,000,000,000 | 52 | Security: more cmsgpack fixes by @soloestoy.
@soloestoy sent me these additional fixes, after searching for similar
problems to the one reported in mp_pack(). I'm committing the changes
because it was not possible during to make a public PR to protect Redis
users and give Redis providers some time to patch their systems. |
static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
switch (event) {
case NETDEV_REBOOT:
case NETDEV_CHANGEMTU:
case NETDEV_CHANGEADDR:
case NETDEV_CHANGENAME:
case NETDEV_FEAT_CHANGE:
case NETDEV_BONDING_FAILOVER:
case NETDEV_POST_TYPE_CHANGE:
case NETDEV_NOTIFY_PEERS:
case NETDEV_CHANGEUPPER:
case NETDEV_RESEND_IGMP:
case NETDEV_CHANGEINFODATA:
case NETDEV_CHANGELOWERSTATE:
case NETDEV_CHANGE_TX_QUEUE_LEN:
rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event),
GFP_KERNEL, NULL);
break;
default:
break;
}
return NOTIFY_DONE;
} | 0 | [
"CWE-476"
] | linux | f428fe4a04cc339166c8bbd489789760de3a0cee | 45,472,273,683,781,560,000,000,000,000,000,000,000 | 26 | rtnetlink: give a user socket to get_target_net()
This function is used from two places: rtnl_dump_ifinfo and
rtnl_getlink. In rtnl_getlink(), we give a request skb into
get_target_net(), but in rtnl_dump_ifinfo, we give a response skb
into get_target_net().
The problem here is that NETLINK_CB() isn't initialized for the response
skb. In both cases we can get a user socket and give it instead of skb
into get_target_net().
This bug was found by syzkaller with this call-trace:
kasan: GPF could be caused by NULL-ptr deref or user memory access
general protection fault: 0000 [#1] SMP KASAN
Modules linked in:
CPU: 1 PID: 3149 Comm: syzkaller140561 Not tainted 4.15.0-rc4-mm1+ #47
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS
Google 01/01/2011
RIP: 0010:__netlink_ns_capable+0x8b/0x120 net/netlink/af_netlink.c:868
RSP: 0018:ffff8801c880f348 EFLAGS: 00010206
RAX: dffffc0000000000 RBX: 0000000000000000 RCX: ffffffff8443f900
RDX: 000000000000007b RSI: ffffffff86510f40 RDI: 00000000000003d8
RBP: ffff8801c880f360 R08: 0000000000000000 R09: 1ffff10039101e4f
R10: 0000000000000000 R11: 0000000000000001 R12: ffffffff86510f40
R13: 000000000000000c R14: 0000000000000004 R15: 0000000000000011
FS: 0000000001a1a880(0000) GS:ffff8801db300000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 0000000020151000 CR3: 00000001c9511005 CR4: 00000000001606e0
DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
Call Trace:
netlink_ns_capable+0x26/0x30 net/netlink/af_netlink.c:886
get_target_net+0x9d/0x120 net/core/rtnetlink.c:1765
rtnl_dump_ifinfo+0x2e5/0xee0 net/core/rtnetlink.c:1806
netlink_dump+0x48c/0xce0 net/netlink/af_netlink.c:2222
__netlink_dump_start+0x4f0/0x6d0 net/netlink/af_netlink.c:2319
netlink_dump_start include/linux/netlink.h:214 [inline]
rtnetlink_rcv_msg+0x7f0/0xb10 net/core/rtnetlink.c:4485
netlink_rcv_skb+0x21e/0x460 net/netlink/af_netlink.c:2441
rtnetlink_rcv+0x1c/0x20 net/core/rtnetlink.c:4540
netlink_unicast_kernel net/netlink/af_netlink.c:1308 [inline]
netlink_unicast+0x4be/0x6a0 net/netlink/af_netlink.c:1334
netlink_sendmsg+0xa4a/0xe60 net/netlink/af_netlink.c:1897
Cc: Jiri Benc <[email protected]>
Fixes: 79e1ad148c84 ("rtnetlink: use netnsid to query interface")
Signed-off-by: Andrei Vagin <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
static void fm10k_update_itr(struct fm10k_ring_container *ring_container)
{
unsigned int avg_wire_size, packets, itr_round;
/* Only update ITR if we are using adaptive setting */
if (!ITR_IS_ADAPTIVE(ring_container->itr))
goto clear_counts;
packets = ring_container->total_packets;
if (!packets)
goto clear_counts;
avg_wire_size = ring_container->total_bytes / packets;
/* The following is a crude approximation of:
* wmem_default / (size + overhead) = desired_pkts_per_int
* rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
* (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
*
* Assuming wmem_default is 212992 and overhead is 640 bytes per
* packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
* formula down to
*
* (34 * (size + 24)) / (size + 640) = ITR
*
* We first do some math on the packet size and then finally bitshift
* by 8 after rounding up. We also have to account for PCIe link speed
* difference as ITR scales based on this.
*/
if (avg_wire_size <= 360) {
/* Start at 250K ints/sec and gradually drop to 77K ints/sec */
avg_wire_size *= 8;
avg_wire_size += 376;
} else if (avg_wire_size <= 1152) {
/* 77K ints/sec to 45K ints/sec */
avg_wire_size *= 3;
avg_wire_size += 2176;
} else if (avg_wire_size <= 1920) {
/* 45K ints/sec to 38K ints/sec */
avg_wire_size += 4480;
} else {
/* plateau at a limit of 38K ints/sec */
avg_wire_size = 6656;
}
/* Perform final bitshift for division after rounding up to ensure
* that the calculation will never get below a 1. The bit shift
* accounts for changes in the ITR due to PCIe link speed.
*/
itr_round = READ_ONCE(ring_container->itr_scale) + 8;
avg_wire_size += BIT(itr_round) - 1;
avg_wire_size >>= itr_round;
/* write back value and retain adaptive flag */
ring_container->itr = avg_wire_size | FM10K_ITR_ADAPTIVE;
clear_counts:
ring_container->total_bytes = 0;
ring_container->total_packets = 0;
} | 0 | [
"CWE-476"
] | linux | 01ca667133d019edc9f0a1f70a272447c84ec41f | 218,201,102,113,300,240,000,000,000,000,000,000,000 | 60 | fm10k: Fix a potential NULL pointer dereference
Syzkaller report this:
kasan: GPF could be caused by NULL-ptr deref or user memory access
general protection fault: 0000 [#1] SMP KASAN PTI
CPU: 0 PID: 4378 Comm: syz-executor.0 Tainted: G C 5.0.0+ #5
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1ubuntu1 04/01/2014
RIP: 0010:__lock_acquire+0x95b/0x3200 kernel/locking/lockdep.c:3573
Code: 00 0f 85 28 1e 00 00 48 81 c4 08 01 00 00 5b 5d 41 5c 41 5d 41 5e 41 5f c3 4c 89 ea 48 b8 00 00 00 00 00 fc ff df 48 c1 ea 03 <80> 3c 02 00 0f 85 cc 24 00 00 49 81 7d 00 e0 de 03 a6 41 bc 00 00
RSP: 0018:ffff8881e3c07a40 EFLAGS: 00010002
RAX: dffffc0000000000 RBX: 0000000000000000 RCX: 0000000000000000
RDX: 0000000000000010 RSI: 0000000000000000 RDI: 0000000000000080
RBP: 0000000000000000 R08: 0000000000000001 R09: 0000000000000000
R10: ffff8881e3c07d98 R11: ffff8881c7f21f80 R12: 0000000000000001
R13: 0000000000000080 R14: 0000000000000000 R15: 0000000000000001
FS: 00007fce2252e700(0000) GS:ffff8881f2400000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 00007fffc7eb0228 CR3: 00000001e5bea002 CR4: 00000000007606f0
DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
PKRU: 55555554
Call Trace:
lock_acquire+0xff/0x2c0 kernel/locking/lockdep.c:4211
__mutex_lock_common kernel/locking/mutex.c:925 [inline]
__mutex_lock+0xdf/0x1050 kernel/locking/mutex.c:1072
drain_workqueue+0x24/0x3f0 kernel/workqueue.c:2934
destroy_workqueue+0x23/0x630 kernel/workqueue.c:4319
__do_sys_delete_module kernel/module.c:1018 [inline]
__se_sys_delete_module kernel/module.c:961 [inline]
__x64_sys_delete_module+0x30c/0x480 kernel/module.c:961
do_syscall_64+0x9f/0x450 arch/x86/entry/common.c:290
entry_SYSCALL_64_after_hwframe+0x49/0xbe
RIP: 0033:0x462e99
Code: f7 d8 64 89 02 b8 ff ff ff ff c3 66 0f 1f 44 00 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 bc ff ff ff f7 d8 64 89 01 48
RSP: 002b:00007fce2252dc58 EFLAGS: 00000246 ORIG_RAX: 00000000000000b0
RAX: ffffffffffffffda RBX: 000000000073bf00 RCX: 0000000000462e99
RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000020000140
RBP: 0000000000000002 R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000246 R12: 00007fce2252e6bc
R13: 00000000004bcca9 R14: 00000000006f6b48 R15: 00000000ffffffff
If alloc_workqueue fails, it should return -ENOMEM; otherwise it may
trigger this NULL pointer dereference while unloading drivers.
Reported-by: Hulk Robot <[email protected]>
Fixes: 0a38c17a21a0 ("fm10k: Remove create_workqueue")
Signed-off-by: Yue Haibing <[email protected]>
Tested-by: Andrew Bowers <[email protected]>
Signed-off-by: Jeff Kirsher <[email protected]> |
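The fix described in this message is simply to propagate -ENOMEM when alloc_workqueue() fails instead of leaving a NULL pointer for later teardown to trip over. A minimal sketch of that pattern, assuming a hypothetical module (alloc_workqueue/destroy_workqueue are the real kernel APIs; the example_* names and the flags chosen here are illustrative, not fm10k's code):

#include <linux/module.h>
#include <linux/workqueue.h>

/* Hypothetical workqueue used only by this sketch. */
static struct workqueue_struct *example_wq;

static int __init example_init(void)
{
	example_wq = alloc_workqueue("example_wq", WQ_MEM_RECLAIM, 0);
	if (!example_wq)
		return -ENOMEM;	/* fail module load instead of deferring a NULL deref */
	return 0;
}

static void __exit example_exit(void)
{
	/* Only reached when init succeeded, so the pointer is valid here. */
	destroy_workqueue(example_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");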
free_refs_data(Image *img) {
free(img->refs); img->refs = NULL;
img->refcnt = 0; img->refcap = 0;
} | 0 | [
"CWE-787"
] | kitty | 82c137878c2b99100a3cdc1c0f0efea069313901 | 108,788,549,779,997,900,000,000,000,000,000,000,000 | 4 | Graphics protocol: Don't return the filename in the error message when opening a file fails, since filenames can contain control characters
Fixes #3128 |
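The rationale above is that a filename can embed terminal control bytes, so echoing it back in an error string lets crafted names inject escape sequences. A minimal sketch of the defensive alternative if a name must be shown at all, using a hypothetical sanitizer (this is not kitty's code, which instead omits the filename entirely):

#include <ctype.h>
#include <stdio.h>

/* Copy src into dst, replacing control bytes so the result is safe to echo
 * back to a terminal; dst must hold at least strlen(src)+1 bytes. */
static void sanitize_for_terminal(char *dst, const char *src) {
    for (; *src; src++)
        *dst++ = iscntrl((unsigned char)*src) ? '?' : *src;
    *dst = '\0';
}

int main(void) {
    char out[64];
    sanitize_for_terminal(out, "evil\x1b]0;pwned\x07.png");
    printf("EBADF: could not open '%s'\n", out);  /* escape bytes neutralized */
    return 0;
}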
static char *socket_http_get_recursive(const char *url, int *code, int *rlen, ut32 redirections) {
if (code) {
*code = 0;
}
if (rlen) {
*rlen = 0;
}
char *curl_env = r_sys_getenv ("R2_CURL");
if (!R_STR_ISEMPTY (curl_env) && atoi (curl_env)) {
int len;
char *escaped_url = r_str_escape_sh (url);
char *command = r_str_newf ("curl -sfL -o - \"%s\"", escaped_url);
char *res = r_sys_cmd_str (command, NULL, &len);
free (escaped_url);
free (command);
free (curl_env);
if (!res) {
return NULL;
}
if (res) {
if (code) {
*code = 200;
}
if (rlen) {
*rlen = len;
}
}
return res;
}
free (curl_env);
#if __WINDOWS__
return http_get_w32 (url, code, rlen);
#else
RSocket *s;
int ssl = r_str_startswith (url, "https://");
#if !HAVE_LIB_SSL
if (ssl) {
eprintf ("Tried to get '%s', but SSL support is disabled, set R2_CURL=1 to use curl\n", url);
return NULL;
}
#endif
char *response, *host, *path, *port = "80";
char *uri = strdup (url);
if (!uri) {
return NULL;
}
host = strstr (uri, "://");
if (!host) {
free (uri);
eprintf ("r_socket_http_get: Invalid URI");
return NULL;
}
host += 3;
port = strchr (host, ':');
if (!port) {
port = ssl? "443": "80";
path = host;
} else {
*port++ = 0;
path = port;
}
path = strchr (path, '/');
if (!path) {
path = "";
} else {
*path++ = 0;
}
s = r_socket_new (ssl);
if (!s) {
eprintf ("r_socket_http_get: Cannot create socket\n");
free (uri);
return NULL;
}
if (r_socket_connect_tcp (s, host, port, 0)) {
r_socket_printf (s,
"GET /%s HTTP/1.1\r\n"
"User-Agent: radare2 "R2_VERSION"\r\n"
"Accept: */*\r\n"
"Host: %s:%s\r\n"
"\r\n", path, host, port);
response = socket_http_answer (s, code, rlen, redirections);
} else {
eprintf ("Cannot connect to %s:%s\n", host, port);
response = NULL;
}
free (uri);
r_socket_free (s);
return response;
#endif
} | 0 | [
"CWE-78"
] | radare2 | 04edfa82c1f3fa2bc3621ccdad2f93bdbf00e4f9 | 108,392,738,544,011,660,000,000,000,000,000,000,000 | 90 | Fix command injection on PDB download (#16966)
* Fix r_sys_mkdirp with absolute path on Windows
* Fix build with --with-openssl
* Use RBuffer in r_socket_http_answer()
* r_socket_http_answer: Fix read for big responses
* Implement r_str_escape_sh()
* Cleanup r_socket_connect() on Windows
* Fix socket being created without a protocol
* Fix socket connect with SSL ##socket
* Use select() in r_socket_ready()
* Fix read failing if received only protocol answer
* Fix double-free
* r_socket_http_get: Fail if req. SSL with no support
* Follow redirects in r_socket_http_answer()
* Fix r_socket_http_get result length with R2_CURL=1
* Also follow redirects
* Avoid using curl for downloading PDBs
* Use r_socket_http_get() on UNIXs
* Use WinINet API on Windows for r_socket_http_get()
* Fix command injection
* Fix r_sys_cmd_str_full output for binary data
* Validate GUID on PDB download
* Pass depth to socket_http_get_recursive()
* Remove 'r_' and '__' from static function names
* Fix is_valid_guid
* Fix for comments |
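The command-injection part of this fix hinges on shell-escaping the URL before interpolating it into the curl command line, as socket_http_get_recursive() above does via r_str_escape_sh(). A minimal user-space sketch of single-quote escaping under the usual POSIX-shell rules (the escape_sh helper below is illustrative, not radare2's implementation):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Wrap s in single quotes and rewrite embedded quotes as '\'' so the shell
 * never interprets metacharacters coming from untrusted input. */
static char *escape_sh(const char *s) {
    size_t len = strlen(s);
    /* worst case: every byte is a quote -> 4 output bytes, plus quotes + NUL */
    char *out = malloc(len * 4 + 3);
    if (!out)
        return NULL;
    char *p = out;
    *p++ = '\'';
    for (; *s; s++) {
        if (*s == '\'') {
            memcpy(p, "'\\''", 4);   /* close, escaped quote, reopen */
            p += 4;
        } else {
            *p++ = *s;
        }
    }
    *p++ = '\'';
    *p = '\0';
    return out;
}

int main(void) {
    char *safe = escape_sh("http://example.com/x.pdb\"; rm -rf ~; echo \"");
    if (safe) {
        printf("curl -sfL -o - %s\n", safe);  /* metacharacters are now inert */
        free(safe);
    }
    return 0;
}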
int gnutls_x509_ext_export_aia(gnutls_x509_aia_t aia,
gnutls_datum_t * ext)
{
int ret, result;
ASN1_TYPE c2 = ASN1_TYPE_EMPTY;
unsigned int i;
ret = asn1_create_element(_gnutls_get_pkix(),
"PKIX1.AuthorityInfoAccessSyntax", &c2);
if (ret != ASN1_SUCCESS) {
gnutls_assert();
return _gnutls_asn2err(ret);
}
/* 1. create a new element.
*/
for (i=0;i<aia->size;i++) {
result = asn1_write_value(c2, "", "NEW", 1);
if (result != ASN1_SUCCESS) {
gnutls_assert();
ret = _gnutls_asn2err(result);
goto cleanup;
}
/* 2. Add the OID.
*/
result = asn1_write_value(c2, "?LAST.accessMethod", aia->aia[i].oid.data, 1);
if (result != ASN1_SUCCESS) {
gnutls_assert();
ret = _gnutls_asn2err(result);
goto cleanup;
}
ret =
_gnutls_write_general_name(c2,
"?LAST.accessLocation",
aia->aia[i].san_type,
aia->aia[i].san.data,
aia->aia[i].san.size);
if (ret < 0) {
gnutls_assert();
goto cleanup;
}
}
ret = _gnutls_x509_der_encode(c2, "", ext, 0);
if (ret < 0) {
gnutls_assert();
goto cleanup;
}
cleanup:
asn1_delete_structure(&c2);
return ret;
} | 0 | [] | gnutls | d6972be33264ecc49a86cd0958209cd7363af1e9 | 218,377,086,775,288,200,000,000,000,000,000,000,000 | 56 | eliminated double-free in the parsing of dist points
Reported by Robert Święcki. |
test_weeknum(int from, int to, int f, double sg)
{
int j;
fprintf(stderr, "test_weeknum: %d...%d (%d) - %.0f\n",
from, to, to - from, sg);
for (j = from; j <= to; j++) {
int y, w, d, rj, ns;
c_jd_to_weeknum(j, f, sg, &y, &w, &d);
c_weeknum_to_jd(y, w, d, f, sg, &rj, &ns);
if (j != rj) {
fprintf(stderr, "%d != %d\n", j, rj);
return 0;
}
}
return 1;
} | 0 | [] | date | 3959accef8da5c128f8a8e2fd54e932a4fb253b0 | 33,365,550,573,425,906,000,000,000,000,000,000,000 | 18 | Add length limit option for methods that parses date strings
`Date.parse` now raises an ArgumentError when a given date string is
longer than 128 characters. You can configure the limit by giving the `limit`
keyword argument, e.g. `Date.parse(str, limit: 1000)`. If you pass `limit: nil`,
the limit is disabled.
Not only `Date.parse` but also the following methods are changed.
* Date._parse
* Date.parse
* DateTime.parse
* Date._iso8601
* Date.iso8601
* DateTime.iso8601
* Date._rfc3339
* Date.rfc3339
* DateTime.rfc3339
* Date._xmlschema
* Date.xmlschema
* DateTime.xmlschema
* Date._rfc2822
* Date.rfc2822
* DateTime.rfc2822
* Date._rfc822
* Date.rfc822
* DateTime.rfc822
* Date._jisx0301
* Date.jisx0301
* DateTime.jisx0301 |
static int crypto_nivaead_default(struct crypto_alg *alg, u32 type, u32 mask)
{
struct rtattr *tb[3];
struct {
struct rtattr attr;
struct crypto_attr_type data;
} ptype;
struct {
struct rtattr attr;
struct crypto_attr_alg data;
} palg;
struct crypto_template *tmpl;
struct crypto_instance *inst;
struct crypto_alg *larval;
const char *geniv;
int err;
larval = crypto_larval_lookup(alg->cra_driver_name,
CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_GENIV,
CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
err = PTR_ERR(larval);
if (IS_ERR(larval))
goto out;
err = -EAGAIN;
if (!crypto_is_larval(larval))
goto drop_larval;
ptype.attr.rta_len = sizeof(ptype);
ptype.attr.rta_type = CRYPTOA_TYPE;
ptype.data.type = type | CRYPTO_ALG_GENIV;
/* GENIV tells the template that we're making a default geniv. */
ptype.data.mask = mask | CRYPTO_ALG_GENIV;
tb[0] = &ptype.attr;
palg.attr.rta_len = sizeof(palg);
palg.attr.rta_type = CRYPTOA_ALG;
/* Must use the exact name to locate ourselves. */
memcpy(palg.data.name, alg->cra_driver_name, CRYPTO_MAX_ALG_NAME);
tb[1] = &palg.attr;
tb[2] = NULL;
geniv = alg->cra_aead.geniv;
tmpl = crypto_lookup_template(geniv);
err = -ENOENT;
if (!tmpl)
goto kill_larval;
inst = tmpl->alloc(tb);
err = PTR_ERR(inst);
if (IS_ERR(inst))
goto put_tmpl;
if ((err = crypto_register_instance(tmpl, inst))) {
tmpl->free(inst);
goto put_tmpl;
}
/* Redo the lookup to use the instance we just registered. */
err = -EAGAIN;
put_tmpl:
crypto_tmpl_put(tmpl);
kill_larval:
crypto_larval_kill(larval);
drop_larval:
crypto_mod_put(larval);
out:
crypto_mod_put(alg);
return err;
} | 0 | [
"CWE-310"
] | linux | 9a5467bf7b6e9e02ec9c3da4e23747c05faeaac6 | 129,906,069,780,198,390,000,000,000,000,000,000,000 | 73 | crypto: user - fix info leaks in report API
Three errors resulting in kernel memory disclosure:
1/ The structures used for the netlink based crypto algorithm report API
are located on the stack. As snprintf() does not fill the remainder of
the buffer with null bytes, those stack bytes will be disclosed to users
of the API. Switch to strncpy() to fix this.
2/ crypto_report_one() does not initialize all fields of struct
crypto_user_alg. Fix this to close the heap info leak.
3/ For the module name we should copy only as many bytes as
module_name() returns -- not as many as the destination buffer could
hold. But the current code does not and therefore copies random data
from behind the end of the module name, as the module name is always
shorter than CRYPTO_MAX_ALG_NAME.
Also switch to use strncpy() to copy the algorithm's name and
driver_name. They are strings, after all.
Signed-off-by: Mathias Krause <[email protected]>
Cc: Steffen Klassert <[email protected]>
Signed-off-by: Herbert Xu <[email protected]> |
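The pattern behind all three leaks is a fixed-size report structure whose unwritten bytes reach user space. A minimal sketch of the mitigation — zero the structure, then make bounded, NUL-padded copies — assuming an illustrative struct rather than the real crypto_user_alg layout:

#include <stdio.h>
#include <string.h>

/* Illustrative report structure; the real crypto_user_alg differs. */
struct example_report {
    char name[64];
    char driver_name[64];
    char module_name[64];
};

static void fill_report(struct example_report *r, const char *name,
                        const char *driver, const char *mod) {
    /* 1. Clear everything so no stack/heap garbage reaches the caller. */
    memset(r, 0, sizeof(*r));
    /* 2. Bounded copies; strncpy pads the remainder with NUL bytes. */
    strncpy(r->name, name, sizeof(r->name) - 1);
    strncpy(r->driver_name, driver, sizeof(r->driver_name) - 1);
    strncpy(r->module_name, mod, sizeof(r->module_name) - 1);
}

int main(void) {
    struct example_report r;
    fill_report(&r, "aes", "aes-generic", "aes_generic");
    printf("%s / %s / %s\n", r.name, r.driver_name, r.module_name);
    return 0;
}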
static gint compare_expire(gconstpointer a, gconstpointer b)
{
const struct dhcp_lease *lease1 = a;
const struct dhcp_lease *lease2 = b;
return lease2->expire - lease1->expire;
} | 0 | [] | connman | 58d397ba74873384aee449690a9070bacd5676fa | 39,351,947,982,530,406,000,000,000,000,000,000,000 | 7 | gdhcp: Avoid reading invalid data in dhcp_get_option |
RList *r_bin_le_get_libs(r_bin_le_obj_t *bin) {
RList *l = r_list_newf ((RListFree)free);
if (!l) {
return NULL;
}
LE_image_header *h = bin->header;
ut64 offset = (ut64)h->impmod + bin->headerOff;
ut64 end = offset + h->impproc - h->impmod;
while (offset < end) {
char *name = __read_nonnull_str_at (bin->buf, &offset);
if (!name) {
break;
}
r_list_append (l, name);
}
return l;
} | 0 | [
"CWE-252"
] | radare2 | d7ea20fb2e1433ebece9f004d87ad8f2377af23d | 155,604,771,221,758,130,000,000,000,000,000,000,000 | 17 | Fix #18923 - Fix resource exhaustion bug in LE binary (#18926) |
void wasm_obj_free(RBinWasmObj *bin) {
if (bin) {
r_buf_free (bin->buf);
r_list_free (bin->g_sections);
r_pvector_free (bin->g_types);
r_pvector_free (bin->g_imports);
r_pvector_free (bin->g_funcs);
r_pvector_free (bin->g_tables);
r_pvector_free (bin->g_memories);
r_pvector_free (bin->g_globals);
r_pvector_free (bin->g_exports);
r_pvector_free (bin->g_elements);
r_pvector_free (bin->g_codes);
r_pvector_free (bin->g_datas);
r_list_free (bin->g_names);
free (bin);
}
} | 0 | [
"CWE-787"
] | radare2 | b4ca66f5d4363d68a6379e5706353b3bde5104a4 | 270,434,175,603,150,930,000,000,000,000,000,000,000 | 18 | Fix #20336 - wasm bin parser ##crash |
static void vmx_save_host_state(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
int i;
if (vmx->host_state.loaded)
return;
vmx->host_state.loaded = 1;
/*
* Set host fs and gs selectors. Unfortunately, 22.2.3 does not
* allow segment selectors with cpl > 0 or ti == 1.
*/
vmx->host_state.ldt_sel = kvm_read_ldt();
vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
savesegment(fs, vmx->host_state.fs_sel);
if (!(vmx->host_state.fs_sel & 7)) {
vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
vmx->host_state.fs_reload_needed = 0;
} else {
vmcs_write16(HOST_FS_SELECTOR, 0);
vmx->host_state.fs_reload_needed = 1;
}
savesegment(gs, vmx->host_state.gs_sel);
if (!(vmx->host_state.gs_sel & 7))
vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
else {
vmcs_write16(HOST_GS_SELECTOR, 0);
vmx->host_state.gs_ldt_reload_needed = 1;
}
#ifdef CONFIG_X86_64
savesegment(ds, vmx->host_state.ds_sel);
savesegment(es, vmx->host_state.es_sel);
#endif
#ifdef CONFIG_X86_64
vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
#else
vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel));
vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel));
#endif
#ifdef CONFIG_X86_64
rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
if (is_long_mode(&vmx->vcpu))
wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
#endif
for (i = 0; i < vmx->save_nmsrs; ++i)
kvm_set_shared_msr(vmx->guest_msrs[i].index,
vmx->guest_msrs[i].data,
vmx->guest_msrs[i].mask);
} | 0 | [
"CWE-20"
] | linux | bfd0a56b90005f8c8a004baf407ad90045c2b11e | 138,088,489,924,996,110,000,000,000,000,000,000,000 | 54 | nEPT: Nested INVEPT
If we let L1 use EPT, we should probably also support the INVEPT instruction.
In our current nested EPT implementation, when L1 changes its EPT table
for L2 (i.e., EPT12), L0 modifies the shadow EPT table (EPT02), and in
the course of this modification already calls INVEPT. But if the last level
of a shadow page is unsync, not all of L1's changes to EPT12 are intercepted,
which means roots need to be synced when L1 calls INVEPT. Global INVEPT
should not be different since roots are synced by kvm_mmu_load() each
time EPTP02 changes.
Reviewed-by: Xiao Guangrong <[email protected]>
Signed-off-by: Nadav Har'El <[email protected]>
Signed-off-by: Jun Nakajima <[email protected]>
Signed-off-by: Xinhao Xu <[email protected]>
Signed-off-by: Yang Zhang <[email protected]>
Signed-off-by: Gleb Natapov <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
DECODE_JSON(ExtensionObject) {
ALLOW_NULL;
CHECK_OBJECT;
/* Search for Encoding */
size_t searchEncodingResult = 0;
status ret = lookAheadForKey(UA_JSONKEY_ENCODING, ctx, parseCtx, &searchEncodingResult);
/* If no encoding found it is structure encoding */
if(ret != UA_STATUSCODE_GOOD) {
UA_NodeId typeId;
UA_NodeId_init(&typeId);
size_t searchTypeIdResult = 0;
ret = lookAheadForKey(UA_JSONKEY_TYPEID, ctx, parseCtx, &searchTypeIdResult);
if(ret != UA_STATUSCODE_GOOD) {
/* TYPEID not found, abort */
return UA_STATUSCODE_BADENCODINGERROR;
}
/* parse the nodeid */
/*for restore*/
UA_UInt16 index = parseCtx->index;
parseCtx->index = (UA_UInt16)searchTypeIdResult;
ret = NodeId_decodeJson(&typeId, &UA_TYPES[UA_TYPES_NODEID], ctx, parseCtx, true);
if(ret != UA_STATUSCODE_GOOD)
return ret;
/*restore*/
parseCtx->index = index;
const UA_DataType *typeOfBody = UA_findDataType(&typeId);
if(!typeOfBody) {
/*dont decode body: 1. save as bytestring, 2. jump over*/
dst->encoding = UA_EXTENSIONOBJECT_ENCODED_BYTESTRING;
UA_NodeId_copy(&typeId, &dst->content.encoded.typeId);
/*Check if Object in Extentionobject*/
if(getJsmnType(parseCtx) != JSMN_OBJECT) {
UA_NodeId_deleteMembers(&typeId);
return UA_STATUSCODE_BADDECODINGERROR;
}
/*Search for Body to save*/
size_t searchBodyResult = 0;
ret = lookAheadForKey(UA_JSONKEY_BODY, ctx, parseCtx, &searchBodyResult);
if(ret != UA_STATUSCODE_GOOD) {
/*No Body*/
UA_NodeId_deleteMembers(&typeId);
return UA_STATUSCODE_BADDECODINGERROR;
}
if(searchBodyResult >= (size_t)parseCtx->tokenCount) {
/*index not in Tokenarray*/
UA_NodeId_deleteMembers(&typeId);
return UA_STATUSCODE_BADDECODINGERROR;
}
/* Get the size of the Object as a string, not the Object key count! */
UA_Int64 sizeOfJsonString =(parseCtx->tokenArray[searchBodyResult].end -
parseCtx->tokenArray[searchBodyResult].start);
char* bodyJsonString = (char*)(ctx->pos + parseCtx->tokenArray[searchBodyResult].start);
if(sizeOfJsonString <= 0) {
UA_NodeId_deleteMembers(&typeId);
return UA_STATUSCODE_BADDECODINGERROR;
}
/* Save encoded as bytestring. */
ret = UA_ByteString_allocBuffer(&dst->content.encoded.body, (size_t)sizeOfJsonString);
if(ret != UA_STATUSCODE_GOOD) {
UA_NodeId_deleteMembers(&typeId);
return ret;
}
memcpy(dst->content.encoded.body.data, bodyJsonString, (size_t)sizeOfJsonString);
size_t tokenAfteExtensionObject = 0;
jumpOverObject(ctx, parseCtx, &tokenAfteExtensionObject);
if(tokenAfteExtensionObject == 0) {
/*next object token not found*/
UA_NodeId_deleteMembers(&typeId);
UA_ByteString_deleteMembers(&dst->content.encoded.body);
return UA_STATUSCODE_BADDECODINGERROR;
}
parseCtx->index = (UA_UInt16)tokenAfteExtensionObject;
return UA_STATUSCODE_GOOD;
}
/*Type id not used anymore, typeOfBody has type*/
UA_NodeId_deleteMembers(&typeId);
/*Set Found Type*/
dst->content.decoded.type = typeOfBody;
dst->encoding = UA_EXTENSIONOBJECT_DECODED;
if(searchTypeIdResult != 0) {
dst->content.decoded.data = UA_new(typeOfBody);
if(!dst->content.decoded.data)
return UA_STATUSCODE_BADOUTOFMEMORY;
UA_NodeId typeId_dummy;
DecodeEntry entries[2] = {
{UA_JSONKEY_TYPEID, &typeId_dummy, (decodeJsonSignature) NodeId_decodeJson, false, NULL},
{UA_JSONKEY_BODY, dst->content.decoded.data,
(decodeJsonSignature) decodeJsonJumpTable[typeOfBody->typeKind], false, NULL}
};
return decodeFields(ctx, parseCtx, entries, 2, typeOfBody);
} else {
return UA_STATUSCODE_BADDECODINGERROR;
}
} else { /* UA_JSONKEY_ENCODING found */
/*Parse the encoding*/
UA_UInt64 encoding = 0;
char *extObjEncoding = (char*)(ctx->pos + parseCtx->tokenArray[searchEncodingResult].start);
size_t size = (size_t)(parseCtx->tokenArray[searchEncodingResult].end - parseCtx->tokenArray[searchEncodingResult].start);
atoiUnsigned(extObjEncoding, size, &encoding);
if(encoding == 1) {
/* BYTESTRING in Json Body */
dst->encoding = UA_EXTENSIONOBJECT_ENCODED_BYTESTRING;
UA_UInt16 encodingTypeJson;
DecodeEntry entries[3] = {
{UA_JSONKEY_ENCODING, &encodingTypeJson, (decodeJsonSignature) UInt16_decodeJson, false, NULL},
{UA_JSONKEY_BODY, &dst->content.encoded.body, (decodeJsonSignature) String_decodeJson, false, NULL},
{UA_JSONKEY_TYPEID, &dst->content.encoded.typeId, (decodeJsonSignature) NodeId_decodeJson, false, NULL}
};
return decodeFields(ctx, parseCtx, entries, 3, type);
} else if(encoding == 2) {
/* XmlElement in Json Body */
dst->encoding = UA_EXTENSIONOBJECT_ENCODED_XML;
UA_UInt16 encodingTypeJson;
DecodeEntry entries[3] = {
{UA_JSONKEY_ENCODING, &encodingTypeJson, (decodeJsonSignature) UInt16_decodeJson, false, NULL},
{UA_JSONKEY_BODY, &dst->content.encoded.body, (decodeJsonSignature) String_decodeJson, false, NULL},
{UA_JSONKEY_TYPEID, &dst->content.encoded.typeId, (decodeJsonSignature) NodeId_decodeJson, false, NULL}
};
return decodeFields(ctx, parseCtx, entries, 3, type);
} else {
return UA_STATUSCODE_BADDECODINGERROR;
}
}
return UA_STATUSCODE_BADNOTIMPLEMENTED;
} | 0 | [
"CWE-703",
"CWE-787"
] | open62541 | c800e2987b10bb3af6ef644b515b5d6392f8861d | 239,877,374,248,034,330,000,000,000,000,000,000,000 | 149 | fix(json): Check max recursion depth in more places |
R_API wchar_t* r_str_mb_to_wc_l(const char *buf, int len) {
wchar_t *res_buf = NULL;
size_t sz;
bool fail = true;
if (!buf || len <= 0) {
return NULL;
}
sz = mbstowcs (NULL, buf, len);
if (sz == (size_t)-1) {
goto err_r_str_mb_to_wc;
}
res_buf = (wchar_t *)calloc (1, (sz + 1) * sizeof (wchar_t));
if (!res_buf) {
goto err_r_str_mb_to_wc;
}
sz = mbstowcs (res_buf, buf, sz + 1);
if (sz == (size_t)-1) {
goto err_r_str_mb_to_wc;
}
fail = false;
err_r_str_mb_to_wc:
if (fail) {
R_FREE (res_buf);
}
return res_buf;
} | 0 | [
"CWE-78"
] | radare2 | 04edfa82c1f3fa2bc3621ccdad2f93bdbf00e4f9 | 94,868,956,090,127,750,000,000,000,000,000,000,000 | 27 | Fix command injection on PDB download (#16966)
* Fix r_sys_mkdirp with absolute path on Windows
* Fix build with --with-openssl
* Use RBuffer in r_socket_http_answer()
* r_socket_http_answer: Fix read for big responses
* Implement r_str_escape_sh()
* Cleanup r_socket_connect() on Windows
* Fix socket being created without a protocol
* Fix socket connect with SSL ##socket
* Use select() in r_socket_ready()
* Fix read failing if received only protocol answer
* Fix double-free
* r_socket_http_get: Fail if req. SSL with no support
* Follow redirects in r_socket_http_answer()
* Fix r_socket_http_get result length with R2_CURL=1
* Also follow redirects
* Avoid using curl for downloading PDBs
* Use r_socket_http_get() on UNIXs
* Use WinINet API on Windows for r_socket_http_get()
* Fix command injection
* Fix r_sys_cmd_str_full output for binary data
* Validate GUID on PDB download
* Pass depth to socket_http_get_recursive()
* Remove 'r_' and '__' from static function names
* Fix is_valid_guid
* Fix for comments |
static Image *ReadPNMImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
#define ThrowPNMException(exception,message) \
{ \
if (comment_info.comment != (char *) NULL) \
comment_info.comment=DestroyString(comment_info.comment); \
ThrowReaderException((exception),(message)); \
}
char
format;
CommentInfo
comment_info;
double
quantum_scale;
Image
*image;
MagickBooleanType
status;
QuantumAny
max_value;
QuantumInfo
*quantum_info;
QuantumType
quantum_type;
size_t
depth,
extent,
packet_size;
ssize_t
count,
row,
y;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage(image_info);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Read PNM image.
*/
count=ReadBlob(image,1,(unsigned char *) &format);
do
{
/*
Initialize image structure.
*/
comment_info.comment=AcquireString(NULL);
comment_info.extent=MagickPathExtent;
if ((count != 1) || (format != 'P'))
ThrowPNMException(CorruptImageError,"ImproperImageHeader");
max_value=1;
quantum_type=RGBQuantum;
quantum_scale=1.0;
format=(char) ReadBlobByte(image);
if (format != '7')
{
/*
PBM, PGM, PPM, and PNM.
*/
image->columns=PNMInteger(image,&comment_info,10);
image->rows=PNMInteger(image,&comment_info,10);
if ((format == 'f') || (format == 'F'))
{
char
scale[MaxTextExtent];
if (ReadBlobString(image,scale) != (char *) NULL)
quantum_scale=StringToDouble(scale,(char **) NULL);
}
else
{
if ((format == '1') || (format == '4'))
max_value=1; /* bitmap */
else
max_value=PNMInteger(image,&comment_info,10);
}
}
else
{
char
keyword[MaxTextExtent],
value[MaxTextExtent];
int
c;
register char
*p;
/*
PAM.
*/
for (c=ReadBlobByte(image); c != EOF; c=ReadBlobByte(image))
{
while (isspace((int) ((unsigned char) c)) != 0)
c=ReadBlobByte(image);
if (c == '#')
{
/*
Comment.
*/
c=PNMComment(image,&comment_info);
c=ReadBlobByte(image);
while (isspace((int) ((unsigned char) c)) != 0)
c=ReadBlobByte(image);
}
p=keyword;
do
{
if ((size_t) (p-keyword) < (MaxTextExtent-1))
*p++=c;
c=ReadBlobByte(image);
} while (isalnum(c));
*p='\0';
if (LocaleCompare(keyword,"endhdr") == 0)
break;
while (isspace((int) ((unsigned char) c)) != 0)
c=ReadBlobByte(image);
p=value;
while (isalnum(c) || (c == '_'))
{
if ((size_t) (p-value) < (MaxTextExtent-1))
*p++=c;
c=ReadBlobByte(image);
}
*p='\0';
/*
Assign a value to the specified keyword.
*/
if (LocaleCompare(keyword,"depth") == 0)
packet_size=StringToUnsignedLong(value);
(void) packet_size;
if (LocaleCompare(keyword,"height") == 0)
image->rows=StringToUnsignedLong(value);
if (LocaleCompare(keyword,"maxval") == 0)
max_value=StringToUnsignedLong(value);
if (LocaleCompare(keyword,"TUPLTYPE") == 0)
{
if (LocaleCompare(value,"BLACKANDWHITE") == 0)
{
(void) SetImageColorspace(image,GRAYColorspace);
quantum_type=GrayQuantum;
}
if (LocaleCompare(value,"BLACKANDWHITE_ALPHA") == 0)
{
(void) SetImageColorspace(image,GRAYColorspace);
image->matte=MagickTrue;
quantum_type=GrayAlphaQuantum;
}
if (LocaleCompare(value,"GRAYSCALE") == 0)
{
(void) SetImageColorspace(image,GRAYColorspace);
quantum_type=GrayQuantum;
}
if (LocaleCompare(value,"GRAYSCALE_ALPHA") == 0)
{
(void) SetImageColorspace(image,GRAYColorspace);
image->matte=MagickTrue;
quantum_type=GrayAlphaQuantum;
}
if (LocaleCompare(value,"RGB_ALPHA") == 0)
{
quantum_type=RGBAQuantum;
image->matte=MagickTrue;
}
if (LocaleCompare(value,"CMYK") == 0)
{
(void) SetImageColorspace(image,CMYKColorspace);
quantum_type=CMYKQuantum;
}
if (LocaleCompare(value,"CMYK_ALPHA") == 0)
{
(void) SetImageColorspace(image,CMYKColorspace);
image->matte=MagickTrue;
quantum_type=CMYKAQuantum;
}
}
if (LocaleCompare(keyword,"width") == 0)
image->columns=StringToUnsignedLong(value);
}
}
if ((image->columns == 0) || (image->rows == 0))
ThrowPNMException(CorruptImageError,"NegativeOrZeroImageSize");
if ((max_value == 0) || (max_value > 4294967295U))
ThrowPNMException(CorruptImageError,"ImproperImageHeader");
for (depth=1; GetQuantumRange(depth) < max_value; depth++) ;
image->depth=depth;
if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0))
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
if ((MagickSizeType) (image->columns*image->rows/8) > GetBlobSize(image))
ThrowPNMException(CorruptImageError,"InsufficientImageDataInFile");
status=SetImageExtent(image,image->columns,image->rows);
if (status == MagickFalse)
{
InheritException(exception,&image->exception);
return(DestroyImageList(image));
}
(void) ResetImagePixels(image,exception);
/*
Convert PNM pixels.
*/
row=0;
switch (format)
{
case '1':
{
/*
Convert PBM image to pixel packets.
*/
(void) SetImageColorspace(image,GRAYColorspace);
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register PixelPacket
*magick_restrict q;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(q,PNMInteger(image,&comment_info,2) == 0 ?
QuantumRange : 0);
if (EOFBlob(image) != MagickFalse)
break;
SetPixelGreen(q,GetPixelRed(q));
SetPixelBlue(q,GetPixelRed(q));
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
if (EOFBlob(image) != MagickFalse)
break;
}
image->type=BilevelType;
break;
}
case '2':
{
size_t
intensity;
/*
Convert PGM image to pixel packets.
*/
(void) SetImageColorspace(image,GRAYColorspace);
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register PixelPacket
*magick_restrict q;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
intensity=ScaleAnyToQuantum(PNMInteger(image,&comment_info,10),
max_value);
if (EOFBlob(image) != MagickFalse)
break;
SetPixelRed(q,intensity);
SetPixelGreen(q,GetPixelRed(q));
SetPixelBlue(q,GetPixelRed(q));
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
if (EOFBlob(image) != MagickFalse)
break;
}
image->type=GrayscaleType;
break;
}
case '3':
{
/*
Convert PNM image to pixel packets.
*/
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register PixelPacket
*magick_restrict q;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
QuantumAny
pixel;
pixel=ScaleAnyToQuantum(PNMInteger(image,&comment_info,10),
max_value);
if (EOFBlob(image) != MagickFalse)
break;
SetPixelRed(q,pixel);
pixel=ScaleAnyToQuantum(PNMInteger(image,&comment_info,10),
max_value);
SetPixelGreen(q,pixel);
pixel=ScaleAnyToQuantum(PNMInteger(image,&comment_info,10),
max_value);
SetPixelBlue(q,pixel);
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
if (EOFBlob(image) != MagickFalse)
break;
}
break;
}
case '4':
{
/*
Convert PBM raw image to pixel packets.
*/
(void) SetImageColorspace(image,GRAYColorspace);
quantum_type=GrayQuantum;
if (image->storage_class == PseudoClass)
quantum_type=IndexQuantum;
quantum_info=AcquireQuantumInfo(image_info,image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowPNMException(ResourceLimitError,"MemoryAllocationFailed");
SetQuantumMinIsWhite(quantum_info,MagickTrue);
extent=GetQuantumExtent(image,quantum_info,quantum_type);
for (y=0; y < (ssize_t) image->rows; y++)
{
const unsigned char
*pixels;
MagickBooleanType
sync;
register PixelPacket
*magick_restrict q;
ssize_t
count,
offset;
size_t
length;
pixels=(unsigned char *) ReadBlobStream(image,extent,
GetQuantumPixels(quantum_info),&count);
if (count != (ssize_t) extent)
break;
if ((image->progress_monitor != (MagickProgressMonitor) NULL) &&
(image->previous == (Image *) NULL))
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,LoadImageTag,(MagickOffsetType)
row,image->rows);
if (proceed == MagickFalse)
break;
}
offset=row++;
q=QueueAuthenticPixels(image,0,offset,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
break;
length=ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,
quantum_type,pixels,exception);
if (length != extent)
break;
sync=SyncAuthenticPixels(image,exception);
if (sync == MagickFalse)
break;
}
quantum_info=DestroyQuantumInfo(quantum_info);
SetQuantumImageType(image,quantum_type);
break;
}
case '5':
{
/*
Convert PGM raw image to pixel packets.
*/
(void) SetImageColorspace(image,GRAYColorspace);
quantum_type=GrayQuantum;
extent=(image->depth <= 8 ? 1 : image->depth <= 16 ? 2 : 4)*
image->columns;
quantum_info=AcquireQuantumInfo(image_info,image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowPNMException(ResourceLimitError,"MemoryAllocationFailed");
for (y=0; y < (ssize_t) image->rows; y++)
{
const unsigned char
*pixels;
MagickBooleanType
sync;
register const unsigned char
*magick_restrict p;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
ssize_t
count,
offset;
pixels=(unsigned char *) ReadBlobStream(image,extent,
GetQuantumPixels(quantum_info),&count);
if (count != (ssize_t) extent)
break;
if ((image->progress_monitor != (MagickProgressMonitor) NULL) &&
(image->previous == (Image *) NULL))
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,LoadImageTag,(MagickOffsetType)
row,image->rows);
if (proceed == MagickFalse)
break;
}
offset=row++;
q=QueueAuthenticPixels(image,0,offset,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
break;
p=pixels;
switch (image->depth)
{
case 8:
case 16:
case 32:
{
(void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,
quantum_type,pixels,exception);
break;
}
default:
{
unsigned int
pixel;
if (image->depth <= 8)
{
unsigned char
pixel;
for (x=0; x < (ssize_t) image->columns; x++)
{
p=PushCharPixel(p,&pixel);
SetPixelRed(q,ScaleAnyToQuantum(pixel,max_value));
SetPixelGreen(q,GetPixelRed(q));
SetPixelBlue(q,GetPixelRed(q));
q++;
}
break;
}
if (image->depth <= 16)
{
unsigned short
pixel;
for (x=0; x < (ssize_t) image->columns; x++)
{
p=PushShortPixel(MSBEndian,p,&pixel);
SetPixelRed(q,ScaleAnyToQuantum(pixel,max_value));
SetPixelGreen(q,GetPixelRed(q));
SetPixelBlue(q,GetPixelRed(q));
q++;
}
break;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
p=PushLongPixel(MSBEndian,p,&pixel);
SetPixelRed(q,ScaleAnyToQuantum(pixel,max_value));
SetPixelGreen(q,GetPixelRed(q));
SetPixelBlue(q,GetPixelRed(q));
q++;
}
break;
}
}
sync=SyncAuthenticPixels(image,exception);
if (sync == MagickFalse)
break;
}
quantum_info=DestroyQuantumInfo(quantum_info);
SetQuantumImageType(image,quantum_type);
break;
}
case '6':
{
/*
Convert PNM raster image to pixel packets.
*/
quantum_type=RGBQuantum;
extent=3*(image->depth <= 8 ? 1 : image->depth <= 16 ? 2 : 4)*
image->columns;
quantum_info=AcquireQuantumInfo(image_info,image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowPNMException(ResourceLimitError,"MemoryAllocationFailed");
(void) SetQuantumEndian(image,quantum_info,MSBEndian);
for (y=0; y < (ssize_t) image->rows; y++)
{
const unsigned char
*pixels;
MagickBooleanType
sync;
register const unsigned char
*magick_restrict p;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
ssize_t
count,
offset;
pixels=(unsigned char *) ReadBlobStream(image,extent,
GetQuantumPixels(quantum_info),&count);
if (count != (ssize_t) extent)
break;
if ((image->progress_monitor != (MagickProgressMonitor) NULL) &&
(image->previous == (Image *) NULL))
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,LoadImageTag,(MagickOffsetType)
row,image->rows);
if (proceed == MagickFalse)
break;
}
offset=row++;
q=QueueAuthenticPixels(image,0,offset,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
break;
p=pixels;
switch (image->depth)
{
case 8:
{
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(q,ScaleCharToQuantum(*p++));
SetPixelGreen(q,ScaleCharToQuantum(*p++));
SetPixelBlue(q,ScaleCharToQuantum(*p++));
q->opacity=OpaqueOpacity;
q++;
}
break;
}
case 16:
{
unsigned short
pixel;
for (x=0; x < (ssize_t) image->columns; x++)
{
p=PushShortPixel(MSBEndian,p,&pixel);
SetPixelRed(q,ScaleShortToQuantum(pixel));
p=PushShortPixel(MSBEndian,p,&pixel);
SetPixelGreen(q,ScaleShortToQuantum(pixel));
p=PushShortPixel(MSBEndian,p,&pixel);
SetPixelBlue(q,ScaleShortToQuantum(pixel));
SetPixelOpacity(q,OpaqueOpacity);
q++;
}
break;
}
case 32:
{
unsigned int
pixel;
for (x=0; x < (ssize_t) image->columns; x++)
{
p=PushLongPixel(MSBEndian,p,&pixel);
SetPixelRed(q,ScaleLongToQuantum(pixel));
p=PushLongPixel(MSBEndian,p,&pixel);
SetPixelGreen(q,ScaleLongToQuantum(pixel));
p=PushLongPixel(MSBEndian,p,&pixel);
SetPixelBlue(q,ScaleLongToQuantum(pixel));
SetPixelOpacity(q,OpaqueOpacity);
q++;
}
break;
}
default:
{
unsigned int
pixel;
if (image->depth <= 8)
{
unsigned char
pixel;
for (x=0; x < (ssize_t) image->columns; x++)
{
p=PushCharPixel(p,&pixel);
SetPixelRed(q,ScaleAnyToQuantum(pixel,max_value));
p=PushCharPixel(p,&pixel);
SetPixelGreen(q,ScaleAnyToQuantum(pixel,max_value));
p=PushCharPixel(p,&pixel);
SetPixelBlue(q,ScaleAnyToQuantum(pixel,max_value));
SetPixelOpacity(q,OpaqueOpacity);
q++;
}
break;
}
if (image->depth <= 16)
{
unsigned short
pixel;
for (x=0; x < (ssize_t) image->columns; x++)
{
p=PushShortPixel(MSBEndian,p,&pixel);
SetPixelRed(q,ScaleAnyToQuantum(pixel,max_value));
p=PushShortPixel(MSBEndian,p,&pixel);
SetPixelGreen(q,ScaleAnyToQuantum(pixel,max_value));
p=PushShortPixel(MSBEndian,p,&pixel);
SetPixelBlue(q,ScaleAnyToQuantum(pixel,max_value));
SetPixelOpacity(q,OpaqueOpacity);
q++;
}
break;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
p=PushLongPixel(MSBEndian,p,&pixel);
SetPixelRed(q,ScaleAnyToQuantum(pixel,max_value));
p=PushLongPixel(MSBEndian,p,&pixel);
SetPixelGreen(q,ScaleAnyToQuantum(pixel,max_value));
p=PushLongPixel(MSBEndian,p,&pixel);
SetPixelBlue(q,ScaleAnyToQuantum(pixel,max_value));
SetPixelOpacity(q,OpaqueOpacity);
q++;
}
break;
}
break;
}
sync=SyncAuthenticPixels(image,exception);
if (sync == MagickFalse)
break;
}
quantum_info=DestroyQuantumInfo(quantum_info);
break;
}
case '7':
{
register IndexPacket
*indexes;
size_t
channels;
/*
Convert PAM raster image to pixel packets.
*/
switch (quantum_type)
{
case GrayQuantum:
case GrayAlphaQuantum:
{
channels=1;
break;
}
case CMYKQuantum:
case CMYKAQuantum:
{
channels=4;
break;
}
default:
{
channels=3;
break;
}
}
if (image->matte != MagickFalse)
channels++;
extent=channels*(image->depth <= 8 ? 1 : image->depth <= 16 ? 2 : 4)*
image->columns;
quantum_info=AcquireQuantumInfo(image_info,image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowPNMException(ResourceLimitError,"MemoryAllocationFailed");
for (y=0; y < (ssize_t) image->rows; y++)
{
const unsigned char
*pixels;
MagickBooleanType
sync;
register const unsigned char
*magick_restrict p;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
ssize_t
count,
offset;
pixels=(unsigned char *) ReadBlobStream(image,extent,
GetQuantumPixels(quantum_info),&count);
if (count != (ssize_t) extent)
break;
if ((image->progress_monitor != (MagickProgressMonitor) NULL) &&
(image->previous == (Image *) NULL))
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,LoadImageTag,(MagickOffsetType)
row,image->rows);
if (proceed == MagickFalse)
break;
}
offset=row++;
q=QueueAuthenticPixels(image,0,offset,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
break;
indexes=GetAuthenticIndexQueue(image);
p=pixels;
switch (image->depth)
{
case 8:
case 16:
case 32:
{
(void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,
quantum_type,pixels,exception);
break;
}
default:
{
switch (quantum_type)
{
case GrayQuantum:
case GrayAlphaQuantum:
{
unsigned int
pixel;
if (image->depth <= 8)
{
unsigned char
pixel;
for (x=0; x < (ssize_t) image->columns; x++)
{
p=PushCharPixel(p,&pixel);
SetPixelRed(q,ScaleAnyToQuantum(pixel,max_value));
SetPixelGreen(q,GetPixelRed(q));
SetPixelBlue(q,GetPixelRed(q));
SetPixelOpacity(q,OpaqueOpacity);
if (image->matte != MagickFalse)
{
p=PushCharPixel(p,&pixel);
if (image->depth != 1)
SetPixelOpacity(q,ScaleAnyToQuantum(pixel,
max_value));
else
SetPixelOpacity(q,QuantumRange-ScaleAnyToQuantum(
pixel,max_value));
}
q++;
}
break;
}
if (image->depth <= 16)
{
unsigned short
pixel;
for (x=0; x < (ssize_t) image->columns; x++)
{
p=PushShortPixel(MSBEndian,p,&pixel);
SetPixelRed(q,ScaleAnyToQuantum(pixel,max_value));
SetPixelGreen(q,GetPixelRed(q));
SetPixelBlue(q,GetPixelRed(q));
SetPixelOpacity(q,OpaqueOpacity);
if (image->matte != MagickFalse)
{
p=PushShortPixel(MSBEndian,p,&pixel);
SetPixelOpacity(q,ScaleAnyToQuantum(pixel,
max_value));
}
q++;
}
break;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
p=PushLongPixel(MSBEndian,p,&pixel);
SetPixelRed(q,ScaleAnyToQuantum(pixel,max_value));
SetPixelGreen(q,GetPixelRed(q));
SetPixelBlue(q,GetPixelRed(q));
SetPixelOpacity(q,OpaqueOpacity);
if (image->matte != MagickFalse)
{
p=PushLongPixel(MSBEndian,p,&pixel);
SetPixelOpacity(q,ScaleAnyToQuantum(pixel,max_value));
}
q++;
}
break;
}
case CMYKQuantum:
case CMYKAQuantum:
{
unsigned int
pixel;
if (image->depth <= 8)
{
unsigned char
pixel;
for (x=0; x < (ssize_t) image->columns; x++)
{
p=PushCharPixel(p,&pixel);
SetPixelRed(q,ScaleAnyToQuantum(pixel,max_value));
p=PushCharPixel(p,&pixel);
SetPixelGreen(q,ScaleAnyToQuantum(pixel,max_value));
p=PushCharPixel(p,&pixel);
SetPixelBlue(q,ScaleAnyToQuantum(pixel,max_value));
p=PushCharPixel(p,&pixel);
SetPixelIndex(indexes+x,ScaleAnyToQuantum(pixel,
max_value));
SetPixelOpacity(q,OpaqueOpacity);
if (image->matte != MagickFalse)
{
p=PushCharPixel(p,&pixel);
SetPixelOpacity(q,ScaleAnyToQuantum(pixel,
max_value));
}
q++;
}
break;
}
if (image->depth <= 16)
{
unsigned short
pixel;
for (x=0; x < (ssize_t) image->columns; x++)
{
p=PushShortPixel(MSBEndian,p,&pixel);
SetPixelRed(q,ScaleAnyToQuantum(pixel,max_value));
p=PushShortPixel(MSBEndian,p,&pixel);
SetPixelGreen(q,ScaleAnyToQuantum(pixel,max_value));
p=PushShortPixel(MSBEndian,p,&pixel);
SetPixelBlue(q,ScaleAnyToQuantum(pixel,max_value));
p=PushShortPixel(MSBEndian,p,&pixel);
SetPixelIndex(indexes+x,ScaleAnyToQuantum(pixel,
max_value));
SetPixelOpacity(q,OpaqueOpacity);
if (image->matte != MagickFalse)
{
p=PushShortPixel(MSBEndian,p,&pixel);
SetPixelOpacity(q,ScaleAnyToQuantum(pixel,
max_value));
}
q++;
}
break;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
p=PushLongPixel(MSBEndian,p,&pixel);
SetPixelRed(q,ScaleAnyToQuantum(pixel,max_value));
p=PushLongPixel(MSBEndian,p,&pixel);
SetPixelGreen(q,ScaleAnyToQuantum(pixel,max_value));
p=PushLongPixel(MSBEndian,p,&pixel);
SetPixelBlue(q,ScaleAnyToQuantum(pixel,max_value));
p=PushLongPixel(MSBEndian,p,&pixel);
SetPixelIndex(indexes+x,ScaleAnyToQuantum(pixel,max_value));
SetPixelOpacity(q,OpaqueOpacity);
if (image->matte != MagickFalse)
{
p=PushLongPixel(MSBEndian,p,&pixel);
SetPixelOpacity(q,ScaleAnyToQuantum(pixel,max_value));
}
q++;
}
break;
}
default:
{
unsigned int
pixel;
if (image->depth <= 8)
{
unsigned char
pixel;
for (x=0; x < (ssize_t) image->columns; x++)
{
p=PushCharPixel(p,&pixel);
SetPixelRed(q,ScaleAnyToQuantum(pixel,max_value));
p=PushCharPixel(p,&pixel);
SetPixelGreen(q,ScaleAnyToQuantum(pixel,max_value));
p=PushCharPixel(p,&pixel);
SetPixelBlue(q,ScaleAnyToQuantum(pixel,max_value));
SetPixelOpacity(q,OpaqueOpacity);
if (image->matte != MagickFalse)
{
p=PushCharPixel(p,&pixel);
SetPixelOpacity(q,ScaleAnyToQuantum(pixel,
max_value));
}
q++;
}
break;
}
if (image->depth <= 16)
{
unsigned short
pixel;
for (x=0; x < (ssize_t) image->columns; x++)
{
p=PushShortPixel(MSBEndian,p,&pixel);
SetPixelRed(q,ScaleAnyToQuantum(pixel,max_value));
p=PushShortPixel(MSBEndian,p,&pixel);
SetPixelGreen(q,ScaleAnyToQuantum(pixel,max_value));
p=PushShortPixel(MSBEndian,p,&pixel);
SetPixelBlue(q,ScaleAnyToQuantum(pixel,max_value));
SetPixelOpacity(q,OpaqueOpacity);
if (image->matte != MagickFalse)
{
p=PushShortPixel(MSBEndian,p,&pixel);
SetPixelOpacity(q,ScaleAnyToQuantum(pixel,
max_value));
}
q++;
}
break;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
p=PushLongPixel(MSBEndian,p,&pixel);
SetPixelRed(q,ScaleAnyToQuantum(pixel,max_value));
p=PushLongPixel(MSBEndian,p,&pixel);
SetPixelGreen(q,ScaleAnyToQuantum(pixel,max_value));
p=PushLongPixel(MSBEndian,p,&pixel);
SetPixelBlue(q,ScaleAnyToQuantum(pixel,max_value));
SetPixelOpacity(q,OpaqueOpacity);
if (image->matte != MagickFalse)
{
p=PushLongPixel(MSBEndian,p,&pixel);
SetPixelOpacity(q,ScaleAnyToQuantum(pixel,max_value));
}
q++;
}
break;
}
}
break;
}
}
sync=SyncAuthenticPixels(image,exception);
if (sync == MagickFalse)
break;
}
quantum_info=DestroyQuantumInfo(quantum_info);
SetQuantumImageType(image,quantum_type);
break;
}
case 'F':
case 'f':
{
/*
Convert PFM raster image to pixel packets.
*/
if (format == 'f')
(void) SetImageColorspace(image,GRAYColorspace);
quantum_type=format == 'f' ? GrayQuantum : RGBQuantum;
image->endian=quantum_scale < 0.0 ? LSBEndian : MSBEndian;
image->depth=32;
quantum_info=AcquireQuantumInfo(image_info,image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowPNMException(ResourceLimitError,"MemoryAllocationFailed");
status=SetQuantumDepth(image,quantum_info,32);
if (status == MagickFalse)
ThrowPNMException(ResourceLimitError,"MemoryAllocationFailed");
status=SetQuantumFormat(image,quantum_info,FloatingPointQuantumFormat);
if (status == MagickFalse)
ThrowPNMException(ResourceLimitError,"MemoryAllocationFailed");
SetQuantumScale(quantum_info,(MagickRealType) QuantumRange*
fabs(quantum_scale));
extent=GetQuantumExtent(image,quantum_info,quantum_type);
for (y=0; y < (ssize_t) image->rows; y++)
{
const unsigned char
*pixels;
MagickBooleanType
sync;
register PixelPacket
*magick_restrict q;
ssize_t
count,
offset;
size_t
length;
pixels=(unsigned char *) ReadBlobStream(image,extent,
GetQuantumPixels(quantum_info),&count);
if ((size_t) count != extent)
break;
if ((image->progress_monitor != (MagickProgressMonitor) NULL) &&
(image->previous == (Image *) NULL))
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,LoadImageTag,(MagickOffsetType)
row,image->rows);
if (proceed == MagickFalse)
break;
}
offset=row++;
q=QueueAuthenticPixels(image,0,(ssize_t) (image->rows-offset-1),
image->columns,1,exception);
if (q == (PixelPacket *) NULL)
break;
length=ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,
quantum_type,pixels,exception);
if (length != extent)
break;
sync=SyncAuthenticPixels(image,exception);
if (sync == MagickFalse)
break;
}
quantum_info=DestroyQuantumInfo(quantum_info);
SetQuantumImageType(image,quantum_type);
break;
}
default:
ThrowPNMException(CorruptImageError,"ImproperImageHeader");
}
if (*comment_info.comment != '\0')
(void) SetImageProperty(image,"comment",comment_info.comment);
comment_info.comment=DestroyString(comment_info.comment);
if (y < (ssize_t) image->rows)
ThrowPNMException(CorruptImageError,"UnableToReadImageData");
if (EOFBlob(image) != MagickFalse)
{
(void) ThrowMagickException(exception,GetMagickModule(),
CorruptImageError,"UnexpectedEndOfFile","`%s'",image->filename);
break;
}
/*
Proceed to next image.
*/
if (image_info->number_scenes != 0)
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
if ((format == '1') || (format == '2') || (format == '3'))
do
{
/*
Skip to end of line.
*/
count=ReadBlob(image,1,(unsigned char *) &format);
if (count != 1)
break;
if (format == 'P')
break;
} while (format != '\n');
count=ReadBlob(image,1,(unsigned char *) &format);
if ((count == 1) && (format == 'P'))
{
/*
Allocate next image structure.
*/
AcquireNextImage(image_info,image);
if (GetNextImageInList(image) == (Image *) NULL)
{
status=MagickFalse;
break;
}
image=SyncNextImageInList(image);
status=SetImageProgress(image,LoadImagesTag,TellBlob(image),
GetBlobSize(image));
if (status == MagickFalse)
break;
}
} while ((count == 1) && (format == 'P'));
(void) CloseBlob(image);
if (status == MagickFalse)
return(DestroyImageList(image));
return(GetFirstImageInList(image));
} | 0 | [
"CWE-119",
"CWE-787"
] | ImageMagick6 | cb5ec7d98195aa74d5ed299b38eff2a68122f3fa | 143,513,773,471,566,990,000,000,000,000,000,000,000 | 1,166 | https://github.com/ImageMagick/ImageMagick/issues/1612 |
static int nfs_init_server(struct nfs_server *server, const struct nfs_mount_data *data)
{
struct nfs_client *clp;
int error, nfsvers = 2;
dprintk("--> nfs_init_server()\n");
#ifdef CONFIG_NFS_V3
if (data->flags & NFS_MOUNT_VER3)
nfsvers = 3;
#endif
/* Allocate or find a client reference we can use */
clp = nfs_get_client(data->hostname, &data->addr, nfsvers);
if (IS_ERR(clp)) {
dprintk("<-- nfs_init_server() = error %ld\n", PTR_ERR(clp));
return PTR_ERR(clp);
}
error = nfs_init_client(clp, data);
if (error < 0)
goto error;
server->nfs_client = clp;
/* Initialise the client representation from the mount data */
server->flags = data->flags & NFS_MOUNT_FLAGMASK;
if (data->rsize)
server->rsize = nfs_block_size(data->rsize, NULL);
if (data->wsize)
server->wsize = nfs_block_size(data->wsize, NULL);
server->acregmin = data->acregmin * HZ;
server->acregmax = data->acregmax * HZ;
server->acdirmin = data->acdirmin * HZ;
server->acdirmax = data->acdirmax * HZ;
/* Start lockd here, before we might error out */
error = nfs_start_lockd(server);
if (error < 0)
goto error;
error = nfs_init_server_rpcclient(server, data->pseudoflavor);
if (error < 0)
goto error;
server->namelen = data->namlen;
/* Create a client RPC handle for the NFSv3 ACL management interface */
nfs_init_server_aclclient(server);
if (clp->cl_nfsversion == 3) {
if (server->namelen == 0 || server->namelen > NFS3_MAXNAMLEN)
server->namelen = NFS3_MAXNAMLEN;
if (!(data->flags & NFS_MOUNT_NORDIRPLUS))
server->caps |= NFS_CAP_READDIRPLUS;
} else {
if (server->namelen == 0 || server->namelen > NFS2_MAXNAMLEN)
server->namelen = NFS2_MAXNAMLEN;
}
dprintk("<-- nfs_init_server() = 0 [new %p]\n", clp);
return 0;
error:
server->nfs_client = NULL;
nfs_put_client(clp);
dprintk("<-- nfs_init_server() = xerror %d\n", error);
return error;
} | 1 | [
"CWE-20"
] | linux-2.6 | 54af3bb543c071769141387a42deaaab5074da55 | 116,871,658,680,555,200,000,000,000,000,000,000,000 | 69 | NFS: Fix an Oops in encode_lookup()
It doesn't look as if the NFS file name limit is being initialised correctly
in the struct nfs_server. Make sure that we limit whatever is being set in
nfs_probe_fsinfo() and nfs_init_server().
Also ensure that readdirplus and nfs4_path_walk respect our file name
limits.
Signed-off-by: Trond Myklebust <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
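The fix described above clamps the name length before the server-supplied value is trusted, which is what the "if (server->namelen == 0 || server->namelen > NFS3_MAXNAMLEN)" checks in nfs_init_server() are meant to guarantee. A minimal user-space sketch of the same clamp, with an illustrative limit rather than the kernel's NFS2/NFS3 constants:

#include <stdio.h>

#define PROTO_MAXNAMLEN 255        /* illustrative protocol limit */

/* Treat 0 or an oversized value from the server as "use the maximum". */
static unsigned int clamp_namelen(unsigned int server_namelen) {
    if (server_namelen == 0 || server_namelen > PROTO_MAXNAMLEN)
        return PROTO_MAXNAMLEN;
    return server_namelen;
}

int main(void) {
    printf("%u\n", clamp_namelen(0));       /* -> 255 */
    printf("%u\n", clamp_namelen(100000));  /* -> 255 */
    printf("%u\n", clamp_namelen(64));      /* -> 64  */
    return 0;
}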
static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *page_table, pmd_t *pmd,
unsigned int flags, pte_t orig_pte)
{
pgoff_t pgoff = (((address & PAGE_MASK)
- vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
pte_unmap(page_table);
return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
} | 0 | [
"CWE-264"
] | linux-2.6 | 1a5a9906d4e8d1976b701f889d8f35d54b928f25 | 177,638,665,582,251,220,000,000,000,000,000,000,000 | 10 | mm: thp: fix pmd_bad() triggering in code paths holding mmap_sem read mode
In some cases it may happen that pmd_none_or_clear_bad() is called with
the mmap_sem held in read mode. In those cases the huge page faults can
allocate hugepmds under pmd_none_or_clear_bad() and that can trigger a
false positive from pmd_bad(), which does not expect to see a pmd
materializing as trans huge.
It's not khugepaged causing the problem, khugepaged holds the mmap_sem
in write mode (and all those sites must hold the mmap_sem in read mode
to prevent pagetables to go away from under them, during code review it
seems vm86 mode on 32bit kernels requires that too unless it's
restricted to 1 thread per process or UP builds). The race is only with
the huge pagefaults that can convert a pmd_none() into a
pmd_trans_huge().
Effectively all these pmd_none_or_clear_bad() sites running with
mmap_sem in read mode are somewhat speculative with the page faults, and
the result is always undefined when they run simultaneously. This is
probably why it wasn't common to run into this. For example if the
madvise(MADV_DONTNEED) runs zap_page_range() shortly before the page
fault, the hugepage will not be zapped, if the page fault runs first it
will be zapped.
Altering pmd_bad() not to error out if it finds hugepmds won't be enough
to fix this, because zap_pmd_range would then proceed to call
zap_pte_range (which would be incorrect if the pmd became a
pmd_trans_huge()).
The simplest way to fix this is to read the pmd in the local stack
(regardless of what we read, no need for actual CPU barriers, only a
compiler barrier needed), and be sure it is not changing under the code
that computes its value. Even if the real pmd is changing under the
value we hold on the stack, we don't care. If we actually end up in
zap_pte_range it means the pmd was not none already and it was not huge,
and it can't become huge from under us (khugepaged locking explained
above).
All we need is to enforce that, in a code path like the one below, there is
no longer any way for pmd_trans_huge to be false while pmd_none_or_clear_bad
still runs into a hugepmd. The overhead of a barrier() is just a compiler
tweak and should not be measurable (I only added it for THP builds). I
don't exclude different compiler versions may have prevented the race
too by caching the value of *pmd on the stack (that hasn't been
verified, but it wouldn't be impossible considering
pmd_none_or_clear_bad, pmd_bad, pmd_trans_huge, pmd_none are all inlines
and there's no external function called in between pmd_trans_huge and
pmd_none_or_clear_bad).
if (pmd_trans_huge(*pmd)) {
if (next-addr != HPAGE_PMD_SIZE) {
VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem));
split_huge_page_pmd(vma->vm_mm, pmd);
} else if (zap_huge_pmd(tlb, vma, pmd, addr))
continue;
/* fall through */
}
if (pmd_none_or_clear_bad(pmd))
Because this race condition could be exercised without special
privileges this was reported in CVE-2012-1179.
The race was identified and fully explained by Ulrich who debugged it.
I'm quoting his accurate explanation below, for reference.
====== start quote =======
mapcount 0 page_mapcount 1
kernel BUG at mm/huge_memory.c:1384!
At some point prior to the panic, a "bad pmd ..." message similar to the
following is logged on the console:
mm/memory.c:145: bad pmd ffff8800376e1f98(80000000314000e7).
The "bad pmd ..." message is logged by pmd_clear_bad() before it clears
the page's PMD table entry.
143 void pmd_clear_bad(pmd_t *pmd)
144 {
-> 145 pmd_ERROR(*pmd);
146 pmd_clear(pmd);
147 }
After the PMD table entry has been cleared, there is an inconsistency
between the actual number of PMD table entries that are mapping the page
and the page's map count (_mapcount field in struct page). When the page
is subsequently reclaimed, __split_huge_page() detects this inconsistency.
1381 if (mapcount != page_mapcount(page))
1382 printk(KERN_ERR "mapcount %d page_mapcount %d\n",
1383 mapcount, page_mapcount(page));
-> 1384 BUG_ON(mapcount != page_mapcount(page));
The root cause of the problem is a race of two threads in a multithreaded
process. Thread B incurs a page fault on a virtual address that has never
been accessed (PMD entry is zero) while Thread A is executing an madvise()
system call on a virtual address within the same 2 MB (huge page) range.
virtual address space
.---------------------.
| |
| |
.-|---------------------|
| | |
| | |<-- B(fault)
| | |
2 MB | |/////////////////////|-.
huge < |/////////////////////| > A(range)
page | |/////////////////////|-'
| | |
| | |
'-|---------------------|
| |
| |
'---------------------'
- Thread A is executing an madvise(..., MADV_DONTNEED) system call
on the virtual address range "A(range)" shown in the picture.
sys_madvise
// Acquire the semaphore in shared mode.
down_read(¤t->mm->mmap_sem)
...
madvise_vma
switch (behavior)
case MADV_DONTNEED:
madvise_dontneed
zap_page_range
unmap_vmas
unmap_page_range
zap_pud_range
zap_pmd_range
//
// Assume that this huge page has never been accessed.
// I.e. content of the PMD entry is zero (not mapped).
//
if (pmd_trans_huge(*pmd)) {
// We don't get here due to the above assumption.
}
//
// Assume that Thread B incurred a page fault and
.---------> // sneaks in here as shown below.
| //
| if (pmd_none_or_clear_bad(pmd))
| {
| if (unlikely(pmd_bad(*pmd)))
| pmd_clear_bad
| {
| pmd_ERROR
| // Log "bad pmd ..." message here.
| pmd_clear
| // Clear the page's PMD entry.
| // Thread B incremented the map count
| // in page_add_new_anon_rmap(), but
| // now the page is no longer mapped
| // by a PMD entry (-> inconsistency).
| }
| }
|
v
- Thread B is handling a page fault on virtual address "B(fault)" shown
in the picture.
...
do_page_fault
__do_page_fault
// Acquire the semaphore in shared mode.
down_read_trylock(&mm->mmap_sem)
...
handle_mm_fault
if (pmd_none(*pmd) && transparent_hugepage_enabled(vma))
// We get here due to the above assumption (PMD entry is zero).
do_huge_pmd_anonymous_page
alloc_hugepage_vma
// Allocate a new transparent huge page here.
...
__do_huge_pmd_anonymous_page
...
spin_lock(&mm->page_table_lock)
...
page_add_new_anon_rmap
// Here we increment the page's map count (starts at -1).
atomic_set(&page->_mapcount, 0)
set_pmd_at
// Here we set the page's PMD entry which will be cleared
// when Thread A calls pmd_clear_bad().
...
spin_unlock(&mm->page_table_lock)
The mmap_sem does not prevent the race because both threads are acquiring
it in shared mode (down_read). Thread B holds the page_table_lock while
the page's map count and PMD table entry are updated. However, Thread A
does not synchronize on that lock.
====== end quote =======
[[email protected]: checkpatch fixes]
Reported-by: Ulrich Obergfell <[email protected]>
Signed-off-by: Andrea Arcangeli <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Dave Jones <[email protected]>
Acked-by: Larry Woodman <[email protected]>
Acked-by: Rik van Riel <[email protected]>
Cc: <[email protected]> [2.6.38+]
Cc: Mark Salter <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
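The essence of the fix described above is to read the pmd once into a local variable, insert a compiler barrier, and classify only that snapshot, so a hugepmd materializing concurrently can never be misreported as bad and cleared. A minimal user-space sketch of that idea (pmd_t, the flag bits, and the helper name below are stand-ins, not the kernel's pmd_none_or_trans_huge_or_clear_bad()):

#include <stdio.h>

/* Compiler-only barrier: prevents the compiler from re-reading *pmd after
 * the classification below (GCC/Clang inline-asm syntax). */
#define barrier() __asm__ __volatile__("" ::: "memory")

typedef unsigned long pmd_t;          /* illustrative stand-in */

static int pmd_none(pmd_t v)       { return v == 0; }
static int pmd_trans_huge(pmd_t v) { return (v & 0x2) != 0; }  /* fake flag */
static int pmd_bad(pmd_t v)        { return (v & 0x1) != 0; }  /* fake flag */

/* Classify a possibly concurrently-changing pmd from a private copy, so a
 * huge pmd materializing under us is never reported as "bad" and cleared. */
static int pmd_none_or_trans_huge_or_bad(pmd_t *pmd) {
    pmd_t pmdval = *pmd;   /* single read into the local stack */
    barrier();             /* don't let the compiler reload *pmd below */
    if (pmd_none(pmdval) || pmd_trans_huge(pmdval))
        return 1;          /* caller should skip this range */
    if (pmd_bad(pmdval))
        return 1;          /* genuinely corrupted entry */
    return 0;              /* safe to walk the pte level */
}

int main(void) {
    pmd_t e = 0x2;   /* pretend a huge pmd appeared concurrently */
    printf("skip=%d\n", pmd_none_or_trans_huge_or_bad(&e));
    return 0;
}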
modify_contact_rename_handler (LDAPOp *op,
LDAPMessage *res)
{
LDAPModifyOp *modify_op = (LDAPModifyOp *) op;
EBookBackendLDAP *bl = E_BOOK_BACKEND_LDAP (op->backend);
gchar *ldap_error_msg = NULL;
gint ldap_error;
LDAPMod **ldap_mods;
gboolean differences;
gint modify_contact_msgid;
g_rec_mutex_lock (&eds_ldap_handler_lock);
if (!bl->priv->ldap) {
g_rec_mutex_unlock (&eds_ldap_handler_lock);
e_data_book_respond_modify_contacts (op->book,
op->opid,
EDB_ERROR_NOT_CONNECTED (),
NULL);
ldap_op_finished (op);
return;
}
g_rec_mutex_unlock (&eds_ldap_handler_lock);
/* was a rename necessary? */
if (modify_op->new_id) {
if (LDAP_RES_RENAME != ldap_msgtype (res)) {
e_data_book_respond_modify_contacts (op->book,
op->opid,
EDB_ERROR_MSG_TYPE (ldap_msgtype (res)),
NULL);
ldap_op_finished (op);
return;
}
g_rec_mutex_lock (&eds_ldap_handler_lock);
if (bl->priv->ldap) {
ldap_parse_result (
bl->priv->ldap, res, &ldap_error,
NULL, &ldap_error_msg, NULL, NULL, 0);
} else {
ldap_error = LDAP_SERVER_DOWN;
}
g_rec_mutex_unlock (&eds_ldap_handler_lock);
if (ldap_error != LDAP_SUCCESS) {
g_warning (
"modify_contact_rename_handler: %02X (%s), additional info: %s",
ldap_error,
ldap_err2string (ldap_error), ldap_error_msg);
} else {
if (bl->priv->cache)
e_book_backend_cache_add_contact (bl->priv->cache, modify_op->contact);
}
if (ldap_error_msg)
ldap_memfree (ldap_error_msg);
if (ldap_error != LDAP_SUCCESS) {
e_data_book_respond_modify_contacts (op->book,
op->opid,
ldap_error_to_response (ldap_error),
NULL);
ldap_op_finished (op);
return;
}
/* rename was successful => replace old IDs */
e_contact_set (modify_op->current_contact, E_CONTACT_UID, modify_op->new_id);
e_contact_set (modify_op->contact, E_CONTACT_UID, modify_op->new_id);
modify_op->id = e_contact_get_const (modify_op->contact, E_CONTACT_UID);
}
differences = modify_op->mod_array->len > 0;
if (differences) {
/* remove the NULL at the end */
g_ptr_array_remove (modify_op->mod_array, NULL);
/* add our objectclass(es), making sure
* evolutionPerson is there if it's supported */
if (e_contact_get (modify_op->current_contact, E_CONTACT_IS_LIST))
add_objectclass_mod (bl, modify_op->mod_array, modify_op->existing_objectclasses, TRUE, TRUE);
else
add_objectclass_mod (bl, modify_op->mod_array, modify_op->existing_objectclasses, FALSE, TRUE);
/* then put the NULL back */
g_ptr_array_add (modify_op->mod_array, NULL);
ldap_mods = (LDAPMod **) modify_op->mod_array->pdata;
#ifdef LDAP_DEBUG_MODIFY
if (enable_debug) {
gint i;
printf ("Sending the following to the server as MOD\n");
for (i = 0; g_ptr_array_index (modify_op->mod_array, i); i++) {
LDAPMod *mod = g_ptr_array_index (modify_op->mod_array, i);
if (mod->mod_op & LDAP_MOD_DELETE)
printf ("del ");
else if (mod->mod_op & LDAP_MOD_REPLACE)
printf ("rep ");
else
printf ("add ");
if (mod->mod_op & LDAP_MOD_BVALUES)
printf ("ber ");
else
printf (" ");
printf (" %s:\n", mod->mod_type);
if (mod->mod_op & LDAP_MOD_BVALUES) {
gint j;
for (j = 0; mod->mod_bvalues && mod->mod_bvalues[j] && mod->mod_bvalues[j]->bv_val; j++)
printf ("\t\t'%s'\n", mod->mod_bvalues[j]->bv_val);
} else {
gint j;
for (j = 0; mod->mod_values && mod->mod_values[j]; j++)
printf ("\t\t'%s'\n", mod->mod_values[j]);
}
}
}
#endif
/* actually perform the ldap modify */
g_rec_mutex_lock (&eds_ldap_handler_lock);
if (bl->priv->ldap) {
ldap_error = ldap_modify_ext (
bl->priv->ldap, modify_op->id, ldap_mods,
NULL, NULL, &modify_contact_msgid);
} else {
ldap_error = LDAP_SERVER_DOWN;
}
g_rec_mutex_unlock (&eds_ldap_handler_lock);
if (ldap_error == LDAP_SUCCESS) {
op->handler = modify_contact_modify_handler;
ldap_op_change_id (
(LDAPOp *) modify_op,
modify_contact_msgid);
} else {
g_warning ("ldap_modify_ext returned %d\n", ldap_error);
e_data_book_respond_modify_contacts (op->book,
op->opid,
ldap_error_to_response (ldap_error),
NULL);
ldap_op_finished (op);
return;
}
} else {
e_data_book_respond_modify_contacts (op->book,
op->opid,
e_data_book_create_error_fmt (E_DATA_BOOK_STATUS_OTHER_ERROR,
_("%s: Unhandled result type %d returned"), G_STRFUNC, ldap_msgtype (res)),
NULL);
ldap_op_finished (op);
}
} | 0 | [] | evolution-data-server | 34bad61738e2127736947ac50e0c7969cc944972 | 249,069,500,087,090,960,000,000,000,000,000,000,000 | 153 | Bug 796174 - strcat() considered unsafe for buffer overflow |
static int ser_to_dev(int ser, dev_t *dev_no)
{
if (ser < 0 || ser > (255 - 64)) {
pr_err("speakup: Invalid ser param. Must be between 0 and 191 inclusive.\n");
return -EINVAL;
}
*dev_no = MKDEV(4, (64 + ser));
return 0;
} | 0 | [
"CWE-362",
"CWE-763"
] | linux | d4122754442799187d5d537a9c039a49a67e57f1 | 26,819,908,792,649,517,000,000,000,000,000,000,000 | 10 | speakup: Do not let the line discipline be used several times
Speakup has only one speakup_tty variable to store the tty it is managing. This
makes sense since its codebase currently assumes that there is only one user who
controls the screen reading.
That however means that we have to forbid using the line discipline several
times, otherwise the second closure would try to free a NULL ldisc_data, leading to
general protection fault: 0000 [#1] SMP KASAN PTI
RIP: 0010:spk_ttyio_ldisc_close+0x2c/0x60
Call Trace:
tty_ldisc_release+0xa2/0x340
tty_release_struct+0x17/0xd0
tty_release+0x9d9/0xcc0
__fput+0x231/0x740
task_work_run+0x12c/0x1a0
do_exit+0x9b5/0x2230
? release_task+0x1240/0x1240
? __do_page_fault+0x562/0xa30
do_group_exit+0xd5/0x2a0
__x64_sys_exit_group+0x35/0x40
do_syscall_64+0x89/0x2b0
? page_fault+0x8/0x30
entry_SYSCALL_64_after_hwframe+0x44/0xa9
Cc: [email protected]
Reported-by: 秦世松 <[email protected]>
Signed-off-by: Samuel Thibault <[email protected]>
Tested-by: Shisong Qin <[email protected]>
Link: https://lore.kernel.org/r/20201110183541.fzgnlwhjpgqzjeth@function
Signed-off-by: Greg Kroah-Hartman <[email protected]> |
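A hedged sketch of the guard the message describes (the open-path details and struct name are assumptions; the -EBUSY check is the point): because the driver tracks a single global speakup_tty, a second attempt to attach the line discipline is refused instead of being allowed to later free a NULL disc_data on close.

static int spk_ttyio_ldisc_open_sketch(struct tty_struct *tty)
{
	if (speakup_tty)		/* already bound to one tty */
		return -EBUSY;
	if (!tty->ops->write)
		return -EOPNOTSUPP;

	tty->disc_data = kzalloc(sizeof(struct spk_ldisc_data), GFP_KERNEL);
	if (!tty->disc_data)
		return -ENOMEM;

	speakup_tty = tty;
	return 0;
}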
NDIS_STATUS ParaNdis_FinishSpecificInitialization(PARANDIS_ADAPTER *pContext)
{
NDIS_STATUS status = NDIS_STATUS_SUCCESS;
NET_BUFFER_LIST_POOL_PARAMETERS PoolParams;
NDIS_MINIPORT_INTERRUPT_CHARACTERISTICS mic;
DEBUG_ENTRY(0);
NdisZeroMemory(&mic, sizeof(mic));
mic.Header.Type = NDIS_OBJECT_TYPE_MINIPORT_INTERRUPT;
mic.Header.Revision = NDIS_MINIPORT_INTERRUPT_REVISION_1;
mic.Header.Size = NDIS_SIZEOF_MINIPORT_INTERRUPT_CHARACTERISTICS_REVISION_1;
mic.DisableInterruptHandler = MiniportDisableInterruptEx;
mic.EnableInterruptHandler = MiniportEnableInterruptEx;
mic.InterruptDpcHandler = MiniportInterruptDPC;
mic.InterruptHandler = MiniportInterrupt;
if (pContext->bUsingMSIX)
{
mic.MsiSupported = TRUE;
mic.MsiSyncWithAllMessages = TRUE;
mic.EnableMessageInterruptHandler = MiniportEnableMSIInterrupt;
mic.DisableMessageInterruptHandler = MiniportDisableMSIInterrupt;
mic.MessageInterruptHandler = MiniportMSIInterrupt;
mic.MessageInterruptDpcHandler = MiniportMSIInterruptDpc;
}
PoolParams.Header.Type = NDIS_OBJECT_TYPE_DEFAULT;
PoolParams.Header.Size = sizeof(PoolParams);
PoolParams.Header.Revision = NET_BUFFER_LIST_POOL_PARAMETERS_REVISION_1;
PoolParams.ProtocolId = NDIS_PROTOCOL_ID_DEFAULT;
PoolParams.fAllocateNetBuffer = TRUE;
PoolParams.ContextSize = 0;
PoolParams.PoolTag = PARANDIS_MEMORY_TAG;
PoolParams.DataSize = 0;
pContext->BufferListsPool = NdisAllocateNetBufferListPool(pContext->MiniportHandle, &PoolParams);
if (!pContext->BufferListsPool)
{
status = NDIS_STATUS_RESOURCES;
}
if (status == NDIS_STATUS_SUCCESS)
{
status = NdisMRegisterInterruptEx(pContext->MiniportHandle, pContext, &mic, &pContext->InterruptHandle);
}
#ifdef DBG
if (pContext->bUsingMSIX)
{
DPrintf(0, ("[%s] MSIX message table %savailable, count = %u\n", __FUNCTION__, (mic.MessageInfoTable == nullptr ? "not " : ""),
(mic.MessageInfoTable == nullptr ? 0 : mic.MessageInfoTable->MessageCount)));
}
else
{
DPrintf(0, ("[%s] Not using MSIX\n", __FUNCTION__));
}
#endif
if (status == NDIS_STATUS_SUCCESS)
{
NDIS_SG_DMA_DESCRIPTION sgDesc;
sgDesc.Header.Type = NDIS_OBJECT_TYPE_SG_DMA_DESCRIPTION;
sgDesc.Header.Revision = NDIS_SG_DMA_DESCRIPTION_REVISION_1;
sgDesc.Header.Size = sizeof(sgDesc);
sgDesc.Flags = NDIS_SG_DMA_64_BIT_ADDRESS;
sgDesc.MaximumPhysicalMapping = 0x10000; // 64K
sgDesc.ProcessSGListHandler = ProcessSGListHandler;
sgDesc.SharedMemAllocateCompleteHandler = SharedMemAllocateCompleteHandler;
sgDesc.ScatterGatherListSize = 0; // OUT value
status = NdisMRegisterScatterGatherDma(pContext->MiniportHandle, &sgDesc, &pContext->DmaHandle);
if (status != NDIS_STATUS_SUCCESS)
{
DPrintf(0, ("[%s] ERROR: NdisMRegisterScatterGatherDma failed (%X)!\n", __FUNCTION__, status));
}
else
{
DPrintf(0, ("[%s] SG recommended size %d\n", __FUNCTION__, sgDesc.ScatterGatherListSize));
}
}
if (status == NDIS_STATUS_SUCCESS)
{
if (NDIS_CONNECT_MESSAGE_BASED == mic.InterruptType)
{
pContext->pMSIXInfoTable = mic.MessageInfoTable;
}
else if (pContext->bUsingMSIX)
{
DPrintf(0, ("[%s] ERROR: Interrupt type %d, message table %p\n",
__FUNCTION__, mic.InterruptType, mic.MessageInfoTable));
status = NDIS_STATUS_RESOURCE_CONFLICT;
}
ParaNdis6_ApplyOffloadPersistentConfiguration(pContext);
DebugParseOffloadBits();
}
DEBUG_EXIT_STATUS(0, status);
return status;
} | 0 | [
"CWE-20"
] | kvm-guest-drivers-windows | 723416fa4210b7464b28eab89cc76252e6193ac1 | 6,065,173,519,896,257,000,000,000,000,000,000,000 | 96 | NetKVM: BZ#1169718: Checking the length only on read
Signed-off-by: Joseph Hindin <[email protected]> |
void setRecordTtl(const std::chrono::seconds& ttl) { record_ttl_ = ttl; } | 0 | [
"CWE-400"
] | envoy | 542f84c66e9f6479bc31c6f53157c60472b25240 | 5,599,555,749,717,216,000,000,000,000,000,000,000 | 1 | overload: Runtime configurable global connection limits (#147)
Signed-off-by: Tony Allen <[email protected]> |
void setExperimentalSettings(QuicServerConnectionState& conn) {
conn.lossState.reorderingThreshold =
std::numeric_limits<decltype(conn.lossState.reorderingThreshold)>::max();
} | 0 | [
"CWE-617",
"CWE-703"
] | mvfst | a67083ff4b8dcbb7ee2839da6338032030d712b0 | 50,874,907,498,806,690,000,000,000,000,000,000,000 | 4 | Close connection if we derive an extra 1-rtt write cipher
Summary: Fixes CVE-2021-24029
Reviewed By: mjoras, lnicco
Differential Revision: D26613890
fbshipit-source-id: 19bb2be2c731808144e1a074ece313fba11f1945 |
static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
char __user *optval, int __user *optlen)
{
struct sctp_hmacalgo __user *p = (void __user *)optval;
struct sctp_hmac_algo_param *hmacs;
__u16 data_len = 0;
u32 num_idents;
if (!sctp_auth_enable)
return -EACCES;
hmacs = sctp_sk(sk)->ep->auth_hmacs_list;
data_len = ntohs(hmacs->param_hdr.length) - sizeof(sctp_paramhdr_t);
if (len < sizeof(struct sctp_hmacalgo) + data_len)
return -EINVAL;
len = sizeof(struct sctp_hmacalgo) + data_len;
num_idents = data_len / sizeof(u16);
if (put_user(len, optlen))
return -EFAULT;
if (put_user(num_idents, &p->shmac_num_idents))
return -EFAULT;
if (copy_to_user(p->shmac_idents, hmacs->hmac_ids, data_len))
return -EFAULT;
return 0;
} | 0 | [] | linux-2.6 | 5e739d1752aca4e8f3e794d431503bfca3162df4 | 143,569,549,659,727,820,000,000,000,000,000,000,000 | 28 | sctp: fix potential panics in the SCTP-AUTH API.
All of the SCTP-AUTH socket options could cause a panic
if the extension is disabled and the API is envoked.
Additionally, there were some additional assumptions that
certain pointers would always be valid which may not
always be the case.
This patch hardens the API and address all of the crash
scenarios.
Signed-off-by: Vlad Yasevich <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
static void kvmgt_put_vfio_device(void *vgpu)
{
if (WARN_ON(!((struct intel_vgpu *)vgpu)->vdev.vfio_device))
return;
vfio_device_put(((struct intel_vgpu *)vgpu)->vdev.vfio_device);
} | 0 | [
"CWE-20"
] | linux | 51b00d8509dc69c98740da2ad07308b630d3eb7d | 208,859,654,568,311,100,000,000,000,000,000,000,000 | 7 | drm/i915/gvt: Fix mmap range check
This is to fix missed mmap range check on vGPU bar2 region
and only allow to map vGPU allocated GMADDR range, which means
user space should support sparse mmap to get proper offset for
mmap vGPU aperture. And this takes care of actual pgoff in mmap
request as original code always does from beginning of vGPU
aperture.
Fixes: 659643f7d814 ("drm/i915/gvt/kvmgt: add vfio/mdev support to KVMGT")
Cc: "Monroy, Rodrigo Axel" <[email protected]>
Cc: "Orrala Contreras, Alfredo" <[email protected]>
Cc: [email protected] # v4.10+
Reviewed-by: Hang Yuan <[email protected]>
Signed-off-by: Zhenyu Wang <[email protected]> |
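The fix above amounts to validating the requested mmap window against the aperture actually assigned to the vGPU, taking the caller's pgoff into account. A hedged sketch of such a bounds check (the function name, the aperture_size parameter, and the call site are illustrative, not the driver's real interface):

static int vgpu_mmap_range_check_sketch(struct vm_area_struct *vma,
					u64 aperture_size)
{
	u64 off = (u64)vma->vm_pgoff << PAGE_SHIFT;
	u64 len = vma->vm_end - vma->vm_start;

	if (len == 0 || off + len < off)	/* empty or wrapping request */
		return -EINVAL;
	if (off + len > aperture_size)		/* outside the vGPU aperture */
		return -EINVAL;
	return 0;
}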
static int shutdown_veth(struct lxc_handler *handler, struct lxc_netdev *netdev)
{
char *veth1;
int err;
if (netdev->priv.veth_attr.pair)
veth1 = netdev->priv.veth_attr.pair;
else
veth1 = netdev->priv.veth_attr.veth1;
if (netdev->downscript) {
err = run_script(handler->name, "net", netdev->downscript,
"down", "veth", veth1, (char*) NULL);
if (err)
return -1;
}
return 0;
} | 0 | [
"CWE-59",
"CWE-61"
] | lxc | 592fd47a6245508b79fe6ac819fe6d3b2c1289be | 332,071,050,922,870,450,000,000,000,000,000,000,000 | 18 | CVE-2015-1335: Protect container mounts against symlinks
When a container starts up, lxc sets up the container's initial fstree
by doing a bunch of mounting, guided by the container configuration
file. The container config is owned by the admin or user on the host,
so we do not try to guard against bad entries. However, since the
mount target is in the container, it's possible that the container admin
could divert the mount with symbolic links. This could bypass proper
container startup (i.e. confinement of a root-owned container by the
restrictive apparmor policy, by diverting the required write to
/proc/self/attr/current), or bypass the (path-based) apparmor policy
by diverting, say, /proc to /mnt in the container.
To prevent this,
1. do not allow mounts to paths containing symbolic links
2. do not allow bind mounts from relative paths containing symbolic
links.
Details:
Define safe_mount which ensures that the container has not inserted any
symbolic links into any mount targets for mounts to be done during
container setup.
The host's mount path may contain symbolic links. As it is under the
control of the administrator, that's ok. So safe_mount begins the check
for symbolic links after the rootfs->mount, by opening that directory.
It opens each directory along the path using openat() relative to the
parent directory using O_NOFOLLOW. When the target is reached, it
mounts onto /proc/self/fd/<targetfd>.
Use safe_mount() in mount_entry(), when mounting container proc,
and when needed. In particular, safe_mount() need not be used in
any case where:
1. the mount is done in the container's namespace
2. the mount is for the container's rootfs
3. the mount is relative to a tmpfs or proc/sysfs which we have
just safe_mount()ed ourselves
Since we were using proc/net as a temporary placeholder for /proc/sys/net
during container startup, and proc/net is a symbolic link, use proc/tty
instead.
Update the lxc.container.conf manpage with details about the new
restrictions.
Finally, add a testcase to test some symbolic link possibilities.
Reported-by: Roman Fiedler
Signed-off-by: Serge Hallyn <[email protected]>
Acked-by: Stéphane Graber <[email protected]> |
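A condensed, hedged sketch of the safe_mount() idea described above (lxc's real implementation differs and handles many more cases; the helper name and error handling here are illustrative): every component under the container root is opened relative to its parent with O_NOFOLLOW, so a planted symlink makes the walk fail, and the final mount targets /proc/self/fd/<fd> rather than a pathname the container can still redirect.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mount.h>
#include <unistd.h>

/* Note: strtok_r() modifies relpath in place. */
static int safe_mount_sketch(int rootfd, char *relpath, const char *src,
			     const char *fstype, unsigned long flags)
{
	char proc_path[64];
	char *part, *saveptr = NULL;
	int next, fd = dup(rootfd);

	if (fd < 0)
		return -1;
	for (part = strtok_r(relpath, "/", &saveptr); part;
	     part = strtok_r(NULL, "/", &saveptr)) {
		next = openat(fd, part, O_RDONLY | O_NOFOLLOW | O_CLOEXEC);
		close(fd);
		if (next < 0)
			return -1;	/* symlink (ELOOP) or missing component */
		fd = next;
	}
	snprintf(proc_path, sizeof(proc_path), "/proc/self/fd/%d", fd);
	if (mount(src, proc_path, fstype, flags, NULL) < 0) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}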
set_mouse_topline(win_T *wp)
{
orig_topline = wp->w_topline;
# ifdef FEAT_DIFF
orig_topfill = wp->w_topfill;
# endif
} | 0 | [
"CWE-125",
"CWE-787"
] | vim | e178af5a586ea023622d460779fdcabbbfac0908 | 41,724,941,187,475,720,000,000,000,000,000,000,000 | 7 | patch 8.2.5160: accessing invalid memory after changing terminal size
Problem: Accessing invalid memory after changing terminal size.
Solution: Adjust cmdline_row and msg_row to the value of Rows. |
PGTYPEStimestamp_from_asc(char *str, char **endptr)
{
timestamp result;
#ifdef HAVE_INT64_TIMESTAMP
int64 noresult = 0;
#else
double noresult = 0.0;
#endif
fsec_t fsec;
struct tm tt,
*tm = &tt;
int dtype;
int nf;
char *field[MAXDATEFIELDS];
int ftype[MAXDATEFIELDS];
char lowstr[MAXDATELEN + MAXDATEFIELDS];
char *realptr;
char **ptr = (endptr != NULL) ? endptr : &realptr;
if (strlen(str) >= sizeof(lowstr))
{
errno = PGTYPES_TS_BAD_TIMESTAMP;
return (noresult);
}
if (ParseDateTime(str, lowstr, field, ftype, &nf, ptr) != 0 ||
DecodeDateTime(field, ftype, nf, &dtype, tm, &fsec, 0) != 0)
{
errno = PGTYPES_TS_BAD_TIMESTAMP;
return (noresult);
}
switch (dtype)
{
case DTK_DATE:
if (tm2timestamp(tm, fsec, NULL, &result) != 0)
{
errno = PGTYPES_TS_BAD_TIMESTAMP;
return (noresult);
}
break;
case DTK_EPOCH:
result = SetEpochTimestamp();
break;
case DTK_LATE:
TIMESTAMP_NOEND(result);
break;
case DTK_EARLY:
TIMESTAMP_NOBEGIN(result);
break;
case DTK_INVALID:
errno = PGTYPES_TS_BAD_TIMESTAMP;
return (noresult);
default:
errno = PGTYPES_TS_BAD_TIMESTAMP;
return (noresult);
}
/* AdjustTimestampForTypmod(&result, typmod); */
/*
* Since it's difficult to test for noresult, make sure errno is 0 if no
* error occurred.
*/
errno = 0;
return result;
} | 1 | [
"CWE-416",
"CWE-119"
] | postgres | 4318daecc959886d001a6e79c6ea853e8b1dfb4b | 325,193,846,501,532,700,000,000,000,000,000,000,000 | 73 | Fix handling of wide datetime input/output.
Many server functions use the MAXDATELEN constant to size a buffer for
parsing or displaying a datetime value. It was much too small for the
longest possible interval output and slightly too small for certain
valid timestamp input, particularly input with a long timezone name.
The long input was rejected needlessly; the long output caused
interval_out() to overrun its buffer. ECPG's pgtypes library has a copy
of the vulnerable functions, which bore the same vulnerabilities along
with some of its own. In contrast to the server, certain long inputs
caused stack overflow rather than failing cleanly. Back-patch to 8.4
(all supported versions).
Reported by Daniel Schüssler, reviewed by Tom Lane.
Security: CVE-2014-0063 |
STBIDEF int stbi_info_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp)
{
stbi__context s;
stbi__start_mem(&s,buffer,len);
return stbi__info_main(&s,x,y,comp);
} | 0 | [
"CWE-787"
] | stb | 5ba0baaa269b3fd681828e0e3b3ac0f1472eaf40 | 288,973,013,594,247,100,000,000,000,000,000,000,000 | 6 | stb_image: Reject fractional JPEG component subsampling ratios
The component resamplers are not written to support this and I've
never seen it happen in a real (non-crafted) JPEG file so I'm
fine rejecting this as outright corrupt.
Fixes issue #1178. |
static void ahci_reg_init(AHCIState *s)
{
int i;
s->control_regs.cap = (s->ports - 1) |
(AHCI_NUM_COMMAND_SLOTS << 8) |
(AHCI_SUPPORTED_SPEED_GEN1 << AHCI_SUPPORTED_SPEED) |
HOST_CAP_NCQ | HOST_CAP_AHCI;
s->control_regs.impl = (1 << s->ports) - 1;
s->control_regs.version = AHCI_VERSION_1_0;
for (i = 0; i < s->ports; i++) {
s->dev[i].port_state = STATE_RUN;
}
} | 0 | [
"CWE-399"
] | qemu | 3251bdcf1c67427d964517053c3d185b46e618e8 | 238,641,736,894,910,950,000,000,000,000,000,000,000 | 17 | ide: Correct handling of malformed/short PRDTs
This impacts both BMDMA and AHCI HBA interfaces for IDE.
Currently, we confuse the difference between a PRDT having
"0 bytes" and a PRDT having "0 complete sectors."
When we receive an incomplete sector, inconsistent error checking
leads to an infinite loop wherein the call succeeds, but it
didn't give us enough bytes -- leading us to re-call the
DMA chain over and over again. This leads to, in the BMDMA case,
leaked memory for short PRDTs, and infinite loops and resource
usage in the AHCI case.
The .prepare_buf() callback is reworked to return the number of
bytes that it successfully prepared. 0 is a valid, non-error
answer that means the table was empty and described no bytes.
-1 indicates an error.
Our current implementation uses the io_buffer in IDEState to
ultimately describe the size of a prepared scatter-gather list.
Even though the AHCI PRDT/SGList can be as large as 256GiB, the
AHCI command header limits transactions to just 4GiB. ATA8-ACS3,
however, defines the largest transaction to be an LBA48 command
that transfers 65,536 sectors. With a 512 byte sector size, this
is just 32MiB.
Since our current state structures use the int type to describe
the size of the buffer, and this state is migrated as int32, we
are limited to describing 2GiB buffer sizes unless we change the
migration protocol.
For this reason, this patch begins to unify the assertions in the
IDE pathways that the scatter-gather list provided by either the
AHCI PRDT or the PCI BMDMA PRDs can only describe, at a maximum,
2GiB. This should be resilient enough unless we need a sector
size that exceeds 32KiB.
Further, the likelihood of any guest operating system actually
attempting to transfer this much data in a single operation is
very slim.
To this end, the IDEState variables have been updated to more
explicitly clarify our maximum supported size. Callers to the
prepare_buf callback have been reworked to understand the new
return code, and all versions of the prepare_buf callback have
been adjusted accordingly.
Lastly, the ahci_populate_sglist helper, relied upon by the
AHCI implementation of .prepare_buf() as well as the PCI
implementation of the callback have had overflow assertions
added to help make clear the reasonings behind the various
type changes.
[Added %d -> %"PRId64" fix John sent because off_pos changed from int to
int64_t.
--Stefan]
Signed-off-by: John Snow <[email protected]>
Reviewed-by: Paolo Bonzini <[email protected]>
Message-id: [email protected]
Signed-off-by: Stefan Hajnoczi <[email protected]> |
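The calling convention described above (negative return = error, 0 = empty PRDT, positive = bytes prepared) is the part callers must get right; a hedged sketch of a caller honouring it (start_transfer() and the surrounding shape are hypothetical, only the three-way check mirrors the commit):

static int dma_start_sketch(IDEDMA *dma, IDEState *s)
{
    int32_t prepared = dma->ops->prepare_buf(dma, s->io_buffer_size);

    if (prepared < 0) {
        return -1;              /* malformed PRDT: abort the transfer */
    }
    if (prepared == 0) {
        return 0;               /* empty table describes no bytes: done */
    }
    /* 0 < prepared: issue I/O for exactly that many bytes; a short
     * result no longer loops forever re-walking the same PRDT */
    return start_transfer(s, prepared);    /* hypothetical helper */
}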
static void read_revisions_from_stdin(struct rev_info *revs,
struct cmdline_pathspec *prune)
{
struct strbuf sb;
int seen_dashdash = 0;
int save_warning;
save_warning = warn_on_object_refname_ambiguity;
warn_on_object_refname_ambiguity = 0;
strbuf_init(&sb, 1000);
while (strbuf_getwholeline(&sb, stdin, '\n') != EOF) {
int len = sb.len;
if (len && sb.buf[len - 1] == '\n')
sb.buf[--len] = '\0';
if (!len)
break;
if (sb.buf[0] == '-') {
if (len == 2 && sb.buf[1] == '-') {
seen_dashdash = 1;
break;
}
die("options not supported in --stdin mode");
}
if (handle_revision_arg(sb.buf, revs, 0,
REVARG_CANNOT_BE_FILENAME))
die("bad revision '%s'", sb.buf);
}
if (seen_dashdash)
read_pathspec_from_stdin(revs, &sb, prune);
strbuf_release(&sb);
warn_on_object_refname_ambiguity = save_warning;
} | 0 | [
"CWE-119",
"CWE-787"
] | git | 34fa79a6cde56d6d428ab0d3160cb094ebad3305 | 293,814,719,858,528,400,000,000,000,000,000,000,000 | 34 | prefer memcpy to strcpy
When we already know the length of a string (e.g., because
we just malloc'd to fit it), it's nicer to use memcpy than
strcpy, as it makes it more obvious that we are not going to
overflow the buffer (because the size we pass matches the
size in the allocation).
This also eliminates calls to strcpy, which make auditing
the code base harder.
Signed-off-by: Jeff King <[email protected]>
Signed-off-by: Junio C Hamano <[email protected]> |
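The recommendation above is mechanical; a small self-contained example of the pattern (when the length was already computed for the allocation, copy with memcpy so the bound is explicit):

#include <stdlib.h>
#include <string.h>

static char *xstrdup_sketch(const char *src)
{
	size_t len = strlen(src);
	char *dst = malloc(len + 1);

	if (!dst)
		return NULL;
	memcpy(dst, src, len + 1);	/* includes the trailing NUL */
	return dst;
}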
static int _mysql_commit_txn(void *conn, const sasl_utils_t *utils)
{
return _mysql_exec(conn, "COMMIT", NULL, 0, NULL, utils);
} | 0 | [
"CWE-89"
] | cyrus-sasl | 9eff746c9daecbcc0041b09a5a51ba30738cdcbc | 288,480,641,635,596,180,000,000,000,000,000,000,000 | 4 | CVE-2022-24407 Escape password for SQL insert/update commands.
Signed-off-by: Klaus Espenlaub <[email protected]> |
static void ide_atapi_cmd_read_dma_cb(void *opaque, int ret)
{
IDEState *s = opaque;
int data_offset, n;
if (ret < 0) {
if (ide_handle_rw_error(s, -ret, ide_dma_cmd_to_retry(s->dma_cmd))) {
if (s->bus->error_status) {
s->bus->dma->aiocb = NULL;
return;
}
goto eot;
}
}
if (s->io_buffer_size > 0) {
/*
* For a cdrom read sector command (s->lba != -1),
* adjust the lba for the next s->io_buffer_size chunk
* and dma the current chunk.
* For a command != read (s->lba == -1), just transfer
* the reply data.
*/
if (s->lba != -1) {
if (s->cd_sector_size == 2352) {
n = 1;
cd_data_to_raw(s->io_buffer, s->lba);
} else {
n = s->io_buffer_size >> 11;
}
s->lba += n;
}
s->packet_transfer_size -= s->io_buffer_size;
if (s->bus->dma->ops->rw_buf(s->bus->dma, 1) == 0)
goto eot;
}
if (s->packet_transfer_size <= 0) {
s->status = READY_STAT | SEEK_STAT;
s->nsector = (s->nsector & ~7) | ATAPI_INT_REASON_IO | ATAPI_INT_REASON_CD;
ide_set_irq(s->bus);
goto eot;
}
s->io_buffer_index = 0;
if (s->cd_sector_size == 2352) {
n = 1;
s->io_buffer_size = s->cd_sector_size;
data_offset = 16;
} else {
n = s->packet_transfer_size >> 11;
if (n > (IDE_DMA_BUF_SECTORS / 4))
n = (IDE_DMA_BUF_SECTORS / 4);
s->io_buffer_size = n * 2048;
data_offset = 0;
}
trace_ide_atapi_cmd_read_dma_cb_aio(s, s->lba, n);
qemu_iovec_init_buf(&s->bus->dma->qiov, s->io_buffer + data_offset,
n * ATAPI_SECTOR_SIZE);
s->bus->dma->aiocb = ide_buffered_readv(s, (int64_t)s->lba << 2,
&s->bus->dma->qiov, n * 4,
ide_atapi_cmd_read_dma_cb, s);
return;
eot:
if (ret < 0) {
block_acct_failed(blk_get_stats(s->blk), &s->acct);
} else {
block_acct_done(blk_get_stats(s->blk), &s->acct);
}
ide_set_inactive(s, false);
} | 0 | [
"CWE-125"
] | qemu | 813212288970c39b1800f63e83ac6e96588095c6 | 87,769,128,997,591,080,000,000,000,000,000,000,000 | 73 | ide: atapi: assert that the buffer pointer is in range
A case was reported where s->io_buffer_index can be out of range.
The report skimped on the details but it seems to be triggered
by s->lba == -1 on the READ/READ CD paths (e.g. by sending an
ATAPI command with LBA = 0xFFFFFFFF). For now paper over it
with assertions. The first one ensures that there is no overflow
when incrementing s->io_buffer_index, the second checks for the
buffer overrun.
Note that the buffer overrun is only a read, so I am not sure
if the assertion failure is actually less harmful than the overrun.
Signed-off-by: Paolo Bonzini <[email protected]>
Message-id: [email protected]
Reviewed-by: Kevin Wolf <[email protected]>
Signed-off-by: Peter Maydell <[email protected]> |
aucmd_restbuf(
aco_save_T *aco) // structure holding saved values
{
int dummy;
win_T *save_curwin;
if (aco->use_aucmd_win)
{
--curbuf->b_nwindows;
// Find "aucmd_win", it can't be closed, but it may be in another tab
// page. Do not trigger autocommands here.
block_autocmds();
if (curwin != aucmd_win)
{
tabpage_T *tp;
win_T *wp;
FOR_ALL_TAB_WINDOWS(tp, wp)
{
if (wp == aucmd_win)
{
if (tp != curtab)
goto_tabpage_tp(tp, TRUE, TRUE);
win_goto(aucmd_win);
goto win_found;
}
}
}
win_found:
// Remove the window and frame from the tree of frames.
(void)winframe_remove(curwin, &dummy, NULL);
win_remove(curwin, NULL);
aucmd_win_used = FALSE;
last_status(FALSE); // may need to remove last status line
if (!valid_tabpage_win(curtab))
// no valid window in current tabpage
close_tabpage(curtab);
restore_snapshot(SNAP_AUCMD_IDX, FALSE);
(void)win_comp_pos(); // recompute window positions
unblock_autocmds();
save_curwin = win_find_by_id(aco->save_curwin_id);
if (save_curwin != NULL)
curwin = save_curwin;
else
// Hmm, original window disappeared. Just use the first one.
curwin = firstwin;
curbuf = curwin->w_buffer;
#ifdef FEAT_JOB_CHANNEL
// May need to restore insert mode for a prompt buffer.
entering_window(curwin);
#endif
prevwin = win_find_by_id(aco->save_prevwin_id);
#ifdef FEAT_EVAL
vars_clear(&aucmd_win->w_vars->dv_hashtab); // free all w: variables
hash_init(&aucmd_win->w_vars->dv_hashtab); // re-use the hashtab
#endif
vim_free(globaldir);
globaldir = aco->globaldir;
// the buffer contents may have changed
check_cursor();
if (curwin->w_topline > curbuf->b_ml.ml_line_count)
{
curwin->w_topline = curbuf->b_ml.ml_line_count;
#ifdef FEAT_DIFF
curwin->w_topfill = 0;
#endif
}
#if defined(FEAT_GUI)
if (gui.in_use)
{
// Hide the scrollbars from the aucmd_win and update.
gui_mch_enable_scrollbar(
&aucmd_win->w_scrollbars[SBAR_LEFT], FALSE);
gui_mch_enable_scrollbar(
&aucmd_win->w_scrollbars[SBAR_RIGHT], FALSE);
gui_may_update_scrollbars();
}
#endif
}
else
{
// Restore curwin. Use the window ID, a window may have been closed
// and the memory re-used for another one.
save_curwin = win_find_by_id(aco->save_curwin_id);
if (save_curwin != NULL)
{
// Restore the buffer which was previously edited by curwin, if
// it was changed, we are still the same window and the buffer is
// valid.
if (curwin->w_id == aco->new_curwin_id
&& curbuf != aco->new_curbuf.br_buf
&& bufref_valid(&aco->new_curbuf)
&& aco->new_curbuf.br_buf->b_ml.ml_mfp != NULL)
{
# if defined(FEAT_SYN_HL) || defined(FEAT_SPELL)
if (curwin->w_s == &curbuf->b_s)
curwin->w_s = &aco->new_curbuf.br_buf->b_s;
# endif
--curbuf->b_nwindows;
curbuf = aco->new_curbuf.br_buf;
curwin->w_buffer = curbuf;
++curbuf->b_nwindows;
}
curwin = save_curwin;
curbuf = curwin->w_buffer;
prevwin = win_find_by_id(aco->save_prevwin_id);
// In case the autocommand moves the cursor to a position that
// does not exist in curbuf.
check_cursor();
}
}
check_cursor(); // just in case lines got deleted
VIsual_active = aco->save_VIsual_active;
if (VIsual_active)
check_pos(curbuf, &VIsual);
} | 0 | [
"CWE-122",
"CWE-787"
] | vim | 5fa9f23a63651a8abdb074b4fc2ec9b1adc6b089 | 294,349,244,149,800,900,000,000,000,000,000,000,000 | 123 | patch 9.0.0061: ml_get error with nested autocommand
Problem: ml_get error with nested autocommand.
Solution: Also check line numbers for a nested autocommand. (closes #10761) |
static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
struct ib_qp_attr qp_attr;
int qp_attr_mask, ret;
qp_attr.qp_state = IB_QPS_INIT;
ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
if (ret)
return ret;
return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
} | 0 | [
"CWE-416"
] | linux | bc0bdc5afaa740d782fbf936aaeebd65e5c2921d | 175,073,478,322,982,600,000,000,000,000,000,000,000 | 12 | RDMA/cma: Do not change route.addr.src_addr.ss_family
If the state is not idle then rdma_bind_addr() will immediately fail and
no change to global state should happen.
For instance if the state is already RDMA_CM_LISTEN then this will corrupt
the src_addr and would cause the test in cma_cancel_operation():
if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev)
To view a mangled src_addr, eg with a IPv6 loopback address but an IPv4
family, failing the test.
This would manifest as this trace from syzkaller:
BUG: KASAN: use-after-free in __list_add_valid+0x93/0xa0 lib/list_debug.c:26
Read of size 8 at addr ffff8881546491e0 by task syz-executor.1/32204
CPU: 1 PID: 32204 Comm: syz-executor.1 Not tainted 5.12.0-rc8-syzkaller #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
Call Trace:
__dump_stack lib/dump_stack.c:79 [inline]
dump_stack+0x141/0x1d7 lib/dump_stack.c:120
print_address_description.constprop.0.cold+0x5b/0x2f8 mm/kasan/report.c:232
__kasan_report mm/kasan/report.c:399 [inline]
kasan_report.cold+0x7c/0xd8 mm/kasan/report.c:416
__list_add_valid+0x93/0xa0 lib/list_debug.c:26
__list_add include/linux/list.h:67 [inline]
list_add_tail include/linux/list.h:100 [inline]
cma_listen_on_all drivers/infiniband/core/cma.c:2557 [inline]
rdma_listen+0x787/0xe00 drivers/infiniband/core/cma.c:3751
ucma_listen+0x16a/0x210 drivers/infiniband/core/ucma.c:1102
ucma_write+0x259/0x350 drivers/infiniband/core/ucma.c:1732
vfs_write+0x28e/0xa30 fs/read_write.c:603
ksys_write+0x1ee/0x250 fs/read_write.c:658
do_syscall_64+0x2d/0x70 arch/x86/entry/common.c:46
entry_SYSCALL_64_after_hwframe+0x44/0xae
Which is indicating that an rdma_id_private was destroyed without doing
cma_cancel_listens().
Instead of trying to re-use the src_addr memory to indirectly create an
any address build one explicitly on the stack and bind to that as any
other normal flow would do.
Link: https://lore.kernel.org/r/[email protected]
Cc: [email protected]
Fixes: 732d41c545bb ("RDMA/cma: Make the locking for automatic state transition more clear")
Reported-by: [email protected]
Tested-by: Hao Sun <[email protected]>
Reviewed-by: Leon Romanovsky <[email protected]>
Signed-off-by: Jason Gunthorpe <[email protected]> |
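A hedged sketch of the approach the fix takes inside rdma_listen() (abbreviated; the IPv6 case and error paths are omitted, and the placement is approximate): when the id is still idle, bind to a wildcard address built on the stack instead of rewriting route.addr.src_addr in place, so a failed bind cannot leave a mangled address with the wrong family behind.

	if (id_priv->state == RDMA_CM_IDLE) {
		struct sockaddr_in any_in = {
			.sin_family = AF_INET,
			.sin_addr.s_addr = htonl(INADDR_ANY),
		};

		/* bind to a stack-local wildcard; the id's stored src_addr
		 * is only updated if the bind actually succeeds */
		ret = rdma_bind_addr(id, (struct sockaddr *)&any_in);
		if (ret)
			return ret;
	}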
static void shrink_ple_window(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
struct vmcb_control_area *control = &svm->vmcb->control;
int old = control->pause_filter_count;
control->pause_filter_count =
__shrink_ple_window(old,
pause_filter_count,
pause_filter_count_shrink,
pause_filter_count);
if (control->pause_filter_count != old) {
vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
trace_kvm_ple_window_update(vcpu->vcpu_id,
control->pause_filter_count, old);
}
} | 0 | [
"CWE-862"
] | kvm | 0f923e07124df069ba68d8bb12324398f4b6b709 | 234,562,199,489,655,430,000,000,000,000,000,000,000 | 17 | KVM: nSVM: avoid picking up unsupported bits from L2 in int_ctl (CVE-2021-3653)
* Invert the mask of bits that we pick from L2 in
nested_vmcb02_prepare_control
* Invert and explicitly use VIRQ related bits bitmask in svm_clear_vintr
This fixes a security issue that allowed a malicious L1 to run L2 with
AVIC enabled, which allowed the L2 to exploit the uninitialized and enabled
AVIC to read/write the host physical memory at some offsets.
Fixes: 3d6368ef580a ("KVM: SVM: Add VMRUN handler")
Signed-off-by: Maxim Levitsky <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
static int setup_netfront_split(struct netfront_queue *queue)
{
int err;
err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
if (err < 0)
goto fail;
err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->rx_evtchn);
if (err < 0)
goto alloc_rx_evtchn_fail;
snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
"%s-tx", queue->name);
err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
xennet_tx_interrupt, 0,
queue->tx_irq_name, queue);
if (err < 0)
goto bind_tx_fail;
queue->tx_irq = err;
snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
"%s-rx", queue->name);
err = bind_evtchn_to_irqhandler_lateeoi(queue->rx_evtchn,
xennet_rx_interrupt, 0,
queue->rx_irq_name, queue);
if (err < 0)
goto bind_rx_fail;
queue->rx_irq = err;
return 0;
bind_rx_fail:
unbind_from_irqhandler(queue->tx_irq, queue);
queue->tx_irq = 0;
bind_tx_fail:
xenbus_free_evtchn(queue->info->xbdev, queue->rx_evtchn);
queue->rx_evtchn = 0;
alloc_rx_evtchn_fail:
xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
queue->tx_evtchn = 0;
fail:
return err;
} | 0 | [] | linux | f63c2c2032c2e3caad9add3b82cc6e91c376fd26 | 62,020,820,702,061,740,000,000,000,000,000,000,000 | 43 | xen-netfront: restore __skb_queue_tail() positioning in xennet_get_responses()
The commit referenced below moved the invocation past the "next" label,
without any explanation. In fact this allows misbehaving backends undue
control over the domain the frontend runs in, as earlier detected errors
require the skb to not be freed (it may be retained for later processing
via xennet_move_rx_slot(), or it may simply be unsafe to have it freed).
This is CVE-2022-33743 / XSA-405.
Fixes: 6c5aa6fc4def ("xen networking: add basic XDP support for xen-netfront")
Signed-off-by: Jan Beulich <[email protected]>
Reviewed-by: Juergen Gross <[email protected]>
Signed-off-by: Juergen Gross <[email protected]> |
VAR* var_from_env(const char *name, const char *def_val)
{
const char *tmp;
VAR *v;
if (!(tmp = getenv(name)))
tmp = def_val;
v = var_init(0, name, strlen(name), tmp, strlen(tmp));
my_hash_insert(&var_hash, (uchar*)v);
return v;
} | 0 | [
"CWE-284",
"CWE-295"
] | mysql-server | 3bd5589e1a5a93f9c224badf983cd65c45215390 | 104,422,058,768,856,210,000,000,000,000,000,000,000 | 11 | WL#6791 : Redefine client --ssl option to imply enforced encryption
# Changed the meaning of the --ssl=1 option of all client binaries
to mean force ssl, not try ssl and fail over to eunecrypted
# Added a new MYSQL_OPT_SSL_ENFORCE mysql_options()
option to specify that an ssl connection is required.
# Added a new macro SSL_SET_OPTIONS() to the client
SSL handling headers that sets all the relevant SSL options at
once.
# Revamped all of the current native clients to use the new macro
# Removed some Windows line endings.
# Added proper handling of the new option into the ssl helper
headers.
# If SSL is mandatory assume that the media is secure enough
for the sha256 plugin to do unencrypted password exchange even
before establishing a connection.
# Set the default ssl cipher to DHE-RSA-AES256-SHA if none is
specified.
# updated test cases that require a non-default cipher to spawn
a mysql command line tool binary since mysqltest has no support
for specifying ciphers.
# updated the replication slave connection code to always enforce
SSL if any of the SSL config options is present.
# test cases added and updated.
# added a mysql_get_option() API to return mysql_options()
values. Used the new API inside the sha256 plugin.
# Fixed compilation warnings because of unused variables.
# Fixed test failures (mysql_ssl and bug13115401)
# Fixed whitespace issues.
# Fully implemented the mysql_get_option() function.
# Added a test case for mysql_get_option()
# fixed some trailing whitespace issues
# fixed some uint/int warnings in mysql_client_test.c
# removed shared memory option from non-windows get_options
tests
# moved MYSQL_OPT_LOCAL_INFILE to the uint options |
static int build_audio_procunit(struct mixer_build *state, int unitid,
void *raw_desc, struct procunit_info *list,
char *name)
{
struct uac_processing_unit_descriptor *desc = raw_desc;
int num_ins = desc->bNrInPins;
struct usb_mixer_elem_info *cval;
struct snd_kcontrol *kctl;
int i, err, nameid, type, len;
struct procunit_info *info;
struct procunit_value_info *valinfo;
const struct usbmix_name_map *map;
static struct procunit_value_info default_value_info[] = {
{ 0x01, "Switch", USB_MIXER_BOOLEAN },
{ 0 }
};
static struct procunit_info default_info = {
0, NULL, default_value_info
};
if (desc->bLength < 13 || desc->bLength < 13 + num_ins ||
desc->bLength < num_ins + uac_processing_unit_bControlSize(desc, state->mixer->protocol)) {
usb_audio_err(state->chip, "invalid %s descriptor (id %d)\n", name, unitid);
return -EINVAL;
}
for (i = 0; i < num_ins; i++) {
err = parse_audio_unit(state, desc->baSourceID[i]);
if (err < 0)
return err;
}
type = le16_to_cpu(desc->wProcessType);
for (info = list; info && info->type; info++)
if (info->type == type)
break;
if (!info || !info->type)
info = &default_info;
for (valinfo = info->values; valinfo->control; valinfo++) {
__u8 *controls = uac_processing_unit_bmControls(desc, state->mixer->protocol);
if (state->mixer->protocol == UAC_VERSION_1) {
if (!(controls[valinfo->control / 8] &
(1 << ((valinfo->control % 8) - 1))))
continue;
} else { /* UAC_VERSION_2/3 */
if (!uac_v2v3_control_is_readable(controls[valinfo->control / 8],
valinfo->control))
continue;
}
map = find_map(state->map, unitid, valinfo->control);
if (check_ignored_ctl(map))
continue;
cval = kzalloc(sizeof(*cval), GFP_KERNEL);
if (!cval)
return -ENOMEM;
snd_usb_mixer_elem_init_std(&cval->head, state->mixer, unitid);
cval->control = valinfo->control;
cval->val_type = valinfo->val_type;
cval->channels = 1;
if (state->mixer->protocol > UAC_VERSION_1 &&
!uac_v2v3_control_is_writeable(controls[valinfo->control / 8],
valinfo->control))
cval->master_readonly = 1;
/* get min/max values */
switch (type) {
case UAC_PROCESS_UP_DOWNMIX: {
bool mode_sel = false;
switch (state->mixer->protocol) {
case UAC_VERSION_1:
case UAC_VERSION_2:
default:
if (cval->control == UAC_UD_MODE_SELECT)
mode_sel = true;
break;
case UAC_VERSION_3:
if (cval->control == UAC3_UD_MODE_SELECT)
mode_sel = true;
break;
}
if (mode_sel) {
__u8 *control_spec = uac_processing_unit_specific(desc,
state->mixer->protocol);
cval->min = 1;
cval->max = control_spec[0];
cval->res = 1;
cval->initialized = 1;
break;
}
get_min_max(cval, valinfo->min_value);
break;
}
case USB_XU_CLOCK_RATE:
/*
* E-Mu USB 0404/0202/TrackerPre/0204
* samplerate control quirk
*/
cval->min = 0;
cval->max = 5;
cval->res = 1;
cval->initialized = 1;
break;
default:
get_min_max(cval, valinfo->min_value);
break;
}
kctl = snd_ctl_new1(&mixer_procunit_ctl, cval);
if (!kctl) {
kfree(cval);
return -ENOMEM;
}
kctl->private_free = snd_usb_mixer_elem_free;
if (check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name))) {
/* nothing */ ;
} else if (info->name) {
strlcpy(kctl->id.name, info->name, sizeof(kctl->id.name));
} else {
nameid = uac_processing_unit_iProcessing(desc, state->mixer->protocol);
len = 0;
if (nameid)
len = snd_usb_copy_string_desc(state->chip,
nameid,
kctl->id.name,
sizeof(kctl->id.name));
if (!len)
strlcpy(kctl->id.name, name, sizeof(kctl->id.name));
}
append_ctl_name(kctl, " ");
append_ctl_name(kctl, valinfo->suffix);
usb_audio_dbg(state->chip,
"[%d] PU [%s] ch = %d, val = %d/%d\n",
cval->head.id, kctl->id.name, cval->channels,
cval->min, cval->max);
err = snd_usb_mixer_add_control(&cval->head, kctl);
if (err < 0)
return err;
}
return 0;
} | 1 | [
"CWE-125"
] | linux | f4351a199cc120ff9d59e06d02e8657d08e6cc46 | 167,516,714,831,142,730,000,000,000,000,000,000,000 | 150 | ALSA: usb-audio: Avoid access before bLength check in build_audio_procunit()
The parser for the processing unit reads bNrInPins field before the
bLength sanity check, which may lead to an out-of-bound access when a
malformed descriptor is given. Fix it by assignment after the bLength
check.
Cc: <[email protected]>
Signed-off-by: Takashi Iwai <[email protected]> |
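A hedged sketch of the reordered prologue the fix describes for the function above (abbreviated to the part that matters): the descriptor's bLength is validated before bNrInPins is read from the raw descriptor, so the later length checks never operate on a value taken from beyond the descriptor's fixed part.

	struct uac_processing_unit_descriptor *desc = raw_desc;
	int num_ins;

	if (desc->bLength < 13) {	/* fixed part must be present first */
		usb_audio_err(state->chip,
			      "invalid %s descriptor (id %d)\n", name, unitid);
		return -EINVAL;
	}
	num_ins = desc->bNrInPins;	/* safe to read only after the check */
	if (desc->bLength < 13 + num_ins ||
	    desc->bLength < num_ins + uac_processing_unit_bControlSize(desc,
					state->mixer->protocol)) {
		usb_audio_err(state->chip,
			      "invalid %s descriptor (id %d)\n", name, unitid);
		return -EINVAL;
	}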
int unit_stop(Unit *u) {
UnitActiveState state;
Unit *following;
assert(u);
state = unit_active_state(u);
if (UNIT_IS_INACTIVE_OR_FAILED(state))
return -EALREADY;
if ((following = unit_following(u))) {
log_debug_unit(u->id, "Redirecting stop request from %s to %s.",
u->id, following->id);
return unit_stop(following);
}
unit_status_log_starting_stopping_reloading(u, JOB_STOP);
unit_status_print_starting_stopping(u, JOB_STOP);
if (!UNIT_VTABLE(u)->stop)
return -EBADR;
unit_add_to_dbus_queue(u);
return UNIT_VTABLE(u)->stop(u);
} | 0 | [] | systemd | 5ba6985b6c8ef85a8bcfeb1b65239c863436e75b | 177,764,762,867,379,280,000,000,000,000,000,000,000 | 26 | core: allow PIDs to be watched by two units at the same time
In some cases it is interesting to map a PID to two units at the same
time. For example, when a user logs in via a getty, which is reexeced to
/sbin/login that binary will be explicitly referenced as main pid of the
getty service, as well as implicitly referenced as part of the session
scope. |
bool Predicant_to_list_comparator::alloc_comparators(THD *thd, uint nargs)
{
size_t nbytes= sizeof(Predicant_to_value_comparator) * nargs;
if (!(m_comparators= (Predicant_to_value_comparator *) thd->alloc(nbytes)))
return true;
memset(m_comparators, 0, nbytes);
return false;
} | 0 | [
"CWE-617"
] | server | 807945f2eb5fa22e6f233cc17b85a2e141efe2c8 | 125,821,132,308,218,370,000,000,000,000,000,000,000 | 8 | MDEV-26402: A SEGV in Item_field::used_tables/update_depend_map_for_order...
When doing condition pushdown from HAVING into WHERE,
Item_equal::create_pushable_equalities() calls
item->set_extraction_flag(IMMUTABLE_FL) for constant items.
Then, Item::cleanup_excluding_immutables_processor() checks for this flag
to see if it should call item->cleanup() or leave the item as-is.
The failure happens when a constant item has a non-constant one inside it,
like:
(tbl.col=0 AND impossible_cond)
item->walk(cleanup_excluding_immutables_processor) works in a bottom-up
way so it
1. will call Item_func_eq(tbl.col=0)->cleanup()
2. will not call Item_cond_and->cleanup (as the AND is constant)
This creates an item tree where a fixed Item has an un-fixed Item inside
it which eventually causes an assertion failure.
Fixed by introducing this rule: instead of just calling
item->set_extraction_flag(IMMUTABLE_FL);
we call Item::walk() to set the flag for all sub-items of the item. |
static void flat_print_int(WriterContext *wctx, const char *key, long long int value)
{
printf("%s%s=%lld\n", wctx->section_pbuf[wctx->level].str, key, value);
} | 0 | [
"CWE-476"
] | FFmpeg | 837cb4325b712ff1aab531bf41668933f61d75d2 | 332,170,398,768,729,700,000,000,000,000,000,000,000 | 4 | ffprobe: Fix null pointer dereference with color primaries
Found-by: AD-lab of venustech
Signed-off-by: Michael Niedermayer <[email protected]> |
static RzList *entries(RzBinFile *bf) {
if (!bf) {
return NULL;
}
LuacBinInfo *bin_info_obj = GET_INTERNAL_BIN_INFO_OBJ(bf);
if (!bin_info_obj) {
return NULL;
}
return bin_info_obj->entry_list;
} | 1 | [
"CWE-200",
"CWE-787"
] | rizin | 05bbd147caccc60162d6fba9baaaf24befa281cd | 3,339,740,345,189,327,600,000,000,000,000,000,000 | 11 | Fix oob read on _luac_build_info and luac memleaks |
set_bound_node_opt_info(OptNode* opt, MinMaxLen* plen)
{
mml_copy(&(opt->sb.mm), plen);
mml_copy(&(opt->spr.mm), plen);
mml_copy(&(opt->map.mm), plen);
} | 0 | [
"CWE-787"
] | oniguruma | cbe9f8bd9cfc6c3c87a60fbae58fa1a85db59df0 | 73,436,386,748,309,580,000,000,000,000,000,000,000 | 6 | #207: Out-of-bounds write |
MagickExport Image *SeparateImage(const Image *image,
const ChannelType channel_type,ExceptionInfo *exception)
{
#define GetChannelBit(mask,bit) (((size_t) (mask) >> (size_t) (bit)) & 0x01)
#define SeparateImageTag "Separate/Image"
CacheView
*image_view,
*separate_view;
Image
*separate_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
/*
Initialize separate image attributes.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
separate_image=CloneImage(image,0,0,MagickTrue,exception);
if (separate_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(separate_image,DirectClass,exception) == MagickFalse)
{
separate_image=DestroyImage(separate_image);
return((Image *) NULL);
}
separate_image->alpha_trait=UndefinedPixelTrait;
(void) SetImageColorspace(separate_image,GRAYColorspace,exception);
separate_image->gamma=image->gamma;
/*
Separate image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
separate_view=AcquireAuthenticCacheView(separate_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(separate_view,0,y,separate_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
SetPixelChannel(separate_image,GrayPixelChannel,(Quantum) 0,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits == UndefinedPixelTrait) ||
(GetChannelBit(channel_type,channel) == 0))
continue;
SetPixelChannel(separate_image,GrayPixelChannel,p[i],q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(separate_image);
}
if (SyncCacheViewAuthenticPixels(separate_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,SeparateImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
separate_view=DestroyCacheView(separate_view);
image_view=DestroyCacheView(image_view);
(void) SetImageChannelMask(separate_image,DefaultChannels);
if (status == MagickFalse)
separate_image=DestroyImage(separate_image);
return(separate_image);
} | 0 | [
"CWE-416"
] | ImageMagick | a47e7a994766b92b10d4a87df8c1c890c8b170f3 | 149,180,868,841,131,060,000,000,000,000,000,000,000 | 115 | https://github.com/ImageMagick/ImageMagick/issues/1724 |
static inline void SetPixelL(const Image *restrict image,const Quantum L,
Quantum *restrict pixel)
{
if (image->channel_map[LPixelChannel].traits != UndefinedPixelTrait)
pixel[image->channel_map[LPixelChannel].offset]=L;
} | 0 | [
"CWE-119",
"CWE-787"
] | ImageMagick | 450bd716ed3b9186dd10f9e60f630a3d9eeea2a4 | 233,418,085,121,755,420,000,000,000,000,000,000,000 | 6 | |
find_link_ref(struct link_ref **references, uint8_t *name, size_t length)
{
unsigned int hash = hash_link_ref(name, length);
struct link_ref *ref = NULL;
ref = references[hash % REF_TABLE_SIZE];
while (ref != NULL) {
if (ref->id == hash)
return ref;
ref = ref->next;
}
return NULL;
} | 0 | [] | redcarpet | e5a10516d07114d582d13b9125b733008c61c242 | 318,179,644,235,667,740,000,000,000,000,000,000,000 | 16 | Avoid rewinding previous inline when auto-linking
When a bit like "[email protected]" is processed, first the emphasis is
rendered, then the 1 is output verbatim. When the `@` is encountered,
Redcarpet tries to find the "local part" of the address and stops when
it encounters an invalid char (i.e. here the `!`).
The problem is that when it searches for the local part, Redcarpet
rewinds the characters but here, the emphasis is already rendered so
the previous HTML tag is rewinded as well and is not correctly closed. |
static void netif_free_rx_queues(struct net_device *dev)
{
unsigned int i, count = dev->num_rx_queues;
/* netif_alloc_rx_queues alloc failed, resources have been unreg'ed */
if (!dev->_rx)
return;
for (i = 0; i < count; i++)
xdp_rxq_info_unreg(&dev->_rx[i].xdp_rxq);
	kvfree(dev->_rx);
} | 0 | [
"CWE-416"
] | linux | a4270d6795b0580287453ea55974d948393e66ef | 63,074,186,095,752,350,000,000,000,000,000,000,000 | 13 | net-gro: fix use-after-free read in napi_gro_frags()
If a network driver provides to napi_gro_frags() an
skb with a page fragment of exactly 14 bytes, the call
to gro_pull_from_frag0() will 'consume' the fragment
by calling skb_frag_unref(skb, 0), and the page might
be freed and reused.
Reading eth->h_proto at the end of napi_frags_skb() might
read mangled data, or crash under specific debugging features.
BUG: KASAN: use-after-free in napi_frags_skb net/core/dev.c:5833 [inline]
BUG: KASAN: use-after-free in napi_gro_frags+0xc6f/0xd10 net/core/dev.c:5841
Read of size 2 at addr ffff88809366840c by task syz-executor599/8957
CPU: 1 PID: 8957 Comm: syz-executor599 Not tainted 5.2.0-rc1+ #32
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
Call Trace:
__dump_stack lib/dump_stack.c:77 [inline]
dump_stack+0x172/0x1f0 lib/dump_stack.c:113
print_address_description.cold+0x7c/0x20d mm/kasan/report.c:188
__kasan_report.cold+0x1b/0x40 mm/kasan/report.c:317
kasan_report+0x12/0x20 mm/kasan/common.c:614
__asan_report_load_n_noabort+0xf/0x20 mm/kasan/generic_report.c:142
napi_frags_skb net/core/dev.c:5833 [inline]
napi_gro_frags+0xc6f/0xd10 net/core/dev.c:5841
tun_get_user+0x2f3c/0x3ff0 drivers/net/tun.c:1991
tun_chr_write_iter+0xbd/0x156 drivers/net/tun.c:2037
call_write_iter include/linux/fs.h:1872 [inline]
do_iter_readv_writev+0x5f8/0x8f0 fs/read_write.c:693
do_iter_write fs/read_write.c:970 [inline]
do_iter_write+0x184/0x610 fs/read_write.c:951
vfs_writev+0x1b3/0x2f0 fs/read_write.c:1015
do_writev+0x15b/0x330 fs/read_write.c:1058
Fixes: a50e233c50db ("net-gro: restore frag0 optimization")
Signed-off-by: Eric Dumazet <[email protected]>
Reported-by: syzbot <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
clear_hostname_list ()
{
register int i;
if (hostname_list_initialized == 0)
return;
for (i = 0; i < hostname_list_length; i++)
free (hostname_list[i]);
hostname_list_length = hostname_list_initialized = 0;
} | 0 | [
"CWE-20"
] | bash | 4f747edc625815f449048579f6e65869914dd715 | 152,859,667,815,408,580,000,000,000,000,000,000,000 | 10 | Bash-4.4 patch 7 |
select_entry_guard_for_circuit(guard_selection_t *gs,
guard_usage_t usage,
const entry_guard_restriction_t *rst,
unsigned *state_out)
{
const int need_descriptor = (usage == GUARD_USAGE_TRAFFIC);
tor_assert(gs);
tor_assert(state_out);
if (!gs->primary_guards_up_to_date)
entry_guards_update_primary(gs);
int num_entry_guards = get_n_primary_guards_to_use(usage);
smartlist_t *usable_primary_guards = smartlist_new();
/* "If any entry in PRIMARY_GUARDS has {is_reachable} status of
<maybe> or <yes>, return the first such guard." */
SMARTLIST_FOREACH_BEGIN(gs->primary_entry_guards, entry_guard_t *, guard) {
entry_guard_consider_retry(guard);
if (! entry_guard_obeys_restriction(guard, rst))
continue;
if (guard->is_reachable != GUARD_REACHABLE_NO) {
if (need_descriptor && !guard_has_descriptor(guard)) {
continue;
}
*state_out = GUARD_CIRC_STATE_USABLE_ON_COMPLETION;
guard->last_tried_to_connect = approx_time();
smartlist_add(usable_primary_guards, guard);
if (smartlist_len(usable_primary_guards) >= num_entry_guards)
break;
}
} SMARTLIST_FOREACH_END(guard);
if (smartlist_len(usable_primary_guards)) {
entry_guard_t *guard = smartlist_choose(usable_primary_guards);
smartlist_free(usable_primary_guards);
log_info(LD_GUARD, "Selected primary guard %s for circuit.",
entry_guard_describe(guard));
return guard;
}
smartlist_free(usable_primary_guards);
/* "Otherwise, if the ordered intersection of {CONFIRMED_GUARDS}
and {USABLE_FILTERED_GUARDS} is nonempty, return the first
entry in that intersection that has {is_pending} set to
false." */
SMARTLIST_FOREACH_BEGIN(gs->confirmed_entry_guards, entry_guard_t *, guard) {
if (guard->is_primary)
continue; /* we already considered this one. */
if (! entry_guard_obeys_restriction(guard, rst))
continue;
entry_guard_consider_retry(guard);
if (guard->is_usable_filtered_guard && ! guard->is_pending) {
if (need_descriptor && !guard_has_descriptor(guard))
continue; /* not a bug */
guard->is_pending = 1;
guard->last_tried_to_connect = approx_time();
*state_out = GUARD_CIRC_STATE_USABLE_IF_NO_BETTER_GUARD;
log_info(LD_GUARD, "No primary guards available. Selected confirmed "
"guard %s for circuit. Will try other guards before using "
"this circuit.",
entry_guard_describe(guard));
return guard;
}
} SMARTLIST_FOREACH_END(guard);
/* "Otherwise, if there is no such entry, select a member at
random from {USABLE_FILTERED_GUARDS}." */
{
entry_guard_t *guard;
unsigned flags = 0;
if (need_descriptor)
flags |= SAMPLE_EXCLUDE_NO_DESCRIPTOR;
guard = sample_reachable_filtered_entry_guards(gs,
rst,
SAMPLE_EXCLUDE_CONFIRMED |
SAMPLE_EXCLUDE_PRIMARY |
SAMPLE_EXCLUDE_PENDING |
flags);
if (guard == NULL) {
log_info(LD_GUARD, "Absolutely no sampled guards were available. "
"Marking all guards for retry and starting from top again.");
mark_all_guards_maybe_reachable(gs);
return NULL;
}
guard->is_pending = 1;
guard->last_tried_to_connect = approx_time();
*state_out = GUARD_CIRC_STATE_USABLE_IF_NO_BETTER_GUARD;
log_info(LD_GUARD, "No primary or confirmed guards available. Selected "
"random guard %s for circuit. Will try other guards before "
"using this circuit.",
entry_guard_describe(guard));
return guard;
}
} | 0 | [
"CWE-200"
] | tor | 665baf5ed5c6186d973c46cdea165c0548027350 | 161,264,033,072,488,800,000,000,000,000,000,000,000 | 95 | Consider the exit family when applying guard restrictions.
When the new path selection logic went into place, I accidentally
dropped the code that considered the _family_ of the exit node when
deciding if the guard was usable, and we didn't catch that during
code review.
This patch makes the guard_restriction_t code consider the exit
family as well, and adds some (hopefully redundant) checks for the
case where we lack a node_t for a guard but we have a bridge_info_t
for it.
Fixes bug 22753; bugfix on 0.3.0.1-alpha. Tracked as TROVE-2016-006
and CVE-2017-0377. |
static inline bool ext4_encrypted_inode(struct inode *inode)
{
	return ext4_test_inode_flag(inode, EXT4_INODE_ENCRYPT);
} | 0 | [
"CWE-787"
] | linux | c37e9e013469521d9adb932d17a1795c139b36db | 145,149,920,984,093,560,000,000,000,000,000,000,000 | 4 | ext4: add more inode number paranoia checks
If there is a directory entry pointing to a system inode (such as a
journal inode), complain and declare the file system to be corrupted.
Also, if the superblock's first inode number field is too small,
refuse to mount the file system.
This addresses CVE-2018-10882.
https://bugzilla.kernel.org/show_bug.cgi?id=200069
Signed-off-by: Theodore Ts'o <[email protected]>
Cc: [email protected] |
~CImgDisplay() {
assign();
delete[] _keys;
delete[] _released_keys;
} | 0 | [
"CWE-770"
] | cimg | 619cb58dd90b4e03ac68286c70ed98acbefd1c90 | 88,041,028,270,659,200,000,000,000,000,000,000,000 | 5 | CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size. |
static void io_rsrc_node_switch(struct io_ring_ctx *ctx,
struct io_rsrc_data *data_to_kill)
{
WARN_ON_ONCE(!ctx->rsrc_backup_node);
WARN_ON_ONCE(data_to_kill && !ctx->rsrc_node);
if (data_to_kill) {
struct io_rsrc_node *rsrc_node = ctx->rsrc_node;
rsrc_node->rsrc_data = data_to_kill;
io_rsrc_ref_lock(ctx);
list_add_tail(&rsrc_node->node, &ctx->rsrc_ref_list);
io_rsrc_ref_unlock(ctx);
atomic_inc(&data_to_kill->refs);
percpu_ref_kill(&rsrc_node->refs);
ctx->rsrc_node = NULL;
}
if (!ctx->rsrc_node) {
ctx->rsrc_node = ctx->rsrc_backup_node;
ctx->rsrc_backup_node = NULL;
	}
} | 0 | [
"CWE-787"
] | linux | d1f82808877bb10d3deee7cf3374a4eb3fb582db | 252,102,489,337,426,800,000,000,000,000,000,000,000 | 24 | io_uring: truncate lengths larger than MAX_RW_COUNT on provide buffers
Read and write operations are capped to MAX_RW_COUNT. Some read ops rely on
that limit, and that is not guaranteed by the IORING_OP_PROVIDE_BUFFERS.
Truncate those lengths when doing io_add_buffers, so buffer addresses still
use the uncapped length.
Also, take the chance and change struct io_buffer len member to __u32, so
it matches struct io_provide_buffer len member.
This fixes CVE-2021-3491, also reported as ZDI-CAN-13546.
Fixes: ddf0322db79c ("io_uring: add IORING_OP_PROVIDE_BUFFERS")
Reported-by: Billy Jheng Bing-Jhong (@st424204)
Signed-off-by: Thadeu Lima de Souza Cascardo <[email protected]>
Signed-off-by: Jens Axboe <[email protected]> |
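A hedged, user-space sketch of the truncation the message describes. The cap is reproduced here only for illustration (in the kernel MAX_RW_COUNT is INT_MAX rounded down to a page boundary), and the function name is not the kernel's:

#include <stdint.h>

#define SKETCH_PAGE_SIZE    4096u
#define SKETCH_MAX_RW_COUNT ((uint32_t)INT32_MAX & ~(SKETCH_PAGE_SIZE - 1u))

/* Cap a user-supplied provided-buffer length so later read/write paths,
   which assume the MAX_RW_COUNT limit, are never handed a larger value.
   The untruncated value would still be usable for address arithmetic. */
static uint32_t clamp_pbuf_len(uint64_t requested_len)
{
    if (requested_len > SKETCH_MAX_RW_COUNT)
        return SKETCH_MAX_RW_COUNT;
    return (uint32_t)requested_len;
}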
proto_write_byte_array (p11_rpc_message *msg,
CK_BYTE_PTR array,
CK_ULONG len,
CK_RV ret)
{
assert (msg != NULL);
/*
* When returning an byte array, in many cases we need to pass
* an invalid array along with a length, which signifies CKR_BUFFER_TOO_SMALL.
*/
switch (ret) {
case CKR_BUFFER_TOO_SMALL:
array = NULL;
/* fall through */
case CKR_OK:
break;
/* Pass all other errors straight through */
default:
return ret;
};
if (!p11_rpc_message_write_byte_array (msg, array, len))
return PREP_ERROR;
return CKR_OK;
} | 0 | [
"CWE-190"
] | p11-kit | 5307a1d21a50cacd06f471a873a018d23ba4b963 | 329,412,320,759,502,700,000,000,000,000,000,000,000 | 29 | Check for arithmetic overflows before allocating |
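A minimal sketch of the general pattern the commit title names: verify that a size computation cannot wrap before handing it to the allocator (illustrative helper, not p11-kit's actual code):

#include <stdint.h>
#include <stdlib.h>

/* Allocate nmemb elements of 'size' bytes, refusing sizes that would overflow. */
static void *alloc_array_checked(size_t nmemb, size_t size)
{
    if (size != 0 && nmemb > SIZE_MAX / size)
        return NULL;                 /* nmemb * size would wrap around */
    return malloc(nmemb * size);
}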
static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int err;
lockdep_assert_held(&trans_pcie->mutex);
err = iwl_pcie_prepare_card_hw(trans);
if (err) {
IWL_ERR(trans, "Error while preparing HW: %d\n", err);
return err;
}
err = iwl_trans_pcie_clear_persistence_bit(trans);
if (err)
return err;
iwl_trans_pcie_sw_reset(trans);
err = iwl_pcie_apm_init(trans);
if (err)
return err;
iwl_pcie_init_msix(trans_pcie);
/* From now on, the op_mode will be kept updated about RF kill state */
iwl_enable_rfkill_int(trans);
trans_pcie->opmode_down = false;
/* Set is_down to false here so that...*/
trans_pcie->is_down = false;
/* ...rfkill can call stop_device and set it false if needed */
iwl_pcie_check_hw_rf_kill(trans);
return 0;
} | 0 | [
"CWE-476"
] | linux | 8188a18ee2e48c9a7461139838048363bfce3fef | 69,802,161,060,567,470,000,000,000,000,000,000,000 | 38 | iwlwifi: pcie: fix rb_allocator workqueue allocation
We don't handle failures in the rb_allocator workqueue allocation
correctly. To fix that, move the code earlier so the cleanup is
easier and we don't have to undo all the interrupt allocations in
this case.
Signed-off-by: Johannes Berg <[email protected]>
Signed-off-by: Luca Coelho <[email protected]> |
rfbTranslateNone(char *table, rfbPixelFormat *in, rfbPixelFormat *out,
char *iptr, char *optr, int bytesBetweenInputLines,
int width, int height)
{
int bytesPerOutputLine = width * (out->bitsPerPixel / 8);
while (height > 0) {
memcpy(optr, iptr, bytesPerOutputLine);
iptr += bytesBetweenInputLines;
optr += bytesPerOutputLine;
height--;
}
} | 0 | [] | libvncserver | 53073c8d7e232151ea2ecd8a1243124121e10e2d | 52,387,821,354,165,930,000,000,000,000,000,000,000 | 13 | libvncserver: fix pointer aliasing/alignment issue
Accessing byte-aligned data through uint16_t pointers can cause crashes
on some platforms or reduce the performance. Therefore ensure a proper
stack alignment. |
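A hedged sketch of the safe access pattern behind this class of fix: read possibly unaligned pixel data through memcpy into a properly typed local instead of dereferencing a cast uint16_t pointer.

#include <stdint.h>
#include <string.h>

static uint16_t read_u16(const unsigned char *p)
{
    uint16_t v;
    memcpy(&v, p, sizeof v);   /* valid for any alignment, no strict-aliasing UB */
    return v;
}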
static int dccp_feat_push_change(struct list_head *fn_list, u8 feat, u8 local,
u8 mandatory, dccp_feat_val *fval)
{
struct dccp_feat_entry *new = dccp_feat_entry_new(fn_list, feat, local);
if (new == NULL)
return -ENOMEM;
new->feat_num = feat;
new->is_local = local;
new->state = FEAT_INITIALISING;
new->needs_confirm = false;
new->empty_confirm = false;
new->val = *fval;
new->needs_mandatory = mandatory;
return 0;
} | 0 | [
"CWE-401"
] | linux | 1d3ff0950e2b40dc861b1739029649d03f591820 | 278,169,493,919,800,800,000,000,000,000,000,000,000 | 18 | dccp: Fix memleak in __feat_register_sp
If dccp_feat_push_change fails, we forget to free the memory
that was allocated by kmemdup in dccp_feat_clone_sp_val.
Reported-by: Hulk Robot <[email protected]>
Fixes: e8ef967a54f4 ("dccp: Registration routines for changing feature values")
Reviewed-by: Mukesh Ojha <[email protected]>
Signed-off-by: YueHaibing <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
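A hedged user-space sketch of the leak pattern: when the call that is supposed to take ownership of a duplicated value fails, the caller must free the duplicate itself. The names below are illustrative stand-ins, not the dccp functions.

#include <stdlib.h>
#include <string.h>

static int push_change(unsigned char *val, size_t len)   /* stand-in callee */
{
    (void)val; (void)len;
    return -1;                              /* pretend the push failed */
}

static int register_feature_value(const unsigned char *val, size_t len)
{
    unsigned char *copy = malloc(len);      /* analogue of kmemdup() */
    if (copy == NULL)
        return -1;
    memcpy(copy, val, len);
    if (push_change(copy, len) != 0) {
        free(copy);                         /* the cleanup the fix adds */
        return -1;
    }
    return 0;                               /* on success the callee owns 'copy' */
}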
static int io_async_cancel_one(struct io_uring_task *tctx,
struct io_cancel_data *cd)
{
enum io_wq_cancel cancel_ret;
int ret = 0;
bool all;
if (!tctx || !tctx->io_wq)
return -ENOENT;
all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY);
cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, cd, all);
switch (cancel_ret) {
case IO_WQ_CANCEL_OK:
ret = 0;
break;
case IO_WQ_CANCEL_RUNNING:
ret = -EALREADY;
break;
case IO_WQ_CANCEL_NOTFOUND:
ret = -ENOENT;
break;
}
return ret;
} | 0 | [
"CWE-193"
] | linux | 47abea041f897d64dbd5777f0cf7745148f85d75 | 123,439,136,291,978,250,000,000,000,000,000,000,000 | 26 | io_uring: fix off-by-one in sync cancelation file check
The passed-in index should be validated against the number of registered
files we have; it needs to be smaller than that count to avoid going
one beyond the end.
Fixes: 78a861b94959 ("io_uring: add sync cancelation API through io_uring_register()")
Reported-by: Luo Likang <[email protected]>
Signed-off-by: Jens Axboe <[email protected]> |
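The off-by-one boils down to using the wrong comparison when validating an index against a table size; a minimal sketch with illustrative names:

#include <stdbool.h>
#include <stddef.h>

/* An index into a table of nr_registered entries is valid only while it is
   strictly less than the count; letting idx == nr_registered through reads
   one element past the end. */
static bool file_index_in_range(size_t idx, size_t nr_registered)
{
    return idx < nr_registered;
}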
int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
const struct nlmsghdr *nlh,
struct netlink_dump_control *control)
{
struct netlink_callback *cb;
struct sock *sk;
struct netlink_sock *nlk;
int ret;
refcount_inc(&skb->users);
sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
if (sk == NULL) {
ret = -ECONNREFUSED;
goto error_free;
}
nlk = nlk_sk(sk);
mutex_lock(nlk->cb_mutex);
/* A dump is in progress... */
if (nlk->cb_running) {
ret = -EBUSY;
goto error_unlock;
}
/* add reference of module which cb->dump belongs to */
if (!try_module_get(control->module)) {
ret = -EPROTONOSUPPORT;
goto error_unlock;
}
cb = &nlk->cb;
memset(cb, 0, sizeof(*cb));
cb->start = control->start;
cb->dump = control->dump;
cb->done = control->done;
cb->nlh = nlh;
cb->data = control->data;
cb->module = control->module;
cb->min_dump_alloc = control->min_dump_alloc;
cb->skb = skb;
if (cb->start) {
ret = cb->start(cb);
if (ret)
goto error_unlock;
}
nlk->cb_running = true;
nlk->dump_done_errno = INT_MAX;
mutex_unlock(nlk->cb_mutex);
ret = netlink_dump(sk);
sock_put(sk);
if (ret)
return ret;
/* We successfully started a dump, by returning -EINTR we
* signal not to send ACK even if it was requested.
*/
return -EINTR;
error_unlock:
sock_put(sk);
mutex_unlock(nlk->cb_mutex);
error_free:
kfree_skb(skb);
return ret;
} | 0 | [
"CWE-200"
] | linux | 93c647643b48f0131f02e45da3bd367d80443291 | 98,151,696,158,610,530,000,000,000,000,000,000,000 | 71 | netlink: Add netns check on taps
Currently, a nlmon link inside a child namespace can observe systemwide
netlink activity. Filter the traffic so that nlmon can only sniff
netlink messages from its own netns.
Test case:
vpnns -- bash -c "ip link add nlmon0 type nlmon; \
ip link set nlmon0 up; \
tcpdump -i nlmon0 -q -w /tmp/nlmon.pcap -U" &
sudo ip xfrm state add src 10.1.1.1 dst 10.1.1.2 proto esp \
spi 0x1 mode transport \
auth sha1 0x6162633132330000000000000000000000000000 \
enc aes 0x00000000000000000000000000000000
grep --binary abc123 /tmp/nlmon.pcap
Signed-off-by: Kevin Cernekee <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
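A hedged sketch of the filter being added, with toy types standing in for the kernel's struct net and sk_buff: a tap only sees messages whose network namespace matches its own.

#include <stdbool.h>

struct netns   { int inum; };
struct tap_dev { const struct netns *ns; };
struct nl_msg  { const struct netns *ns; };

static bool tap_may_observe(const struct tap_dev *tap, const struct nl_msg *msg)
{
    return tap->ns == msg->ns;   /* deliver only within the same namespace */
}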
DSA *DSAparams_dup(DSA *dsa)
{
return ASN1_item_dup(ASN1_ITEM_rptr(DSAparams), dsa);
} | 0 | [
"CWE-310"
] | openssl | 684400ce192dac51df3d3e92b61830a6ef90be3e | 97,177,573,138,996,530,000,000,000,000,000,000,000 | 4 | Fix various certificate fingerprint issues.
By using non-DER or invalid encodings outside the signed portion of a
certificate the fingerprint can be changed without breaking the signature.
Although no details of the signed portion of the certificate can be changed
this can cause problems with some applications: e.g. those using the
certificate fingerprint for blacklists.
1. Reject signatures with non zero unused bits.
If the BIT STRING containing the signature has non zero unused bits reject
the signature. All current signature algorithms require zero unused bits.
2. Check certificate algorithm consistency.
Check the AlgorithmIdentifier inside TBS matches the one in the
certificate signature. NB: this will result in signature failure
errors for some broken certificates.
3. Check DSA/ECDSA signatures use DER.
Reencode DSA/ECDSA signatures and compare with the original received
signature. Return an error if there is a mismatch.
This will reject various cases including garbage after signature
(thanks to Antti Karjalainen and Tuomo Untinen from the Codenomicon CROSS
program for discovering this case) and use of BER or invalid ASN.1 INTEGERs
(negative or with leading zeroes).
CVE-2014-8275
Reviewed-by: Emilia Käsper <[email protected]> |
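For check (1) above, a hedged sketch of the DER-level rule involved: the first content octet of a BIT STRING states how many bits of the final octet are unused, and a signature must declare zero. This helper is only an illustration, not OpenSSL's code.

#include <stdbool.h>
#include <stddef.h>

static bool sig_bitstring_ok(const unsigned char *content, size_t content_len)
{
    /* 'content' points at the BIT STRING contents (after tag and length). */
    return content_len > 0 && content[0] == 0;
}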
ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
ext4_lblk_t block)
{
int depth = ext_depth(inode);
ext4_lblk_t len;
ext4_lblk_t lblock;
struct ext4_extent *ex;
struct extent_status es;
ex = path[depth].p_ext;
if (ex == NULL) {
/* there is no extent yet, so gap is [0;-] */
lblock = 0;
len = EXT_MAX_BLOCKS;
ext_debug("cache gap(whole file):");
} else if (block < le32_to_cpu(ex->ee_block)) {
lblock = block;
len = le32_to_cpu(ex->ee_block) - block;
ext_debug("cache gap(before): %u [%u:%u]",
block,
le32_to_cpu(ex->ee_block),
ext4_ext_get_actual_len(ex));
} else if (block >= le32_to_cpu(ex->ee_block)
+ ext4_ext_get_actual_len(ex)) {
ext4_lblk_t next;
lblock = le32_to_cpu(ex->ee_block)
+ ext4_ext_get_actual_len(ex);
next = ext4_ext_next_allocated_block(path);
ext_debug("cache gap(after): [%u:%u] %u",
le32_to_cpu(ex->ee_block),
ext4_ext_get_actual_len(ex),
block);
BUG_ON(next == lblock);
len = next - lblock;
} else {
BUG();
}
ext4_es_find_delayed_extent_range(inode, lblock, lblock + len - 1, &es);
if (es.es_len) {
/* There's delayed extent containing lblock? */
if (es.es_lblk <= lblock)
return;
len = min(es.es_lblk - lblock, len);
}
ext_debug(" -> %u:%u\n", lblock, len);
ext4_es_insert_extent(inode, lblock, len, ~0, EXTENT_STATUS_HOLE);
} | 0 | [
"CWE-362"
] | linux | ea3d7209ca01da209cda6f0dea8be9cc4b7a933b | 209,026,241,472,768,870,000,000,000,000,000,000,000 | 49 | ext4: fix races between page faults and hole punching
Currently, page faults and hole punching are completely unsynchronized.
This can result in page fault faulting in a page into a range that we
are punching after truncate_pagecache_range() has been called and thus
we can end up with a page mapped to disk blocks that will be shortly
freed. Filesystem corruption will shortly follow. Note that the same
race is avoided for truncate by checking page fault offset against
i_size but there isn't similar mechanism available for punching holes.
Fix the problem by creating new rw semaphore i_mmap_sem in inode and
grab it for writing over truncate, hole punching, and other functions
removing blocks from extent tree and for read over page faults. We
cannot easily use i_data_sem for this since that ranks below transaction
start and we need something ranking above it so that it can be held over
the whole truncate / hole punching operation. Also remove various
workarounds we had in the code to reduce race window when page fault
could have created pages with stale mapping information.
Signed-off-by: Jan Kara <[email protected]>
Signed-off-by: Theodore Ts'o <[email protected]> |
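A hedged user-space analogy of the locking scheme, with a pthread rwlock standing in for the new i_mmap_sem (not the actual ext4 code): the punch path holds it exclusively for the whole operation while the fault path holds it shared.

#include <pthread.h>

static pthread_rwlock_t mmap_sem = PTHREAD_RWLOCK_INITIALIZER;

static void punch_hole(void)            /* analogue of the hole-punch path */
{
    pthread_rwlock_wrlock(&mmap_sem);   /* exclusive: no faults can run now */
    /* ... drop the page cache range and free the extent blocks ... */
    pthread_rwlock_unlock(&mmap_sem);
}

static void fault_in_page(void)         /* analogue of the page-fault path */
{
    pthread_rwlock_rdlock(&mmap_sem);   /* shared: waits while a punch runs */
    /* ... map the page using block mappings that cannot go stale here ... */
    pthread_rwlock_unlock(&mmap_sem);
}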
if (ctx->check_main_role && !main_role_set) {
GF_MPD_Descriptor *desc;
desc = gf_mpd_descriptor_new(NULL, "urn:mpeg:dash:role:2011", "main");
gf_list_add(set->role, desc);
}
}
static void rewrite_dep_ids(GF_DasherCtx *ctx, GF_DashStream *base_ds)
{
u32 i, count = gf_list_count(ctx->pids);
for (i=0; i<count; i++) {
GF_DashStream *ds = gf_list_get(ctx->pids, i);
if (ds->src_id != base_ds->src_id) continue;
if (!ds->dep_id || !ds->rep) continue; | 0 | [
"CWE-787"
] | gpac | ea1eca00fd92fa17f0e25ac25652622924a9a6a0 | 286,355,107,080,191,980,000,000,000,000,000,000,000 | 14 | fixed #2138 |
const wchar_t *LibRaw_bigfile_datastream::wfname()
{
return wfilename.size()>0?wfilename.c_str():NULL;
} | 0 | [
"CWE-703"
] | LibRaw | 11909cc59e712e09b508dda729b99aeaac2b29ad | 289,477,738,562,036,170,000,000,000,000,000,000,000 | 4 | cumulated data checks patch |
static int parse_part_sign_sha256 (sockent_t *se, /* {{{ */
void **ret_buffer, size_t *ret_buffer_len, int flags)
{
static c_complain_t complain_no_users = C_COMPLAIN_INIT_STATIC;
char *buffer;
size_t buffer_len;
size_t buffer_offset;
size_t username_len;
char *secret;
part_signature_sha256_t pss;
uint16_t pss_head_length;
char hash[sizeof (pss.hash)];
gcry_md_hd_t hd;
gcry_error_t err;
unsigned char *hash_ptr;
buffer = *ret_buffer;
buffer_len = *ret_buffer_len;
buffer_offset = 0;
if (se->data.server.userdb == NULL)
{
c_complain (LOG_NOTICE, &complain_no_users,
"network plugin: Received signed network packet but can't verify it "
"because no user DB has been configured. Will accept it.");
return (0);
}
/* Check if the buffer has enough data for this structure. */
if (buffer_len <= PART_SIGNATURE_SHA256_SIZE)
return (-ENOMEM);
/* Read type and length header */
BUFFER_READ (&pss.head.type, sizeof (pss.head.type));
BUFFER_READ (&pss.head.length, sizeof (pss.head.length));
pss_head_length = ntohs (pss.head.length);
/* Check if the `pss_head_length' is within bounds. */
if ((pss_head_length <= PART_SIGNATURE_SHA256_SIZE)
|| (pss_head_length > buffer_len))
{
ERROR ("network plugin: HMAC-SHA-256 with invalid length received.");
return (-1);
}
/* Copy the hash. */
BUFFER_READ (pss.hash, sizeof (pss.hash));
/* Calculate username length (without null byte) and allocate memory */
username_len = pss_head_length - PART_SIGNATURE_SHA256_SIZE;
pss.username = malloc (username_len + 1);
if (pss.username == NULL)
return (-ENOMEM);
/* Read the username */
BUFFER_READ (pss.username, username_len);
pss.username[username_len] = 0;
assert (buffer_offset == pss_head_length);
/* Query the password */
secret = fbh_get (se->data.server.userdb, pss.username);
if (secret == NULL)
{
ERROR ("network plugin: Unknown user: %s", pss.username);
sfree (pss.username);
return (-ENOENT);
}
/* Create a hash device and check the HMAC */
hd = NULL;
err = gcry_md_open (&hd, GCRY_MD_SHA256, GCRY_MD_FLAG_HMAC);
if (err != 0)
{
ERROR ("network plugin: Creating HMAC-SHA-256 object failed: %s",
gcry_strerror (err));
sfree (secret);
sfree (pss.username);
return (-1);
}
err = gcry_md_setkey (hd, secret, strlen (secret));
if (err != 0)
{
ERROR ("network plugin: gcry_md_setkey failed: %s", gcry_strerror (err));
gcry_md_close (hd);
sfree (secret);
sfree (pss.username);
return (-1);
}
gcry_md_write (hd,
buffer + PART_SIGNATURE_SHA256_SIZE,
buffer_len - PART_SIGNATURE_SHA256_SIZE);
hash_ptr = gcry_md_read (hd, GCRY_MD_SHA256);
if (hash_ptr == NULL)
{
ERROR ("network plugin: gcry_md_read failed.");
gcry_md_close (hd);
sfree (secret);
sfree (pss.username);
return (-1);
}
memcpy (hash, hash_ptr, sizeof (hash));
/* Clean up */
gcry_md_close (hd);
hd = NULL;
if (memcmp (pss.hash, hash, sizeof (pss.hash)) != 0)
{
WARNING ("network plugin: Verifying HMAC-SHA-256 signature failed: "
"Hash mismatch.");
}
else
{
parse_packet (se, buffer + buffer_offset, buffer_len - buffer_offset,
flags | PP_SIGNED, pss.username);
}
sfree (secret);
sfree (pss.username);
*ret_buffer = buffer + buffer_len;
*ret_buffer_len = 0;
return (0);
} /* }}} int parse_part_sign_sha256 */ | 0 | [
"CWE-119",
"CWE-787"
] | collectd | b589096f907052b3a4da2b9ccc9b0e2e888dfc18 | 2,111,180,598,815,970,600,000,000,000,000,000,000 | 132 | network plugin: Fix heap overflow in parse_packet().
Emilien Gaspar has identified a heap overflow in parse_packet(), the
function used by the network plugin to parse incoming network packets.
This is a vulnerability in collectd, though the scope is not clear at
this point. At the very least specially crafted network packets can be
used to crash the daemon. We can't rule out a potential remote code
execution though.
Fixes: CVE-2016-6254 |
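The hardening visible in the function above hinges on validating every part length against both the minimum header size and the bytes still left in the receive buffer before the length is used; a standalone sketch of that check (the header-size constant is illustrative):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define PART_HEADER_SIZE 4u   /* 2-byte type + 2-byte length */

static bool part_length_ok(uint16_t part_len, size_t bytes_remaining)
{
    return part_len >= PART_HEADER_SIZE && part_len <= bytes_remaining;
}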
static inline void eventpoll_init_file(struct file *file)
{
INIT_LIST_HEAD(&file->f_ep_links);
} | 1 | [] | linux-2.6 | 28d82dc1c4edbc352129f97f4ca22624d1fe61de | 273,787,696,074,068,800,000,000,000,000,000,000,000 | 4 | epoll: limit paths
The current epoll code can be tickled to run basically indefinitely in
both loop detection path check (on ep_insert()), and in the wakeup paths.
The programs that tickle this behavior set up deeply linked networks of
epoll file descriptors that cause the epoll algorithms to traverse them
indefinitely. A couple of these sample programs have been previously
posted in this thread: https://lkml.org/lkml/2011/2/25/297.
To fix the loop detection path check algorithms, I simply keep track of
the epoll nodes that have been already visited. Thus, the loop detection
becomes proportional to the number of epoll file descriptors and links.
This dramatically decreases the run-time of the loop check algorithm. In
one diabolical case I tried it reduced the run-time from 15 minutes (all
in kernel time) to .3 seconds.
Fixing the wakeup paths could be done at wakeup time in a similar manner
by keeping track of nodes that have already been visited, but the
complexity is harder, since there can be multiple wakeups on different
cpus...Thus, I've opted to limit the number of possible wakeup paths when
the paths are created.
This is accomplished, by noting that the end file descriptor points that
are found during the loop detection pass (from the newly added link), are
actually the sources for wakeup events. I keep a list of these file
descriptors and limit the number and length of these paths that emanate
from these 'source file descriptors'. In the current implementation I
allow 1000 paths of length 1, 500 of length 2, 100 of length 3, 50 of
length 4 and 10 of length 5. Note that it is sufficient to check the
'source file descriptors' reachable from the newly added link, since no
other 'source file descriptors' will have newly added links. This allows
us to check only the wakeup paths that may have gotten too long, and not
re-check all possible wakeup paths on the system.
In terms of the path limit selection, I think it's first worth noting that
the most common case for epoll is probably the model where you have 1
epoll file descriptor that is monitoring n number of 'source file
descriptors'. In this case, each 'source file descriptor' has 1 path of
length 1. Thus, I believe that the limits I'm proposing are quite
reasonable and in fact may be too generous. Thus, I'm hoping that the
proposed limits will not cause any workloads that currently work to
fail.
In terms of locking, I have extended the use of the 'epmutex' to all
epoll_ctl add and remove operations. Currently it's only used in a subset
of the add paths. I need to hold the epmutex, so that we can correctly
traverse a coherent graph, to check the number of paths. I believe that
this additional locking is probably ok, since it's in the setup/teardown
paths, and doesn't affect the running paths, but it certainly is going to
add some extra overhead. Also, worth noting is that the epmutex was
recently added to the ep_ctl add operations in the initial path loop
detection code using the argument that it was not on a critical path.
Another thing to note here, is the length of epoll chains that is allowed.
Currently, eventpoll.c defines:
/* Maximum number of nesting allowed inside epoll sets */
#define EP_MAX_NESTS 4
This basically means that I am limited to a graph depth of 5 (EP_MAX_NESTS
+ 1). However, this limit is currently only enforced during the loop
check detection code, and only when the epoll file descriptors are added
in a certain order. Thus, this limit is currently easily bypassed. The
newly added check for wakeup paths strictly limits the wakeup paths to a
length of 5, regardless of the order in which ep's are linked together.
Thus, a side-effect of the new code is a more consistent enforcement of
the graph depth.
Thus far, I've tested this, using the sample programs previously
mentioned, which now either return quickly or return -EINVAL. I've also
tested using the piptest.c epoll tester, which showed no difference in
performance. I've also created a number of different epoll networks and
tested that they behave as expected.
I believe this solves the original diabolical test cases, while still
preserving the sane epoll nesting.
Signed-off-by: Jason Baron <[email protected]>
Cc: Nelson Elhage <[email protected]>
Cc: Davide Libenzi <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
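A small sketch of the limit table described above; the real code additionally counts the paths while walking the graph, this only encodes the per-depth caps:

/* Wakeup-path caps per path length: 1000 of length 1, 500 of length 2,
   100 of length 3, 50 of length 4, 10 of length 5; longer paths are refused. */
static int path_limit_for_depth(int depth)
{
    static const int limits[] = { 1000, 500, 100, 50, 10 };
    if (depth < 1 || depth > 5)
        return 0;
    return limits[depth - 1];
}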
void Item_func_group_concat::cleanup()
{
DBUG_ENTER("Item_func_group_concat::cleanup");
Item_sum::cleanup();
/*
Free table and tree if they belong to this item (if item have not pointer
to original item from which was made copy => it own its objects )
*/
if (!original)
{
delete tmp_table_param;
tmp_table_param= 0;
if (table)
{
THD *thd= table->in_use;
if (table->blob_storage)
delete table->blob_storage;
free_tmp_table(thd, table);
table= 0;
if (tree)
{
delete_tree(tree);
tree= 0;
}
if (unique_filter)
{
delete unique_filter;
unique_filter= NULL;
}
}
DBUG_ASSERT(tree == 0);
}
/*
As the ORDER structures pointed to by the elements of the
'order' array may be modified in find_order_in_list() called
from Item_func_group_concat::setup() to point to runtime
created objects, we need to reset them back to the original
arguments of the function.
*/
ORDER **order_ptr= order;
for (uint i= 0; i < arg_count_order; i++)
{
(*order_ptr)->item= &args[arg_count_field + i];
order_ptr++;
}
DBUG_VOID_RETURN;
} | 0 | [
"CWE-120"
] | server | eca207c46293bc72dd8d0d5622153fab4d3fccf1 | 320,377,613,532,561,820,000,000,000,000,000,000,000 | 48 | MDEV-25317 Assertion `scale <= precision' failed in decimal_bin_size And Assertion `scale >= 0 && precision > 0 && scale <= precision' failed in decimal_bin_size_inline/decimal_bin_size.
Precision should be kept below DECIMAL_MAX_SCALE for computations.
It can be bigger in Item_decimal. I'd fix this too but it changes the
existing behaviour, so it is problematic to fix. |
mono_loader_lock_if_inited (void)
{
if (loader_lock_inited)
mono_loader_lock ();
} | 0 | [] | mono | 8e890a3bf80a4620e417814dc14886b1bbd17625 | 169,559,412,662,811,700,000,000,000,000,000,000,000 | 5 | Search for dllimported shared libs in the base directory, not cwd.
* loader.c: we don't search the current directory anymore for shared
libraries referenced in DllImport attributes, as it has a slight
security risk. We search in the same directory where the referencing
image was loaded from, instead. Fixes bug# 641915. |
QPDF_BOOL qpdf_allow_modify_form(qpdf_data qpdf)
{
QTC::TC("qpdf", "qpdf-c called qpdf_allow_modify_form");
return qpdf->qpdf->allowModifyForm();
} | 0 | [
"CWE-787"
] | qpdf | d71f05ca07eb5c7cfa4d6d23e5c1f2a800f52e8e | 309,607,994,227,491,700,000,000,000,000,000,000,000 | 5 | Fix sign and conversion warnings (major)
This makes all integer type conversions that have potential data loss
explicit with calls that do range checks and raise an exception. After
this commit, qpdf builds with no warnings when -Wsign-conversion
-Wconversion is used with gcc or clang or when -W3 -Wd4800 is used
with MSVC. This significantly reduces the likelihood of potential
crashes from bogus integer values.
There are some parts of the code that take int when they should take
size_t or an offset. Such places would make qpdf not support files
with more than 2^31 of something that usually wouldn't be so large. In
the event that such a file shows up and is valid, at least qpdf would
raise an error in the right spot so the issue could be legitimately
addressed rather than failing in some weird way because of a silent
overflow condition. |
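A hedged C-style sketch of the range-checked conversion idea; qpdf itself does this with C++ helpers that throw, so the function below is only an illustration of the pattern, not the project's API:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Narrow a 64-bit value to int only if it fits; otherwise fail loudly
   instead of silently truncating or changing sign. */
static int to_int_checked(long long value)
{
    if (value < INT_MIN || value > INT_MAX) {
        fprintf(stderr, "integer value out of range for int\n");
        exit(2);
    }
    return (int)value;
}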
int rdma_listen(struct rdma_cm_id *id, int backlog)
{
struct rdma_id_private *id_priv;
int ret;
id_priv = container_of(id, struct rdma_id_private, id);
if (id_priv->state == RDMA_CM_IDLE) {
id->route.addr.src_addr.ss_family = AF_INET;
ret = rdma_bind_addr(id, cma_src_addr(id_priv));
if (ret)
return ret;
}
if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN))
return -EINVAL;
if (id_priv->reuseaddr) {
ret = cma_bind_listen(id_priv);
if (ret)
goto err;
}
id_priv->backlog = backlog;
if (id->device) {
switch (rdma_node_get_transport(id->device->node_type)) {
case RDMA_TRANSPORT_IB:
ret = cma_ib_listen(id_priv);
if (ret)
goto err;
break;
case RDMA_TRANSPORT_IWARP:
ret = cma_iw_listen(id_priv, backlog);
if (ret)
goto err;
break;
default:
ret = -ENOSYS;
goto err;
}
} else
cma_listen_on_all(id_priv);
return 0;
err:
id_priv->backlog = 0;
cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND);
return ret;
} | 0 | [
"CWE-20"
] | linux | b2853fd6c2d0f383dbdf7427e263eb576a633867 | 330,599,404,861,044,920,000,000,000,000,000,000,000 | 48 | IB/core: Don't resolve passive side RoCE L2 address in CMA REQ handler
The code that resolves the passive side source MAC within the rdma_cm
connection request handler was both redundant and buggy, so remove it.
It was redundant since later, when an RC QP is modified to RTR state,
the resolution will take place in the ib_core module. It was buggy
because this callback also deals with UD SIDR exchange, for which we
incorrectly looked at the REQ member of the CM event and dereferenced
a random value.
Fixes: dd5f03beb4f7 ("IB/core: Ethernet L2 attributes in verbs/cm structures")
Signed-off-by: Moni Shoua <[email protected]>
Signed-off-by: Or Gerlitz <[email protected]>
Signed-off-by: Roland Dreier <[email protected]> |
cifs_echo_request(struct work_struct *work)
{
int rc;
struct TCP_Server_Info *server = container_of(work,
struct TCP_Server_Info, echo.work);
/*
* We cannot send an echo until the NEGOTIATE_PROTOCOL request is
* done, which is indicated by maxBuf != 0. Also, no need to ping if
* we got a response recently
*/
if (server->maxBuf == 0 ||
time_before(jiffies, server->lstrp + SMB_ECHO_INTERVAL - HZ))
goto requeue_echo;
rc = CIFSSMBEcho(server);
if (rc)
cFYI(1, "Unable to send echo request to server: %s",
server->hostname);
requeue_echo:
queue_delayed_work(system_nrt_wq, &server->echo, SMB_ECHO_INTERVAL);
} | 0 | [
"CWE-20"
] | linux | 70945643722ffeac779d2529a348f99567fa5c33 | 157,822,495,609,566,970,000,000,000,000,000,000,000 | 23 | cifs: always do is_path_accessible check in cifs_mount
Currently, we skip doing the is_path_accessible check in cifs_mount if
there is no prefixpath. I have a report of at least one server however
that allows a TREE_CONNECT to a share that has a DFS referral at its
root. The reporter in this case was using a UNC that had no prefixpath,
so the is_path_accessible check was not triggered and the box later hit
a BUG() because we were chasing a DFS referral on the root dentry for
the mount.
This patch fixes this by removing the check for a zero-length
prefixpath. That should make the is_path_accessible check be done in
this situation and should allow the client to chase the DFS referral at
mount time instead.
Cc: [email protected]
Reported-and-Tested-by: Yogesh Sharma <[email protected]>
Signed-off-by: Jeff Layton <[email protected]>
Signed-off-by: Steve French <[email protected]> |