// SPDX-License-Identifier: GPL-2.0
/*
* Cryptographic API.
*
* s390 implementation of the AES Cipher Algorithm with protected keys.
*
* s390 Version:
* Copyright IBM Corp. 2017, 2023
* Author(s): Martin Schwidefsky <[email protected]>
* Harald Freudenberger <[email protected]>
*/
#define KMSG_COMPONENT "paes_s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <crypto/internal/skcipher.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>
#include <asm/pkey.h>
/*
* Key blobs smaller/bigger than these defines are rejected
* by the common code even before the individual setkey function
* is called. As paes can handle different kinds of key blobs
* and padding is also possible, the limits need to be generous.
*/
#define PAES_MIN_KEYSIZE 16
#define PAES_MAX_KEYSIZE MAXEP11AESKEYBLOBSIZE
static u8 *ctrblk;
static DEFINE_MUTEX(ctrblk_lock);
static cpacf_mask_t km_functions, kmc_functions, kmctr_functions;
struct key_blob {
/*
* Small keys are stored in keybuf. Larger keys are stored in
* separately allocated memory. In both cases, key points to the
* memory where the key is stored. The code distinguishes the two
* cases by comparing keylen against sizeof(keybuf). See the two
* following helper functions.
*/
u8 *key;
u8 keybuf[128];
unsigned int keylen;
};
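/*
* Fill a key_blob from raw key material. A 16, 24 or 32 byte value
* is wrapped into a pkey clear-key token inside keybuf; any other
* key material (e.g. an opaque secure-key blob) is copied verbatim,
* using extra allocated memory when it does not fit into keybuf.
*/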
static inline int _key_to_kb(struct key_blob *kb,
const u8 *key,
unsigned int keylen)
{
struct clearkey_header {
u8 type;
u8 res0[3];
u8 version;
u8 res1[3];
u32 keytype;
u32 len;
} __packed * h;
switch (keylen) {
case 16:
case 24:
case 32:
/* clear key value, prepare pkey clear key token in keybuf */
memset(kb->keybuf, 0, sizeof(kb->keybuf));
h = (struct clearkey_header *) kb->keybuf;
h->version = 0x02; /* TOKVER_CLEAR_KEY */
h->keytype = (keylen - 8) >> 3;
h->len = keylen;
memcpy(kb->keybuf + sizeof(*h), key, keylen);
kb->keylen = sizeof(*h) + keylen;
kb->key = kb->keybuf;
break;
default:
/* other key material, let pkey handle this */
if (keylen <= sizeof(kb->keybuf))
kb->key = kb->keybuf;
else {
kb->key = kmalloc(keylen, GFP_KERNEL);
if (!kb->key)
return -ENOMEM;
}
memcpy(kb->key, key, keylen);
kb->keylen = keylen;
break;
}
return 0;
}
static inline void _free_kb_keybuf(struct key_blob *kb)
{
if (kb->key && kb->key != kb->keybuf
&& kb->keylen > sizeof(kb->keybuf)) {
kfree_sensitive(kb->key);
kb->key = NULL;
}
}
struct s390_paes_ctx {
struct key_blob kb;
struct pkey_protkey pk;
spinlock_t pk_lock;
unsigned long fc;
};
struct s390_pxts_ctx {
struct key_blob kb[2];
struct pkey_protkey pk[2];
spinlock_t pk_lock;
unsigned long fc;
};
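/*
* Convert the stored key blob into a protected key via the pkey
* layer. The conversion is attempted up to three times; when a
* previous attempt returned -EAGAIN and we run in task context,
* sleep one second before retrying.
*/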
static inline int __paes_keyblob2pkey(struct key_blob *kb,
struct pkey_protkey *pk)
{
int i, ret;
/* try three times in case of failure */
for (i = 0; i < 3; i++) {
if (i > 0 && ret == -EAGAIN && in_task())
if (msleep_interruptible(1000))
return -EINTR;
ret = pkey_keyblob2pkey(kb->key, kb->keylen,
pk->protkey, &pk->len, &pk->type);
if (ret == 0)
break;
}
return ret;
}
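/*
* Convert the key blob into a fresh protected key and publish it
* in the tfm context under pk_lock, so that concurrent crypt
* operations always see a consistent protected key.
*/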
static inline int __paes_convert_key(struct s390_paes_ctx *ctx)
{
int ret;
struct pkey_protkey pkey;
pkey.len = sizeof(pkey.protkey);
ret = __paes_keyblob2pkey(&ctx->kb, &pkey);
if (ret)
return ret;
spin_lock_bh(&ctx->pk_lock);
memcpy(&ctx->pk, &pkey, sizeof(pkey));
spin_unlock_bh(&ctx->pk_lock);
return 0;
}
static int ecb_paes_init(struct crypto_skcipher *tfm)
{
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
ctx->kb.key = NULL;
spin_lock_init(&ctx->pk_lock);
return 0;
}
static void ecb_paes_exit(struct crypto_skcipher *tfm)
{
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
_free_kb_keybuf(&ctx->kb);
}
static inline int __ecb_paes_set_key(struct s390_paes_ctx *ctx)
{
int rc;
unsigned long fc;
rc = __paes_convert_key(ctx);
if (rc)
return rc;
/* Pick the correct function code based on the protected key type */
fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KM_PAES_128 :
(ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KM_PAES_192 :
(ctx->pk.type == PKEY_KEYTYPE_AES_256) ? CPACF_KM_PAES_256 : 0;
/* Check if the function code is available */
ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
return ctx->fc ? 0 : -EINVAL;
}
static int ecb_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
int rc;
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
_free_kb_keybuf(&ctx->kb);
rc = _key_to_kb(&ctx->kb, in_key, key_len);
if (rc)
return rc;
return __ecb_paes_set_key(ctx);
}
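/*
* The CPACF instruction may return with fewer bytes processed than
* requested when the protected key has become invalid (for example
* after the wrapping key changed across a suspend/resume cycle).
* In that case the key blob is converted again and the remaining
* data is reprocessed with the refreshed protected key.
*/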
static int ecb_paes_crypt(struct skcipher_request *req, unsigned long modifier)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk walk;
unsigned int nbytes, n, k;
int ret;
struct {
u8 key[MAXPROTKEYSIZE];
} param;
ret = skcipher_walk_virt(&walk, req, false);
if (ret)
return ret;
spin_lock_bh(&ctx->pk_lock);
memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
spin_unlock_bh(&ctx->pk_lock);
while ((nbytes = walk.nbytes) != 0) {
/* only use complete blocks */
n = nbytes & ~(AES_BLOCK_SIZE - 1);
k = cpacf_km(ctx->fc | modifier, &param,
walk.dst.virt.addr, walk.src.virt.addr, n);
if (k)
ret = skcipher_walk_done(&walk, nbytes - k);
if (k < n) {
if (__paes_convert_key(ctx))
return skcipher_walk_done(&walk, -EIO);
spin_lock_bh(&ctx->pk_lock);
memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
spin_unlock_bh(&ctx->pk_lock);
}
}
return ret;
}
static int ecb_paes_encrypt(struct skcipher_request *req)
{
return ecb_paes_crypt(req, 0);
}
static int ecb_paes_decrypt(struct skcipher_request *req)
{
return ecb_paes_crypt(req, CPACF_DECRYPT);
}
static struct skcipher_alg ecb_paes_alg = {
.base.cra_name = "ecb(paes)",
.base.cra_driver_name = "ecb-paes-s390",
.base.cra_priority = 401, /* combo: aes + ecb + 1 */
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct s390_paes_ctx),
.base.cra_module = THIS_MODULE,
.base.cra_list = LIST_HEAD_INIT(ecb_paes_alg.base.cra_list),
.init = ecb_paes_init,
.exit = ecb_paes_exit,
.min_keysize = PAES_MIN_KEYSIZE,
.max_keysize = PAES_MAX_KEYSIZE,
.setkey = ecb_paes_set_key,
.encrypt = ecb_paes_encrypt,
.decrypt = ecb_paes_decrypt,
};
static int cbc_paes_init(struct crypto_skcipher *tfm)
{
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
ctx->kb.key = NULL;
spin_lock_init(&ctx->pk_lock);
return 0;
}
static void cbc_paes_exit(struct crypto_skcipher *tfm)
{
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
_free_kb_keybuf(&ctx->kb);
}
static inline int __cbc_paes_set_key(struct s390_paes_ctx *ctx)
{
int rc;
unsigned long fc;
rc = __paes_convert_key(ctx);
if (rc)
return rc;
/* Pick the correct function code based on the protected key type */
fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KMC_PAES_128 :
(ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KMC_PAES_192 :
(ctx->pk.type == PKEY_KEYTYPE_AES_256) ? CPACF_KMC_PAES_256 : 0;
/* Check if the function code is available */
ctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;
return ctx->fc ? 0 : -EINVAL;
}
static int cbc_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
int rc;
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
_free_kb_keybuf(&ctx->kb);
rc = _key_to_kb(&ctx->kb, in_key, key_len);
if (rc)
return rc;
return __cbc_paes_set_key(ctx);
}
static int cbc_paes_crypt(struct skcipher_request *req, unsigned long modifier)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk walk;
unsigned int nbytes, n, k;
int ret;
struct {
u8 iv[AES_BLOCK_SIZE];
u8 key[MAXPROTKEYSIZE];
} param;
ret = skcipher_walk_virt(&walk, req, false);
if (ret)
return ret;
memcpy(param.iv, walk.iv, AES_BLOCK_SIZE);
spin_lock_bh(&ctx->pk_lock);
memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
spin_unlock_bh(&ctx->pk_lock);
while ((nbytes = walk.nbytes) != 0) {
/* only use complete blocks */
n = nbytes & ~(AES_BLOCK_SIZE - 1);
k = cpacf_kmc(ctx->fc | modifier, &param,
walk.dst.virt.addr, walk.src.virt.addr, n);
if (k) {
memcpy(walk.iv, param.iv, AES_BLOCK_SIZE);
ret = skcipher_walk_done(&walk, nbytes - k);
}
if (k < n) {
if (__paes_convert_key(ctx))
return skcipher_walk_done(&walk, -EIO);
spin_lock_bh(&ctx->pk_lock);
memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
spin_unlock_bh(&ctx->pk_lock);
}
}
return ret;
}
static int cbc_paes_encrypt(struct skcipher_request *req)
{
return cbc_paes_crypt(req, 0);
}
static int cbc_paes_decrypt(struct skcipher_request *req)
{
return cbc_paes_crypt(req, CPACF_DECRYPT);
}
static struct skcipher_alg cbc_paes_alg = {
.base.cra_name = "cbc(paes)",
.base.cra_driver_name = "cbc-paes-s390",
.base.cra_priority = 402, /* ecb-paes-s390 + 1 */
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct s390_paes_ctx),
.base.cra_module = THIS_MODULE,
.base.cra_list = LIST_HEAD_INIT(cbc_paes_alg.base.cra_list),
.init = cbc_paes_init,
.exit = cbc_paes_exit,
.min_keysize = PAES_MIN_KEYSIZE,
.max_keysize = PAES_MAX_KEYSIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = cbc_paes_set_key,
.encrypt = cbc_paes_encrypt,
.decrypt = cbc_paes_decrypt,
};
static int xts_paes_init(struct crypto_skcipher *tfm)
{
struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
ctx->kb[0].key = NULL;
ctx->kb[1].key = NULL;
spin_lock_init(&ctx->pk_lock);
return 0;
}
static void xts_paes_exit(struct crypto_skcipher *tfm)
{
struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
_free_kb_keybuf(&ctx->kb[0]);
_free_kb_keybuf(&ctx->kb[1]);
}
static inline int __xts_paes_convert_key(struct s390_pxts_ctx *ctx)
{
struct pkey_protkey pkey0, pkey1;
pkey0.len = sizeof(pkey0.protkey);
pkey1.len = sizeof(pkey1.protkey);
if (__paes_keyblob2pkey(&ctx->kb[0], &pkey0) ||
__paes_keyblob2pkey(&ctx->kb[1], &pkey1))
return -EINVAL;
spin_lock_bh(&ctx->pk_lock);
memcpy(&ctx->pk[0], &pkey0, sizeof(pkey0));
memcpy(&ctx->pk[1], &pkey1, sizeof(pkey1));
spin_unlock_bh(&ctx->pk_lock);
return 0;
}
static inline int __xts_paes_set_key(struct s390_pxts_ctx *ctx)
{
unsigned long fc;
if (__xts_paes_convert_key(ctx))
return -EINVAL;
if (ctx->pk[0].type != ctx->pk[1].type)
return -EINVAL;
/* Pick the correct function code based on the protected key type */
fc = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? CPACF_KM_PXTS_128 :
(ctx->pk[0].type == PKEY_KEYTYPE_AES_256) ?
CPACF_KM_PXTS_256 : 0;
/* Check if the function code is available */
ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
return ctx->fc ? 0 : -EINVAL;
}
static int xts_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int xts_key_len)
{
int rc;
struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
u8 ckey[2 * AES_MAX_KEY_SIZE];
unsigned int ckey_len, key_len;
if (xts_key_len % 2)
return -EINVAL;
key_len = xts_key_len / 2;
_free_kb_keybuf(&ctx->kb[0]);
_free_kb_keybuf(&ctx->kb[1]);
rc = _key_to_kb(&ctx->kb[0], in_key, key_len);
if (rc)
return rc;
rc = _key_to_kb(&ctx->kb[1], in_key + key_len, key_len);
if (rc)
return rc;
rc = __xts_paes_set_key(ctx);
if (rc)
return rc;
/*
* xts_verify_key verifies the key length is not odd and makes
* sure that the two keys are not the same. This check can be
* performed on the two protected keys as well.
*/
ckey_len = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ?
AES_KEYSIZE_128 : AES_KEYSIZE_256;
memcpy(ckey, ctx->pk[0].protkey, ckey_len);
memcpy(ckey + ckey_len, ctx->pk[1].protkey, ckey_len);
return xts_verify_key(tfm, ckey, 2*ckey_len);
}
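/*
* The PCC instruction computes the initial XTS tweak from the IV
* and the second protected key. For AES-128 the protected key and
* its verification pattern occupy only 48 of the 64 key bytes in
* the parameter block, hence the 16 byte offset into the key
* fields.
*/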
static int xts_paes_crypt(struct skcipher_request *req, unsigned long modifier)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk walk;
unsigned int keylen, offset, nbytes, n, k;
int ret;
struct {
u8 key[MAXPROTKEYSIZE]; /* key + verification pattern */
u8 tweak[16];
u8 block[16];
u8 bit[16];
u8 xts[16];
} pcc_param;
struct {
u8 key[MAXPROTKEYSIZE]; /* key + verification pattern */
u8 init[16];
} xts_param;
ret = skcipher_walk_virt(&walk, req, false);
if (ret)
return ret;
keylen = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 48 : 64;
offset = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 16 : 0;
memset(&pcc_param, 0, sizeof(pcc_param));
memcpy(pcc_param.tweak, walk.iv, sizeof(pcc_param.tweak));
spin_lock_bh(&ctx->pk_lock);
memcpy(pcc_param.key + offset, ctx->pk[1].protkey, keylen);
memcpy(xts_param.key + offset, ctx->pk[0].protkey, keylen);
spin_unlock_bh(&ctx->pk_lock);
cpacf_pcc(ctx->fc, pcc_param.key + offset);
memcpy(xts_param.init, pcc_param.xts, 16);
while ((nbytes = walk.nbytes) != 0) {
/* only use complete blocks */
n = nbytes & ~(AES_BLOCK_SIZE - 1);
k = cpacf_km(ctx->fc | modifier, xts_param.key + offset,
walk.dst.virt.addr, walk.src.virt.addr, n);
if (k)
ret = skcipher_walk_done(&walk, nbytes - k);
if (k < n) {
if (__xts_paes_convert_key(ctx))
return skcipher_walk_done(&walk, -EIO);
spin_lock_bh(&ctx->pk_lock);
memcpy(xts_param.key + offset,
ctx->pk[0].protkey, keylen);
spin_unlock_bh(&ctx->pk_lock);
}
}
return ret;
}
static int xts_paes_encrypt(struct skcipher_request *req)
{
return xts_paes_crypt(req, 0);
}
static int xts_paes_decrypt(struct skcipher_request *req)
{
return xts_paes_crypt(req, CPACF_DECRYPT);
}
static struct skcipher_alg xts_paes_alg = {
.base.cra_name = "xts(paes)",
.base.cra_driver_name = "xts-paes-s390",
.base.cra_priority = 402, /* ecb-paes-s390 + 1 */
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct s390_pxts_ctx),
.base.cra_module = THIS_MODULE,
.base.cra_list = LIST_HEAD_INIT(xts_paes_alg.base.cra_list),
.init = xts_paes_init,
.exit = xts_paes_exit,
.min_keysize = 2 * PAES_MIN_KEYSIZE,
.max_keysize = 2 * PAES_MAX_KEYSIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = xts_paes_set_key,
.encrypt = xts_paes_encrypt,
.decrypt = xts_paes_decrypt,
};
static int ctr_paes_init(struct crypto_skcipher *tfm)
{
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
ctx->kb.key = NULL;
spin_lock_init(&ctx->pk_lock);
return 0;
}
static void ctr_paes_exit(struct crypto_skcipher *tfm)
{
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
_free_kb_keybuf(&ctx->kb);
}
static inline int __ctr_paes_set_key(struct s390_paes_ctx *ctx)
{
int rc;
unsigned long fc;
rc = __paes_convert_key(ctx);
if (rc)
return rc;
/* Pick the correct function code based on the protected key type */
fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KMCTR_PAES_128 :
(ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KMCTR_PAES_192 :
(ctx->pk.type == PKEY_KEYTYPE_AES_256) ?
CPACF_KMCTR_PAES_256 : 0;
/* Check if the function code is available */
ctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;
return ctx->fc ? 0 : -EINVAL;
}
static int ctr_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
int rc;
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
_free_kb_keybuf(&ctx->kb);
rc = _key_to_kb(&ctx->kb, in_key, key_len);
if (rc)
return rc;
return __ctr_paes_set_key(ctx);
}
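/*
* Fill the shared ctrblk page with consecutive counter values so
* that a single CPACF KMCTR invocation can process up to PAGE_SIZE
* bytes of data in one go.
*/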
static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
{
unsigned int i, n;
/* only use complete blocks, max. PAGE_SIZE */
memcpy(ctrptr, iv, AES_BLOCK_SIZE);
n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) {
memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE);
crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE);
ctrptr += AES_BLOCK_SIZE;
}
return n;
}
static int ctr_paes_crypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
u8 buf[AES_BLOCK_SIZE], *ctrptr;
struct skcipher_walk walk;
unsigned int nbytes, n, k;
int ret, locked;
struct {
u8 key[MAXPROTKEYSIZE];
} param;
ret = skcipher_walk_virt(&walk, req, false);
if (ret)
return ret;
spin_lock_bh(&ctx->pk_lock);
memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
spin_unlock_bh(&ctx->pk_lock);
locked = mutex_trylock(&ctrblk_lock);
while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
n = AES_BLOCK_SIZE;
if (nbytes >= 2*AES_BLOCK_SIZE && locked)
n = __ctrblk_init(ctrblk, walk.iv, nbytes);
ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk.iv;
k = cpacf_kmctr(ctx->fc, &param, walk.dst.virt.addr,
walk.src.virt.addr, n, ctrptr);
if (k) {
if (ctrptr == ctrblk)
memcpy(walk.iv, ctrptr + k - AES_BLOCK_SIZE,
AES_BLOCK_SIZE);
crypto_inc(walk.iv, AES_BLOCK_SIZE);
ret = skcipher_walk_done(&walk, nbytes - k);
}
if (k < n) {
if (__paes_convert_key(ctx)) {
if (locked)
mutex_unlock(&ctrblk_lock);
return skcipher_walk_done(&walk, -EIO);
}
spin_lock_bh(&ctx->pk_lock);
memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
spin_unlock_bh(&ctx->pk_lock);
}
}
if (locked)
mutex_unlock(&ctrblk_lock);
/*
* final block may be < AES_BLOCK_SIZE, copy only nbytes
*/
if (nbytes) {
while (1) {
if (cpacf_kmctr(ctx->fc, &param, buf,
walk.src.virt.addr, AES_BLOCK_SIZE,
walk.iv) == AES_BLOCK_SIZE)
break;
if (__paes_convert_key(ctx))
return skcipher_walk_done(&walk, -EIO);
spin_lock_bh(&ctx->pk_lock);
memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
spin_unlock_bh(&ctx->pk_lock);
}
memcpy(walk.dst.virt.addr, buf, nbytes);
crypto_inc(walk.iv, AES_BLOCK_SIZE);
ret = skcipher_walk_done(&walk, nbytes);
}
return ret;
}
static struct skcipher_alg ctr_paes_alg = {
.base.cra_name = "ctr(paes)",
.base.cra_driver_name = "ctr-paes-s390",
.base.cra_priority = 402, /* ecb-paes-s390 + 1 */
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct s390_paes_ctx),
.base.cra_module = THIS_MODULE,
.base.cra_list = LIST_HEAD_INIT(ctr_paes_alg.base.cra_list),
.init = ctr_paes_init,
.exit = ctr_paes_exit,
.min_keysize = PAES_MIN_KEYSIZE,
.max_keysize = PAES_MAX_KEYSIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = ctr_paes_set_key,
.encrypt = ctr_paes_crypt,
.decrypt = ctr_paes_crypt,
.chunksize = AES_BLOCK_SIZE,
};
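/*
* Only unregister algorithms that were actually registered; an
* algorithm that was never registered still has an empty cra_list.
*/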
static inline void __crypto_unregister_skcipher(struct skcipher_alg *alg)
{
if (!list_empty(&alg->base.cra_list))
crypto_unregister_skcipher(alg);
}
static void paes_s390_fini(void)
{
__crypto_unregister_skcipher(&ctr_paes_alg);
__crypto_unregister_skcipher(&xts_paes_alg);
__crypto_unregister_skcipher(&cbc_paes_alg);
__crypto_unregister_skcipher(&ecb_paes_alg);
if (ctrblk)
free_page((unsigned long) ctrblk);
}
static int __init paes_s390_init(void)
{
int ret;
/* Query available functions for KM, KMC and KMCTR */
cpacf_query(CPACF_KM, &km_functions);
cpacf_query(CPACF_KMC, &kmc_functions);
cpacf_query(CPACF_KMCTR, &kmctr_functions);
if (cpacf_test_func(&km_functions, CPACF_KM_PAES_128) ||
cpacf_test_func(&km_functions, CPACF_KM_PAES_192) ||
cpacf_test_func(&km_functions, CPACF_KM_PAES_256)) {
ret = crypto_register_skcipher(&ecb_paes_alg);
if (ret)
goto out_err;
}
if (cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_128) ||
cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_192) ||
cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_256)) {
ret = crypto_register_skcipher(&cbc_paes_alg);
if (ret)
goto out_err;
}
if (cpacf_test_func(&km_functions, CPACF_KM_PXTS_128) ||
cpacf_test_func(&km_functions, CPACF_KM_PXTS_256)) {
ret = crypto_register_skcipher(&xts_paes_alg);
if (ret)
goto out_err;
}
if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_128) ||
cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_192) ||
cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_256)) {
ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
if (!ctrblk) {
ret = -ENOMEM;
goto out_err;
}
ret = crypto_register_skcipher(&ctr_paes_alg);
if (ret)
goto out_err;
}
return 0;
out_err:
paes_s390_fini();
return ret;
}
module_init(paes_s390_init);
module_exit(paes_s390_fini);
MODULE_ALIAS_CRYPTO("paes");
MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm with protected keys");
MODULE_LICENSE("GPL");
/* end of file: arch/s390/crypto/paes_s390.c (linux-master) */
// SPDX-License-Identifier: GPL-2.0
/*
* Crypto-API module for CRC-32 algorithms implemented with the
* z/Architecture Vector Extension Facility.
*
* Copyright IBM Corp. 2015
* Author(s): Hendrik Brueckner <[email protected]>
*/
#define KMSG_COMPONENT "crc32-vx"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/crc32.h>
#include <crypto/internal/hash.h>
#include <asm/fpu/api.h>
#define CRC32_BLOCK_SIZE 1
#define CRC32_DIGEST_SIZE 4
#define VX_MIN_LEN 64
#define VX_ALIGNMENT 16L
#define VX_ALIGN_MASK (VX_ALIGNMENT - 1)
struct crc_ctx {
u32 key;
};
struct crc_desc_ctx {
u32 crc;
};
/* Prototypes for functions in assembly files */
u32 crc32_le_vgfm_16(u32 crc, unsigned char const *buf, size_t size);
u32 crc32_be_vgfm_16(u32 crc, unsigned char const *buf, size_t size);
u32 crc32c_le_vgfm_16(u32 crc, unsigned char const *buf, size_t size);
/*
* DEFINE_CRC32_VX() - Define a CRC-32 function using the vector extension
*
* Creates a function to perform a particular CRC-32 computation. Depending
* on the message buffer, the hardware-accelerated or software implementation
* is used. Note that the message buffer is aligned to improve fetch
* operations of VECTOR LOAD MULTIPLE instructions.
*
*/
#define DEFINE_CRC32_VX(___fname, ___crc32_vx, ___crc32_sw) \
static u32 __pure ___fname(u32 crc, \
unsigned char const *data, size_t datalen) \
{ \
struct kernel_fpu vxstate; \
unsigned long prealign, aligned, remaining; \
\
if (datalen < VX_MIN_LEN + VX_ALIGN_MASK) \
return ___crc32_sw(crc, data, datalen); \
\
if ((unsigned long)data & VX_ALIGN_MASK) { \
prealign = VX_ALIGNMENT - \
((unsigned long)data & VX_ALIGN_MASK); \
datalen -= prealign; \
crc = ___crc32_sw(crc, data, prealign); \
data = (void *)((unsigned long)data + prealign); \
} \
\
aligned = datalen & ~VX_ALIGN_MASK; \
remaining = datalen & VX_ALIGN_MASK; \
\
kernel_fpu_begin(&vxstate, KERNEL_VXR_LOW); \
crc = ___crc32_vx(crc, data, aligned); \
kernel_fpu_end(&vxstate, KERNEL_VXR_LOW); \
\
if (remaining) \
crc = ___crc32_sw(crc, data + aligned, remaining); \
\
return crc; \
}
DEFINE_CRC32_VX(crc32_le_vx, crc32_le_vgfm_16, crc32_le)
DEFINE_CRC32_VX(crc32_be_vx, crc32_be_vgfm_16, crc32_be)
DEFINE_CRC32_VX(crc32c_le_vx, crc32c_le_vgfm_16, __crc32c_le)
static int crc32_vx_cra_init_zero(struct crypto_tfm *tfm)
{
struct crc_ctx *mctx = crypto_tfm_ctx(tfm);
mctx->key = 0;
return 0;
}
static int crc32_vx_cra_init_invert(struct crypto_tfm *tfm)
{
struct crc_ctx *mctx = crypto_tfm_ctx(tfm);
mctx->key = ~0;
return 0;
}
static int crc32_vx_init(struct shash_desc *desc)
{
struct crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
struct crc_desc_ctx *ctx = shash_desc_ctx(desc);
ctx->crc = mctx->key;
return 0;
}
static int crc32_vx_setkey(struct crypto_shash *tfm, const u8 *newkey,
unsigned int newkeylen)
{
struct crc_ctx *mctx = crypto_shash_ctx(tfm);
if (newkeylen != sizeof(mctx->key))
return -EINVAL;
mctx->key = le32_to_cpu(*(__le32 *)newkey);
return 0;
}
static int crc32be_vx_setkey(struct crypto_shash *tfm, const u8 *newkey,
unsigned int newkeylen)
{
struct crc_ctx *mctx = crypto_shash_ctx(tfm);
if (newkeylen != sizeof(mctx->key))
return -EINVAL;
mctx->key = be32_to_cpu(*(__be32 *)newkey);
return 0;
}
static int crc32le_vx_final(struct shash_desc *desc, u8 *out)
{
struct crc_desc_ctx *ctx = shash_desc_ctx(desc);
*(__le32 *)out = cpu_to_le32p(&ctx->crc);
return 0;
}
static int crc32be_vx_final(struct shash_desc *desc, u8 *out)
{
struct crc_desc_ctx *ctx = shash_desc_ctx(desc);
*(__be32 *)out = cpu_to_be32p(&ctx->crc);
return 0;
}
static int crc32c_vx_final(struct shash_desc *desc, u8 *out)
{
struct crc_desc_ctx *ctx = shash_desc_ctx(desc);
/*
* Perform a final XOR with 0xFFFFFFFF to be in sync
* with the generic crc32c shash implementation.
*/
*(__le32 *)out = ~cpu_to_le32p(&ctx->crc);
return 0;
}
static int __crc32le_vx_finup(u32 *crc, const u8 *data, unsigned int len,
u8 *out)
{
*(__le32 *)out = cpu_to_le32(crc32_le_vx(*crc, data, len));
return 0;
}
static int __crc32be_vx_finup(u32 *crc, const u8 *data, unsigned int len,
u8 *out)
{
*(__be32 *)out = cpu_to_be32(crc32_be_vx(*crc, data, len));
return 0;
}
static int __crc32c_vx_finup(u32 *crc, const u8 *data, unsigned int len,
u8 *out)
{
/*
* Perform a final XOR with 0xFFFFFFFF to be in sync
* with the generic crc32c shash implementation.
*/
*(__le32 *)out = ~cpu_to_le32(crc32c_le_vx(*crc, data, len));
return 0;
}
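/*
* The following macros generate the finup, digest and update shash
* callbacks for each of the three CRC-32 variants from the common
* helper functions above.
*/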
#define CRC32_VX_FINUP(alg, func) \
static int alg ## _vx_finup(struct shash_desc *desc, const u8 *data, \
unsigned int datalen, u8 *out) \
{ \
return __ ## alg ## _vx_finup(shash_desc_ctx(desc), \
data, datalen, out); \
}
CRC32_VX_FINUP(crc32le, crc32_le_vx)
CRC32_VX_FINUP(crc32be, crc32_be_vx)
CRC32_VX_FINUP(crc32c, crc32c_le_vx)
#define CRC32_VX_DIGEST(alg, func) \
static int alg ## _vx_digest(struct shash_desc *desc, const u8 *data, \
unsigned int len, u8 *out) \
{ \
return __ ## alg ## _vx_finup(crypto_shash_ctx(desc->tfm), \
data, len, out); \
}
CRC32_VX_DIGEST(crc32le, crc32_le_vx)
CRC32_VX_DIGEST(crc32be, crc32_be_vx)
CRC32_VX_DIGEST(crc32c, crc32c_le_vx)
#define CRC32_VX_UPDATE(alg, func) \
static int alg ## _vx_update(struct shash_desc *desc, const u8 *data, \
unsigned int datalen) \
{ \
struct crc_desc_ctx *ctx = shash_desc_ctx(desc); \
ctx->crc = func(ctx->crc, data, datalen); \
return 0; \
}
CRC32_VX_UPDATE(crc32le, crc32_le_vx)
CRC32_VX_UPDATE(crc32be, crc32_be_vx)
CRC32_VX_UPDATE(crc32c, crc32c_le_vx)
static struct shash_alg crc32_vx_algs[] = {
/* CRC-32 LE */
{
.init = crc32_vx_init,
.setkey = crc32_vx_setkey,
.update = crc32le_vx_update,
.final = crc32le_vx_final,
.finup = crc32le_vx_finup,
.digest = crc32le_vx_digest,
.descsize = sizeof(struct crc_desc_ctx),
.digestsize = CRC32_DIGEST_SIZE,
.base = {
.cra_name = "crc32",
.cra_driver_name = "crc32-vx",
.cra_priority = 200,
.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CRC32_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crc_ctx),
.cra_module = THIS_MODULE,
.cra_init = crc32_vx_cra_init_zero,
},
},
/* CRC-32 BE */
{
.init = crc32_vx_init,
.setkey = crc32be_vx_setkey,
.update = crc32be_vx_update,
.final = crc32be_vx_final,
.finup = crc32be_vx_finup,
.digest = crc32be_vx_digest,
.descsize = sizeof(struct crc_desc_ctx),
.digestsize = CRC32_DIGEST_SIZE,
.base = {
.cra_name = "crc32be",
.cra_driver_name = "crc32be-vx",
.cra_priority = 200,
.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CRC32_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crc_ctx),
.cra_module = THIS_MODULE,
.cra_init = crc32_vx_cra_init_zero,
},
},
/* CRC-32C LE */
{
.init = crc32_vx_init,
.setkey = crc32_vx_setkey,
.update = crc32c_vx_update,
.final = crc32c_vx_final,
.finup = crc32c_vx_finup,
.digest = crc32c_vx_digest,
.descsize = sizeof(struct crc_desc_ctx),
.digestsize = CRC32_DIGEST_SIZE,
.base = {
.cra_name = "crc32c",
.cra_driver_name = "crc32c-vx",
.cra_priority = 200,
.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CRC32_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crc_ctx),
.cra_module = THIS_MODULE,
.cra_init = crc32_vx_cra_init_invert,
},
},
};
static int __init crc_vx_mod_init(void)
{
return crypto_register_shashes(crc32_vx_algs,
ARRAY_SIZE(crc32_vx_algs));
}
static void __exit crc_vx_mod_exit(void)
{
crypto_unregister_shashes(crc32_vx_algs, ARRAY_SIZE(crc32_vx_algs));
}
module_cpu_feature_match(S390_CPU_FEATURE_VXRS, crc_vx_mod_init);
module_exit(crc_vx_mod_exit);
MODULE_AUTHOR("Hendrik Brueckner <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("crc32");
MODULE_ALIAS_CRYPTO("crc32-vx");
MODULE_ALIAS_CRYPTO("crc32c");
MODULE_ALIAS_CRYPTO("crc32c-vx");
/* end of file: arch/s390/crypto/crc32-vx.c (linux-master) */
// SPDX-License-Identifier: GPL-2.0+
/*
* Cryptographic API.
*
* s390 implementation of the SHA256 and SHA224 Secure Hash Algorithm.
*
* s390 Version:
* Copyright IBM Corp. 2005, 2011
* Author(s): Jan Glauber ([email protected])
*/
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <crypto/sha2.h>
#include <asm/cpacf.h>
#include "sha.h"
static int s390_sha256_init(struct shash_desc *desc)
{
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
sctx->state[0] = SHA256_H0;
sctx->state[1] = SHA256_H1;
sctx->state[2] = SHA256_H2;
sctx->state[3] = SHA256_H3;
sctx->state[4] = SHA256_H4;
sctx->state[5] = SHA256_H5;
sctx->state[6] = SHA256_H6;
sctx->state[7] = SHA256_H7;
sctx->count = 0;
sctx->func = CPACF_KIMD_SHA_256;
return 0;
}
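/*
* Export/import the hash state in the generic sha256_state layout,
* so a partially computed hash can be handed over to or taken over
* from another sha256 implementation.
*/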
static int sha256_export(struct shash_desc *desc, void *out)
{
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
struct sha256_state *octx = out;
octx->count = sctx->count;
memcpy(octx->state, sctx->state, sizeof(octx->state));
memcpy(octx->buf, sctx->buf, sizeof(octx->buf));
return 0;
}
static int sha256_import(struct shash_desc *desc, const void *in)
{
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
const struct sha256_state *ictx = in;
sctx->count = ictx->count;
memcpy(sctx->state, ictx->state, sizeof(ictx->state));
memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
sctx->func = CPACF_KIMD_SHA_256;
return 0;
}
static struct shash_alg sha256_alg = {
.digestsize = SHA256_DIGEST_SIZE,
.init = s390_sha256_init,
.update = s390_sha_update,
.final = s390_sha_final,
.export = sha256_export,
.import = sha256_import,
.descsize = sizeof(struct s390_sha_ctx),
.statesize = sizeof(struct sha256_state),
.base = {
.cra_name = "sha256",
.cra_driver_name= "sha256-s390",
.cra_priority = 300,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
};
static int s390_sha224_init(struct shash_desc *desc)
{
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
sctx->state[0] = SHA224_H0;
sctx->state[1] = SHA224_H1;
sctx->state[2] = SHA224_H2;
sctx->state[3] = SHA224_H3;
sctx->state[4] = SHA224_H4;
sctx->state[5] = SHA224_H5;
sctx->state[6] = SHA224_H6;
sctx->state[7] = SHA224_H7;
sctx->count = 0;
sctx->func = CPACF_KIMD_SHA_256;
return 0;
}
static struct shash_alg sha224_alg = {
.digestsize = SHA224_DIGEST_SIZE,
.init = s390_sha224_init,
.update = s390_sha_update,
.final = s390_sha_final,
.export = sha256_export,
.import = sha256_import,
.descsize = sizeof(struct s390_sha_ctx),
.statesize = sizeof(struct sha256_state),
.base = {
.cra_name = "sha224",
.cra_driver_name= "sha224-s390",
.cra_priority = 300,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
};
static int __init sha256_s390_init(void)
{
int ret;
if (!cpacf_query_func(CPACF_KIMD, CPACF_KIMD_SHA_256))
return -ENODEV;
ret = crypto_register_shash(&sha256_alg);
if (ret < 0)
goto out;
ret = crypto_register_shash(&sha224_alg);
if (ret < 0)
crypto_unregister_shash(&sha256_alg);
out:
return ret;
}
static void __exit sha256_s390_fini(void)
{
crypto_unregister_shash(&sha224_alg);
crypto_unregister_shash(&sha256_alg);
}
module_cpu_feature_match(S390_CPU_FEATURE_MSA, sha256_s390_init);
module_exit(sha256_s390_fini);
MODULE_ALIAS_CRYPTO("sha256");
MODULE_ALIAS_CRYPTO("sha224");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA256 and SHA224 Secure Hash Algorithm");
/* end of file: arch/s390/crypto/sha256_s390.c (linux-master) */
// SPDX-License-Identifier: GPL-2.0
/*
* Cryptographic API.
*
* s390 implementation of the GHASH algorithm for GCM (Galois/Counter Mode).
*
* Copyright IBM Corp. 2011
* Author(s): Gerald Schaefer <[email protected]>
*/
#include <crypto/internal/hash.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <asm/cpacf.h>
#define GHASH_BLOCK_SIZE 16
#define GHASH_DIGEST_SIZE 16
struct ghash_ctx {
u8 key[GHASH_BLOCK_SIZE];
};
struct ghash_desc_ctx {
u8 icv[GHASH_BLOCK_SIZE];
u8 key[GHASH_BLOCK_SIZE];
u8 buffer[GHASH_BLOCK_SIZE];
u32 bytes;
};
static int ghash_init(struct shash_desc *desc)
{
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
memset(dctx, 0, sizeof(*dctx));
memcpy(dctx->key, ctx->key, GHASH_BLOCK_SIZE);
return 0;
}
static int ghash_setkey(struct crypto_shash *tfm,
const u8 *key, unsigned int keylen)
{
struct ghash_ctx *ctx = crypto_shash_ctx(tfm);
if (keylen != GHASH_BLOCK_SIZE)
return -EINVAL;
memcpy(ctx->key, key, GHASH_BLOCK_SIZE);
return 0;
}
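/*
* Buffer partial blocks in the descriptor context and feed only
* complete GHASH blocks to the CPACF KIMD instruction.
*/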
static int ghash_update(struct shash_desc *desc,
const u8 *src, unsigned int srclen)
{
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
unsigned int n;
u8 *buf = dctx->buffer;
if (dctx->bytes) {
u8 *pos = buf + (GHASH_BLOCK_SIZE - dctx->bytes);
n = min(srclen, dctx->bytes);
dctx->bytes -= n;
srclen -= n;
memcpy(pos, src, n);
src += n;
if (!dctx->bytes) {
cpacf_kimd(CPACF_KIMD_GHASH, dctx, buf,
GHASH_BLOCK_SIZE);
}
}
n = srclen & ~(GHASH_BLOCK_SIZE - 1);
if (n) {
cpacf_kimd(CPACF_KIMD_GHASH, dctx, src, n);
src += n;
srclen -= n;
}
if (srclen) {
dctx->bytes = GHASH_BLOCK_SIZE - srclen;
memcpy(buf, src, srclen);
}
return 0;
}
static int ghash_flush(struct ghash_desc_ctx *dctx)
{
u8 *buf = dctx->buffer;
if (dctx->bytes) {
u8 *pos = buf + (GHASH_BLOCK_SIZE - dctx->bytes);
memset(pos, 0, dctx->bytes);
cpacf_kimd(CPACF_KIMD_GHASH, dctx, buf, GHASH_BLOCK_SIZE);
dctx->bytes = 0;
}
return 0;
}
static int ghash_final(struct shash_desc *desc, u8 *dst)
{
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
int ret;
ret = ghash_flush(dctx);
if (!ret)
memcpy(dst, dctx->icv, GHASH_BLOCK_SIZE);
return ret;
}
static struct shash_alg ghash_alg = {
.digestsize = GHASH_DIGEST_SIZE,
.init = ghash_init,
.update = ghash_update,
.final = ghash_final,
.setkey = ghash_setkey,
.descsize = sizeof(struct ghash_desc_ctx),
.base = {
.cra_name = "ghash",
.cra_driver_name = "ghash-s390",
.cra_priority = 300,
.cra_blocksize = GHASH_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct ghash_ctx),
.cra_module = THIS_MODULE,
},
};
static int __init ghash_mod_init(void)
{
if (!cpacf_query_func(CPACF_KIMD, CPACF_KIMD_GHASH))
return -ENODEV;
return crypto_register_shash(&ghash_alg);
}
static void __exit ghash_mod_exit(void)
{
crypto_unregister_shash(&ghash_alg);
}
module_cpu_feature_match(S390_CPU_FEATURE_MSA, ghash_mod_init);
module_exit(ghash_mod_exit);
MODULE_ALIAS_CRYPTO("ghash");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("GHASH hash function, s390 implementation");
/* end of file: arch/s390/crypto/ghash_s390.c (linux-master) */
// SPDX-License-Identifier: GPL-2.0+
/*
* Cryptographic API.
*
* s390 implementation of the AES Cipher Algorithm.
*
* s390 Version:
* Copyright IBM Corp. 2005, 2017
* Author(s): Jan Glauber ([email protected])
* Sebastian Siewior <[email protected]> SW-Fallback
* Patrick Steuer <[email protected]>
* Harald Freudenberger <[email protected]>
*
* Derived from "crypto/aes_generic.c"
*/
#define KMSG_COMPONENT "aes_s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/ghash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fips.h>
#include <linux/string.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>
static u8 *ctrblk;
static DEFINE_MUTEX(ctrblk_lock);
static cpacf_mask_t km_functions, kmc_functions, kmctr_functions,
kma_functions;
struct s390_aes_ctx {
u8 key[AES_MAX_KEY_SIZE];
int key_len;
unsigned long fc;
union {
struct crypto_skcipher *skcipher;
struct crypto_cipher *cip;
} fallback;
};
struct s390_xts_ctx {
u8 key[32];
u8 pcc_key[32];
int key_len;
unsigned long fc;
struct crypto_skcipher *fallback;
};
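/*
* State for walking the scatterlists of a GCM request. Data that is
* not contiguous or not block aligned is gathered into buf so that
* the KMA instruction always sees usable chunks.
*/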
struct gcm_sg_walk {
struct scatter_walk walk;
unsigned int walk_bytes;
u8 *walk_ptr;
unsigned int walk_bytes_remain;
u8 buf[AES_BLOCK_SIZE];
unsigned int buf_bytes;
u8 *ptr;
unsigned int nbytes;
};
static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
CRYPTO_TFM_REQ_MASK);
return crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
}
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
unsigned long fc;
/* Pick the correct function code based on the key length */
fc = (key_len == 16) ? CPACF_KM_AES_128 :
(key_len == 24) ? CPACF_KM_AES_192 :
(key_len == 32) ? CPACF_KM_AES_256 : 0;
/* Check if the function code is available */
sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
if (!sctx->fc)
return setkey_fallback_cip(tfm, in_key, key_len);
sctx->key_len = key_len;
memcpy(sctx->key, in_key, key_len);
return 0;
}
static void crypto_aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
if (unlikely(!sctx->fc)) {
crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
return;
}
cpacf_km(sctx->fc, &sctx->key, out, in, AES_BLOCK_SIZE);
}
static void crypto_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
if (unlikely(!sctx->fc)) {
crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
return;
}
cpacf_km(sctx->fc | CPACF_DECRYPT,
&sctx->key, out, in, AES_BLOCK_SIZE);
}
static int fallback_init_cip(struct crypto_tfm *tfm)
{
const char *name = tfm->__crt_alg->cra_name;
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
sctx->fallback.cip = crypto_alloc_cipher(name, 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(sctx->fallback.cip)) {
pr_err("Allocating AES fallback algorithm %s failed\n",
name);
return PTR_ERR(sctx->fallback.cip);
}
return 0;
}
static void fallback_exit_cip(struct crypto_tfm *tfm)
{
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
crypto_free_cipher(sctx->fallback.cip);
sctx->fallback.cip = NULL;
}
static struct crypto_alg aes_alg = {
.cra_name = "aes",
.cra_driver_name = "aes-s390",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_aes_ctx),
.cra_module = THIS_MODULE,
.cra_init = fallback_init_cip,
.cra_exit = fallback_exit_cip,
.cra_u = {
.cipher = {
.cia_min_keysize = AES_MIN_KEY_SIZE,
.cia_max_keysize = AES_MAX_KEY_SIZE,
.cia_setkey = aes_set_key,
.cia_encrypt = crypto_aes_encrypt,
.cia_decrypt = crypto_aes_decrypt,
}
}
};
static int setkey_fallback_skcipher(struct crypto_skcipher *tfm, const u8 *key,
unsigned int len)
{
struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
crypto_skcipher_clear_flags(sctx->fallback.skcipher,
CRYPTO_TFM_REQ_MASK);
crypto_skcipher_set_flags(sctx->fallback.skcipher,
crypto_skcipher_get_flags(tfm) &
CRYPTO_TFM_REQ_MASK);
return crypto_skcipher_setkey(sctx->fallback.skcipher, key, len);
}
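/*
* Forward the request to the software fallback tfm. The subrequest
* lives in the request context area that was sized accordingly in
* fallback_init_skcipher().
*/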
static int fallback_skcipher_crypt(struct s390_aes_ctx *sctx,
struct skcipher_request *req,
unsigned long modifier)
{
struct skcipher_request *subreq = skcipher_request_ctx(req);
*subreq = *req;
skcipher_request_set_tfm(subreq, sctx->fallback.skcipher);
return (modifier & CPACF_DECRYPT) ?
crypto_skcipher_decrypt(subreq) :
crypto_skcipher_encrypt(subreq);
}
static int ecb_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
unsigned long fc;
/* Pick the correct function code based on the key length */
fc = (key_len == 16) ? CPACF_KM_AES_128 :
(key_len == 24) ? CPACF_KM_AES_192 :
(key_len == 32) ? CPACF_KM_AES_256 : 0;
/* Check if the function code is available */
sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
if (!sctx->fc)
return setkey_fallback_skcipher(tfm, in_key, key_len);
sctx->key_len = key_len;
memcpy(sctx->key, in_key, key_len);
return 0;
}
static int ecb_aes_crypt(struct skcipher_request *req, unsigned long modifier)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk walk;
unsigned int nbytes, n;
int ret;
if (unlikely(!sctx->fc))
return fallback_skcipher_crypt(sctx, req, modifier);
ret = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes) != 0) {
/* only use complete blocks */
n = nbytes & ~(AES_BLOCK_SIZE - 1);
cpacf_km(sctx->fc | modifier, sctx->key,
walk.dst.virt.addr, walk.src.virt.addr, n);
ret = skcipher_walk_done(&walk, nbytes - n);
}
return ret;
}
static int ecb_aes_encrypt(struct skcipher_request *req)
{
return ecb_aes_crypt(req, 0);
}
static int ecb_aes_decrypt(struct skcipher_request *req)
{
return ecb_aes_crypt(req, CPACF_DECRYPT);
}
static int fallback_init_skcipher(struct crypto_skcipher *tfm)
{
const char *name = crypto_tfm_alg_name(&tfm->base);
struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
sctx->fallback.skcipher = crypto_alloc_skcipher(name, 0,
CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);
if (IS_ERR(sctx->fallback.skcipher)) {
pr_err("Allocating AES fallback algorithm %s failed\n",
name);
return PTR_ERR(sctx->fallback.skcipher);
}
crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
crypto_skcipher_reqsize(sctx->fallback.skcipher));
return 0;
}
static void fallback_exit_skcipher(struct crypto_skcipher *tfm)
{
struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
crypto_free_skcipher(sctx->fallback.skcipher);
}
static struct skcipher_alg ecb_aes_alg = {
.base.cra_name = "ecb(aes)",
.base.cra_driver_name = "ecb-aes-s390",
.base.cra_priority = 401, /* combo: aes + ecb + 1 */
.base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct s390_aes_ctx),
.base.cra_module = THIS_MODULE,
.init = fallback_init_skcipher,
.exit = fallback_exit_skcipher,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = ecb_aes_set_key,
.encrypt = ecb_aes_encrypt,
.decrypt = ecb_aes_decrypt,
};
static int cbc_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
unsigned long fc;
/* Pick the correct function code based on the key length */
fc = (key_len == 16) ? CPACF_KMC_AES_128 :
(key_len == 24) ? CPACF_KMC_AES_192 :
(key_len == 32) ? CPACF_KMC_AES_256 : 0;
/* Check if the function code is available */
sctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;
if (!sctx->fc)
return setkey_fallback_skcipher(tfm, in_key, key_len);
sctx->key_len = key_len;
memcpy(sctx->key, in_key, key_len);
return 0;
}
static int cbc_aes_crypt(struct skcipher_request *req, unsigned long modifier)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk walk;
unsigned int nbytes, n;
int ret;
struct {
u8 iv[AES_BLOCK_SIZE];
u8 key[AES_MAX_KEY_SIZE];
} param;
if (unlikely(!sctx->fc))
return fallback_skcipher_crypt(sctx, req, modifier);
ret = skcipher_walk_virt(&walk, req, false);
if (ret)
return ret;
memcpy(param.iv, walk.iv, AES_BLOCK_SIZE);
memcpy(param.key, sctx->key, sctx->key_len);
while ((nbytes = walk.nbytes) != 0) {
/* only use complete blocks */
n = nbytes & ~(AES_BLOCK_SIZE - 1);
cpacf_kmc(sctx->fc | modifier, &param,
walk.dst.virt.addr, walk.src.virt.addr, n);
memcpy(walk.iv, param.iv, AES_BLOCK_SIZE);
ret = skcipher_walk_done(&walk, nbytes - n);
}
memzero_explicit(&param, sizeof(param));
return ret;
}
static int cbc_aes_encrypt(struct skcipher_request *req)
{
return cbc_aes_crypt(req, 0);
}
static int cbc_aes_decrypt(struct skcipher_request *req)
{
return cbc_aes_crypt(req, CPACF_DECRYPT);
}
static struct skcipher_alg cbc_aes_alg = {
.base.cra_name = "cbc(aes)",
.base.cra_driver_name = "cbc-aes-s390",
.base.cra_priority = 402, /* ecb-aes-s390 + 1 */
.base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct s390_aes_ctx),
.base.cra_module = THIS_MODULE,
.init = fallback_init_skcipher,
.exit = fallback_exit_skcipher,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = cbc_aes_set_key,
.encrypt = cbc_aes_encrypt,
.decrypt = cbc_aes_decrypt,
};
static int xts_fallback_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int len)
{
struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
crypto_skcipher_clear_flags(xts_ctx->fallback, CRYPTO_TFM_REQ_MASK);
crypto_skcipher_set_flags(xts_ctx->fallback,
crypto_skcipher_get_flags(tfm) &
CRYPTO_TFM_REQ_MASK);
return crypto_skcipher_setkey(xts_ctx->fallback, key, len);
}
static int xts_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
unsigned long fc;
int err;
err = xts_fallback_setkey(tfm, in_key, key_len);
if (err)
return err;
/* Pick the correct function code based on the key length */
fc = (key_len == 32) ? CPACF_KM_XTS_128 :
(key_len == 64) ? CPACF_KM_XTS_256 : 0;
/* Check if the function code is available */
xts_ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
if (!xts_ctx->fc)
return 0;
/* Split the XTS key into the two subkeys */
key_len = key_len / 2;
xts_ctx->key_len = key_len;
memcpy(xts_ctx->key, in_key, key_len);
memcpy(xts_ctx->pcc_key, in_key + key_len, key_len);
return 0;
}
static int xts_aes_crypt(struct skcipher_request *req, unsigned long modifier)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk walk;
unsigned int offset, nbytes, n;
int ret;
struct {
u8 key[32];
u8 tweak[16];
u8 block[16];
u8 bit[16];
u8 xts[16];
} pcc_param;
struct {
u8 key[32];
u8 init[16];
} xts_param;
if (req->cryptlen < AES_BLOCK_SIZE)
return -EINVAL;
if (unlikely(!xts_ctx->fc || (req->cryptlen % AES_BLOCK_SIZE) != 0)) {
struct skcipher_request *subreq = skcipher_request_ctx(req);
*subreq = *req;
skcipher_request_set_tfm(subreq, xts_ctx->fallback);
return (modifier & CPACF_DECRYPT) ?
crypto_skcipher_decrypt(subreq) :
crypto_skcipher_encrypt(subreq);
}
ret = skcipher_walk_virt(&walk, req, false);
if (ret)
return ret;
offset = xts_ctx->key_len & 0x10;
memset(pcc_param.block, 0, sizeof(pcc_param.block));
memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
memcpy(pcc_param.tweak, walk.iv, sizeof(pcc_param.tweak));
memcpy(pcc_param.key + offset, xts_ctx->pcc_key, xts_ctx->key_len);
cpacf_pcc(xts_ctx->fc, pcc_param.key + offset);
memcpy(xts_param.key + offset, xts_ctx->key, xts_ctx->key_len);
memcpy(xts_param.init, pcc_param.xts, 16);
while ((nbytes = walk.nbytes) != 0) {
/* only use complete blocks */
n = nbytes & ~(AES_BLOCK_SIZE - 1);
cpacf_km(xts_ctx->fc | modifier, xts_param.key + offset,
walk.dst.virt.addr, walk.src.virt.addr, n);
ret = skcipher_walk_done(&walk, nbytes - n);
}
memzero_explicit(&pcc_param, sizeof(pcc_param));
memzero_explicit(&xts_param, sizeof(xts_param));
return ret;
}
static int xts_aes_encrypt(struct skcipher_request *req)
{
return xts_aes_crypt(req, 0);
}
static int xts_aes_decrypt(struct skcipher_request *req)
{
return xts_aes_crypt(req, CPACF_DECRYPT);
}
static int xts_fallback_init(struct crypto_skcipher *tfm)
{
const char *name = crypto_tfm_alg_name(&tfm->base);
struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
xts_ctx->fallback = crypto_alloc_skcipher(name, 0,
CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);
if (IS_ERR(xts_ctx->fallback)) {
pr_err("Allocating XTS fallback algorithm %s failed\n",
name);
return PTR_ERR(xts_ctx->fallback);
}
crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
crypto_skcipher_reqsize(xts_ctx->fallback));
return 0;
}
static void xts_fallback_exit(struct crypto_skcipher *tfm)
{
struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
crypto_free_skcipher(xts_ctx->fallback);
}
static struct skcipher_alg xts_aes_alg = {
.base.cra_name = "xts(aes)",
.base.cra_driver_name = "xts-aes-s390",
.base.cra_priority = 402, /* ecb-aes-s390 + 1 */
.base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct s390_xts_ctx),
.base.cra_module = THIS_MODULE,
.init = xts_fallback_init,
.exit = xts_fallback_exit,
.min_keysize = 2 * AES_MIN_KEY_SIZE,
.max_keysize = 2 * AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = xts_aes_set_key,
.encrypt = xts_aes_encrypt,
.decrypt = xts_aes_decrypt,
};
static int ctr_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
unsigned long fc;
/* Pick the correct function code based on the key length */
fc = (key_len == 16) ? CPACF_KMCTR_AES_128 :
(key_len == 24) ? CPACF_KMCTR_AES_192 :
(key_len == 32) ? CPACF_KMCTR_AES_256 : 0;
/* Check if the function code is available */
sctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;
if (!sctx->fc)
return setkey_fallback_skcipher(tfm, in_key, key_len);
sctx->key_len = key_len;
memcpy(sctx->key, in_key, key_len);
return 0;
}
static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
{
unsigned int i, n;
/* only use complete blocks, max. PAGE_SIZE */
memcpy(ctrptr, iv, AES_BLOCK_SIZE);
n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) {
memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE);
crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE);
ctrptr += AES_BLOCK_SIZE;
}
return n;
}
static int ctr_aes_crypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
u8 buf[AES_BLOCK_SIZE], *ctrptr;
struct skcipher_walk walk;
unsigned int n, nbytes;
int ret, locked;
if (unlikely(!sctx->fc))
return fallback_skcipher_crypt(sctx, req, 0);
locked = mutex_trylock(&ctrblk_lock);
ret = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
n = AES_BLOCK_SIZE;
if (nbytes >= 2*AES_BLOCK_SIZE && locked)
n = __ctrblk_init(ctrblk, walk.iv, nbytes);
ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk.iv;
cpacf_kmctr(sctx->fc, sctx->key, walk.dst.virt.addr,
walk.src.virt.addr, n, ctrptr);
if (ctrptr == ctrblk)
memcpy(walk.iv, ctrptr + n - AES_BLOCK_SIZE,
AES_BLOCK_SIZE);
crypto_inc(walk.iv, AES_BLOCK_SIZE);
ret = skcipher_walk_done(&walk, nbytes - n);
}
if (locked)
mutex_unlock(&ctrblk_lock);
/*
* final block may be < AES_BLOCK_SIZE, copy only nbytes
*/
if (nbytes) {
cpacf_kmctr(sctx->fc, sctx->key, buf, walk.src.virt.addr,
AES_BLOCK_SIZE, walk.iv);
memcpy(walk.dst.virt.addr, buf, nbytes);
crypto_inc(walk.iv, AES_BLOCK_SIZE);
ret = skcipher_walk_done(&walk, 0);
}
return ret;
}
static struct skcipher_alg ctr_aes_alg = {
.base.cra_name = "ctr(aes)",
.base.cra_driver_name = "ctr-aes-s390",
.base.cra_priority = 402, /* ecb-aes-s390 + 1 */
.base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct s390_aes_ctx),
.base.cra_module = THIS_MODULE,
.init = fallback_init_skcipher,
.exit = fallback_exit_skcipher,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = ctr_aes_set_key,
.encrypt = ctr_aes_crypt,
.decrypt = ctr_aes_crypt,
.chunksize = AES_BLOCK_SIZE,
};
static int gcm_aes_setkey(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);
switch (keylen) {
case AES_KEYSIZE_128:
ctx->fc = CPACF_KMA_GCM_AES_128;
break;
case AES_KEYSIZE_192:
ctx->fc = CPACF_KMA_GCM_AES_192;
break;
case AES_KEYSIZE_256:
ctx->fc = CPACF_KMA_GCM_AES_256;
break;
default:
return -EINVAL;
}
memcpy(ctx->key, key, keylen);
ctx->key_len = keylen;
return 0;
}
static int gcm_aes_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
switch (authsize) {
case 4:
case 8:
case 12:
case 13:
case 14:
case 15:
case 16:
break;
default:
return -EINVAL;
}
return 0;
}
static void gcm_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg,
unsigned int len)
{
memset(gw, 0, sizeof(*gw));
gw->walk_bytes_remain = len;
scatterwalk_start(&gw->walk, sg);
}
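/*
* Map the current scatterlist position, skipping ahead to the next
* entry that actually contains data; returns 0 at the end of the
* scatterlist.
*/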
static inline unsigned int _gcm_sg_clamp_and_map(struct gcm_sg_walk *gw)
{
struct scatterlist *nextsg;
gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain);
while (!gw->walk_bytes) {
nextsg = sg_next(gw->walk.sg);
if (!nextsg)
return 0;
scatterwalk_start(&gw->walk, nextsg);
gw->walk_bytes = scatterwalk_clamp(&gw->walk,
gw->walk_bytes_remain);
}
gw->walk_ptr = scatterwalk_map(&gw->walk);
return gw->walk_bytes;
}
static inline void _gcm_sg_unmap_and_advance(struct gcm_sg_walk *gw,
unsigned int nbytes)
{
gw->walk_bytes_remain -= nbytes;
scatterwalk_unmap(gw->walk_ptr);
scatterwalk_advance(&gw->walk, nbytes);
scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
gw->walk_ptr = NULL;
}
static int gcm_in_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
{
int n;
if (gw->buf_bytes && gw->buf_bytes >= minbytesneeded) {
gw->ptr = gw->buf;
gw->nbytes = gw->buf_bytes;
goto out;
}
if (gw->walk_bytes_remain == 0) {
gw->ptr = NULL;
gw->nbytes = 0;
goto out;
}
if (!_gcm_sg_clamp_and_map(gw)) {
gw->ptr = NULL;
gw->nbytes = 0;
goto out;
}
if (!gw->buf_bytes && gw->walk_bytes >= minbytesneeded) {
gw->ptr = gw->walk_ptr;
gw->nbytes = gw->walk_bytes;
goto out;
}
while (1) {
n = min(gw->walk_bytes, AES_BLOCK_SIZE - gw->buf_bytes);
memcpy(gw->buf + gw->buf_bytes, gw->walk_ptr, n);
gw->buf_bytes += n;
_gcm_sg_unmap_and_advance(gw, n);
if (gw->buf_bytes >= minbytesneeded) {
gw->ptr = gw->buf;
gw->nbytes = gw->buf_bytes;
goto out;
}
if (!_gcm_sg_clamp_and_map(gw)) {
gw->ptr = NULL;
gw->nbytes = 0;
goto out;
}
}
out:
return gw->nbytes;
}
static int gcm_out_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
{
if (gw->walk_bytes_remain == 0) {
gw->ptr = NULL;
gw->nbytes = 0;
goto out;
}
if (!_gcm_sg_clamp_and_map(gw)) {
gw->ptr = NULL;
gw->nbytes = 0;
goto out;
}
if (gw->walk_bytes >= minbytesneeded) {
gw->ptr = gw->walk_ptr;
gw->nbytes = gw->walk_bytes;
goto out;
}
scatterwalk_unmap(gw->walk_ptr);
gw->walk_ptr = NULL;
gw->ptr = gw->buf;
gw->nbytes = sizeof(gw->buf);
out:
return gw->nbytes;
}
static int gcm_in_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
{
if (gw->ptr == NULL)
return 0;
if (gw->ptr == gw->buf) {
int n = gw->buf_bytes - bytesdone;
if (n > 0) {
memmove(gw->buf, gw->buf + bytesdone, n);
gw->buf_bytes = n;
} else
gw->buf_bytes = 0;
} else
_gcm_sg_unmap_and_advance(gw, bytesdone);
return bytesdone;
}
static int gcm_out_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
{
int i, n;
if (gw->ptr == NULL)
return 0;
if (gw->ptr == gw->buf) {
for (i = 0; i < bytesdone; i += n) {
if (!_gcm_sg_clamp_and_map(gw))
return i;
n = min(gw->walk_bytes, bytesdone - i);
memcpy(gw->walk_ptr, gw->buf + i, n);
_gcm_sg_unmap_and_advance(gw, n);
}
} else
_gcm_sg_unmap_and_advance(gw, bytesdone);
return bytesdone;
}
static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);
unsigned int ivsize = crypto_aead_ivsize(tfm);
unsigned int taglen = crypto_aead_authsize(tfm);
unsigned int aadlen = req->assoclen;
unsigned int pclen = req->cryptlen;
int ret = 0;
unsigned int n, len, in_bytes, out_bytes,
min_bytes, bytes, aad_bytes, pc_bytes;
struct gcm_sg_walk gw_in, gw_out;
u8 tag[GHASH_DIGEST_SIZE];
struct {
u32 _[3]; /* reserved */
u32 cv; /* Counter Value */
u8 t[GHASH_DIGEST_SIZE];/* Tag */
u8 h[AES_BLOCK_SIZE]; /* Hash-subkey */
u64 taadl; /* Total AAD Length */
u64 tpcl; /* Total Plain-/Cipher-text Length */
u8 j0[GHASH_BLOCK_SIZE];/* initial counter value */
u8 k[AES_MAX_KEY_SIZE]; /* Key */
} param;
/*
* encrypt
* req->src: aad||plaintext
* req->dst: aad||ciphertext||tag
* decrypt
* req->src: aad||ciphertext||tag
* req->dst: aad||plaintext, return 0 or -EBADMSG
* aad, plaintext and ciphertext may be empty.
*/
if (flags & CPACF_DECRYPT)
pclen -= taglen;
len = aadlen + pclen;
memset(&param, 0, sizeof(param));
param.cv = 1;
param.taadl = aadlen * 8;
param.tpcl = pclen * 8;
memcpy(param.j0, req->iv, ivsize);
*(u32 *)(param.j0 + ivsize) = 1;
memcpy(param.k, ctx->key, ctx->key_len);
gcm_walk_start(&gw_in, req->src, len);
gcm_walk_start(&gw_out, req->dst, len);
do {
min_bytes = min_t(unsigned int,
aadlen > 0 ? aadlen : pclen, AES_BLOCK_SIZE);
in_bytes = gcm_in_walk_go(&gw_in, min_bytes);
out_bytes = gcm_out_walk_go(&gw_out, min_bytes);
bytes = min(in_bytes, out_bytes);
if (aadlen + pclen <= bytes) {
aad_bytes = aadlen;
pc_bytes = pclen;
flags |= CPACF_KMA_LAAD | CPACF_KMA_LPC;
} else {
if (aadlen <= bytes) {
aad_bytes = aadlen;
pc_bytes = (bytes - aadlen) &
~(AES_BLOCK_SIZE - 1);
flags |= CPACF_KMA_LAAD;
} else {
aad_bytes = bytes & ~(AES_BLOCK_SIZE - 1);
pc_bytes = 0;
}
}
if (aad_bytes > 0)
memcpy(gw_out.ptr, gw_in.ptr, aad_bytes);
cpacf_kma(ctx->fc | flags, &param,
gw_out.ptr + aad_bytes,
gw_in.ptr + aad_bytes, pc_bytes,
gw_in.ptr, aad_bytes);
n = aad_bytes + pc_bytes;
if (gcm_in_walk_done(&gw_in, n) != n)
return -ENOMEM;
if (gcm_out_walk_done(&gw_out, n) != n)
return -ENOMEM;
aadlen -= aad_bytes;
pclen -= pc_bytes;
} while (aadlen + pclen > 0);
if (flags & CPACF_DECRYPT) {
scatterwalk_map_and_copy(tag, req->src, len, taglen, 0);
if (crypto_memneq(tag, param.t, taglen))
ret = -EBADMSG;
} else
scatterwalk_map_and_copy(param.t, req->dst, len, taglen, 1);
	memzero_explicit(&param, sizeof(param));
return ret;
}
static int gcm_aes_encrypt(struct aead_request *req)
{
return gcm_aes_crypt(req, CPACF_ENCRYPT);
}
static int gcm_aes_decrypt(struct aead_request *req)
{
return gcm_aes_crypt(req, CPACF_DECRYPT);
}
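/*
 * The advertised ivsize is GHASH_BLOCK_SIZE - sizeof(u32) = 12 bytes.
 * For a 96-bit IV, GCM defines the initial counter block as
 * J0 = IV || 0x00000001, which is exactly what gcm_aes_crypt() builds
 * in param.j0.
 */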
static struct aead_alg gcm_aes_aead = {
.setkey = gcm_aes_setkey,
.setauthsize = gcm_aes_setauthsize,
.encrypt = gcm_aes_encrypt,
.decrypt = gcm_aes_decrypt,
.ivsize = GHASH_BLOCK_SIZE - sizeof(u32),
.maxauthsize = GHASH_DIGEST_SIZE,
.chunksize = AES_BLOCK_SIZE,
.base = {
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct s390_aes_ctx),
.cra_priority = 900,
.cra_name = "gcm(aes)",
.cra_driver_name = "gcm-aes-s390",
.cra_module = THIS_MODULE,
},
};
static struct crypto_alg *aes_s390_alg;
static struct skcipher_alg *aes_s390_skcipher_algs[4];
static int aes_s390_skciphers_num;
static struct aead_alg *aes_s390_aead_alg;
static int aes_s390_register_skcipher(struct skcipher_alg *alg)
{
int ret;
ret = crypto_register_skcipher(alg);
if (!ret)
aes_s390_skcipher_algs[aes_s390_skciphers_num++] = alg;
return ret;
}
static void aes_s390_fini(void)
{
if (aes_s390_alg)
crypto_unregister_alg(aes_s390_alg);
while (aes_s390_skciphers_num--)
crypto_unregister_skcipher(aes_s390_skcipher_algs[aes_s390_skciphers_num]);
if (ctrblk)
free_page((unsigned long) ctrblk);
if (aes_s390_aead_alg)
crypto_unregister_aead(aes_s390_aead_alg);
}
static int __init aes_s390_init(void)
{
int ret;
/* Query available functions for KM, KMC, KMCTR and KMA */
cpacf_query(CPACF_KM, &km_functions);
cpacf_query(CPACF_KMC, &kmc_functions);
cpacf_query(CPACF_KMCTR, &kmctr_functions);
cpacf_query(CPACF_KMA, &kma_functions);
if (cpacf_test_func(&km_functions, CPACF_KM_AES_128) ||
cpacf_test_func(&km_functions, CPACF_KM_AES_192) ||
cpacf_test_func(&km_functions, CPACF_KM_AES_256)) {
ret = crypto_register_alg(&aes_alg);
if (ret)
goto out_err;
aes_s390_alg = &aes_alg;
ret = aes_s390_register_skcipher(&ecb_aes_alg);
if (ret)
goto out_err;
}
if (cpacf_test_func(&kmc_functions, CPACF_KMC_AES_128) ||
cpacf_test_func(&kmc_functions, CPACF_KMC_AES_192) ||
cpacf_test_func(&kmc_functions, CPACF_KMC_AES_256)) {
ret = aes_s390_register_skcipher(&cbc_aes_alg);
if (ret)
goto out_err;
}
if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128) ||
cpacf_test_func(&km_functions, CPACF_KM_XTS_256)) {
ret = aes_s390_register_skcipher(&xts_aes_alg);
if (ret)
goto out_err;
}
if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_128) ||
cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_192) ||
cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_256)) {
ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
if (!ctrblk) {
ret = -ENOMEM;
goto out_err;
}
ret = aes_s390_register_skcipher(&ctr_aes_alg);
if (ret)
goto out_err;
}
if (cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_128) ||
cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_192) ||
cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_256)) {
ret = crypto_register_aead(&gcm_aes_aead);
if (ret)
goto out_err;
aes_s390_aead_alg = &gcm_aes_aead;
}
return 0;
out_err:
aes_s390_fini();
return ret;
}
module_cpu_feature_match(S390_CPU_FEATURE_MSA, aes_s390_init);
module_exit(aes_s390_fini);
MODULE_ALIAS_CRYPTO("aes-all");
MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);
| linux-master | arch/s390/crypto/aes_s390.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Access to PCI I/O memory from user space programs.
*
* Copyright IBM Corp. 2014
* Author(s): Alexey Ishchuk <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <asm/asm-extable.h>
#include <asm/pci_io.h>
#include <asm/pci_debug.h>
static inline void zpci_err_mmio(u8 cc, u8 status, u64 offset)
{
struct {
u64 offset;
u8 cc;
u8 status;
} data = {offset, cc, status};
zpci_err_hex(&data, sizeof(data));
}
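/*
 * Store a block of @len bytes from the user buffer @src to the PCI I/O
 * memory at @ioaddr with a single PCISTB (PCI store block) instruction.
 * The sacf to secondary address space mode lets the instruction operate
 * on the user mappings directly.
 */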
static inline int __pcistb_mio_inuser(
void __iomem *ioaddr, const void __user *src,
u64 len, u8 *status)
{
int cc = -ENXIO;
asm volatile (
" sacf 256\n"
"0: .insn rsy,0xeb00000000d4,%[len],%[ioaddr],%[src]\n"
"1: ipm %[cc]\n"
" srl %[cc],28\n"
"2: sacf 768\n"
EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
: [cc] "+d" (cc), [len] "+d" (len)
: [ioaddr] "a" (ioaddr), [src] "Q" (*((u8 __force *)src))
: "cc", "memory");
*status = len >> 24 & 0xff;
return cc;
}
static inline int __pcistg_mio_inuser(
void __iomem *ioaddr, const void __user *src,
u64 ulen, u8 *status)
{
union register_pair ioaddr_len = {.even = (u64 __force)ioaddr, .odd = ulen};
int cc = -ENXIO;
u64 val = 0;
u64 cnt = ulen;
u8 tmp;
/*
	 * copy 0 < @len <= 8 bytes from @src into the rightmost bytes of
* a register, then store it to PCI at @ioaddr while in secondary
* address space. pcistg then uses the user mappings.
*/
asm volatile (
" sacf 256\n"
"0: llgc %[tmp],0(%[src])\n"
"4: sllg %[val],%[val],8\n"
" aghi %[src],1\n"
" ogr %[val],%[tmp]\n"
" brctg %[cnt],0b\n"
"1: .insn rre,0xb9d40000,%[val],%[ioaddr_len]\n"
"2: ipm %[cc]\n"
" srl %[cc],28\n"
"3: sacf 768\n"
EX_TABLE(0b, 3b) EX_TABLE(4b, 3b) EX_TABLE(1b, 3b) EX_TABLE(2b, 3b)
:
[src] "+a" (src), [cnt] "+d" (cnt),
[val] "+d" (val), [tmp] "=d" (tmp),
[cc] "+d" (cc), [ioaddr_len] "+&d" (ioaddr_len.pair)
:: "cc", "memory");
*status = ioaddr_len.odd >> 24 & 0xff;
/* did we read everything from user memory? */
if (!cc && cnt != 0)
cc = -EFAULT;
return cc;
}
static inline int __memcpy_toio_inuser(void __iomem *dst,
const void __user *src, size_t n)
{
int size, rc = 0;
u8 status = 0;
if (!src)
return -EINVAL;
while (n > 0) {
size = zpci_get_max_write_size((u64 __force) dst,
(u64 __force) src, n,
ZPCI_MAX_WRITE_SIZE);
if (size > 8) /* main path */
rc = __pcistb_mio_inuser(dst, src, size, &status);
else
rc = __pcistg_mio_inuser(dst, src, size, &status);
if (rc)
break;
src += size;
dst += size;
n -= size;
}
if (rc)
zpci_err_mmio(rc, status, (__force u64) dst);
return rc;
}
SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
const void __user *, user_buffer, size_t, length)
{
u8 local_buf[64];
void __iomem *io_addr;
void *buf;
struct vm_area_struct *vma;
pte_t *ptep;
spinlock_t *ptl;
long ret;
if (!zpci_is_enabled())
return -ENODEV;
if (length <= 0 || PAGE_SIZE - (mmio_addr & ~PAGE_MASK) < length)
return -EINVAL;
/*
* We only support write access to MIO capable devices if we are on
* a MIO enabled system. Otherwise we would have to check for every
* address if it is a special ZPCI_ADDR and would have to do
* a pfn lookup which we don't need for MIO capable devices. Currently
* ISM devices are the only devices without MIO support and there is no
* known need for accessing these from userspace.
*/
if (static_branch_likely(&have_mio)) {
ret = __memcpy_toio_inuser((void __iomem *) mmio_addr,
user_buffer,
length);
return ret;
}
if (length > 64) {
buf = kmalloc(length, GFP_KERNEL);
if (!buf)
return -ENOMEM;
} else
buf = local_buf;
ret = -EFAULT;
if (copy_from_user(buf, user_buffer, length))
goto out_free;
mmap_read_lock(current->mm);
ret = -EINVAL;
vma = vma_lookup(current->mm, mmio_addr);
if (!vma)
goto out_unlock_mmap;
if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
goto out_unlock_mmap;
ret = -EACCES;
if (!(vma->vm_flags & VM_WRITE))
goto out_unlock_mmap;
ret = follow_pte(vma->vm_mm, mmio_addr, &ptep, &ptl);
if (ret)
goto out_unlock_mmap;
io_addr = (void __iomem *)((pte_pfn(*ptep) << PAGE_SHIFT) |
(mmio_addr & ~PAGE_MASK));
	if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE) {
		ret = -EFAULT;
		goto out_unlock_pt;
	}
ret = zpci_memcpy_toio(io_addr, buf, length);
out_unlock_pt:
pte_unmap_unlock(ptep, ptl);
out_unlock_mmap:
mmap_read_unlock(current->mm);
out_free:
if (buf != local_buf)
kfree(buf);
return ret;
}
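/*
 * A minimal userspace sketch of driving this syscall (hypothetical
 * example, not part of the kernel; __NR_s390_pci_mmio_write comes from
 * <asm/unistd.h>):
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	static long pci_mmio_write(unsigned long mmio_addr,
 *				   const void *buf, size_t len)
 *	{
 *		return syscall(__NR_s390_pci_mmio_write, mmio_addr, buf, len);
 *	}
 *
 * The mmio_addr must lie within a single page of a VM_IO/VM_PFNMAP
 * mapping obtained by mmap()ing a PCI resource.
 */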
static inline int __pcilg_mio_inuser(
void __user *dst, const void __iomem *ioaddr,
u64 ulen, u8 *status)
{
union register_pair ioaddr_len = {.even = (u64 __force)ioaddr, .odd = ulen};
u64 cnt = ulen;
int shift = ulen * 8;
int cc = -ENXIO;
u64 val, tmp;
/*
* read 0 < @len <= 8 bytes from the PCI memory mapped at @ioaddr (in
* user space) into a register using pcilg then store these bytes at
* user address @dst
*/
asm volatile (
" sacf 256\n"
"0: .insn rre,0xb9d60000,%[val],%[ioaddr_len]\n"
"1: ipm %[cc]\n"
" srl %[cc],28\n"
" ltr %[cc],%[cc]\n"
" jne 4f\n"
"2: ahi %[shift],-8\n"
" srlg %[tmp],%[val],0(%[shift])\n"
"3: stc %[tmp],0(%[dst])\n"
"5: aghi %[dst],1\n"
" brctg %[cnt],2b\n"
"4: sacf 768\n"
EX_TABLE(0b, 4b) EX_TABLE(1b, 4b) EX_TABLE(3b, 4b) EX_TABLE(5b, 4b)
:
[ioaddr_len] "+&d" (ioaddr_len.pair),
[cc] "+d" (cc), [val] "=d" (val),
[dst] "+a" (dst), [cnt] "+d" (cnt), [tmp] "=d" (tmp),
[shift] "+d" (shift)
:: "cc", "memory");
/* did we write everything to the user space buffer? */
if (!cc && cnt != 0)
cc = -EFAULT;
*status = ioaddr_len.odd >> 24 & 0xff;
return cc;
}
static inline int __memcpy_fromio_inuser(void __user *dst,
const void __iomem *src,
unsigned long n)
{
int size, rc = 0;
	u8 status = 0;
while (n > 0) {
size = zpci_get_max_write_size((u64 __force) src,
(u64 __force) dst, n,
ZPCI_MAX_READ_SIZE);
rc = __pcilg_mio_inuser(dst, src, size, &status);
if (rc)
break;
src += size;
dst += size;
n -= size;
}
if (rc)
zpci_err_mmio(rc, status, (__force u64) dst);
return rc;
}
SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
void __user *, user_buffer, size_t, length)
{
u8 local_buf[64];
void __iomem *io_addr;
void *buf;
struct vm_area_struct *vma;
pte_t *ptep;
spinlock_t *ptl;
long ret;
if (!zpci_is_enabled())
return -ENODEV;
if (length <= 0 || PAGE_SIZE - (mmio_addr & ~PAGE_MASK) < length)
return -EINVAL;
/*
* We only support read access to MIO capable devices if we are on
* a MIO enabled system. Otherwise we would have to check for every
* address if it is a special ZPCI_ADDR and would have to do
* a pfn lookup which we don't need for MIO capable devices. Currently
* ISM devices are the only devices without MIO support and there is no
* known need for accessing these from userspace.
*/
if (static_branch_likely(&have_mio)) {
ret = __memcpy_fromio_inuser(
user_buffer, (const void __iomem *)mmio_addr,
length);
return ret;
}
if (length > 64) {
buf = kmalloc(length, GFP_KERNEL);
if (!buf)
return -ENOMEM;
} else {
buf = local_buf;
}
mmap_read_lock(current->mm);
ret = -EINVAL;
vma = vma_lookup(current->mm, mmio_addr);
if (!vma)
goto out_unlock_mmap;
if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
goto out_unlock_mmap;
ret = -EACCES;
if (!(vma->vm_flags & VM_WRITE))
goto out_unlock_mmap;
ret = follow_pte(vma->vm_mm, mmio_addr, &ptep, &ptl);
if (ret)
goto out_unlock_mmap;
io_addr = (void __iomem *)((pte_pfn(*ptep) << PAGE_SHIFT) |
(mmio_addr & ~PAGE_MASK));
if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE) {
ret = -EFAULT;
goto out_unlock_pt;
}
ret = zpci_memcpy_fromio(buf, io_addr, length);
out_unlock_pt:
pte_unmap_unlock(ptep, ptl);
out_unlock_mmap:
mmap_read_unlock(current->mm);
if (!ret && copy_to_user(user_buffer, buf, length))
ret = -EFAULT;
if (buf != local_buf)
kfree(buf);
return ret;
}
| linux-master | arch/s390/pci/pci_mmio.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2012
*
* Author(s):
* Jan Glauber <[email protected]>
*/
#define KMSG_COMPONENT "zpci"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/uaccess.h>
#include <asm/asm-extable.h>
#include <asm/pci_debug.h>
#include <asm/pci_clp.h>
#include <asm/clp.h>
#include <uapi/asm/clp.h>
#include "pci_bus.h"
bool zpci_unique_uid;
void update_uid_checking(bool new)
{
if (zpci_unique_uid != new)
zpci_dbg(3, "uid checking:%d\n", new);
zpci_unique_uid = new;
}
static inline void zpci_err_clp(unsigned int rsp, int rc)
{
struct {
unsigned int rsp;
int rc;
} __packed data = {rsp, rc};
zpci_err_hex(&data, sizeof(data));
}
/*
* Call Logical Processor with c=1, lps=0 and command 1
* to get the bit mask of installed logical processors
*/
static inline int clp_get_ilp(unsigned long *ilp)
{
unsigned long mask;
int cc = 3;
asm volatile (
" .insn rrf,0xb9a00000,%[mask],%[cmd],8,0\n"
"0: ipm %[cc]\n"
" srl %[cc],28\n"
"1:\n"
EX_TABLE(0b, 1b)
: [cc] "+d" (cc), [mask] "=d" (mask) : [cmd] "a" (1)
: "cc");
*ilp = mask;
return cc;
}
/*
 * Call Logical Processor with c=0, the given constant lps and an lpcb request.
*/
static __always_inline int clp_req(void *data, unsigned int lps)
{
struct { u8 _[CLP_BLK_SIZE]; } *req = data;
u64 ignored;
int cc = 3;
asm volatile (
" .insn rrf,0xb9a00000,%[ign],%[req],0,%[lps]\n"
"0: ipm %[cc]\n"
" srl %[cc],28\n"
"1:\n"
EX_TABLE(0b, 1b)
: [cc] "+d" (cc), [ign] "=d" (ignored), "+m" (*req)
: [req] "a" (req), [lps] "i" (lps)
: "cc");
return cc;
}
static void *clp_alloc_block(gfp_t gfp_mask)
{
return (void *) __get_free_pages(gfp_mask, get_order(CLP_BLK_SIZE));
}
static void clp_free_block(void *ptr)
{
free_pages((unsigned long) ptr, get_order(CLP_BLK_SIZE));
}
static void clp_store_query_pci_fngrp(struct zpci_dev *zdev,
struct clp_rsp_query_pci_grp *response)
{
zdev->tlb_refresh = response->refresh;
zdev->dma_mask = response->dasm;
zdev->msi_addr = response->msia;
zdev->max_msi = response->noi;
zdev->fmb_update = response->mui;
zdev->version = response->version;
zdev->maxstbl = response->maxstbl;
zdev->dtsm = response->dtsm;
switch (response->version) {
case 1:
zdev->max_bus_speed = PCIE_SPEED_5_0GT;
break;
default:
zdev->max_bus_speed = PCI_SPEED_UNKNOWN;
break;
}
}
static int clp_query_pci_fngrp(struct zpci_dev *zdev, u8 pfgid)
{
struct clp_req_rsp_query_pci_grp *rrb;
int rc;
rrb = clp_alloc_block(GFP_KERNEL);
if (!rrb)
return -ENOMEM;
memset(rrb, 0, sizeof(*rrb));
rrb->request.hdr.len = sizeof(rrb->request);
rrb->request.hdr.cmd = CLP_QUERY_PCI_FNGRP;
rrb->response.hdr.len = sizeof(rrb->response);
rrb->request.pfgid = pfgid;
rc = clp_req(rrb, CLP_LPS_PCI);
if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
clp_store_query_pci_fngrp(zdev, &rrb->response);
else {
zpci_err("Q PCI FGRP:\n");
zpci_err_clp(rrb->response.hdr.rsp, rc);
rc = -EIO;
}
clp_free_block(rrb);
return rc;
}
static int clp_store_query_pci_fn(struct zpci_dev *zdev,
struct clp_rsp_query_pci *response)
{
int i;
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
zdev->bars[i].val = le32_to_cpu(response->bar[i]);
zdev->bars[i].size = response->bar_size[i];
}
zdev->start_dma = response->sdma;
zdev->end_dma = response->edma;
zdev->pchid = response->pchid;
zdev->pfgid = response->pfgid;
zdev->pft = response->pft;
zdev->vfn = response->vfn;
zdev->port = response->port;
zdev->uid = response->uid;
zdev->fmb_length = sizeof(u32) * response->fmb_len;
zdev->rid_available = response->rid_avail;
zdev->is_physfn = response->is_physfn;
if (!s390_pci_no_rid && zdev->rid_available)
zdev->devfn = response->rid & ZPCI_RID_MASK_DEVFN;
memcpy(zdev->pfip, response->pfip, sizeof(zdev->pfip));
if (response->util_str_avail) {
memcpy(zdev->util_str, response->util_str,
sizeof(zdev->util_str));
zdev->util_str_avail = 1;
}
zdev->mio_capable = response->mio_addr_avail;
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
if (!(response->mio.valid & (1 << (PCI_STD_NUM_BARS - i - 1))))
continue;
zdev->bars[i].mio_wb = (void __iomem *) response->mio.addr[i].wb;
zdev->bars[i].mio_wt = (void __iomem *) response->mio.addr[i].wt;
}
return 0;
}
int clp_query_pci_fn(struct zpci_dev *zdev)
{
struct clp_req_rsp_query_pci *rrb;
int rc;
rrb = clp_alloc_block(GFP_KERNEL);
if (!rrb)
return -ENOMEM;
memset(rrb, 0, sizeof(*rrb));
rrb->request.hdr.len = sizeof(rrb->request);
rrb->request.hdr.cmd = CLP_QUERY_PCI_FN;
rrb->response.hdr.len = sizeof(rrb->response);
rrb->request.fh = zdev->fh;
rc = clp_req(rrb, CLP_LPS_PCI);
if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
rc = clp_store_query_pci_fn(zdev, &rrb->response);
if (rc)
goto out;
rc = clp_query_pci_fngrp(zdev, rrb->response.pfgid);
} else {
zpci_err("Q PCI FN:\n");
zpci_err_clp(rrb->response.hdr.rsp, rc);
rc = -EIO;
}
out:
clp_free_block(rrb);
return rc;
}
/**
* clp_set_pci_fn() - Execute a command on a PCI function
* @zdev: Function that will be affected
* @fh: Out parameter for updated function handle
* @nr_dma_as: DMA address space number
* @command: The command code to execute
*
* Returns: 0 on success, < 0 for Linux errors (e.g. -ENOMEM), and
* > 0 for non-success platform responses
*/
static int clp_set_pci_fn(struct zpci_dev *zdev, u32 *fh, u8 nr_dma_as, u8 command)
{
struct clp_req_rsp_set_pci *rrb;
int rc, retries = 100;
u32 gisa = 0;
*fh = 0;
rrb = clp_alloc_block(GFP_KERNEL);
if (!rrb)
return -ENOMEM;
if (command != CLP_SET_DISABLE_PCI_FN)
gisa = zdev->gisa;
do {
memset(rrb, 0, sizeof(*rrb));
rrb->request.hdr.len = sizeof(rrb->request);
rrb->request.hdr.cmd = CLP_SET_PCI_FN;
rrb->response.hdr.len = sizeof(rrb->response);
rrb->request.fh = zdev->fh;
rrb->request.oc = command;
rrb->request.ndas = nr_dma_as;
rrb->request.gisa = gisa;
rc = clp_req(rrb, CLP_LPS_PCI);
if (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY) {
retries--;
if (retries < 0)
break;
msleep(20);
}
} while (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY);
if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
*fh = rrb->response.fh;
} else {
zpci_err("Set PCI FN:\n");
zpci_err_clp(rrb->response.hdr.rsp, rc);
if (!rc)
rc = rrb->response.hdr.rsp;
}
clp_free_block(rrb);
return rc;
}
int clp_setup_writeback_mio(void)
{
struct clp_req_rsp_slpc_pci *rrb;
u8 wb_bit_pos;
int rc;
rrb = clp_alloc_block(GFP_KERNEL);
if (!rrb)
return -ENOMEM;
memset(rrb, 0, sizeof(*rrb));
rrb->request.hdr.len = sizeof(rrb->request);
rrb->request.hdr.cmd = CLP_SLPC;
rrb->response.hdr.len = sizeof(rrb->response);
rc = clp_req(rrb, CLP_LPS_PCI);
if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
if (rrb->response.vwb) {
wb_bit_pos = rrb->response.mio_wb;
set_bit_inv(wb_bit_pos, &mio_wb_bit_mask);
zpci_dbg(3, "wb bit: %d\n", wb_bit_pos);
} else {
zpci_dbg(3, "wb bit: n.a.\n");
}
} else {
zpci_err("SLPC PCI:\n");
zpci_err_clp(rrb->response.hdr.rsp, rc);
rc = -EIO;
}
clp_free_block(rrb);
return rc;
}
int clp_enable_fh(struct zpci_dev *zdev, u32 *fh, u8 nr_dma_as)
{
int rc;
rc = clp_set_pci_fn(zdev, fh, nr_dma_as, CLP_SET_ENABLE_PCI_FN);
zpci_dbg(3, "ena fid:%x, fh:%x, rc:%d\n", zdev->fid, *fh, rc);
if (!rc && zpci_use_mio(zdev)) {
rc = clp_set_pci_fn(zdev, fh, nr_dma_as, CLP_SET_ENABLE_MIO);
zpci_dbg(3, "ena mio fid:%x, fh:%x, rc:%d\n",
zdev->fid, *fh, rc);
if (rc)
clp_disable_fh(zdev, fh);
}
return rc;
}
int clp_disable_fh(struct zpci_dev *zdev, u32 *fh)
{
int rc;
if (!zdev_enabled(zdev))
return 0;
rc = clp_set_pci_fn(zdev, fh, 0, CLP_SET_DISABLE_PCI_FN);
zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, *fh, rc);
return rc;
}
static int clp_list_pci_req(struct clp_req_rsp_list_pci *rrb,
u64 *resume_token, int *nentries)
{
int rc;
memset(rrb, 0, sizeof(*rrb));
rrb->request.hdr.len = sizeof(rrb->request);
rrb->request.hdr.cmd = CLP_LIST_PCI;
/* store as many entries as possible */
rrb->response.hdr.len = CLP_BLK_SIZE - LIST_PCI_HDR_LEN;
rrb->request.resume_token = *resume_token;
/* Get PCI function handle list */
rc = clp_req(rrb, CLP_LPS_PCI);
if (rc || rrb->response.hdr.rsp != CLP_RC_OK) {
zpci_err("List PCI FN:\n");
zpci_err_clp(rrb->response.hdr.rsp, rc);
return -EIO;
}
update_uid_checking(rrb->response.uid_checking);
WARN_ON_ONCE(rrb->response.entry_size !=
sizeof(struct clp_fh_list_entry));
*nentries = (rrb->response.hdr.len - LIST_PCI_HDR_LEN) /
rrb->response.entry_size;
*resume_token = rrb->response.resume_token;
return rc;
}
static int clp_list_pci(struct clp_req_rsp_list_pci *rrb, void *data,
void (*cb)(struct clp_fh_list_entry *, void *))
{
u64 resume_token = 0;
int nentries, i, rc;
do {
rc = clp_list_pci_req(rrb, &resume_token, &nentries);
if (rc)
return rc;
for (i = 0; i < nentries; i++)
cb(&rrb->response.fh_list[i], data);
} while (resume_token);
return rc;
}
static int clp_find_pci(struct clp_req_rsp_list_pci *rrb, u32 fid,
struct clp_fh_list_entry *entry)
{
struct clp_fh_list_entry *fh_list;
u64 resume_token = 0;
int nentries, i, rc;
do {
rc = clp_list_pci_req(rrb, &resume_token, &nentries);
if (rc)
return rc;
fh_list = rrb->response.fh_list;
for (i = 0; i < nentries; i++) {
if (fh_list[i].fid == fid) {
*entry = fh_list[i];
return 0;
}
}
} while (resume_token);
return -ENODEV;
}
static void __clp_add(struct clp_fh_list_entry *entry, void *data)
{
struct zpci_dev *zdev;
if (!entry->vendor_id)
return;
zdev = get_zdev_by_fid(entry->fid);
if (zdev) {
zpci_zdev_put(zdev);
return;
}
zpci_create_device(entry->fid, entry->fh, entry->config_state);
}
int clp_scan_pci_devices(void)
{
struct clp_req_rsp_list_pci *rrb;
int rc;
rrb = clp_alloc_block(GFP_KERNEL);
if (!rrb)
return -ENOMEM;
rc = clp_list_pci(rrb, NULL, __clp_add);
clp_free_block(rrb);
return rc;
}
/*
* Get the current function handle of the function matching @fid
*/
int clp_refresh_fh(u32 fid, u32 *fh)
{
struct clp_req_rsp_list_pci *rrb;
struct clp_fh_list_entry entry;
int rc;
rrb = clp_alloc_block(GFP_NOWAIT);
if (!rrb)
return -ENOMEM;
rc = clp_find_pci(rrb, fid, &entry);
if (!rc)
*fh = entry.fh;
clp_free_block(rrb);
return rc;
}
int clp_get_state(u32 fid, enum zpci_state *state)
{
struct clp_req_rsp_list_pci *rrb;
struct clp_fh_list_entry entry;
int rc;
rrb = clp_alloc_block(GFP_ATOMIC);
if (!rrb)
return -ENOMEM;
rc = clp_find_pci(rrb, fid, &entry);
if (!rc) {
*state = entry.config_state;
} else if (rc == -ENODEV) {
*state = ZPCI_FN_STATE_RESERVED;
rc = 0;
}
clp_free_block(rrb);
return rc;
}
static int clp_base_slpc(struct clp_req *req, struct clp_req_rsp_slpc *lpcb)
{
unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);
if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
lpcb->response.hdr.len > limit)
return -EINVAL;
return clp_req(lpcb, CLP_LPS_BASE) ? -EOPNOTSUPP : 0;
}
static int clp_base_command(struct clp_req *req, struct clp_req_hdr *lpcb)
{
switch (lpcb->cmd) {
case 0x0001: /* store logical-processor characteristics */
return clp_base_slpc(req, (void *) lpcb);
default:
return -EINVAL;
}
}
static int clp_pci_slpc(struct clp_req *req, struct clp_req_rsp_slpc_pci *lpcb)
{
unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);
if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
lpcb->response.hdr.len > limit)
return -EINVAL;
return clp_req(lpcb, CLP_LPS_PCI) ? -EOPNOTSUPP : 0;
}
static int clp_pci_list(struct clp_req *req, struct clp_req_rsp_list_pci *lpcb)
{
unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);
if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
lpcb->response.hdr.len > limit)
return -EINVAL;
if (lpcb->request.reserved2 != 0)
return -EINVAL;
return clp_req(lpcb, CLP_LPS_PCI) ? -EOPNOTSUPP : 0;
}
static int clp_pci_query(struct clp_req *req,
struct clp_req_rsp_query_pci *lpcb)
{
unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);
if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
lpcb->response.hdr.len > limit)
return -EINVAL;
if (lpcb->request.reserved2 != 0 || lpcb->request.reserved3 != 0)
return -EINVAL;
return clp_req(lpcb, CLP_LPS_PCI) ? -EOPNOTSUPP : 0;
}
static int clp_pci_query_grp(struct clp_req *req,
struct clp_req_rsp_query_pci_grp *lpcb)
{
unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);
if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
lpcb->response.hdr.len > limit)
return -EINVAL;
if (lpcb->request.reserved2 != 0 || lpcb->request.reserved3 != 0 ||
lpcb->request.reserved4 != 0)
return -EINVAL;
return clp_req(lpcb, CLP_LPS_PCI) ? -EOPNOTSUPP : 0;
}
static int clp_pci_command(struct clp_req *req, struct clp_req_hdr *lpcb)
{
switch (lpcb->cmd) {
case 0x0001: /* store logical-processor characteristics */
return clp_pci_slpc(req, (void *) lpcb);
case 0x0002: /* list PCI functions */
return clp_pci_list(req, (void *) lpcb);
case 0x0003: /* query PCI function */
return clp_pci_query(req, (void *) lpcb);
case 0x0004: /* query PCI function group */
return clp_pci_query_grp(req, (void *) lpcb);
default:
return -EINVAL;
}
}
static int clp_normal_command(struct clp_req *req)
{
struct clp_req_hdr *lpcb;
void __user *uptr;
int rc;
rc = -EINVAL;
if (req->lps != 0 && req->lps != 2)
goto out;
rc = -ENOMEM;
lpcb = clp_alloc_block(GFP_KERNEL);
if (!lpcb)
goto out;
rc = -EFAULT;
uptr = (void __force __user *)(unsigned long) req->data_p;
if (copy_from_user(lpcb, uptr, PAGE_SIZE) != 0)
goto out_free;
rc = -EINVAL;
if (lpcb->fmt != 0 || lpcb->reserved1 != 0 || lpcb->reserved2 != 0)
goto out_free;
switch (req->lps) {
case 0:
rc = clp_base_command(req, lpcb);
break;
case 2:
rc = clp_pci_command(req, lpcb);
break;
}
if (rc)
goto out_free;
rc = -EFAULT;
if (copy_to_user(uptr, lpcb, PAGE_SIZE) != 0)
goto out_free;
rc = 0;
out_free:
clp_free_block(lpcb);
out:
return rc;
}
static int clp_immediate_command(struct clp_req *req)
{
void __user *uptr;
unsigned long ilp;
int exists;
if (req->cmd > 1 || clp_get_ilp(&ilp) != 0)
return -EINVAL;
uptr = (void __force __user *)(unsigned long) req->data_p;
if (req->cmd == 0) {
/* Command code 0: test for a specific processor */
exists = test_bit_inv(req->lps, &ilp);
return put_user(exists, (int __user *) uptr);
}
/* Command code 1: return bit mask of installed processors */
return put_user(ilp, (unsigned long __user *) uptr);
}
static long clp_misc_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
struct clp_req req;
void __user *argp;
if (cmd != CLP_SYNC)
return -EINVAL;
argp = is_compat_task() ? compat_ptr(arg) : (void __user *) arg;
if (copy_from_user(&req, argp, sizeof(req)))
return -EFAULT;
if (req.r != 0)
return -EINVAL;
return req.c ? clp_immediate_command(&req) : clp_normal_command(&req);
}
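/*
 * A minimal sketch of issuing a CLP request from userspace through the
 * /dev/clp misc device (hypothetical example; struct clp_req and
 * CLP_SYNC are defined in <uapi/asm/clp.h>):
 *
 *	unsigned long mask;
 *	struct clp_req req = {
 *		.c = 1,				// immediate command
 *		.cmd = 1,			// installed-processor mask
 *		.data_p = (__u64)(unsigned long)&mask,
 *	};
 *	int fd = open("/dev/clp", O_RDWR);
 *
 *	ioctl(fd, CLP_SYNC, &req);
 */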
static int clp_misc_release(struct inode *inode, struct file *filp)
{
return 0;
}
static const struct file_operations clp_misc_fops = {
.owner = THIS_MODULE,
.open = nonseekable_open,
.release = clp_misc_release,
.unlocked_ioctl = clp_misc_ioctl,
.compat_ioctl = clp_misc_ioctl,
.llseek = no_llseek,
};
static struct miscdevice clp_misc_device = {
.minor = MISC_DYNAMIC_MINOR,
.name = "clp",
.fops = &clp_misc_fops,
};
builtin_misc_device(clp_misc_device);
| linux-master | arch/s390/pci/pci_clp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2020
*
* Author(s):
* Pierre Morel <[email protected]>
*
*/
#define KMSG_COMPONENT "zpci"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/jump_label.h>
#include <linux/pci.h>
#include <linux/printk.h>
#include <asm/pci_clp.h>
#include <asm/pci_dma.h>
#include "pci_bus.h"
#include "pci_iov.h"
static LIST_HEAD(zbus_list);
static DEFINE_MUTEX(zbus_list_lock);
static int zpci_nb_devices;
/* zpci_bus_prepare_device - Prepare a zPCI function for scanning
* @zdev: the zPCI function to be prepared
*
* The PCI resources for the function are set up and added to its zbus and the
* function is enabled. The function must be added to a zbus which must have
* a PCI bus created. If an error occurs the zPCI function is not enabled.
*
* Return: 0 on success, an error code otherwise
*/
static int zpci_bus_prepare_device(struct zpci_dev *zdev)
{
int rc, i;
if (!zdev_enabled(zdev)) {
rc = zpci_enable_device(zdev);
if (rc)
return rc;
rc = zpci_dma_init_device(zdev);
if (rc) {
zpci_disable_device(zdev);
return rc;
}
}
if (!zdev->has_resources) {
zpci_setup_bus_resources(zdev);
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
if (zdev->bars[i].res)
pci_bus_add_resource(zdev->zbus->bus, zdev->bars[i].res, 0);
}
}
return 0;
}
/* zpci_bus_scan_device - Scan a single device adding it to the PCI core
* @zdev: the zdev to be scanned
*
* Scans the PCI function making it available to the common PCI code.
*
* Return: 0 on success, an error value otherwise
*/
int zpci_bus_scan_device(struct zpci_dev *zdev)
{
struct pci_dev *pdev;
int rc;
rc = zpci_bus_prepare_device(zdev);
if (rc)
return rc;
pdev = pci_scan_single_device(zdev->zbus->bus, zdev->devfn);
if (!pdev)
return -ENODEV;
pci_lock_rescan_remove();
pci_bus_add_device(pdev);
pci_unlock_rescan_remove();
return 0;
}
/* zpci_bus_remove_device - Removes the given zdev from the PCI core
* @zdev: the zdev to be removed from the PCI core
* @set_error: if true the device's error state is set to permanent failure
*
* Sets a zPCI device to a configured but offline state; the zPCI
* device is still accessible through its hotplug slot and the zPCI
* API but is removed from the common code PCI bus, making it
* no longer available to drivers.
*/
void zpci_bus_remove_device(struct zpci_dev *zdev, bool set_error)
{
struct zpci_bus *zbus = zdev->zbus;
struct pci_dev *pdev;
if (!zdev->zbus->bus)
return;
pdev = pci_get_slot(zbus->bus, zdev->devfn);
if (pdev) {
if (set_error)
pdev->error_state = pci_channel_io_perm_failure;
if (pdev->is_virtfn) {
zpci_iov_remove_virtfn(pdev, zdev->vfn);
/* balance pci_get_slot */
pci_dev_put(pdev);
return;
}
pci_stop_and_remove_bus_device_locked(pdev);
/* balance pci_get_slot */
pci_dev_put(pdev);
}
}
/* zpci_bus_scan_bus - Scan all configured zPCI functions on the bus
* @zbus: the zbus to be scanned
*
* Enables and scans all PCI functions on the bus making them available to the
* common PCI code. If a PCI function fails to be initialized an error will be
* returned but attempts will still be made for all other functions on the bus.
*
* Return: 0 on success, an error value otherwise
*/
int zpci_bus_scan_bus(struct zpci_bus *zbus)
{
struct zpci_dev *zdev;
int devfn, rc, ret = 0;
for (devfn = 0; devfn < ZPCI_FUNCTIONS_PER_BUS; devfn++) {
zdev = zbus->function[devfn];
if (zdev && zdev->state == ZPCI_FN_STATE_CONFIGURED) {
rc = zpci_bus_prepare_device(zdev);
if (rc)
ret = -EIO;
}
}
pci_lock_rescan_remove();
pci_scan_child_bus(zbus->bus);
pci_bus_add_devices(zbus->bus);
pci_unlock_rescan_remove();
return ret;
}
/* zpci_bus_scan_busses - Scan all registered busses
*
* Scan all available zbusses
*
*/
void zpci_bus_scan_busses(void)
{
struct zpci_bus *zbus = NULL;
mutex_lock(&zbus_list_lock);
list_for_each_entry(zbus, &zbus_list, bus_next) {
zpci_bus_scan_bus(zbus);
cond_resched();
}
mutex_unlock(&zbus_list_lock);
}
/* zpci_bus_create_pci_bus - Create the PCI bus associated with this zbus
* @zbus: the zbus holding the zdevices
 * @fr: PCI root function that will determine the bus's domain and bus speed
* @ops: the pci operations
*
* The PCI function @fr determines the domain (its UID), multifunction property
* and maximum bus speed of the entire bus.
*
* Return: 0 on success, an error code otherwise
*/
static int zpci_bus_create_pci_bus(struct zpci_bus *zbus, struct zpci_dev *fr, struct pci_ops *ops)
{
struct pci_bus *bus;
int domain;
domain = zpci_alloc_domain((u16)fr->uid);
if (domain < 0)
return domain;
zbus->domain_nr = domain;
zbus->multifunction = fr->rid_available;
zbus->max_bus_speed = fr->max_bus_speed;
/*
* Note that the zbus->resources are taken over and zbus->resources
* is empty after a successful call
*/
bus = pci_create_root_bus(NULL, ZPCI_BUS_NR, ops, zbus, &zbus->resources);
if (!bus) {
zpci_free_domain(zbus->domain_nr);
return -EFAULT;
}
zbus->bus = bus;
return 0;
}
static void zpci_bus_release(struct kref *kref)
{
struct zpci_bus *zbus = container_of(kref, struct zpci_bus, kref);
if (zbus->bus) {
pci_lock_rescan_remove();
pci_stop_root_bus(zbus->bus);
zpci_free_domain(zbus->domain_nr);
pci_free_resource_list(&zbus->resources);
pci_remove_root_bus(zbus->bus);
pci_unlock_rescan_remove();
}
mutex_lock(&zbus_list_lock);
list_del(&zbus->bus_next);
mutex_unlock(&zbus_list_lock);
kfree(zbus);
}
static void zpci_bus_put(struct zpci_bus *zbus)
{
kref_put(&zbus->kref, zpci_bus_release);
}
static struct zpci_bus *zpci_bus_get(int pchid)
{
struct zpci_bus *zbus;
mutex_lock(&zbus_list_lock);
list_for_each_entry(zbus, &zbus_list, bus_next) {
if (pchid == zbus->pchid) {
kref_get(&zbus->kref);
goto out_unlock;
}
}
zbus = NULL;
out_unlock:
mutex_unlock(&zbus_list_lock);
return zbus;
}
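/*
 * Allocate a new zbus for the physical channel ID @pchid, add it to
 * the global zbus list and initialize its reference count to one.
 * Every function attached later through zpci_bus_get() holds an
 * additional reference; the bus is torn down by zpci_bus_release()
 * once the last reference is dropped.
 */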
static struct zpci_bus *zpci_bus_alloc(int pchid)
{
struct zpci_bus *zbus;
zbus = kzalloc(sizeof(*zbus), GFP_KERNEL);
if (!zbus)
return NULL;
zbus->pchid = pchid;
INIT_LIST_HEAD(&zbus->bus_next);
mutex_lock(&zbus_list_lock);
list_add_tail(&zbus->bus_next, &zbus_list);
mutex_unlock(&zbus_list_lock);
kref_init(&zbus->kref);
INIT_LIST_HEAD(&zbus->resources);
zbus->bus_resource.start = 0;
zbus->bus_resource.end = ZPCI_BUS_NR;
zbus->bus_resource.flags = IORESOURCE_BUS;
pci_add_resource(&zbus->resources, &zbus->bus_resource);
return zbus;
}
void pcibios_bus_add_device(struct pci_dev *pdev)
{
struct zpci_dev *zdev = to_zpci(pdev);
/*
* With pdev->no_vf_scan the common PCI probing code does not
* perform PF/VF linking.
*/
if (zdev->vfn) {
zpci_iov_setup_virtfn(zdev->zbus, pdev, zdev->vfn);
pdev->no_command_memory = 1;
}
}
static int zpci_bus_add_device(struct zpci_bus *zbus, struct zpci_dev *zdev)
{
int rc = -EINVAL;
if (zbus->function[zdev->devfn]) {
pr_err("devfn %04x is already assigned\n", zdev->devfn);
return rc;
}
zdev->zbus = zbus;
zbus->function[zdev->devfn] = zdev;
zpci_nb_devices++;
if (zbus->multifunction && !zdev->rid_available) {
WARN_ONCE(1, "rid_available not set for multifunction\n");
goto error;
}
rc = zpci_init_slot(zdev);
if (rc)
goto error;
zdev->has_hp_slot = 1;
return 0;
error:
zbus->function[zdev->devfn] = NULL;
zdev->zbus = NULL;
zpci_nb_devices--;
return rc;
}
int zpci_bus_device_register(struct zpci_dev *zdev, struct pci_ops *ops)
{
struct zpci_bus *zbus = NULL;
int rc = -EBADF;
if (zpci_nb_devices == ZPCI_NR_DEVICES) {
pr_warn("Adding PCI function %08x failed because the configured limit of %d is reached\n",
zdev->fid, ZPCI_NR_DEVICES);
return -ENOSPC;
}
if (zdev->devfn >= ZPCI_FUNCTIONS_PER_BUS)
return -EINVAL;
if (!s390_pci_no_rid && zdev->rid_available)
zbus = zpci_bus_get(zdev->pchid);
if (!zbus) {
zbus = zpci_bus_alloc(zdev->pchid);
if (!zbus)
return -ENOMEM;
}
if (!zbus->bus) {
/* The UID of the first PCI function registered with a zpci_bus
* is used as the domain number for that bus. Currently there
* is exactly one zpci_bus per domain.
*/
rc = zpci_bus_create_pci_bus(zbus, zdev, ops);
if (rc)
goto error;
}
rc = zpci_bus_add_device(zbus, zdev);
if (rc)
goto error;
return 0;
error:
pr_err("Adding PCI function %08x failed\n", zdev->fid);
zpci_bus_put(zbus);
return rc;
}
void zpci_bus_device_unregister(struct zpci_dev *zdev)
{
struct zpci_bus *zbus = zdev->zbus;
zpci_nb_devices--;
zbus->function[zdev->devfn] = NULL;
zpci_bus_put(zbus);
}
| linux-master | arch/s390/pci/pci_bus.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2012
*
* Author(s):
* Jan Glauber <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/iommu-helper.h>
#include <linux/dma-map-ops.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <asm/pci_dma.h>
static struct kmem_cache *dma_region_table_cache;
static struct kmem_cache *dma_page_table_cache;
static int s390_iommu_strict;
static u64 s390_iommu_aperture;
static u32 s390_iommu_aperture_factor = 1;
static int zpci_refresh_global(struct zpci_dev *zdev)
{
return zpci_refresh_trans((u64) zdev->fh << 32, zdev->start_dma,
zdev->iommu_pages * PAGE_SIZE);
}
unsigned long *dma_alloc_cpu_table(gfp_t gfp)
{
unsigned long *table, *entry;
table = kmem_cache_alloc(dma_region_table_cache, gfp);
if (!table)
return NULL;
for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
*entry = ZPCI_TABLE_INVALID;
return table;
}
static void dma_free_cpu_table(void *table)
{
kmem_cache_free(dma_region_table_cache, table);
}
static unsigned long *dma_alloc_page_table(gfp_t gfp)
{
unsigned long *table, *entry;
table = kmem_cache_alloc(dma_page_table_cache, gfp);
if (!table)
return NULL;
for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
*entry = ZPCI_PTE_INVALID;
return table;
}
static void dma_free_page_table(void *table)
{
kmem_cache_free(dma_page_table_cache, table);
}
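/*
 * DMA translation uses a three-level table hierarchy: region table
 * entries point to segment tables whose entries in turn point to page
 * tables holding the actual page translation entries. The helpers
 * below look up (and lazily allocate) the next lower level, using
 * cmpxchg() so that concurrent mappers can race safely.
 */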
static unsigned long *dma_get_seg_table_origin(unsigned long *rtep, gfp_t gfp)
{
unsigned long old_rte, rte;
unsigned long *sto;
rte = READ_ONCE(*rtep);
if (reg_entry_isvalid(rte)) {
sto = get_rt_sto(rte);
} else {
sto = dma_alloc_cpu_table(gfp);
if (!sto)
return NULL;
set_rt_sto(&rte, virt_to_phys(sto));
validate_rt_entry(&rte);
entry_clr_protected(&rte);
old_rte = cmpxchg(rtep, ZPCI_TABLE_INVALID, rte);
if (old_rte != ZPCI_TABLE_INVALID) {
			/* Someone else was faster, use theirs */
dma_free_cpu_table(sto);
sto = get_rt_sto(old_rte);
}
}
return sto;
}
static unsigned long *dma_get_page_table_origin(unsigned long *step, gfp_t gfp)
{
unsigned long old_ste, ste;
unsigned long *pto;
ste = READ_ONCE(*step);
if (reg_entry_isvalid(ste)) {
pto = get_st_pto(ste);
} else {
pto = dma_alloc_page_table(gfp);
if (!pto)
return NULL;
set_st_pto(&ste, virt_to_phys(pto));
validate_st_entry(&ste);
entry_clr_protected(&ste);
old_ste = cmpxchg(step, ZPCI_TABLE_INVALID, ste);
if (old_ste != ZPCI_TABLE_INVALID) {
			/* Someone else was faster, use theirs */
dma_free_page_table(pto);
pto = get_st_pto(old_ste);
}
}
return pto;
}
unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr,
gfp_t gfp)
{
unsigned long *sto, *pto;
unsigned int rtx, sx, px;
rtx = calc_rtx(dma_addr);
sto = dma_get_seg_table_origin(&rto[rtx], gfp);
if (!sto)
return NULL;
sx = calc_sx(dma_addr);
pto = dma_get_page_table_origin(&sto[sx], gfp);
if (!pto)
return NULL;
px = calc_px(dma_addr);
return &pto[px];
}
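/*
 * calc_rtx(), calc_sx() and calc_px() (see asm/pci_dma.h) extract the
 * region-, segment- and page-table index fields of a DMA address, so
 * dma_walk_cpu_trans() simply indexes one table per level.
 */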
void dma_update_cpu_trans(unsigned long *ptep, phys_addr_t page_addr, int flags)
{
unsigned long pte;
pte = READ_ONCE(*ptep);
if (flags & ZPCI_PTE_INVALID) {
invalidate_pt_entry(&pte);
} else {
set_pt_pfaa(&pte, page_addr);
validate_pt_entry(&pte);
}
if (flags & ZPCI_TABLE_PROTECTED)
entry_set_protected(&pte);
else
entry_clr_protected(&pte);
xchg(ptep, pte);
}
static int __dma_update_trans(struct zpci_dev *zdev, phys_addr_t pa,
dma_addr_t dma_addr, size_t size, int flags)
{
unsigned int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
phys_addr_t page_addr = (pa & PAGE_MASK);
unsigned long *entry;
int i, rc = 0;
if (!nr_pages)
return -EINVAL;
if (!zdev->dma_table)
return -EINVAL;
for (i = 0; i < nr_pages; i++) {
entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr,
GFP_ATOMIC);
if (!entry) {
rc = -ENOMEM;
goto undo_cpu_trans;
}
dma_update_cpu_trans(entry, page_addr, flags);
page_addr += PAGE_SIZE;
dma_addr += PAGE_SIZE;
}
undo_cpu_trans:
if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) {
flags = ZPCI_PTE_INVALID;
while (i-- > 0) {
page_addr -= PAGE_SIZE;
dma_addr -= PAGE_SIZE;
entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr,
GFP_ATOMIC);
if (!entry)
break;
dma_update_cpu_trans(entry, page_addr, flags);
}
}
return rc;
}
static int __dma_purge_tlb(struct zpci_dev *zdev, dma_addr_t dma_addr,
size_t size, int flags)
{
unsigned long irqflags;
int ret;
/*
* With zdev->tlb_refresh == 0, rpcit is not required to establish new
* translations when previously invalid translation-table entries are
* validated. With lazy unmap, rpcit is skipped for previously valid
* entries, but a global rpcit is then required before any address can
* be re-used, i.e. after each iommu bitmap wrap-around.
*/
if ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID) {
if (!zdev->tlb_refresh)
return 0;
} else {
if (!s390_iommu_strict)
return 0;
}
ret = zpci_refresh_trans((u64) zdev->fh << 32, dma_addr,
PAGE_ALIGN(size));
if (ret == -ENOMEM && !s390_iommu_strict) {
/* enable the hypervisor to free some resources */
if (zpci_refresh_global(zdev))
goto out;
spin_lock_irqsave(&zdev->iommu_bitmap_lock, irqflags);
bitmap_andnot(zdev->iommu_bitmap, zdev->iommu_bitmap,
zdev->lazy_bitmap, zdev->iommu_pages);
bitmap_zero(zdev->lazy_bitmap, zdev->iommu_pages);
spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, irqflags);
ret = 0;
}
out:
return ret;
}
static int dma_update_trans(struct zpci_dev *zdev, phys_addr_t pa,
dma_addr_t dma_addr, size_t size, int flags)
{
int rc;
rc = __dma_update_trans(zdev, pa, dma_addr, size, flags);
if (rc)
return rc;
rc = __dma_purge_tlb(zdev, dma_addr, size, flags);
if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID))
__dma_update_trans(zdev, pa, dma_addr, size, ZPCI_PTE_INVALID);
return rc;
}
void dma_free_seg_table(unsigned long entry)
{
unsigned long *sto = get_rt_sto(entry);
int sx;
for (sx = 0; sx < ZPCI_TABLE_ENTRIES; sx++)
if (reg_entry_isvalid(sto[sx]))
dma_free_page_table(get_st_pto(sto[sx]));
dma_free_cpu_table(sto);
}
void dma_cleanup_tables(unsigned long *table)
{
int rtx;
if (!table)
return;
for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
if (reg_entry_isvalid(table[rtx]))
dma_free_seg_table(table[rtx]);
dma_free_cpu_table(table);
}
static unsigned long __dma_alloc_iommu(struct device *dev,
unsigned long start, int size)
{
struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages,
start, size, zdev->start_dma >> PAGE_SHIFT,
dma_get_seg_boundary_nr_pages(dev, PAGE_SHIFT),
0);
}
static dma_addr_t dma_alloc_address(struct device *dev, int size)
{
struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
unsigned long offset, flags;
spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
offset = __dma_alloc_iommu(dev, zdev->next_bit, size);
if (offset == -1) {
if (!s390_iommu_strict) {
/* global flush before DMA addresses are reused */
if (zpci_refresh_global(zdev))
goto out_error;
bitmap_andnot(zdev->iommu_bitmap, zdev->iommu_bitmap,
zdev->lazy_bitmap, zdev->iommu_pages);
bitmap_zero(zdev->lazy_bitmap, zdev->iommu_pages);
}
/* wrap-around */
offset = __dma_alloc_iommu(dev, 0, size);
if (offset == -1)
goto out_error;
}
zdev->next_bit = offset + size;
spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
return zdev->start_dma + offset * PAGE_SIZE;
out_error:
spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
return DMA_MAPPING_ERROR;
}
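/*
 * Release a range of DMA addresses. In strict mode the range becomes
 * reusable immediately; in lazy mode it is only marked in the lazy
 * bitmap and handed back after the next global TLB flush (see
 * dma_alloc_address() above).
 */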
static void dma_free_address(struct device *dev, dma_addr_t dma_addr, int size)
{
struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
unsigned long flags, offset;
offset = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;
spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
if (!zdev->iommu_bitmap)
goto out;
if (s390_iommu_strict)
bitmap_clear(zdev->iommu_bitmap, offset, size);
else
bitmap_set(zdev->lazy_bitmap, offset, size);
out:
spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
}
static inline void zpci_err_dma(unsigned long rc, unsigned long addr)
{
struct {
unsigned long rc;
unsigned long addr;
} __packed data = {rc, addr};
zpci_err_hex(&data, sizeof(data));
}
static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction,
unsigned long attrs)
{
struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
unsigned long pa = page_to_phys(page) + offset;
int flags = ZPCI_PTE_VALID;
unsigned long nr_pages;
dma_addr_t dma_addr;
int ret;
/* This rounds up number of pages based on size and offset */
nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
dma_addr = dma_alloc_address(dev, nr_pages);
if (dma_addr == DMA_MAPPING_ERROR) {
ret = -ENOSPC;
goto out_err;
}
/* Use rounded up size */
size = nr_pages * PAGE_SIZE;
if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
flags |= ZPCI_TABLE_PROTECTED;
ret = dma_update_trans(zdev, pa, dma_addr, size, flags);
if (ret)
goto out_free;
atomic64_add(nr_pages, &zdev->mapped_pages);
return dma_addr + (offset & ~PAGE_MASK);
out_free:
dma_free_address(dev, dma_addr, nr_pages);
out_err:
zpci_err("map error:\n");
zpci_err_dma(ret, pa);
return DMA_MAPPING_ERROR;
}
static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction direction,
unsigned long attrs)
{
struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
int npages, ret;
npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
dma_addr = dma_addr & PAGE_MASK;
ret = dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
ZPCI_PTE_INVALID);
if (ret) {
zpci_err("unmap error:\n");
zpci_err_dma(ret, dma_addr);
return;
}
atomic64_add(npages, &zdev->unmapped_pages);
dma_free_address(dev, dma_addr, npages);
}
static void *s390_dma_alloc(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
unsigned long attrs)
{
struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
struct page *page;
phys_addr_t pa;
dma_addr_t map;
size = PAGE_ALIGN(size);
page = alloc_pages(flag | __GFP_ZERO, get_order(size));
if (!page)
return NULL;
pa = page_to_phys(page);
map = s390_dma_map_pages(dev, page, 0, size, DMA_BIDIRECTIONAL, 0);
if (dma_mapping_error(dev, map)) {
__free_pages(page, get_order(size));
return NULL;
}
atomic64_add(size / PAGE_SIZE, &zdev->allocated_pages);
if (dma_handle)
*dma_handle = map;
return phys_to_virt(pa);
}
static void s390_dma_free(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
unsigned long attrs)
{
struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
size = PAGE_ALIGN(size);
atomic64_sub(size / PAGE_SIZE, &zdev->allocated_pages);
s390_dma_unmap_pages(dev, dma_handle, size, DMA_BIDIRECTIONAL, 0);
free_pages((unsigned long)vaddr, get_order(size));
}
/* Map a segment into a contiguous dma address area */
static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
size_t size, dma_addr_t *handle,
enum dma_data_direction dir)
{
unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
dma_addr_t dma_addr_base, dma_addr;
int flags = ZPCI_PTE_VALID;
struct scatterlist *s;
phys_addr_t pa = 0;
int ret;
dma_addr_base = dma_alloc_address(dev, nr_pages);
if (dma_addr_base == DMA_MAPPING_ERROR)
return -ENOMEM;
dma_addr = dma_addr_base;
if (dir == DMA_NONE || dir == DMA_TO_DEVICE)
flags |= ZPCI_TABLE_PROTECTED;
for (s = sg; dma_addr < dma_addr_base + size; s = sg_next(s)) {
pa = page_to_phys(sg_page(s));
ret = __dma_update_trans(zdev, pa, dma_addr,
s->offset + s->length, flags);
if (ret)
goto unmap;
dma_addr += s->offset + s->length;
}
ret = __dma_purge_tlb(zdev, dma_addr_base, size, flags);
if (ret)
goto unmap;
*handle = dma_addr_base;
atomic64_add(nr_pages, &zdev->mapped_pages);
return ret;
unmap:
dma_update_trans(zdev, 0, dma_addr_base, dma_addr - dma_addr_base,
ZPCI_PTE_INVALID);
dma_free_address(dev, dma_addr_base, nr_pages);
zpci_err("map error:\n");
zpci_err_dma(ret, pa);
return ret;
}
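/*
 * Map a scatterlist, coalescing as many consecutive elements as
 * possible into a single contiguous DMA range: elements are merged as
 * long as each following element starts at offset 0, the accumulated
 * size stays page aligned and the device's maximum segment size is not
 * exceeded.
 */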
static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
int nr_elements, enum dma_data_direction dir,
unsigned long attrs)
{
struct scatterlist *s = sg, *start = sg, *dma = sg;
unsigned int max = dma_get_max_seg_size(dev);
unsigned int size = s->offset + s->length;
unsigned int offset = s->offset;
int count = 0, i, ret;
for (i = 1; i < nr_elements; i++) {
s = sg_next(s);
s->dma_length = 0;
if (s->offset || (size & ~PAGE_MASK) ||
size + s->length > max) {
ret = __s390_dma_map_sg(dev, start, size,
&dma->dma_address, dir);
if (ret)
goto unmap;
dma->dma_address += offset;
dma->dma_length = size - offset;
size = offset = s->offset;
start = s;
dma = sg_next(dma);
count++;
}
size += s->length;
}
ret = __s390_dma_map_sg(dev, start, size, &dma->dma_address, dir);
if (ret)
goto unmap;
dma->dma_address += offset;
dma->dma_length = size - offset;
return count + 1;
unmap:
for_each_sg(sg, s, count, i)
s390_dma_unmap_pages(dev, sg_dma_address(s), sg_dma_len(s),
dir, attrs);
return ret;
}
static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
int nr_elements, enum dma_data_direction dir,
unsigned long attrs)
{
struct scatterlist *s;
int i;
for_each_sg(sg, s, nr_elements, i) {
if (s->dma_length)
s390_dma_unmap_pages(dev, s->dma_address, s->dma_length,
dir, attrs);
s->dma_address = 0;
s->dma_length = 0;
}
}
int zpci_dma_init_device(struct zpci_dev *zdev)
{
u8 status;
int rc;
/*
* At this point, if the device is part of an IOMMU domain, this would
* be a strong hint towards a bug in the IOMMU API (common) code and/or
* simultaneous access via IOMMU and DMA API. So let's issue a warning.
*/
WARN_ON(zdev->s390_domain);
spin_lock_init(&zdev->iommu_bitmap_lock);
zdev->dma_table = dma_alloc_cpu_table(GFP_KERNEL);
if (!zdev->dma_table) {
rc = -ENOMEM;
goto out;
}
/*
* Restrict the iommu bitmap size to the minimum of the following:
* - s390_iommu_aperture which defaults to high_memory
* - 3-level pagetable address limit minus start_dma offset
* - DMA address range allowed by the hardware (clp query pci fn)
*
* Also set zdev->end_dma to the actual end address of the usable
* range, instead of the theoretical maximum as reported by hardware.
*
* This limits the number of concurrently usable DMA mappings since
* for each DMA mapped memory address we need a DMA address including
* extra DMA addresses for multiple mappings of the same memory address.
*/
zdev->start_dma = PAGE_ALIGN(zdev->start_dma);
zdev->iommu_size = min3(s390_iommu_aperture,
ZPCI_TABLE_SIZE_RT - zdev->start_dma,
zdev->end_dma - zdev->start_dma + 1);
zdev->end_dma = zdev->start_dma + zdev->iommu_size - 1;
zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT;
zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8);
if (!zdev->iommu_bitmap) {
rc = -ENOMEM;
goto free_dma_table;
}
if (!s390_iommu_strict) {
zdev->lazy_bitmap = vzalloc(zdev->iommu_pages / 8);
if (!zdev->lazy_bitmap) {
rc = -ENOMEM;
goto free_bitmap;
}
}
if (zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
virt_to_phys(zdev->dma_table), &status)) {
rc = -EIO;
goto free_bitmap;
}
return 0;
free_bitmap:
vfree(zdev->iommu_bitmap);
zdev->iommu_bitmap = NULL;
vfree(zdev->lazy_bitmap);
zdev->lazy_bitmap = NULL;
free_dma_table:
dma_free_cpu_table(zdev->dma_table);
zdev->dma_table = NULL;
out:
return rc;
}
int zpci_dma_exit_device(struct zpci_dev *zdev)
{
int cc = 0;
/*
* At this point, if the device is part of an IOMMU domain, this would
* be a strong hint towards a bug in the IOMMU API (common) code and/or
* simultaneous access via IOMMU and DMA API. So let's issue a warning.
*/
WARN_ON(zdev->s390_domain);
if (zdev_enabled(zdev))
cc = zpci_unregister_ioat(zdev, 0);
/*
* cc == 3 indicates the function is gone already. This can happen
* if the function was deconfigured/disabled suddenly and we have not
* received a new handle yet.
*/
if (cc && cc != 3)
return -EIO;
dma_cleanup_tables(zdev->dma_table);
zdev->dma_table = NULL;
vfree(zdev->iommu_bitmap);
zdev->iommu_bitmap = NULL;
vfree(zdev->lazy_bitmap);
zdev->lazy_bitmap = NULL;
zdev->next_bit = 0;
return 0;
}
static int __init dma_alloc_cpu_table_caches(void)
{
dma_region_table_cache = kmem_cache_create("PCI_DMA_region_tables",
ZPCI_TABLE_SIZE, ZPCI_TABLE_ALIGN,
0, NULL);
if (!dma_region_table_cache)
return -ENOMEM;
dma_page_table_cache = kmem_cache_create("PCI_DMA_page_tables",
ZPCI_PT_SIZE, ZPCI_PT_ALIGN,
0, NULL);
if (!dma_page_table_cache) {
kmem_cache_destroy(dma_region_table_cache);
return -ENOMEM;
}
return 0;
}
int __init zpci_dma_init(void)
{
s390_iommu_aperture = (u64)virt_to_phys(high_memory);
if (!s390_iommu_aperture_factor)
s390_iommu_aperture = ULONG_MAX;
else
s390_iommu_aperture *= s390_iommu_aperture_factor;
return dma_alloc_cpu_table_caches();
}
void zpci_dma_exit(void)
{
kmem_cache_destroy(dma_page_table_cache);
kmem_cache_destroy(dma_region_table_cache);
}
const struct dma_map_ops s390_pci_dma_ops = {
.alloc = s390_dma_alloc,
.free = s390_dma_free,
.map_sg = s390_dma_map_sg,
.unmap_sg = s390_dma_unmap_sg,
.map_page = s390_dma_map_pages,
.unmap_page = s390_dma_unmap_pages,
.mmap = dma_common_mmap,
.get_sgtable = dma_common_get_sgtable,
.alloc_pages = dma_common_alloc_pages,
.free_pages = dma_common_free_pages,
/* dma_supported is unconditionally true without a callback */
};
EXPORT_SYMBOL_GPL(s390_pci_dma_ops);
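/*
 * Kernel command line "s390_iommu=strict" disables lazy TLB flushing,
 * making unmapped DMA addresses reusable immediately at the cost of
 * one RPCIT invocation per unmap.
 */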
static int __init s390_iommu_setup(char *str)
{
if (!strcmp(str, "strict"))
s390_iommu_strict = 1;
return 1;
}
__setup("s390_iommu=", s390_iommu_setup);
static int __init s390_iommu_aperture_setup(char *str)
{
if (kstrtou32(str, 10, &s390_iommu_aperture_factor))
s390_iommu_aperture_factor = 1;
return 1;
}
__setup("s390_iommu_aperture=", s390_iommu_aperture_setup);
| linux-master | arch/s390/pci/pci_dma.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2012
*
* Author(s):
* Jan Glauber <[email protected]>
*
* The System z PCI code is a rewrite from a prototype by
* the following people (Kudoz!):
* Alexander Schmidt
* Christoph Raisch
* Hannes Hering
* Hoang-Nam Nguyen
* Jan-Bernd Themann
* Stefan Roscher
* Thomas Klein
*/
#define KMSG_COMPONENT "zpci"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/jump_label.h>
#include <linux/pci.h>
#include <linux/printk.h>
#include <asm/isc.h>
#include <asm/airq.h>
#include <asm/facility.h>
#include <asm/pci_insn.h>
#include <asm/pci_clp.h>
#include <asm/pci_dma.h>
#include "pci_bus.h"
#include "pci_iov.h"
/* list of all detected zpci devices */
static LIST_HEAD(zpci_list);
static DEFINE_SPINLOCK(zpci_list_lock);
static DECLARE_BITMAP(zpci_domain, ZPCI_DOMAIN_BITMAP_SIZE);
static DEFINE_SPINLOCK(zpci_domain_lock);
#define ZPCI_IOMAP_ENTRIES \
min(((unsigned long) ZPCI_NR_DEVICES * PCI_STD_NUM_BARS / 2), \
ZPCI_IOMAP_MAX_ENTRIES)
unsigned int s390_pci_no_rid;
static DEFINE_SPINLOCK(zpci_iomap_lock);
static unsigned long *zpci_iomap_bitmap;
struct zpci_iomap_entry *zpci_iomap_start;
EXPORT_SYMBOL_GPL(zpci_iomap_start);
DEFINE_STATIC_KEY_FALSE(have_mio);
static struct kmem_cache *zdev_fmb_cache;
/* AEN structures that must be preserved over KVM module re-insertion */
union zpci_sic_iib *zpci_aipb;
EXPORT_SYMBOL_GPL(zpci_aipb);
struct airq_iv *zpci_aif_sbv;
EXPORT_SYMBOL_GPL(zpci_aif_sbv);
struct zpci_dev *get_zdev_by_fid(u32 fid)
{
struct zpci_dev *tmp, *zdev = NULL;
spin_lock(&zpci_list_lock);
list_for_each_entry(tmp, &zpci_list, entry) {
if (tmp->fid == fid) {
zdev = tmp;
zpci_zdev_get(zdev);
break;
}
}
spin_unlock(&zpci_list_lock);
return zdev;
}
void zpci_remove_reserved_devices(void)
{
struct zpci_dev *tmp, *zdev;
enum zpci_state state;
LIST_HEAD(remove);
spin_lock(&zpci_list_lock);
list_for_each_entry_safe(zdev, tmp, &zpci_list, entry) {
if (zdev->state == ZPCI_FN_STATE_STANDBY &&
!clp_get_state(zdev->fid, &state) &&
state == ZPCI_FN_STATE_RESERVED)
list_move_tail(&zdev->entry, &remove);
}
spin_unlock(&zpci_list_lock);
list_for_each_entry_safe(zdev, tmp, &remove, entry)
zpci_device_reserved(zdev);
}
int pci_domain_nr(struct pci_bus *bus)
{
return ((struct zpci_bus *) bus->sysdata)->domain_nr;
}
EXPORT_SYMBOL_GPL(pci_domain_nr);
int pci_proc_domain(struct pci_bus *bus)
{
return pci_domain_nr(bus);
}
EXPORT_SYMBOL_GPL(pci_proc_domain);
/* Modify PCI: Register I/O address translation parameters */
int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
u64 base, u64 limit, u64 iota, u8 *status)
{
u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_REG_IOAT);
struct zpci_fib fib = {0};
u8 cc;
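	/* The I/O translation anchor (iota) must be aligned to a 16K boundary */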
WARN_ON_ONCE(iota & 0x3fff);
fib.pba = base;
fib.pal = limit;
fib.iota = iota | ZPCI_IOTA_RTTO_FLAG;
fib.gd = zdev->gisa;
cc = zpci_mod_fc(req, &fib, status);
if (cc)
zpci_dbg(3, "reg ioat fid:%x, cc:%d, status:%d\n", zdev->fid, cc, *status);
return cc;
}
EXPORT_SYMBOL_GPL(zpci_register_ioat);
/* Modify PCI: Unregister I/O address translation parameters */
int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
{
u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_DEREG_IOAT);
struct zpci_fib fib = {0};
u8 cc, status;
fib.gd = zdev->gisa;
cc = zpci_mod_fc(req, &fib, &status);
if (cc)
zpci_dbg(3, "unreg ioat fid:%x, cc:%d, status:%d\n", zdev->fid, cc, status);
return cc;
}
/* Modify PCI: Set PCI function measurement parameters */
int zpci_fmb_enable_device(struct zpci_dev *zdev)
{
u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
struct zpci_fib fib = {0};
u8 cc, status;
if (zdev->fmb || sizeof(*zdev->fmb) < zdev->fmb_length)
return -EINVAL;
zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL);
if (!zdev->fmb)
return -ENOMEM;
WARN_ON((u64) zdev->fmb & 0xf);
/* reset software counters */
atomic64_set(&zdev->allocated_pages, 0);
atomic64_set(&zdev->mapped_pages, 0);
atomic64_set(&zdev->unmapped_pages, 0);
fib.fmb_addr = virt_to_phys(zdev->fmb);
fib.gd = zdev->gisa;
cc = zpci_mod_fc(req, &fib, &status);
if (cc) {
kmem_cache_free(zdev_fmb_cache, zdev->fmb);
zdev->fmb = NULL;
}
return cc ? -EIO : 0;
}
/* Modify PCI: Disable PCI function measurement */
int zpci_fmb_disable_device(struct zpci_dev *zdev)
{
u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
struct zpci_fib fib = {0};
u8 cc, status;
if (!zdev->fmb)
return -EINVAL;
fib.gd = zdev->gisa;
/* Function measurement is disabled if fmb address is zero */
cc = zpci_mod_fc(req, &fib, &status);
if (cc == 3) /* Function already gone. */
cc = 0;
if (!cc) {
kmem_cache_free(zdev_fmb_cache, zdev->fmb);
zdev->fmb = NULL;
}
return cc ? -EIO : 0;
}
static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
{
u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
u64 data;
int rc;
rc = __zpci_load(&data, req, offset);
if (!rc) {
data = le64_to_cpu((__force __le64) data);
data >>= (8 - len) * 8;
*val = (u32) data;
} else
*val = 0xffffffff;
return rc;
}
static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
{
u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
u64 data = val;
int rc;
data <<= (8 - len) * 8;
data = (__force u64) cpu_to_le64(data);
rc = __zpci_store(data, req, offset);
return rc;
}
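/*
 * Worked example for the shifts above (illustrative, assuming len == 2):
 * (8 - len) * 8 == 48, so zpci_cfg_load() shifts the byte-swapped
 * doubleword right by 48 bits to right-align the two payload bytes in
 * *val, while zpci_cfg_store() shifts the value left by the same 48 bits
 * before swapping and storing, making the two conversions exact inverses.
 */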
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
resource_size_t size,
resource_size_t align)
{
return 0;
}
/* combine single writes by using store-block insn */
void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
{
zpci_memcpy_toio(to, from, count);
}
void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
unsigned long prot)
{
/*
* When PCI MIO instructions are unavailable the "physical" address
* encodes a hint for accessing the PCI memory space it represents.
* Just pass it unchanged such that ioread/iowrite can decode it.
*/
if (!static_branch_unlikely(&have_mio))
return (void __iomem *)phys_addr;
return generic_ioremap_prot(phys_addr, size, __pgprot(prot));
}
EXPORT_SYMBOL(ioremap_prot);
void iounmap(volatile void __iomem *addr)
{
if (static_branch_likely(&have_mio))
generic_iounmap(addr);
}
EXPORT_SYMBOL(iounmap);
/* Create a virtual mapping cookie for a PCI BAR */
static void __iomem *pci_iomap_range_fh(struct pci_dev *pdev, int bar,
unsigned long offset, unsigned long max)
{
struct zpci_dev *zdev = to_zpci(pdev);
int idx;
idx = zdev->bars[bar].map_idx;
spin_lock(&zpci_iomap_lock);
/* Detect overrun */
WARN_ON(!++zpci_iomap_start[idx].count);
zpci_iomap_start[idx].fh = zdev->fh;
zpci_iomap_start[idx].bar = bar;
spin_unlock(&zpci_iomap_lock);
return (void __iomem *) ZPCI_ADDR(idx) + offset;
}
static void __iomem *pci_iomap_range_mio(struct pci_dev *pdev, int bar,
unsigned long offset,
unsigned long max)
{
unsigned long barsize = pci_resource_len(pdev, bar);
struct zpci_dev *zdev = to_zpci(pdev);
void __iomem *iova;
iova = ioremap((unsigned long) zdev->bars[bar].mio_wt, barsize);
return iova ? iova + offset : iova;
}
void __iomem *pci_iomap_range(struct pci_dev *pdev, int bar,
unsigned long offset, unsigned long max)
{
if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
return NULL;
if (static_branch_likely(&have_mio))
return pci_iomap_range_mio(pdev, bar, offset, max);
else
return pci_iomap_range_fh(pdev, bar, offset, max);
}
EXPORT_SYMBOL(pci_iomap_range);
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
return pci_iomap_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL(pci_iomap);
static void __iomem *pci_iomap_wc_range_mio(struct pci_dev *pdev, int bar,
unsigned long offset, unsigned long max)
{
unsigned long barsize = pci_resource_len(pdev, bar);
struct zpci_dev *zdev = to_zpci(pdev);
void __iomem *iova;
iova = ioremap((unsigned long) zdev->bars[bar].mio_wb, barsize);
return iova ? iova + offset : iova;
}
void __iomem *pci_iomap_wc_range(struct pci_dev *pdev, int bar,
unsigned long offset, unsigned long max)
{
if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
return NULL;
if (static_branch_likely(&have_mio))
return pci_iomap_wc_range_mio(pdev, bar, offset, max);
else
return pci_iomap_range_fh(pdev, bar, offset, max);
}
EXPORT_SYMBOL(pci_iomap_wc_range);
void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
{
return pci_iomap_wc_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL(pci_iomap_wc);
static void pci_iounmap_fh(struct pci_dev *pdev, void __iomem *addr)
{
unsigned int idx = ZPCI_IDX(addr);
spin_lock(&zpci_iomap_lock);
/* Detect underrun */
WARN_ON(!zpci_iomap_start[idx].count);
if (!--zpci_iomap_start[idx].count) {
zpci_iomap_start[idx].fh = 0;
zpci_iomap_start[idx].bar = 0;
}
spin_unlock(&zpci_iomap_lock);
}
static void pci_iounmap_mio(struct pci_dev *pdev, void __iomem *addr)
{
iounmap(addr);
}
void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
if (static_branch_likely(&have_mio))
pci_iounmap_mio(pdev, addr);
else
pci_iounmap_fh(pdev, addr);
}
EXPORT_SYMBOL(pci_iounmap);
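/*
 * Note on the non-MIO mapping cookie (illustrative): pci_iomap_range_fh()
 * hands out ZPCI_ADDR(idx) + offset rather than a real mapping, and
 * pci_iounmap_fh() recovers the slot with ZPCI_IDX(addr); the PCI
 * load/store helpers use the same decoding to look up the current
 * function handle and BAR in zpci_iomap_start[] on every access.
 */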
static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 *val)
{
struct zpci_dev *zdev = zdev_from_bus(bus, devfn);
return (zdev) ? zpci_cfg_load(zdev, where, val, size) : -ENODEV;
}
static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 val)
{
struct zpci_dev *zdev = zdev_from_bus(bus, devfn);
return (zdev) ? zpci_cfg_store(zdev, where, val, size) : -ENODEV;
}
static struct pci_ops pci_root_ops = {
.read = pci_read,
.write = pci_write,
};
static void zpci_map_resources(struct pci_dev *pdev)
{
struct zpci_dev *zdev = to_zpci(pdev);
resource_size_t len;
int i;
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
len = pci_resource_len(pdev, i);
if (!len)
continue;
if (zpci_use_mio(zdev))
pdev->resource[i].start =
(resource_size_t __force) zdev->bars[i].mio_wt;
else
pdev->resource[i].start = (resource_size_t __force)
pci_iomap_range_fh(pdev, i, 0, 0);
pdev->resource[i].end = pdev->resource[i].start + len - 1;
}
zpci_iov_map_resources(pdev);
}
static void zpci_unmap_resources(struct pci_dev *pdev)
{
struct zpci_dev *zdev = to_zpci(pdev);
resource_size_t len;
int i;
if (zpci_use_mio(zdev))
return;
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
len = pci_resource_len(pdev, i);
if (!len)
continue;
pci_iounmap_fh(pdev, (void __iomem __force *)
pdev->resource[i].start);
}
}
static int zpci_alloc_iomap(struct zpci_dev *zdev)
{
unsigned long entry;
spin_lock(&zpci_iomap_lock);
entry = find_first_zero_bit(zpci_iomap_bitmap, ZPCI_IOMAP_ENTRIES);
if (entry == ZPCI_IOMAP_ENTRIES) {
spin_unlock(&zpci_iomap_lock);
return -ENOSPC;
}
set_bit(entry, zpci_iomap_bitmap);
spin_unlock(&zpci_iomap_lock);
return entry;
}
static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
{
spin_lock(&zpci_iomap_lock);
memset(&zpci_iomap_start[entry], 0, sizeof(struct zpci_iomap_entry));
clear_bit(entry, zpci_iomap_bitmap);
spin_unlock(&zpci_iomap_lock);
}
static void zpci_do_update_iomap_fh(struct zpci_dev *zdev, u32 fh)
{
int bar, idx;
spin_lock(&zpci_iomap_lock);
for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
if (!zdev->bars[bar].size)
continue;
idx = zdev->bars[bar].map_idx;
if (!zpci_iomap_start[idx].count)
continue;
WRITE_ONCE(zpci_iomap_start[idx].fh, zdev->fh);
}
spin_unlock(&zpci_iomap_lock);
}
void zpci_update_fh(struct zpci_dev *zdev, u32 fh)
{
if (!fh || zdev->fh == fh)
return;
zdev->fh = fh;
if (zpci_use_mio(zdev))
return;
if (zdev->has_resources && zdev_enabled(zdev))
zpci_do_update_iomap_fh(zdev, fh);
}
static struct resource *__alloc_res(struct zpci_dev *zdev, unsigned long start,
unsigned long size, unsigned long flags)
{
struct resource *r;
r = kzalloc(sizeof(*r), GFP_KERNEL);
if (!r)
return NULL;
r->start = start;
r->end = r->start + size - 1;
r->flags = flags;
r->name = zdev->res_name;
if (request_resource(&iomem_resource, r)) {
kfree(r);
return NULL;
}
return r;
}
int zpci_setup_bus_resources(struct zpci_dev *zdev)
{
unsigned long addr, size, flags;
struct resource *res;
int i, entry;
snprintf(zdev->res_name, sizeof(zdev->res_name),
"PCI Bus %04x:%02x", zdev->uid, ZPCI_BUS_NR);
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
if (!zdev->bars[i].size)
continue;
entry = zpci_alloc_iomap(zdev);
if (entry < 0)
return entry;
zdev->bars[i].map_idx = entry;
/* only MMIO is supported */
flags = IORESOURCE_MEM;
if (zdev->bars[i].val & 8)
flags |= IORESOURCE_PREFETCH;
if (zdev->bars[i].val & 4)
flags |= IORESOURCE_MEM_64;
if (zpci_use_mio(zdev))
addr = (unsigned long) zdev->bars[i].mio_wt;
else
addr = ZPCI_ADDR(entry);
size = 1UL << zdev->bars[i].size;
res = __alloc_res(zdev, addr, size, flags);
if (!res) {
zpci_free_iomap(zdev, entry);
return -ENOMEM;
}
zdev->bars[i].res = res;
}
zdev->has_resources = 1;
return 0;
}
static void zpci_cleanup_bus_resources(struct zpci_dev *zdev)
{
struct resource *res;
int i;
pci_lock_rescan_remove();
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
res = zdev->bars[i].res;
if (!res)
continue;
release_resource(res);
pci_bus_remove_resource(zdev->zbus->bus, res);
zpci_free_iomap(zdev, zdev->bars[i].map_idx);
zdev->bars[i].res = NULL;
kfree(res);
}
zdev->has_resources = 0;
pci_unlock_rescan_remove();
}
int pcibios_device_add(struct pci_dev *pdev)
{
struct zpci_dev *zdev = to_zpci(pdev);
struct resource *res;
int i;
/* The pdev has a reference to the zdev via its bus */
zpci_zdev_get(zdev);
if (pdev->is_physfn)
pdev->no_vf_scan = 1;
pdev->dev.groups = zpci_attr_groups;
pdev->dev.dma_ops = &s390_pci_dma_ops;
zpci_map_resources(pdev);
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
res = &pdev->resource[i];
if (res->parent || !res->flags)
continue;
pci_claim_resource(pdev, i);
}
return 0;
}
void pcibios_release_device(struct pci_dev *pdev)
{
struct zpci_dev *zdev = to_zpci(pdev);
zpci_unmap_resources(pdev);
zpci_zdev_put(zdev);
}
int pcibios_enable_device(struct pci_dev *pdev, int mask)
{
struct zpci_dev *zdev = to_zpci(pdev);
zpci_debug_init_device(zdev, dev_name(&pdev->dev));
zpci_fmb_enable_device(zdev);
return pci_enable_resources(pdev, mask);
}
void pcibios_disable_device(struct pci_dev *pdev)
{
struct zpci_dev *zdev = to_zpci(pdev);
zpci_fmb_disable_device(zdev);
zpci_debug_exit_device(zdev);
}
static int __zpci_register_domain(int domain)
{
spin_lock(&zpci_domain_lock);
if (test_bit(domain, zpci_domain)) {
spin_unlock(&zpci_domain_lock);
pr_err("Domain %04x is already assigned\n", domain);
return -EEXIST;
}
set_bit(domain, zpci_domain);
spin_unlock(&zpci_domain_lock);
return domain;
}
static int __zpci_alloc_domain(void)
{
int domain;
spin_lock(&zpci_domain_lock);
/*
* We can always auto allocate domains below ZPCI_NR_DEVICES.
* There is either a free domain or we have reached the maximum in
* which case we would have bailed earlier.
*/
domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
set_bit(domain, zpci_domain);
spin_unlock(&zpci_domain_lock);
return domain;
}
int zpci_alloc_domain(int domain)
{
if (zpci_unique_uid) {
if (domain)
return __zpci_register_domain(domain);
pr_warn("UID checking was active but no UID is provided: switching to automatic domain allocation\n");
update_uid_checking(false);
}
return __zpci_alloc_domain();
}
void zpci_free_domain(int domain)
{
spin_lock(&zpci_domain_lock);
clear_bit(domain, zpci_domain);
spin_unlock(&zpci_domain_lock);
}
int zpci_enable_device(struct zpci_dev *zdev)
{
u32 fh = zdev->fh;
int rc = 0;
if (clp_enable_fh(zdev, &fh, ZPCI_NR_DMA_SPACES))
rc = -EIO;
else
zpci_update_fh(zdev, fh);
return rc;
}
EXPORT_SYMBOL_GPL(zpci_enable_device);
int zpci_disable_device(struct zpci_dev *zdev)
{
u32 fh = zdev->fh;
int cc, rc = 0;
cc = clp_disable_fh(zdev, &fh);
if (!cc) {
zpci_update_fh(zdev, fh);
} else if (cc == CLP_RC_SETPCIFN_ALRDY) {
pr_info("Disabling PCI function %08x had no effect as it was already disabled\n",
zdev->fid);
/* Function is already disabled - update handle */
rc = clp_refresh_fh(zdev->fid, &fh);
if (!rc) {
zpci_update_fh(zdev, fh);
rc = -EINVAL;
}
} else {
rc = -EIO;
}
return rc;
}
EXPORT_SYMBOL_GPL(zpci_disable_device);
/**
* zpci_hot_reset_device - perform a reset of the given zPCI function
* @zdev: the slot which should be reset
*
* Performs a low level reset of the zPCI function. The reset is low level in
* the sense that the zPCI function can be reset without detaching it from the
* common PCI subsystem. The reset may be performed while under control of
* either DMA or IOMMU APIs in which case the existing DMA/IOMMU translation
* table is reinstated at the end of the reset.
*
 * After the reset the function's internal state is reset to an initial state
* equivalent to its state during boot when first probing a driver.
* Consequently after reset the PCI function requires re-initialization via the
* common PCI code including re-enabling IRQs via pci_alloc_irq_vectors()
 * and enabling the function via e.g. pci_enable_device_flags(). The caller
* must guard against concurrent reset attempts.
*
* In most cases this function should not be called directly but through
* pci_reset_function() or pci_reset_bus() which handle the save/restore and
* locking.
*
* Return: 0 on success and an error value otherwise
*/
int zpci_hot_reset_device(struct zpci_dev *zdev)
{
u8 status;
int rc;
zpci_dbg(3, "rst fid:%x, fh:%x\n", zdev->fid, zdev->fh);
if (zdev_enabled(zdev)) {
/* Disables device access, DMAs and IRQs (reset state) */
rc = zpci_disable_device(zdev);
/*
		 * Due to a z/VM vs LPAR inconsistency in the error state, the
		 * FH may indicate an enabled device while disable reports the
		 * device as already disabled; don't treat this as an error here.
*/
if (rc == -EINVAL)
rc = 0;
if (rc)
return rc;
}
rc = zpci_enable_device(zdev);
if (rc)
return rc;
if (zdev->dma_table)
rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
virt_to_phys(zdev->dma_table), &status);
else
rc = zpci_dma_init_device(zdev);
if (rc) {
zpci_disable_device(zdev);
return rc;
}
return 0;
}
/**
* zpci_create_device() - Create a new zpci_dev and add it to the zbus
* @fid: Function ID of the device to be created
* @fh: Current Function Handle of the device to be created
* @state: Initial state after creation either Standby or Configured
*
* Creates a new zpci device and adds it to its, possibly newly created, zbus
* as well as zpci_list.
*
* Returns: the zdev on success or an error pointer otherwise
*/
struct zpci_dev *zpci_create_device(u32 fid, u32 fh, enum zpci_state state)
{
struct zpci_dev *zdev;
int rc;
zpci_dbg(1, "add fid:%x, fh:%x, c:%d\n", fid, fh, state);
zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
if (!zdev)
return ERR_PTR(-ENOMEM);
/* FID and Function Handle are the static/dynamic identifiers */
zdev->fid = fid;
zdev->fh = fh;
/* Query function properties and update zdev */
rc = clp_query_pci_fn(zdev);
if (rc)
goto error;
zdev->state = state;
kref_init(&zdev->kref);
mutex_init(&zdev->lock);
mutex_init(&zdev->kzdev_lock);
rc = zpci_init_iommu(zdev);
if (rc)
goto error;
rc = zpci_bus_device_register(zdev, &pci_root_ops);
if (rc)
goto error_destroy_iommu;
spin_lock(&zpci_list_lock);
list_add_tail(&zdev->entry, &zpci_list);
spin_unlock(&zpci_list_lock);
return zdev;
error_destroy_iommu:
zpci_destroy_iommu(zdev);
error:
zpci_dbg(0, "add fid:%x, rc:%d\n", fid, rc);
kfree(zdev);
return ERR_PTR(rc);
}
bool zpci_is_device_configured(struct zpci_dev *zdev)
{
enum zpci_state state = zdev->state;
return state != ZPCI_FN_STATE_RESERVED &&
state != ZPCI_FN_STATE_STANDBY;
}
/**
* zpci_scan_configured_device() - Scan a freshly configured zpci_dev
* @zdev: The zpci_dev to be configured
* @fh: The general function handle supplied by the platform
*
* Given a device in the configuration state Configured, enables, scans and
* adds it to the common code PCI subsystem if possible. If any failure occurs,
* the zpci_dev is left disabled.
*
* Return: 0 on success, or an error code otherwise
*/
int zpci_scan_configured_device(struct zpci_dev *zdev, u32 fh)
{
zpci_update_fh(zdev, fh);
return zpci_bus_scan_device(zdev);
}
/**
* zpci_deconfigure_device() - Deconfigure a zpci_dev
 * @zdev: The zpci_dev to deconfigure
*
* Deconfigure a zPCI function that is currently configured and possibly known
* to the common code PCI subsystem.
* If any failure occurs the device is left as is.
*
* Return: 0 on success, or an error code otherwise
*/
int zpci_deconfigure_device(struct zpci_dev *zdev)
{
int rc;
if (zdev->zbus->bus)
zpci_bus_remove_device(zdev, false);
if (zdev->dma_table) {
rc = zpci_dma_exit_device(zdev);
if (rc)
return rc;
}
if (zdev_enabled(zdev)) {
rc = zpci_disable_device(zdev);
if (rc)
return rc;
}
rc = sclp_pci_deconfigure(zdev->fid);
zpci_dbg(3, "deconf fid:%x, rc:%d\n", zdev->fid, rc);
if (rc)
return rc;
zdev->state = ZPCI_FN_STATE_STANDBY;
return 0;
}
/**
 * zpci_device_reserved() - Mark device as reserved
* @zdev: the zpci_dev that was reserved
*
* Handle the case that a given zPCI function was reserved by another system.
 * After a call to this function the zpci_dev can no longer be found via
 * get_zdev_by_fid(), but may still be accessible via existing
 * references, though it will no longer be functional.
*/
void zpci_device_reserved(struct zpci_dev *zdev)
{
if (zdev->has_hp_slot)
zpci_exit_slot(zdev);
/*
* Remove device from zpci_list as it is going away. This also
* makes sure we ignore subsequent zPCI events for this device.
*/
spin_lock(&zpci_list_lock);
list_del(&zdev->entry);
spin_unlock(&zpci_list_lock);
zdev->state = ZPCI_FN_STATE_RESERVED;
zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
zpci_zdev_put(zdev);
}
void zpci_release_device(struct kref *kref)
{
struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref);
int ret;
if (zdev->zbus->bus)
zpci_bus_remove_device(zdev, false);
if (zdev->dma_table)
zpci_dma_exit_device(zdev);
if (zdev_enabled(zdev))
zpci_disable_device(zdev);
switch (zdev->state) {
case ZPCI_FN_STATE_CONFIGURED:
ret = sclp_pci_deconfigure(zdev->fid);
zpci_dbg(3, "deconf fid:%x, rc:%d\n", zdev->fid, ret);
fallthrough;
case ZPCI_FN_STATE_STANDBY:
if (zdev->has_hp_slot)
zpci_exit_slot(zdev);
spin_lock(&zpci_list_lock);
list_del(&zdev->entry);
spin_unlock(&zpci_list_lock);
zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
fallthrough;
case ZPCI_FN_STATE_RESERVED:
if (zdev->has_resources)
zpci_cleanup_bus_resources(zdev);
zpci_bus_device_unregister(zdev);
zpci_destroy_iommu(zdev);
fallthrough;
default:
break;
}
zpci_dbg(3, "rem fid:%x\n", zdev->fid);
kfree_rcu(zdev, rcu);
}
int zpci_report_error(struct pci_dev *pdev,
struct zpci_report_error_header *report)
{
struct zpci_dev *zdev = to_zpci(pdev);
return sclp_pci_report(report, zdev->fh, zdev->fid);
}
EXPORT_SYMBOL(zpci_report_error);
/**
* zpci_clear_error_state() - Clears the zPCI error state of the device
* @zdev: The zdev for which the zPCI error state should be reset
*
* Clear the zPCI error state of the device. If clearing the zPCI error state
* fails the device is left in the error state. In this case it may make sense
* to call zpci_io_perm_failure() on the associated pdev if it exists.
*
* Returns: 0 on success, -EIO otherwise
*/
int zpci_clear_error_state(struct zpci_dev *zdev)
{
u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_RESET_ERROR);
struct zpci_fib fib = {0};
u8 status;
int cc;
cc = zpci_mod_fc(req, &fib, &status);
if (cc) {
zpci_dbg(3, "ces fid:%x, cc:%d, status:%x\n", zdev->fid, cc, status);
return -EIO;
}
return 0;
}
/**
* zpci_reset_load_store_blocked() - Re-enables L/S from error state
* @zdev: The zdev for which to unblock load/store access
*
* Re-enables load/store access for a PCI function in the error state while
* keeping DMA blocked. In this state drivers can poke MMIO space to determine
* if error recovery is possible while catching any rogue DMA access from the
* device.
*
* Returns: 0 on success, -EIO otherwise
*/
int zpci_reset_load_store_blocked(struct zpci_dev *zdev)
{
u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_RESET_BLOCK);
struct zpci_fib fib = {0};
u8 status;
int cc;
cc = zpci_mod_fc(req, &fib, &status);
if (cc) {
zpci_dbg(3, "rls fid:%x, cc:%d, status:%x\n", zdev->fid, cc, status);
return -EIO;
}
return 0;
}
static int zpci_mem_init(void)
{
BUILD_BUG_ON(!is_power_of_2(__alignof__(struct zpci_fmb)) ||
__alignof__(struct zpci_fmb) < sizeof(struct zpci_fmb));
zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
__alignof__(struct zpci_fmb), 0, NULL);
if (!zdev_fmb_cache)
goto error_fmb;
zpci_iomap_start = kcalloc(ZPCI_IOMAP_ENTRIES,
sizeof(*zpci_iomap_start), GFP_KERNEL);
if (!zpci_iomap_start)
goto error_iomap;
zpci_iomap_bitmap = kcalloc(BITS_TO_LONGS(ZPCI_IOMAP_ENTRIES),
sizeof(*zpci_iomap_bitmap), GFP_KERNEL);
if (!zpci_iomap_bitmap)
goto error_iomap_bitmap;
if (static_branch_likely(&have_mio))
clp_setup_writeback_mio();
return 0;
error_iomap_bitmap:
kfree(zpci_iomap_start);
error_iomap:
kmem_cache_destroy(zdev_fmb_cache);
error_fmb:
return -ENOMEM;
}
static void zpci_mem_exit(void)
{
kfree(zpci_iomap_bitmap);
kfree(zpci_iomap_start);
kmem_cache_destroy(zdev_fmb_cache);
}
static unsigned int s390_pci_probe __initdata = 1;
unsigned int s390_pci_force_floating __initdata;
static unsigned int s390_pci_initialized;
char * __init pcibios_setup(char *str)
{
if (!strcmp(str, "off")) {
s390_pci_probe = 0;
return NULL;
}
if (!strcmp(str, "nomio")) {
S390_lowcore.machine_flags &= ~MACHINE_FLAG_PCI_MIO;
return NULL;
}
if (!strcmp(str, "force_floating")) {
s390_pci_force_floating = 1;
return NULL;
}
if (!strcmp(str, "norid")) {
s390_pci_no_rid = 1;
return NULL;
}
return str;
}
bool zpci_is_enabled(void)
{
return s390_pci_initialized;
}
static int __init pci_base_init(void)
{
int rc;
if (!s390_pci_probe)
return 0;
if (!test_facility(69) || !test_facility(71)) {
pr_info("PCI is not supported because CPU facilities 69 or 71 are not available\n");
return 0;
}
if (MACHINE_HAS_PCI_MIO) {
static_branch_enable(&have_mio);
ctl_set_bit(2, 5);
}
rc = zpci_debug_init();
if (rc)
goto out;
rc = zpci_mem_init();
if (rc)
goto out_mem;
rc = zpci_irq_init();
if (rc)
goto out_irq;
rc = zpci_dma_init();
if (rc)
goto out_dma;
rc = clp_scan_pci_devices();
if (rc)
goto out_find;
zpci_bus_scan_busses();
s390_pci_initialized = 1;
return 0;
out_find:
zpci_dma_exit();
out_dma:
zpci_irq_exit();
out_irq:
zpci_mem_exit();
out_mem:
zpci_debug_exit();
out:
return rc;
}
subsys_initcall_sync(pci_base_init);
| linux-master | arch/s390/pci/pci.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* VFIO ZPCI devices support
*
* Copyright (C) IBM Corp. 2022. All rights reserved.
* Author(s): Pierre Morel <[email protected]>
*/
#include <linux/kvm_host.h>
struct zpci_kvm_hook zpci_kvm_hook;
EXPORT_SYMBOL_GPL(zpci_kvm_hook);
| linux-master | arch/s390/pci/pci_kvm_hook.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2012
*
* Author(s):
* Jan Glauber <[email protected]>
*/
#define KMSG_COMPONENT "zpci"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/stat.h>
#include <linux/pci.h>
#include "../../../drivers/pci/pci.h"
#include <asm/sclp.h>
#define zpci_attr(name, fmt, member) \
static ssize_t name##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); \
\
return sprintf(buf, fmt, zdev->member); \
} \
static DEVICE_ATTR_RO(name)
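/*
 * Illustrative expansion (sketch): zpci_attr(function_id, "0x%08x\n", fid)
 * defines
 *
 *	static ssize_t function_id_show(struct device *dev,
 *					struct device_attribute *attr,
 *					char *buf)
 *	{
 *		struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
 *
 *		return sprintf(buf, "0x%08x\n", zdev->fid);
 *	}
 *	static DEVICE_ATTR_RO(function_id);
 *
 * and the resulting dev_attr_function_id.attr is listed in
 * zpci_dev_attrs[] below.
 */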
zpci_attr(function_id, "0x%08x\n", fid);
zpci_attr(function_handle, "0x%08x\n", fh);
zpci_attr(pchid, "0x%04x\n", pchid);
zpci_attr(pfgid, "0x%02x\n", pfgid);
zpci_attr(vfn, "0x%04x\n", vfn);
zpci_attr(pft, "0x%02x\n", pft);
zpci_attr(port, "%d\n", port);
zpci_attr(uid, "0x%x\n", uid);
zpci_attr(segment0, "0x%02x\n", pfip[0]);
zpci_attr(segment1, "0x%02x\n", pfip[1]);
zpci_attr(segment2, "0x%02x\n", pfip[2]);
zpci_attr(segment3, "0x%02x\n", pfip[3]);
static ssize_t mio_enabled_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
return sprintf(buf, zpci_use_mio(zdev) ? "1\n" : "0\n");
}
static DEVICE_ATTR_RO(mio_enabled);
static ssize_t recover_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct kernfs_node *kn;
struct pci_dev *pdev = to_pci_dev(dev);
struct zpci_dev *zdev = to_zpci(pdev);
int ret = 0;
/* Can't use device_remove_self() here as that would lead us to lock
	 * the pci_rescan_remove_lock while holding the device's kernfs lock.
	 * This would create a possible deadlock with disable_slot() which is
	 * not directly protected by the device's kernfs lock but takes it
* during the device removal which happens under
* pci_rescan_remove_lock.
*
* This is analogous to sdev_store_delete() in
* drivers/scsi/scsi_sysfs.c
*/
kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
WARN_ON_ONCE(!kn);
	/* device_remove_file() serializes concurrent calls, ignoring all but
	 * the first.
*/
device_remove_file(dev, attr);
/* A concurrent call to recover_store() may slip between
* sysfs_break_active_protection() and the sysfs file removal.
* Once it unblocks from pci_lock_rescan_remove() the original pdev
* will already be removed.
*/
pci_lock_rescan_remove();
if (pci_dev_is_added(pdev)) {
pci_stop_and_remove_bus_device(pdev);
if (zdev->dma_table) {
ret = zpci_dma_exit_device(zdev);
if (ret)
goto out;
}
if (zdev_enabled(zdev)) {
ret = zpci_disable_device(zdev);
/*
			 * Due to a z/VM vs LPAR inconsistency in the error
			 * state, the FH may indicate an enabled device while
			 * disable reports the device as already disabled;
			 * don't treat this as an error here.
*/
if (ret == -EINVAL)
ret = 0;
if (ret)
goto out;
}
ret = zpci_enable_device(zdev);
if (ret)
goto out;
ret = zpci_dma_init_device(zdev);
if (ret) {
zpci_disable_device(zdev);
goto out;
}
pci_rescan_bus(zdev->zbus->bus);
}
out:
pci_unlock_rescan_remove();
if (kn)
sysfs_unbreak_active_protection(kn);
return ret ? ret : count;
}
static DEVICE_ATTR_WO(recover);
static ssize_t util_string_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
struct pci_dev *pdev = to_pci_dev(dev);
struct zpci_dev *zdev = to_zpci(pdev);
return memory_read_from_buffer(buf, count, &off, zdev->util_str,
sizeof(zdev->util_str));
}
static BIN_ATTR_RO(util_string, CLP_UTIL_STR_LEN);
static ssize_t report_error_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
struct zpci_report_error_header *report = (void *) buf;
struct device *dev = kobj_to_dev(kobj);
struct pci_dev *pdev = to_pci_dev(dev);
struct zpci_dev *zdev = to_zpci(pdev);
int ret;
if (off || (count < sizeof(*report)))
return -EINVAL;
ret = sclp_pci_report(report, zdev->fh, zdev->fid);
return ret ? ret : count;
}
static BIN_ATTR(report_error, S_IWUSR, NULL, report_error_write, PAGE_SIZE);
static ssize_t uid_is_unique_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%d\n", zpci_unique_uid ? 1 : 0);
}
static DEVICE_ATTR_RO(uid_is_unique);
#ifndef CONFIG_DMI
/* analogous to smbios index */
static ssize_t index_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
u32 index = ~0;
if (zpci_unique_uid)
index = zdev->uid;
return sysfs_emit(buf, "%u\n", index);
}
static DEVICE_ATTR_RO(index);
static umode_t zpci_index_is_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
return zpci_unique_uid ? attr->mode : 0;
}
static struct attribute *zpci_ident_attrs[] = {
&dev_attr_index.attr,
NULL,
};
static struct attribute_group zpci_ident_attr_group = {
.attrs = zpci_ident_attrs,
.is_visible = zpci_index_is_visible,
};
#endif
static struct bin_attribute *zpci_bin_attrs[] = {
&bin_attr_util_string,
&bin_attr_report_error,
NULL,
};
static struct attribute *zpci_dev_attrs[] = {
&dev_attr_function_id.attr,
&dev_attr_function_handle.attr,
&dev_attr_pchid.attr,
&dev_attr_pfgid.attr,
&dev_attr_pft.attr,
&dev_attr_port.attr,
&dev_attr_vfn.attr,
&dev_attr_uid.attr,
&dev_attr_recover.attr,
&dev_attr_mio_enabled.attr,
&dev_attr_uid_is_unique.attr,
NULL,
};
static struct attribute_group zpci_attr_group = {
.attrs = zpci_dev_attrs,
.bin_attrs = zpci_bin_attrs,
};
static struct attribute *pfip_attrs[] = {
&dev_attr_segment0.attr,
&dev_attr_segment1.attr,
&dev_attr_segment2.attr,
&dev_attr_segment3.attr,
NULL,
};
static struct attribute_group pfip_attr_group = {
.name = "pfip",
.attrs = pfip_attrs,
};
const struct attribute_group *zpci_attr_groups[] = {
&zpci_attr_group,
&pfip_attr_group,
#ifndef CONFIG_DMI
&zpci_ident_attr_group,
#endif
NULL,
};
| linux-master | arch/s390/pci/pci_sysfs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2020
*
* Author(s):
* Niklas Schnelle <[email protected]>
*
*/
#define KMSG_COMPONENT "zpci"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/pci.h>
#include "pci_iov.h"
static struct resource iov_res = {
.name = "PCI IOV res",
.start = 0,
.end = -1,
.flags = IORESOURCE_MEM,
};
void zpci_iov_map_resources(struct pci_dev *pdev)
{
resource_size_t len;
int i;
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
int bar = i + PCI_IOV_RESOURCES;
len = pci_resource_len(pdev, bar);
if (!len)
continue;
pdev->resource[bar].parent = &iov_res;
}
}
void zpci_iov_remove_virtfn(struct pci_dev *pdev, int vfn)
{
pci_lock_rescan_remove();
	/* Linux' vfids start at 0 while vfn starts at 1 */
pci_iov_remove_virtfn(pdev->physfn, vfn - 1);
pci_unlock_rescan_remove();
}
static int zpci_iov_link_virtfn(struct pci_dev *pdev, struct pci_dev *virtfn, int vfid)
{
int rc;
rc = pci_iov_sysfs_link(pdev, virtfn, vfid);
if (rc)
return rc;
virtfn->is_virtfn = 1;
virtfn->multifunction = 0;
virtfn->physfn = pci_dev_get(pdev);
return 0;
}
int zpci_iov_setup_virtfn(struct zpci_bus *zbus, struct pci_dev *virtfn, int vfn)
{
int i, cand_devfn;
struct zpci_dev *zdev;
struct pci_dev *pdev;
	int vfid = vfn - 1; /* Linux' vfids start at 0 while vfn starts at 1 */
int rc = 0;
if (!zbus->multifunction)
return 0;
/* If the parent PF for the given VF is also configured in the
* instance, it must be on the same zbus.
* We can then identify the parent PF by checking what
* devfn the VF would have if it belonged to that PF using the PF's
* stride and offset. Only if this candidate devfn matches the
* actual devfn will we link both functions.
*/
for (i = 0; i < ZPCI_FUNCTIONS_PER_BUS; i++) {
zdev = zbus->function[i];
if (zdev && zdev->is_physfn) {
pdev = pci_get_slot(zbus->bus, zdev->devfn);
if (!pdev)
continue;
cand_devfn = pci_iov_virtfn_devfn(pdev, vfid);
if (cand_devfn == virtfn->devfn) {
rc = zpci_iov_link_virtfn(pdev, virtfn, vfid);
/* balance pci_get_slot() */
pci_dev_put(pdev);
break;
}
/* balance pci_get_slot() */
pci_dev_put(pdev);
}
}
return rc;
}
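/*
 * Example of the candidate check above (hypothetical numbers): with the
 * SR-IOV routing-ID scheme implemented by pci_iov_virtfn_devfn(), a PF at
 * devfn 0 with First VF Offset 1 and VF Stride 1 would place vfid 2
 * (i.e. vfn 3) at devfn 0 + 1 + 2 * 1 == 3; only when this candidate
 * matches the VF's actual devfn are the two functions linked.
 */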
| linux-master | arch/s390/pci/pci_iov.c |
// SPDX-License-Identifier: GPL-2.0
/*
* s390 specific pci instructions
*
* Copyright IBM Corp. 2013
*/
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/jump_label.h>
#include <asm/asm-extable.h>
#include <asm/facility.h>
#include <asm/pci_insn.h>
#include <asm/pci_debug.h>
#include <asm/pci_io.h>
#include <asm/processor.h>
#define ZPCI_INSN_BUSY_DELAY 1 /* 1 microsecond */
struct zpci_err_insn_data {
u8 insn;
u8 cc;
u8 status;
union {
struct {
u64 req;
u64 offset;
};
struct {
u64 addr;
u64 len;
};
};
} __packed;
static inline void zpci_err_insn_req(int lvl, u8 insn, u8 cc, u8 status,
u64 req, u64 offset)
{
struct zpci_err_insn_data data = {
.insn = insn, .cc = cc, .status = status,
.req = req, .offset = offset};
zpci_err_hex_level(lvl, &data, sizeof(data));
}
static inline void zpci_err_insn_addr(int lvl, u8 insn, u8 cc, u8 status,
u64 addr, u64 len)
{
struct zpci_err_insn_data data = {
.insn = insn, .cc = cc, .status = status,
.addr = addr, .len = len};
zpci_err_hex_level(lvl, &data, sizeof(data));
}
/* Modify PCI Function Controls */
static inline u8 __mpcifc(u64 req, struct zpci_fib *fib, u8 *status)
{
u8 cc;
asm volatile (
" .insn rxy,0xe300000000d0,%[req],%[fib]\n"
" ipm %[cc]\n"
" srl %[cc],28\n"
: [cc] "=d" (cc), [req] "+d" (req), [fib] "+Q" (*fib)
: : "cc");
*status = req >> 24 & 0xff;
return cc;
}
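/*
 * Illustrative note: the "ipm"/"srl ...,28" pair is the usual s390 idiom
 * for extracting the PSW condition code into the low bits of a register,
 * yielding cc values 0-3; the status byte comes back in the request
 * register and is picked out with "req >> 24 & 0xff".
 */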
u8 zpci_mod_fc(u64 req, struct zpci_fib *fib, u8 *status)
{
bool retried = false;
u8 cc;
do {
cc = __mpcifc(req, fib, status);
if (cc == 2) {
msleep(ZPCI_INSN_BUSY_DELAY);
if (!retried) {
zpci_err_insn_req(1, 'M', cc, *status, req, 0);
retried = true;
}
}
} while (cc == 2);
if (cc)
zpci_err_insn_req(0, 'M', cc, *status, req, 0);
else if (retried)
zpci_err_insn_req(1, 'M', cc, *status, req, 0);
return cc;
}
EXPORT_SYMBOL_GPL(zpci_mod_fc);
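/*
 * Note on the retry pattern above (shared by the load/store helpers
 * below): condition code 2 means the function is busy, so the insn is
 * retried after a short delay; the first busy retry is logged at debug
 * level 1 and only a final failure is logged at level 0.
 */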
/* Refresh PCI Translations */
static inline u8 __rpcit(u64 fn, u64 addr, u64 range, u8 *status)
{
union register_pair addr_range = {.even = addr, .odd = range};
u8 cc;
asm volatile (
" .insn rre,0xb9d30000,%[fn],%[addr_range]\n"
" ipm %[cc]\n"
" srl %[cc],28\n"
: [cc] "=d" (cc), [fn] "+d" (fn)
: [addr_range] "d" (addr_range.pair)
: "cc");
*status = fn >> 24 & 0xff;
return cc;
}
int zpci_refresh_trans(u64 fn, u64 addr, u64 range)
{
bool retried = false;
u8 cc, status;
do {
cc = __rpcit(fn, addr, range, &status);
if (cc == 2) {
udelay(ZPCI_INSN_BUSY_DELAY);
if (!retried) {
zpci_err_insn_addr(1, 'R', cc, status, addr, range);
retried = true;
}
}
} while (cc == 2);
if (cc)
zpci_err_insn_addr(0, 'R', cc, status, addr, range);
else if (retried)
zpci_err_insn_addr(1, 'R', cc, status, addr, range);
if (cc == 1 && (status == 4 || status == 16))
return -ENOMEM;
return (cc) ? -EIO : 0;
}
/* Set Interruption Controls */
int zpci_set_irq_ctrl(u16 ctl, u8 isc, union zpci_sic_iib *iib)
{
if (!test_facility(72))
return -EIO;
asm volatile(
".insn rsy,0xeb00000000d1,%[ctl],%[isc],%[iib]\n"
: : [ctl] "d" (ctl), [isc] "d" (isc << 27), [iib] "Q" (*iib));
return 0;
}
EXPORT_SYMBOL_GPL(zpci_set_irq_ctrl);
/* PCI Load */
static inline int ____pcilg(u64 *data, u64 req, u64 offset, u8 *status)
{
union register_pair req_off = {.even = req, .odd = offset};
int cc = -ENXIO;
u64 __data;
asm volatile (
" .insn rre,0xb9d20000,%[data],%[req_off]\n"
"0: ipm %[cc]\n"
" srl %[cc],28\n"
"1:\n"
EX_TABLE(0b, 1b)
: [cc] "+d" (cc), [data] "=d" (__data),
[req_off] "+&d" (req_off.pair) :: "cc");
*status = req_off.even >> 24 & 0xff;
*data = __data;
return cc;
}
static inline int __pcilg(u64 *data, u64 req, u64 offset, u8 *status)
{
u64 __data;
int cc;
cc = ____pcilg(&__data, req, offset, status);
if (!cc)
*data = __data;
return cc;
}
int __zpci_load(u64 *data, u64 req, u64 offset)
{
bool retried = false;
u8 status;
int cc;
do {
cc = __pcilg(data, req, offset, &status);
if (cc == 2) {
udelay(ZPCI_INSN_BUSY_DELAY);
if (!retried) {
zpci_err_insn_req(1, 'l', cc, status, req, offset);
retried = true;
}
}
} while (cc == 2);
if (cc)
zpci_err_insn_req(0, 'l', cc, status, req, offset);
else if (retried)
zpci_err_insn_req(1, 'l', cc, status, req, offset);
return (cc > 0) ? -EIO : cc;
}
EXPORT_SYMBOL_GPL(__zpci_load);
static inline int zpci_load_fh(u64 *data, const volatile void __iomem *addr,
unsigned long len)
{
struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)];
u64 req = ZPCI_CREATE_REQ(READ_ONCE(entry->fh), entry->bar, len);
return __zpci_load(data, req, ZPCI_OFFSET(addr));
}
static inline int __pcilg_mio(u64 *data, u64 ioaddr, u64 len, u8 *status)
{
union register_pair ioaddr_len = {.even = ioaddr, .odd = len};
int cc = -ENXIO;
u64 __data;
asm volatile (
" .insn rre,0xb9d60000,%[data],%[ioaddr_len]\n"
"0: ipm %[cc]\n"
" srl %[cc],28\n"
"1:\n"
EX_TABLE(0b, 1b)
: [cc] "+d" (cc), [data] "=d" (__data),
[ioaddr_len] "+&d" (ioaddr_len.pair) :: "cc");
*status = ioaddr_len.odd >> 24 & 0xff;
*data = __data;
return cc;
}
int zpci_load(u64 *data, const volatile void __iomem *addr, unsigned long len)
{
u8 status;
int cc;
if (!static_branch_unlikely(&have_mio))
return zpci_load_fh(data, addr, len);
cc = __pcilg_mio(data, (__force u64) addr, len, &status);
if (cc)
zpci_err_insn_addr(0, 'L', cc, status, (__force u64) addr, len);
return (cc > 0) ? -EIO : cc;
}
EXPORT_SYMBOL_GPL(zpci_load);
/* PCI Store */
static inline int __pcistg(u64 data, u64 req, u64 offset, u8 *status)
{
union register_pair req_off = {.even = req, .odd = offset};
int cc = -ENXIO;
asm volatile (
" .insn rre,0xb9d00000,%[data],%[req_off]\n"
"0: ipm %[cc]\n"
" srl %[cc],28\n"
"1:\n"
EX_TABLE(0b, 1b)
: [cc] "+d" (cc), [req_off] "+&d" (req_off.pair)
: [data] "d" (data)
: "cc");
*status = req_off.even >> 24 & 0xff;
return cc;
}
int __zpci_store(u64 data, u64 req, u64 offset)
{
bool retried = false;
u8 status;
int cc;
do {
cc = __pcistg(data, req, offset, &status);
if (cc == 2) {
udelay(ZPCI_INSN_BUSY_DELAY);
if (!retried) {
zpci_err_insn_req(1, 's', cc, status, req, offset);
retried = true;
}
}
} while (cc == 2);
if (cc)
zpci_err_insn_req(0, 's', cc, status, req, offset);
else if (retried)
zpci_err_insn_req(1, 's', cc, status, req, offset);
return (cc > 0) ? -EIO : cc;
}
EXPORT_SYMBOL_GPL(__zpci_store);
static inline int zpci_store_fh(const volatile void __iomem *addr, u64 data,
unsigned long len)
{
struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)];
u64 req = ZPCI_CREATE_REQ(READ_ONCE(entry->fh), entry->bar, len);
return __zpci_store(data, req, ZPCI_OFFSET(addr));
}
static inline int __pcistg_mio(u64 data, u64 ioaddr, u64 len, u8 *status)
{
union register_pair ioaddr_len = {.even = ioaddr, .odd = len};
int cc = -ENXIO;
asm volatile (
" .insn rre,0xb9d40000,%[data],%[ioaddr_len]\n"
"0: ipm %[cc]\n"
" srl %[cc],28\n"
"1:\n"
EX_TABLE(0b, 1b)
: [cc] "+d" (cc), [ioaddr_len] "+&d" (ioaddr_len.pair)
: [data] "d" (data)
: "cc", "memory");
*status = ioaddr_len.odd >> 24 & 0xff;
return cc;
}
int zpci_store(const volatile void __iomem *addr, u64 data, unsigned long len)
{
u8 status;
int cc;
if (!static_branch_unlikely(&have_mio))
return zpci_store_fh(addr, data, len);
cc = __pcistg_mio(data, (__force u64) addr, len, &status);
if (cc)
zpci_err_insn_addr(0, 'S', cc, status, (__force u64) addr, len);
return (cc > 0) ? -EIO : cc;
}
EXPORT_SYMBOL_GPL(zpci_store);
/* PCI Store Block */
static inline int __pcistb(const u64 *data, u64 req, u64 offset, u8 *status)
{
int cc = -ENXIO;
asm volatile (
" .insn rsy,0xeb00000000d0,%[req],%[offset],%[data]\n"
"0: ipm %[cc]\n"
" srl %[cc],28\n"
"1:\n"
EX_TABLE(0b, 1b)
: [cc] "+d" (cc), [req] "+d" (req)
: [offset] "d" (offset), [data] "Q" (*data)
: "cc");
*status = req >> 24 & 0xff;
return cc;
}
int __zpci_store_block(const u64 *data, u64 req, u64 offset)
{
bool retried = false;
u8 status;
int cc;
do {
cc = __pcistb(data, req, offset, &status);
if (cc == 2) {
udelay(ZPCI_INSN_BUSY_DELAY);
if (!retried) {
				zpci_err_insn_req(1, 'b', cc, status, req, offset);
retried = true;
}
}
} while (cc == 2);
if (cc)
zpci_err_insn_req(0, 'b', cc, status, req, offset);
else if (retried)
zpci_err_insn_req(1, 'b', cc, status, req, offset);
return (cc > 0) ? -EIO : cc;
}
EXPORT_SYMBOL_GPL(__zpci_store_block);
static inline int zpci_write_block_fh(volatile void __iomem *dst,
const void *src, unsigned long len)
{
struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(dst)];
u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, len);
u64 offset = ZPCI_OFFSET(dst);
return __zpci_store_block(src, req, offset);
}
static inline int __pcistb_mio(const u64 *data, u64 ioaddr, u64 len, u8 *status)
{
int cc = -ENXIO;
asm volatile (
" .insn rsy,0xeb00000000d4,%[len],%[ioaddr],%[data]\n"
"0: ipm %[cc]\n"
" srl %[cc],28\n"
"1:\n"
EX_TABLE(0b, 1b)
: [cc] "+d" (cc), [len] "+d" (len)
: [ioaddr] "d" (ioaddr), [data] "Q" (*data)
: "cc");
*status = len >> 24 & 0xff;
return cc;
}
int zpci_write_block(volatile void __iomem *dst,
const void *src, unsigned long len)
{
u8 status;
int cc;
if (!static_branch_unlikely(&have_mio))
return zpci_write_block_fh(dst, src, len);
cc = __pcistb_mio(src, (__force u64) dst, len, &status);
if (cc)
zpci_err_insn_addr(0, 'B', cc, status, (__force u64) dst, len);
return (cc > 0) ? -EIO : cc;
}
EXPORT_SYMBOL_GPL(zpci_write_block);
static inline void __pciwb_mio(void)
{
asm volatile (".insn rre,0xb9d50000,0,0\n");
}
void zpci_barrier(void)
{
if (static_branch_likely(&have_mio))
__pciwb_mio();
}
EXPORT_SYMBOL_GPL(zpci_barrier);
| linux-master | arch/s390/pci/pci_insn.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2012,2015
*
* Author(s):
* Jan Glauber <[email protected]>
*/
#define KMSG_COMPONENT "zpci"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <asm/debug.h>
#include <asm/pci_dma.h>
static struct dentry *debugfs_root;
debug_info_t *pci_debug_msg_id;
EXPORT_SYMBOL_GPL(pci_debug_msg_id);
debug_info_t *pci_debug_err_id;
EXPORT_SYMBOL_GPL(pci_debug_err_id);
static char *pci_common_names[] = {
"Load operations",
"Store operations",
"Store block operations",
"Refresh operations",
};
static char *pci_fmt0_names[] = {
"DMA read bytes",
"DMA write bytes",
};
static char *pci_fmt1_names[] = {
"Received bytes",
"Received packets",
"Transmitted bytes",
"Transmitted packets",
};
static char *pci_fmt2_names[] = {
"Consumed work units",
"Maximum work units",
};
static char *pci_fmt3_names[] = {
"Transmitted bytes",
};
static char *pci_sw_names[] = {
"Allocated pages",
"Mapped pages",
"Unmapped pages",
};
static void pci_fmb_show(struct seq_file *m, char *name[], int length,
u64 *data)
{
int i;
for (i = 0; i < length; i++, data++)
seq_printf(m, "%26s:\t%llu\n", name[i], *data);
}
static void pci_sw_counter_show(struct seq_file *m)
{
struct zpci_dev *zdev = m->private;
atomic64_t *counter = &zdev->allocated_pages;
int i;
for (i = 0; i < ARRAY_SIZE(pci_sw_names); i++, counter++)
seq_printf(m, "%26s:\t%llu\n", pci_sw_names[i],
atomic64_read(counter));
}
static int pci_perf_show(struct seq_file *m, void *v)
{
struct zpci_dev *zdev = m->private;
if (!zdev)
return 0;
mutex_lock(&zdev->lock);
if (!zdev->fmb) {
mutex_unlock(&zdev->lock);
seq_puts(m, "FMB statistics disabled\n");
return 0;
}
/* header */
seq_printf(m, "Update interval: %u ms\n", zdev->fmb_update);
seq_printf(m, "Samples: %u\n", zdev->fmb->samples);
seq_printf(m, "Last update TOD: %Lx\n", zdev->fmb->last_update);
pci_fmb_show(m, pci_common_names, ARRAY_SIZE(pci_common_names),
&zdev->fmb->ld_ops);
switch (zdev->fmb->format) {
case 0:
if (!(zdev->fmb->fmt_ind & ZPCI_FMB_DMA_COUNTER_VALID))
break;
pci_fmb_show(m, pci_fmt0_names, ARRAY_SIZE(pci_fmt0_names),
&zdev->fmb->fmt0.dma_rbytes);
break;
case 1:
pci_fmb_show(m, pci_fmt1_names, ARRAY_SIZE(pci_fmt1_names),
&zdev->fmb->fmt1.rx_bytes);
break;
case 2:
pci_fmb_show(m, pci_fmt2_names, ARRAY_SIZE(pci_fmt2_names),
&zdev->fmb->fmt2.consumed_work_units);
break;
case 3:
pci_fmb_show(m, pci_fmt3_names, ARRAY_SIZE(pci_fmt3_names),
&zdev->fmb->fmt3.tx_bytes);
break;
default:
seq_puts(m, "Unknown format\n");
}
pci_sw_counter_show(m);
mutex_unlock(&zdev->lock);
return 0;
}
static ssize_t pci_perf_seq_write(struct file *file, const char __user *ubuf,
size_t count, loff_t *off)
{
struct zpci_dev *zdev = ((struct seq_file *) file->private_data)->private;
unsigned long val;
int rc;
if (!zdev)
return 0;
rc = kstrtoul_from_user(ubuf, count, 10, &val);
if (rc)
return rc;
mutex_lock(&zdev->lock);
switch (val) {
case 0:
rc = zpci_fmb_disable_device(zdev);
break;
case 1:
rc = zpci_fmb_enable_device(zdev);
break;
}
mutex_unlock(&zdev->lock);
return rc ? rc : count;
}
static int pci_perf_seq_open(struct inode *inode, struct file *filp)
{
return single_open(filp, pci_perf_show,
file_inode(filp)->i_private);
}
static const struct file_operations debugfs_pci_perf_fops = {
.open = pci_perf_seq_open,
.read = seq_read,
.write = pci_perf_seq_write,
.llseek = seq_lseek,
.release = single_release,
};
void zpci_debug_init_device(struct zpci_dev *zdev, const char *name)
{
zdev->debugfs_dev = debugfs_create_dir(name, debugfs_root);
debugfs_create_file("statistics", S_IFREG | S_IRUGO | S_IWUSR,
zdev->debugfs_dev, zdev, &debugfs_pci_perf_fops);
}
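/*
 * Usage sketch (assuming the default debugfs mount point):
 *
 *	echo 1 > /sys/kernel/debug/pci/<dev>/statistics    # enable FMB
 *	cat /sys/kernel/debug/pci/<dev>/statistics         # dump counters
 *	echo 0 > /sys/kernel/debug/pci/<dev>/statistics    # disable FMB
 */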
void zpci_debug_exit_device(struct zpci_dev *zdev)
{
debugfs_remove_recursive(zdev->debugfs_dev);
}
int __init zpci_debug_init(void)
{
/* event trace buffer */
pci_debug_msg_id = debug_register("pci_msg", 8, 1, 8 * sizeof(long));
if (!pci_debug_msg_id)
return -EINVAL;
debug_register_view(pci_debug_msg_id, &debug_sprintf_view);
debug_set_level(pci_debug_msg_id, 3);
/* error log */
pci_debug_err_id = debug_register("pci_error", 2, 1, 16);
if (!pci_debug_err_id)
return -EINVAL;
debug_register_view(pci_debug_err_id, &debug_hex_ascii_view);
debug_set_level(pci_debug_err_id, 3);
debugfs_root = debugfs_create_dir("pci", NULL);
return 0;
}
void zpci_debug_exit(void)
{
debug_unregister(pci_debug_msg_id);
debug_unregister(pci_debug_err_id);
debugfs_remove(debugfs_root);
}
| linux-master | arch/s390/pci/pci_debug.c |
// SPDX-License-Identifier: GPL-2.0
#define KMSG_COMPONENT "zpci"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/kernel_stat.h>
#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/smp.h>
#include <asm/isc.h>
#include <asm/airq.h>
#include <asm/tpi.h>
static enum {FLOATING, DIRECTED} irq_delivery;
/*
* summary bit vector
* FLOATING - summary bit per function
* DIRECTED - summary bit per cpu (only used in fallback path)
*/
static struct airq_iv *zpci_sbv;
/*
* interrupt bit vectors
* FLOATING - interrupt bit vector per function
* DIRECTED - interrupt bit vector per cpu
*/
static struct airq_iv **zpci_ibv;
/* Modify PCI: Register floating adapter interruptions */
static int zpci_set_airq(struct zpci_dev *zdev)
{
u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT);
struct zpci_fib fib = {0};
u8 status;
fib.fmt0.isc = PCI_ISC;
fib.fmt0.sum = 1; /* enable summary notifications */
fib.fmt0.noi = airq_iv_end(zdev->aibv);
fib.fmt0.aibv = virt_to_phys(zdev->aibv->vector);
fib.fmt0.aibvo = 0; /* each zdev has its own interrupt vector */
fib.fmt0.aisb = virt_to_phys(zpci_sbv->vector) + (zdev->aisb / 64) * 8;
fib.fmt0.aisbo = zdev->aisb & 63;
fib.gd = zdev->gisa;
return zpci_mod_fc(req, &fib, &status) ? -EIO : 0;
}
/* Modify PCI: Unregister floating adapter interruptions */
static int zpci_clear_airq(struct zpci_dev *zdev)
{
u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_DEREG_INT);
struct zpci_fib fib = {0};
u8 cc, status;
fib.gd = zdev->gisa;
cc = zpci_mod_fc(req, &fib, &status);
if (cc == 3 || (cc == 1 && status == 24))
/* Function already gone or IRQs already deregistered. */
cc = 0;
return cc ? -EIO : 0;
}
/* Modify PCI: Register CPU directed interruptions */
static int zpci_set_directed_irq(struct zpci_dev *zdev)
{
u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT_D);
struct zpci_fib fib = {0};
u8 status;
fib.fmt = 1;
fib.fmt1.noi = zdev->msi_nr_irqs;
fib.fmt1.dibvo = zdev->msi_first_bit;
fib.gd = zdev->gisa;
return zpci_mod_fc(req, &fib, &status) ? -EIO : 0;
}
/* Modify PCI: Unregister CPU directed interruptions */
static int zpci_clear_directed_irq(struct zpci_dev *zdev)
{
u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_DEREG_INT_D);
struct zpci_fib fib = {0};
u8 cc, status;
fib.fmt = 1;
fib.gd = zdev->gisa;
cc = zpci_mod_fc(req, &fib, &status);
if (cc == 3 || (cc == 1 && status == 24))
/* Function already gone or IRQs already deregistered. */
cc = 0;
return cc ? -EIO : 0;
}
/* Register adapter interruptions */
static int zpci_set_irq(struct zpci_dev *zdev)
{
int rc;
if (irq_delivery == DIRECTED)
rc = zpci_set_directed_irq(zdev);
else
rc = zpci_set_airq(zdev);
if (!rc)
zdev->irqs_registered = 1;
return rc;
}
/* Clear adapter interruptions */
static int zpci_clear_irq(struct zpci_dev *zdev)
{
int rc;
if (irq_delivery == DIRECTED)
rc = zpci_clear_directed_irq(zdev);
else
rc = zpci_clear_airq(zdev);
if (!rc)
zdev->irqs_registered = 0;
return rc;
}
static int zpci_set_irq_affinity(struct irq_data *data, const struct cpumask *dest,
bool force)
{
struct msi_desc *entry = irq_data_get_msi_desc(data);
struct msi_msg msg = entry->msg;
int cpu_addr = smp_cpu_get_cpu_address(cpumask_first(dest));
msg.address_lo &= 0xff0000ff;
msg.address_lo |= (cpu_addr << 8);
pci_write_msi_msg(data->irq, &msg);
return IRQ_SET_MASK_OK;
}
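/*
 * Worked example for the masking above (hypothetical cpu_addr 0x0003):
 * address_lo & 0xff0000ff clears bits 8-23, and cpu_addr << 8 deposits
 * the 16-bit CPU address there, so only the CPU-selection field of the
 * directed MSI address changes while the surrounding bits are preserved.
 */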
static struct irq_chip zpci_irq_chip = {
.name = "PCI-MSI",
.irq_unmask = pci_msi_unmask_irq,
.irq_mask = pci_msi_mask_irq,
};
static void zpci_handle_cpu_local_irq(bool rescan)
{
struct airq_iv *dibv = zpci_ibv[smp_processor_id()];
union zpci_sic_iib iib = {{0}};
unsigned long bit;
int irqs_on = 0;
for (bit = 0;;) {
/* Scan the directed IRQ bit vector */
bit = airq_iv_scan(dibv, bit, airq_iv_end(dibv));
if (bit == -1UL) {
if (!rescan || irqs_on++)
/* End of second scan with interrupts on. */
break;
/* First scan complete, re-enable interrupts. */
if (zpci_set_irq_ctrl(SIC_IRQ_MODE_D_SINGLE, PCI_ISC, &iib))
break;
bit = 0;
continue;
}
inc_irq_stat(IRQIO_MSI);
generic_handle_irq(airq_iv_get_data(dibv, bit));
}
}
struct cpu_irq_data {
call_single_data_t csd;
atomic_t scheduled;
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_irq_data, irq_data);
static void zpci_handle_remote_irq(void *data)
{
atomic_t *scheduled = data;
do {
zpci_handle_cpu_local_irq(false);
} while (atomic_dec_return(scheduled));
}
static void zpci_handle_fallback_irq(void)
{
struct cpu_irq_data *cpu_data;
union zpci_sic_iib iib = {{0}};
unsigned long cpu;
int irqs_on = 0;
for (cpu = 0;;) {
cpu = airq_iv_scan(zpci_sbv, cpu, airq_iv_end(zpci_sbv));
if (cpu == -1UL) {
if (irqs_on++)
/* End of second scan with interrupts on. */
break;
/* First scan complete, re-enable interrupts. */
if (zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, PCI_ISC, &iib))
break;
cpu = 0;
continue;
}
cpu_data = &per_cpu(irq_data, cpu);
if (atomic_inc_return(&cpu_data->scheduled) > 1)
continue;
INIT_CSD(&cpu_data->csd, zpci_handle_remote_irq, &cpu_data->scheduled);
smp_call_function_single_async(cpu, &cpu_data->csd);
}
}
static void zpci_directed_irq_handler(struct airq_struct *airq,
struct tpi_info *tpi_info)
{
bool floating = !tpi_info->directed_irq;
if (floating) {
inc_irq_stat(IRQIO_PCF);
zpci_handle_fallback_irq();
} else {
inc_irq_stat(IRQIO_PCD);
zpci_handle_cpu_local_irq(true);
}
}
static void zpci_floating_irq_handler(struct airq_struct *airq,
struct tpi_info *tpi_info)
{
union zpci_sic_iib iib = {{0}};
unsigned long si, ai;
struct airq_iv *aibv;
int irqs_on = 0;
inc_irq_stat(IRQIO_PCF);
for (si = 0;;) {
/* Scan adapter summary indicator bit vector */
si = airq_iv_scan(zpci_sbv, si, airq_iv_end(zpci_sbv));
if (si == -1UL) {
if (irqs_on++)
/* End of second scan with interrupts on. */
break;
/* First scan complete, re-enable interrupts. */
if (zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, PCI_ISC, &iib))
break;
si = 0;
continue;
}
/* Scan the adapter interrupt vector for this device. */
aibv = zpci_ibv[si];
for (ai = 0;;) {
ai = airq_iv_scan(aibv, ai, airq_iv_end(aibv));
if (ai == -1UL)
break;
inc_irq_stat(IRQIO_MSI);
airq_iv_lock(aibv, ai);
generic_handle_irq(airq_iv_get_data(aibv, ai));
airq_iv_unlock(aibv, ai);
}
}
}
int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
struct zpci_dev *zdev = to_zpci(pdev);
unsigned int hwirq, msi_vecs, cpu;
unsigned long bit;
struct msi_desc *msi;
struct msi_msg msg;
int cpu_addr;
int rc, irq;
zdev->aisb = -1UL;
zdev->msi_first_bit = -1U;
if (type == PCI_CAP_ID_MSI && nvec > 1)
return 1;
msi_vecs = min_t(unsigned int, nvec, zdev->max_msi);
if (irq_delivery == DIRECTED) {
/* Allocate cpu vector bits */
bit = airq_iv_alloc(zpci_ibv[0], msi_vecs);
if (bit == -1UL)
return -EIO;
} else {
/* Allocate adapter summary indicator bit */
bit = airq_iv_alloc_bit(zpci_sbv);
if (bit == -1UL)
return -EIO;
zdev->aisb = bit;
/* Create adapter interrupt vector */
zdev->aibv = airq_iv_create(msi_vecs, AIRQ_IV_DATA | AIRQ_IV_BITLOCK, NULL);
if (!zdev->aibv)
return -ENOMEM;
/* Wire up shortcut pointer */
zpci_ibv[bit] = zdev->aibv;
/* Each function has its own interrupt vector */
bit = 0;
}
/* Request MSI interrupts */
hwirq = bit;
msi_for_each_desc(msi, &pdev->dev, MSI_DESC_NOTASSOCIATED) {
rc = -EIO;
if (hwirq - bit >= msi_vecs)
break;
irq = __irq_alloc_descs(-1, 0, 1, 0, THIS_MODULE,
(irq_delivery == DIRECTED) ?
msi->affinity : NULL);
if (irq < 0)
return -ENOMEM;
rc = irq_set_msi_desc(irq, msi);
if (rc)
return rc;
irq_set_chip_and_handler(irq, &zpci_irq_chip,
handle_percpu_irq);
msg.data = hwirq - bit;
if (irq_delivery == DIRECTED) {
if (msi->affinity)
cpu = cpumask_first(&msi->affinity->mask);
else
cpu = 0;
cpu_addr = smp_cpu_get_cpu_address(cpu);
msg.address_lo = zdev->msi_addr & 0xff0000ff;
msg.address_lo |= (cpu_addr << 8);
for_each_possible_cpu(cpu) {
airq_iv_set_data(zpci_ibv[cpu], hwirq, irq);
}
} else {
msg.address_lo = zdev->msi_addr & 0xffffffff;
airq_iv_set_data(zdev->aibv, hwirq, irq);
}
msg.address_hi = zdev->msi_addr >> 32;
pci_write_msi_msg(irq, &msg);
hwirq++;
}
zdev->msi_first_bit = bit;
zdev->msi_nr_irqs = msi_vecs;
rc = zpci_set_irq(zdev);
if (rc)
return rc;
return (msi_vecs == nvec) ? 0 : msi_vecs;
}
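/*
 * Return convention sketch: 0 means all nvec vectors were set up, a
 * positive value is the smaller number of vectors actually available
 * (1 for multi-vector MSI, or msi_vecs when capped by zdev->max_msi),
 * and a negative value is an error.
 */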
void arch_teardown_msi_irqs(struct pci_dev *pdev)
{
struct zpci_dev *zdev = to_zpci(pdev);
struct msi_desc *msi;
int rc;
/* Disable interrupts */
rc = zpci_clear_irq(zdev);
if (rc)
return;
/* Release MSI interrupts */
msi_for_each_desc(msi, &pdev->dev, MSI_DESC_ASSOCIATED) {
irq_set_msi_desc(msi->irq, NULL);
irq_free_desc(msi->irq);
msi->msg.address_lo = 0;
msi->msg.address_hi = 0;
msi->msg.data = 0;
msi->irq = 0;
}
if (zdev->aisb != -1UL) {
zpci_ibv[zdev->aisb] = NULL;
airq_iv_free_bit(zpci_sbv, zdev->aisb);
zdev->aisb = -1UL;
}
if (zdev->aibv) {
airq_iv_release(zdev->aibv);
zdev->aibv = NULL;
}
if ((irq_delivery == DIRECTED) && zdev->msi_first_bit != -1U)
airq_iv_free(zpci_ibv[0], zdev->msi_first_bit, zdev->msi_nr_irqs);
}
bool arch_restore_msi_irqs(struct pci_dev *pdev)
{
struct zpci_dev *zdev = to_zpci(pdev);
if (!zdev->irqs_registered)
zpci_set_irq(zdev);
return true;
}
static struct airq_struct zpci_airq = {
.handler = zpci_floating_irq_handler,
.isc = PCI_ISC,
};
static void __init cpu_enable_directed_irq(void *unused)
{
union zpci_sic_iib iib = {{0}};
union zpci_sic_iib ziib = {{0}};
iib.cdiib.dibv_addr = (u64) zpci_ibv[smp_processor_id()]->vector;
zpci_set_irq_ctrl(SIC_IRQ_MODE_SET_CPU, 0, &iib);
zpci_set_irq_ctrl(SIC_IRQ_MODE_D_SINGLE, PCI_ISC, &ziib);
}
static int __init zpci_directed_irq_init(void)
{
union zpci_sic_iib iib = {{0}};
unsigned int cpu;
zpci_sbv = airq_iv_create(num_possible_cpus(), 0, NULL);
if (!zpci_sbv)
return -ENOMEM;
iib.diib.isc = PCI_ISC;
iib.diib.nr_cpus = num_possible_cpus();
iib.diib.disb_addr = virt_to_phys(zpci_sbv->vector);
zpci_set_irq_ctrl(SIC_IRQ_MODE_DIRECT, 0, &iib);
zpci_ibv = kcalloc(num_possible_cpus(), sizeof(*zpci_ibv),
GFP_KERNEL);
if (!zpci_ibv)
return -ENOMEM;
for_each_possible_cpu(cpu) {
/*
* Per CPU IRQ vectors look the same but bit-allocation
* is only done on the first vector.
*/
zpci_ibv[cpu] = airq_iv_create(cache_line_size() * BITS_PER_BYTE,
AIRQ_IV_DATA |
AIRQ_IV_CACHELINE |
(!cpu ? AIRQ_IV_ALLOC : 0), NULL);
if (!zpci_ibv[cpu])
return -ENOMEM;
}
on_each_cpu(cpu_enable_directed_irq, NULL, 1);
zpci_irq_chip.irq_set_affinity = zpci_set_irq_affinity;
return 0;
}
static int __init zpci_floating_irq_init(void)
{
zpci_ibv = kcalloc(ZPCI_NR_DEVICES, sizeof(*zpci_ibv), GFP_KERNEL);
if (!zpci_ibv)
return -ENOMEM;
zpci_sbv = airq_iv_create(ZPCI_NR_DEVICES, AIRQ_IV_ALLOC, NULL);
if (!zpci_sbv)
goto out_free;
return 0;
out_free:
kfree(zpci_ibv);
return -ENOMEM;
}
int __init zpci_irq_init(void)
{
union zpci_sic_iib iib = {{0}};
int rc;
irq_delivery = sclp.has_dirq ? DIRECTED : FLOATING;
if (s390_pci_force_floating)
irq_delivery = FLOATING;
if (irq_delivery == DIRECTED)
zpci_airq.handler = zpci_directed_irq_handler;
rc = register_adapter_interrupt(&zpci_airq);
if (rc)
goto out;
/* Set summary to 1 to be called every time for the ISC. */
*zpci_airq.lsi_ptr = 1;
switch (irq_delivery) {
case FLOATING:
rc = zpci_floating_irq_init();
break;
case DIRECTED:
rc = zpci_directed_irq_init();
break;
}
if (rc)
goto out_airq;
/*
* Enable floating IRQs (with suppression after one IRQ). When using
* directed IRQs this enables the fallback path.
*/
zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, PCI_ISC, &iib);
return 0;
out_airq:
unregister_adapter_interrupt(&zpci_airq);
out:
return rc;
}
void __init zpci_irq_exit(void)
{
unsigned int cpu;
if (irq_delivery == DIRECTED) {
for_each_possible_cpu(cpu) {
airq_iv_release(zpci_ibv[cpu]);
}
}
kfree(zpci_ibv);
if (zpci_sbv)
airq_iv_release(zpci_sbv);
unregister_adapter_interrupt(&zpci_airq);
}
| linux-master | arch/s390/pci/pci_irq.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2012
*
* Author(s):
* Jan Glauber <[email protected]>
*/
#define KMSG_COMPONENT "zpci"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/pci.h>
#include <asm/pci_debug.h>
#include <asm/pci_dma.h>
#include <asm/sclp.h>
#include "pci_bus.h"
/* Content Code Description for PCI Function Error */
struct zpci_ccdf_err {
u32 reserved1;
u32 fh; /* function handle */
u32 fid; /* function id */
u32 ett : 4; /* expected table type */
u32 mvn : 12; /* MSI vector number */
u32 dmaas : 8; /* DMA address space */
u32 : 6;
u32 q : 1; /* event qualifier */
u32 rw : 1; /* read/write */
u64 faddr; /* failing address */
u32 reserved3;
u16 reserved4;
u16 pec; /* PCI event code */
} __packed;
/* Content Code Description for PCI Function Availability */
struct zpci_ccdf_avail {
u32 reserved1;
u32 fh; /* function handle */
u32 fid; /* function id */
u32 reserved2;
u32 reserved3;
u32 reserved4;
u32 reserved5;
u16 reserved6;
u16 pec; /* PCI event code */
} __packed;
static inline bool ers_result_indicates_abort(pci_ers_result_t ers_res)
{
switch (ers_res) {
case PCI_ERS_RESULT_CAN_RECOVER:
case PCI_ERS_RESULT_RECOVERED:
case PCI_ERS_RESULT_NEED_RESET:
return false;
default:
return true;
}
}
static bool is_passed_through(struct zpci_dev *zdev)
{
return zdev->s390_domain;
}
static bool is_driver_supported(struct pci_driver *driver)
{
if (!driver || !driver->err_handler)
return false;
if (!driver->err_handler->error_detected)
return false;
if (!driver->err_handler->slot_reset)
return false;
if (!driver->err_handler->resume)
return false;
return true;
}
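/*
 * Note (illustrative): all three callbacks checked above are required
 * because the recovery flow walks error_detected(), optionally
 * mmio_enabled(), then slot_reset() and finally resume(); a driver
 * missing any mandatory step cannot complete it.
 */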
static pci_ers_result_t zpci_event_notify_error_detected(struct pci_dev *pdev,
struct pci_driver *driver)
{
pci_ers_result_t ers_res = PCI_ERS_RESULT_DISCONNECT;
ers_res = driver->err_handler->error_detected(pdev, pdev->error_state);
if (ers_result_indicates_abort(ers_res))
pr_info("%s: Automatic recovery failed after initial reporting\n", pci_name(pdev));
else if (ers_res == PCI_ERS_RESULT_NEED_RESET)
pr_debug("%s: Driver needs reset to recover\n", pci_name(pdev));
return ers_res;
}
static pci_ers_result_t zpci_event_do_error_state_clear(struct pci_dev *pdev,
struct pci_driver *driver)
{
pci_ers_result_t ers_res = PCI_ERS_RESULT_DISCONNECT;
struct zpci_dev *zdev = to_zpci(pdev);
int rc;
pr_info("%s: Unblocking device access for examination\n", pci_name(pdev));
rc = zpci_reset_load_store_blocked(zdev);
if (rc) {
pr_err("%s: Unblocking device access failed\n", pci_name(pdev));
/* Let's try a full reset instead */
return PCI_ERS_RESULT_NEED_RESET;
}
if (driver->err_handler->mmio_enabled) {
ers_res = driver->err_handler->mmio_enabled(pdev);
if (ers_result_indicates_abort(ers_res)) {
pr_info("%s: Automatic recovery failed after MMIO re-enable\n",
pci_name(pdev));
return ers_res;
} else if (ers_res == PCI_ERS_RESULT_NEED_RESET) {
pr_debug("%s: Driver needs reset to recover\n", pci_name(pdev));
return ers_res;
}
}
pr_debug("%s: Unblocking DMA\n", pci_name(pdev));
rc = zpci_clear_error_state(zdev);
if (!rc) {
pdev->error_state = pci_channel_io_normal;
} else {
pr_err("%s: Unblocking DMA failed\n", pci_name(pdev));
/* Let's try a full reset instead */
return PCI_ERS_RESULT_NEED_RESET;
}
return ers_res;
}
static pci_ers_result_t zpci_event_do_reset(struct pci_dev *pdev,
struct pci_driver *driver)
{
pci_ers_result_t ers_res = PCI_ERS_RESULT_DISCONNECT;
pr_info("%s: Initiating reset\n", pci_name(pdev));
if (zpci_hot_reset_device(to_zpci(pdev))) {
pr_err("%s: The reset request failed\n", pci_name(pdev));
return ers_res;
}
pdev->error_state = pci_channel_io_normal;
ers_res = driver->err_handler->slot_reset(pdev);
if (ers_result_indicates_abort(ers_res)) {
pr_info("%s: Automatic recovery failed after slot reset\n", pci_name(pdev));
return ers_res;
}
return ers_res;
}
/* zpci_event_attempt_error_recovery - Try to recover the given PCI function
* @pdev: PCI function to recover currently in the error state
*
* We follow the scheme outlined in Documentation/PCI/pci-error-recovery.rst.
* With the simplification that recovery always happens per function
* and the platform determines which functions are affected for
* multi-function devices.
*/
static pci_ers_result_t zpci_event_attempt_error_recovery(struct pci_dev *pdev)
{
pci_ers_result_t ers_res = PCI_ERS_RESULT_DISCONNECT;
struct pci_driver *driver;
/*
	 * Ensure that the PCI function is not removed concurrently, that no
	 * driver is unbound or probed, and that userspace can't access its
	 * configuration space while we perform recovery.
*/
pci_dev_lock(pdev);
if (pdev->error_state == pci_channel_io_perm_failure) {
ers_res = PCI_ERS_RESULT_DISCONNECT;
goto out_unlock;
}
pdev->error_state = pci_channel_io_frozen;
if (is_passed_through(to_zpci(pdev))) {
pr_info("%s: Cannot be recovered in the host because it is a pass-through device\n",
pci_name(pdev));
goto out_unlock;
}
driver = to_pci_driver(pdev->dev.driver);
if (!is_driver_supported(driver)) {
if (!driver)
pr_info("%s: Cannot be recovered because no driver is bound to the device\n",
pci_name(pdev));
else
pr_info("%s: The %s driver bound to the device does not support error recovery\n",
pci_name(pdev),
driver->name);
goto out_unlock;
}
ers_res = zpci_event_notify_error_detected(pdev, driver);
if (ers_result_indicates_abort(ers_res))
goto out_unlock;
if (ers_res == PCI_ERS_RESULT_CAN_RECOVER) {
ers_res = zpci_event_do_error_state_clear(pdev, driver);
if (ers_result_indicates_abort(ers_res))
goto out_unlock;
}
if (ers_res == PCI_ERS_RESULT_NEED_RESET)
ers_res = zpci_event_do_reset(pdev, driver);
if (ers_res != PCI_ERS_RESULT_RECOVERED) {
pr_err("%s: Automatic recovery failed; operator intervention is required\n",
pci_name(pdev));
goto out_unlock;
}
pr_info("%s: The device is ready to resume operations\n", pci_name(pdev));
if (driver->err_handler->resume)
driver->err_handler->resume(pdev);
out_unlock:
pci_dev_unlock(pdev);
return ers_res;
}
/* zpci_event_io_failure - Report PCI channel failure state to driver
* @pdev: PCI function for which to report
* @es: PCI channel failure state to report
*/
static void zpci_event_io_failure(struct pci_dev *pdev, pci_channel_state_t es)
{
struct pci_driver *driver;
pci_dev_lock(pdev);
pdev->error_state = es;
	/*
	 * While vfio-pci's error_detected callback notifies user space,
	 * QEMU reacts to this by freezing the guest. In an s390 environment
	 * PCI errors are rarely fatal, so this is overkill. Instead, in the
	 * future we will inject the error event and let the guest recover
	 * the device itself.
*/
if (is_passed_through(to_zpci(pdev)))
goto out;
driver = to_pci_driver(pdev->dev.driver);
if (driver && driver->err_handler && driver->err_handler->error_detected)
driver->err_handler->error_detected(pdev, pdev->error_state);
out:
pci_dev_unlock(pdev);
}
static void __zpci_event_error(struct zpci_ccdf_err *ccdf)
{
struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
struct pci_dev *pdev = NULL;
pci_ers_result_t ers_res;
zpci_dbg(3, "err fid:%x, fh:%x, pec:%x\n",
ccdf->fid, ccdf->fh, ccdf->pec);
zpci_err("error CCDF:\n");
zpci_err_hex(ccdf, sizeof(*ccdf));
if (zdev) {
zpci_update_fh(zdev, ccdf->fh);
if (zdev->zbus->bus)
pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn);
}
pr_err("%s: Event 0x%x reports an error for PCI function 0x%x\n",
pdev ? pci_name(pdev) : "n/a", ccdf->pec, ccdf->fid);
if (!pdev)
goto no_pdev;
switch (ccdf->pec) {
case 0x003a: /* Service Action or Error Recovery Successful */
ers_res = zpci_event_attempt_error_recovery(pdev);
if (ers_res != PCI_ERS_RESULT_RECOVERED)
zpci_event_io_failure(pdev, pci_channel_io_perm_failure);
break;
default:
/*
* Mark as frozen not permanently failed because the device
* could be subsequently recovered by the platform.
*/
zpci_event_io_failure(pdev, pci_channel_io_frozen);
break;
}
pci_dev_put(pdev);
no_pdev:
zpci_zdev_put(zdev);
}
void zpci_event_error(void *data)
{
if (zpci_is_enabled())
__zpci_event_error(data);
}
static void zpci_event_hard_deconfigured(struct zpci_dev *zdev, u32 fh)
{
zpci_update_fh(zdev, fh);
/* Give the driver a hint that the function is
* already unusable.
*/
zpci_bus_remove_device(zdev, true);
	/* Even though the device is already gone, we still
* need to free zPCI resources as part of the disable.
*/
if (zdev->dma_table)
zpci_dma_exit_device(zdev);
if (zdev_enabled(zdev))
zpci_disable_device(zdev);
zdev->state = ZPCI_FN_STATE_STANDBY;
}
static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
{
struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
bool existing_zdev = !!zdev;
enum zpci_state state;
zpci_dbg(3, "avl fid:%x, fh:%x, pec:%x\n",
ccdf->fid, ccdf->fh, ccdf->pec);
switch (ccdf->pec) {
case 0x0301: /* Reserved|Standby -> Configured */
if (!zdev) {
zdev = zpci_create_device(ccdf->fid, ccdf->fh, ZPCI_FN_STATE_CONFIGURED);
if (IS_ERR(zdev))
break;
} else {
/* the configuration request may be stale */
if (zdev->state != ZPCI_FN_STATE_STANDBY)
break;
zdev->state = ZPCI_FN_STATE_CONFIGURED;
}
zpci_scan_configured_device(zdev, ccdf->fh);
break;
case 0x0302: /* Reserved -> Standby */
if (!zdev)
zpci_create_device(ccdf->fid, ccdf->fh, ZPCI_FN_STATE_STANDBY);
else
zpci_update_fh(zdev, ccdf->fh);
break;
case 0x0303: /* Deconfiguration requested */
if (zdev) {
			/* The event may have been queued before we configured
* the device.
*/
if (zdev->state != ZPCI_FN_STATE_CONFIGURED)
break;
zpci_update_fh(zdev, ccdf->fh);
zpci_deconfigure_device(zdev);
}
break;
case 0x0304: /* Configured -> Standby|Reserved */
if (zdev) {
			/* The event may have been queued before we configured
			 * the device.
*/
if (zdev->state == ZPCI_FN_STATE_CONFIGURED)
zpci_event_hard_deconfigured(zdev, ccdf->fh);
/* The 0x0304 event may immediately reserve the device */
if (!clp_get_state(zdev->fid, &state) &&
state == ZPCI_FN_STATE_RESERVED) {
zpci_device_reserved(zdev);
}
}
break;
case 0x0306: /* 0x308 or 0x302 for multiple devices */
zpci_remove_reserved_devices();
clp_scan_pci_devices();
break;
case 0x0308: /* Standby -> Reserved */
if (!zdev)
break;
zpci_device_reserved(zdev);
break;
default:
break;
}
if (existing_zdev)
zpci_zdev_put(zdev);
}
void zpci_event_availability(void *data)
{
if (zpci_is_enabled())
__zpci_event_availability(data);
}
| linux-master | arch/s390/pci/pci_event.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Simple program to generate defines out of facility lists that use the bit
 * numbering scheme from the Principles of Operation: most significant bit
* has bit number 0.
*
* Copyright IBM Corp. 2015, 2018
*
*/
#include <strings.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
struct facility_def {
char *name;
int *bits;
};
static struct facility_def facility_defs[] = {
{
/*
* FACILITIES_ALS contains the list of facilities that are
* required to run a kernel that is compiled e.g. with
* -march=<machine>.
*/
.name = "FACILITIES_ALS",
.bits = (int[]){
0, /* N3 instructions */
1, /* z/Arch mode installed */
18, /* long displacement facility */
21, /* extended-immediate facility */
25, /* store clock fast */
27, /* mvcos */
32, /* compare and swap and store */
33, /* compare and swap and store 2 */
34, /* general instructions extension */
35, /* execute extensions */
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
45, /* fast-BCR, etc. */
#endif
#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
49, /* misc-instruction-extensions */
52, /* interlocked facility 2 */
#endif
#ifdef CONFIG_HAVE_MARCH_Z13_FEATURES
53, /* load-and-zero-rightmost-byte, etc. */
#endif
#ifdef CONFIG_HAVE_MARCH_Z14_FEATURES
58, /* miscellaneous-instruction-extension 2 */
#endif
#ifdef CONFIG_HAVE_MARCH_Z15_FEATURES
61, /* miscellaneous-instruction-extension 3 */
#endif
-1 /* END */
}
},
{
/*
* FACILITIES_KVM contains the list of facilities that are part
* of the default facility mask and list that are passed to the
* initial CPU model. If no CPU model is used, this, together
* with the non-hypervisor managed bits, is the maximum list of
* guest facilities supported by KVM.
*/
.name = "FACILITIES_KVM",
.bits = (int[]){
0, /* N3 instructions */
1, /* z/Arch mode installed */
2, /* z/Arch mode active */
3, /* DAT-enhancement */
4, /* idte segment table */
5, /* idte region table */
6, /* ASN-and-LX reuse */
7, /* stfle */
8, /* enhanced-DAT 1 */
9, /* sense-running-status */
10, /* conditional sske */
13, /* ipte-range */
14, /* nonquiescing key-setting */
73, /* transactional execution */
75, /* access-exception-fetch/store indication */
76, /* msa extension 3 */
77, /* msa extension 4 */
78, /* enhanced-DAT 2 */
130, /* instruction-execution-protection */
131, /* enhanced-SOP 2 and side-effect */
139, /* multiple epoch facility */
146, /* msa extension 8 */
150, /* enhanced sort */
151, /* deflate conversion */
155, /* msa extension 9 */
-1 /* END */
}
},
{
/*
* FACILITIES_KVM_CPUMODEL contains the list of facilities
* that can be enabled by CPU model code if the host supports
* it. These facilities are not passed to the guest without
* CPU model support.
*/
.name = "FACILITIES_KVM_CPUMODEL",
.bits = (int[]){
12, /* AP Query Configuration Information */
15, /* AP Facilities Test */
156, /* etoken facility */
165, /* nnpa facility */
193, /* bear enhancement facility */
194, /* rdp enhancement facility */
196, /* processor activity instrumentation facility */
197, /* processor activity instrumentation extension 1 */
-1 /* END */
}
},
};
static void print_facility_list(struct facility_def *def)
{
unsigned int high, bit, dword, i;
unsigned long long *array;
array = calloc(1, 8);
if (!array)
exit(EXIT_FAILURE);
high = 0;
for (i = 0; def->bits[i] != -1; i++) {
bit = 63 - (def->bits[i] & 63);
dword = def->bits[i] / 64;
if (dword > high) {
array = realloc(array, (dword + 1) * 8);
if (!array)
exit(EXIT_FAILURE);
memset(array + high + 1, 0, (dword - high) * 8);
high = dword;
}
array[dword] |= 1ULL << bit;
}
printf("#define %s ", def->name);
for (i = 0; i <= high; i++)
printf("_AC(0x%016llx,UL)%c", array[i], i < high ? ',' : '\n');
free(array);
}
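/*
 * Worked example (illustrative): facility bit 45 lives in doubleword 0
 * (45 / 64 == 0) and, since the Principles of Operation number the most
 * significant bit 0, the loop above computes bit = 63 - (45 & 63) == 18
 * and sets array[0] |= 1ULL << 18.
 */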
static void print_facility_lists(void)
{
unsigned int i;
for (i = 0; i < sizeof(facility_defs) / sizeof(facility_defs[0]); i++)
print_facility_list(&facility_defs[i]);
}
int main(int argc, char **argv)
{
printf("#ifndef __ASM_S390_FACILITY_DEFS__\n");
printf("#define __ASM_S390_FACILITY_DEFS__\n");
printf("/*\n");
printf(" * DO NOT MODIFY.\n");
printf(" *\n");
printf(" * This file was generated by %s\n", __FILE__);
printf(" */\n\n");
printf("#include <linux/const.h>\n\n");
print_facility_lists();
printf("\n#endif\n");
return 0;
}
| linux-master | arch/s390/tools/gen_facilities.c |
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Generate opcode table initializers for the in-kernel disassembler.
*
* Copyright IBM Corp. 2017
*
*/
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <stdio.h>
#define STRING_SIZE_MAX 20
struct insn_type {
unsigned char byte;
unsigned char mask;
char **format;
};
struct insn {
struct insn_type *type;
char opcode[STRING_SIZE_MAX];
char name[STRING_SIZE_MAX];
char upper[STRING_SIZE_MAX];
char format[STRING_SIZE_MAX];
unsigned int name_len;
};
struct insn_group {
struct insn_type *type;
int offset;
int count;
char opcode[2];
};
struct insn_format {
char *format;
int type;
};
struct gen_opcode {
struct insn *insn;
int nr;
struct insn_group *group;
int nr_groups;
};
/*
* Table of instruction format types. Each opcode is defined with at
* least one byte (two nibbles), three nibbles, or two bytes (four
* nibbles).
* The byte member of each instruction format type entry defines
* within which byte of an instruction the third (and fourth) nibble
* of an opcode can be found. The mask member is the and-mask that
* needs to be applied on this byte in order to get the third (and
* fourth) nibble of the opcode.
* The format array defines all instruction formats (as defined in the
* Principles of Operation) which have the same position of the opcode
* nibbles.
 * Instruction formats with 1-byte opcodes are a special case: their
 * byte member is always zero, so that the mask is applied to the
 * (only) byte that contains the opcode.
*/
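/*
 * For example (illustrative): "lgr" has the two-byte opcode 0xb904 in
 * RRE format, so its third and fourth opcode nibbles sit in byte 1 and
 * the matching entry below uses .byte = 1, .mask = 0xff. An RI-format
 * instruction such as "ahi" (opcode 0xa7a) keeps its third opcode
 * nibble in the low half of byte 1, hence .byte = 1, .mask = 0x0f.
 */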
static struct insn_type insn_type_table[] = {
{
.byte = 0,
.mask = 0xff,
.format = (char *[]) {
"MII",
"RR",
"RS",
"RSI",
"RX",
"SI",
"SMI",
"SS",
NULL,
},
},
{
.byte = 1,
.mask = 0x0f,
.format = (char *[]) {
"RI",
"RIL",
"SSF",
NULL,
},
},
{
.byte = 1,
.mask = 0xff,
.format = (char *[]) {
"E",
"IE",
"RRE",
"RRF",
"RRR",
"S",
"SIL",
"SSE",
NULL,
},
},
{
.byte = 5,
.mask = 0xff,
.format = (char *[]) {
"RIE",
"RIS",
"RRS",
"RSE",
"RSL",
"RSY",
"RXE",
"RXF",
"RXY",
"SIY",
"VRI",
"VRR",
"VRS",
"VRV",
"VRX",
"VSI",
NULL,
},
},
};
static struct insn_type *insn_format_to_type(char *format)
{
char tmp[STRING_SIZE_MAX];
char *base_format, **ptr;
int i;
strcpy(tmp, format);
base_format = tmp;
base_format = strsep(&base_format, "_");
for (i = 0; i < sizeof(insn_type_table) / sizeof(insn_type_table[0]); i++) {
ptr = insn_type_table[i].format;
while (*ptr) {
if (!strcmp(base_format, *ptr))
return &insn_type_table[i];
ptr++;
}
}
exit(EXIT_FAILURE);
}
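/*
 * Format names read from the opcode table may carry an operand-layout
 * suffix, e.g. "RXY_RRRD" (illustrative); only the part before the
 * first '_' selects an entry in insn_type_table above.
 */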
static void read_instructions(struct gen_opcode *desc)
{
struct insn insn;
int rc, i;
while (1) {
rc = scanf("%s %s %s", insn.opcode, insn.name, insn.format);
if (rc == EOF)
break;
if (rc != 3)
exit(EXIT_FAILURE);
insn.type = insn_format_to_type(insn.format);
insn.name_len = strlen(insn.name);
for (i = 0; i <= insn.name_len; i++)
insn.upper[i] = toupper((unsigned char)insn.name[i]);
desc->nr++;
desc->insn = realloc(desc->insn, desc->nr * sizeof(*desc->insn));
if (!desc->insn)
exit(EXIT_FAILURE);
desc->insn[desc->nr - 1] = insn;
}
}
static int cmpformat(const void *a, const void *b)
{
return strcmp(((struct insn *)a)->format, ((struct insn *)b)->format);
}
static void print_formats(struct gen_opcode *desc)
{
char *format;
int i, count;
qsort(desc->insn, desc->nr, sizeof(*desc->insn), cmpformat);
format = "";
count = 0;
printf("enum {\n");
for (i = 0; i < desc->nr; i++) {
if (!strcmp(format, desc->insn[i].format))
continue;
count++;
format = desc->insn[i].format;
printf("\tINSTR_%s,\n", format);
}
printf("}; /* %d */\n\n", count);
}
static int cmp_long_insn(const void *a, const void *b)
{
return strcmp(((struct insn *)a)->name, ((struct insn *)b)->name);
}
static void print_long_insn(struct gen_opcode *desc)
{
struct insn *insn;
int i, count;
qsort(desc->insn, desc->nr, sizeof(*desc->insn), cmp_long_insn);
count = 0;
printf("enum {\n");
for (i = 0; i < desc->nr; i++) {
insn = &desc->insn[i];
if (insn->name_len < 6)
continue;
printf("\tLONG_INSN_%s,\n", insn->upper);
count++;
}
printf("}; /* %d */\n\n", count);
printf("#define LONG_INSN_INITIALIZER { \\\n");
for (i = 0; i < desc->nr; i++) {
insn = &desc->insn[i];
if (insn->name_len < 6)
continue;
printf("\t[LONG_INSN_%s] = \"%s\", \\\n", insn->upper, insn->name);
}
printf("}\n\n");
}
static void print_opcode(struct insn *insn, int nr)
{
char *opcode;
opcode = insn->opcode;
if (insn->type->byte != 0)
opcode += 2;
printf("\t[%4d] = { .opfrag = 0x%s, .format = INSTR_%s, ", nr, opcode, insn->format);
if (insn->name_len < 6)
printf(".name = \"%s\" ", insn->name);
else
printf(".offset = LONG_INSN_%s ", insn->upper);
printf("}, \\\n");
}
static void add_to_group(struct gen_opcode *desc, struct insn *insn, int offset)
{
struct insn_group *group;
group = desc->group ? &desc->group[desc->nr_groups - 1] : NULL;
if (group && (!strncmp(group->opcode, insn->opcode, 2) || group->type->byte == 0)) {
group->count++;
return;
}
desc->nr_groups++;
desc->group = realloc(desc->group, desc->nr_groups * sizeof(*desc->group));
if (!desc->group)
exit(EXIT_FAILURE);
group = &desc->group[desc->nr_groups - 1];
memcpy(group->opcode, insn->opcode, 2);
group->type = insn->type;
group->offset = offset;
group->count = 1;
}
static int cmpopcode(const void *a, const void *b)
{
return strcmp(((struct insn *)a)->opcode, ((struct insn *)b)->opcode);
}
static void print_opcode_table(struct gen_opcode *desc)
{
char opcode[2] = "";
struct insn *insn;
int i, offset;
qsort(desc->insn, desc->nr, sizeof(*desc->insn), cmpopcode);
printf("#define OPCODE_TABLE_INITIALIZER { \\\n");
offset = 0;
for (i = 0; i < desc->nr; i++) {
insn = &desc->insn[i];
if (insn->type->byte == 0)
continue;
add_to_group(desc, insn, offset);
if (strncmp(opcode, insn->opcode, 2)) {
memcpy(opcode, insn->opcode, 2);
printf("\t/* %.2s */ \\\n", opcode);
}
print_opcode(insn, offset);
offset++;
}
printf("\t/* 1-byte opcode instructions */ \\\n");
for (i = 0; i < desc->nr; i++) {
insn = &desc->insn[i];
if (insn->type->byte != 0)
continue;
add_to_group(desc, insn, offset);
print_opcode(insn, offset);
offset++;
}
printf("}\n\n");
}
static void print_opcode_table_offsets(struct gen_opcode *desc)
{
struct insn_group *group;
int i;
printf("#define OPCODE_OFFSET_INITIALIZER { \\\n");
for (i = 0; i < desc->nr_groups; i++) {
group = &desc->group[i];
printf("\t{ .opcode = 0x%.2s, .mask = 0x%02x, .byte = %d, .offset = %d, .count = %d }, \\\n",
group->opcode, group->type->mask, group->type->byte, group->offset, group->count);
}
printf("}\n\n");
}
int main(int argc, char **argv)
{
struct gen_opcode _desc = { 0 };
struct gen_opcode *desc = &_desc;
read_instructions(desc);
printf("#ifndef __S390_GENERATED_DIS_DEFS_H__\n");
printf("#define __S390_GENERATED_DIS_DEFS_H__\n");
printf("/*\n");
printf(" * DO NOT MODIFY.\n");
printf(" *\n");
printf(" * This file was generated by %s\n", __FILE__);
printf(" */\n\n");
print_formats(desc);
print_long_insn(desc);
print_opcode_table(desc);
print_opcode_table_offsets(desc);
printf("#endif\n");
exit(EXIT_SUCCESS);
}
| linux-master | arch/s390/tools/gen_opcode_table.c |
// SPDX-License-Identifier: GPL-2.0
/*
* BPF Jit compiler for s390.
*
* Minimum build requirements:
*
* - HAVE_MARCH_Z196_FEATURES: laal, laalg
* - HAVE_MARCH_Z10_FEATURES: msfi, cgrj, clgrj
* - HAVE_MARCH_Z9_109_FEATURES: alfi, llilf, clfi, oilf, nilf
* - 64BIT
*
* Copyright IBM Corp. 2012,2015
*
* Author(s): Martin Schwidefsky <[email protected]>
* Michael Holzheu <[email protected]>
*/
#define KMSG_COMPONENT "bpf_jit"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/init.h>
#include <linux/bpf.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <asm/cacheflush.h>
#include <asm/extable.h>
#include <asm/dis.h>
#include <asm/facility.h>
#include <asm/nospec-branch.h>
#include <asm/set_memory.h>
#include <asm/text-patching.h>
#include "bpf_jit.h"
struct bpf_jit {
u32 seen; /* Flags to remember seen eBPF instructions */
u32 seen_reg[16]; /* Array to remember which registers are used */
u32 *addrs; /* Array with relative instruction addresses */
u8 *prg_buf; /* Start of program */
int size; /* Size of program and literal pool */
int size_prg; /* Size of program */
int prg; /* Current position in program */
int lit32_start; /* Start of 32-bit literal pool */
int lit32; /* Current position in 32-bit literal pool */
int lit64_start; /* Start of 64-bit literal pool */
int lit64; /* Current position in 64-bit literal pool */
int base_ip; /* Base address for literal pool */
int exit_ip; /* Address of exit */
int r1_thunk_ip; /* Address of expoline thunk for 'br %r1' */
int r14_thunk_ip; /* Address of expoline thunk for 'br %r14' */
int tail_call_start; /* Tail call start offset */
int excnt; /* Number of exception table entries */
int prologue_plt_ret; /* Return address for prologue hotpatch PLT */
int prologue_plt; /* Start of prologue hotpatch PLT */
};
#define SEEN_MEM BIT(0) /* use mem[] for temporary storage */
#define SEEN_LITERAL BIT(1) /* code uses literals */
#define SEEN_FUNC BIT(2) /* calls C functions */
#define SEEN_STACK (SEEN_FUNC | SEEN_MEM)
/*
* s390 registers
*/
#define REG_W0 (MAX_BPF_JIT_REG + 0) /* Work register 1 (even) */
#define REG_W1 (MAX_BPF_JIT_REG + 1) /* Work register 2 (odd) */
#define REG_L (MAX_BPF_JIT_REG + 2) /* Literal pool register */
#define REG_15 (MAX_BPF_JIT_REG + 3) /* Register 15 */
#define REG_0 REG_W0 /* Register 0 */
#define REG_1 REG_W1 /* Register 1 */
#define REG_2 BPF_REG_1 /* Register 2 */
#define REG_3 BPF_REG_2 /* Register 3 */
#define REG_4 BPF_REG_3 /* Register 4 */
#define REG_7 BPF_REG_6 /* Register 7 */
#define REG_8 BPF_REG_7 /* Register 8 */
#define REG_14 BPF_REG_0 /* Register 14 */
/*
* Mapping of BPF registers to s390 registers
*/
static const int reg2hex[] = {
/* Return code */
[BPF_REG_0] = 14,
/* Function parameters */
[BPF_REG_1] = 2,
[BPF_REG_2] = 3,
[BPF_REG_3] = 4,
[BPF_REG_4] = 5,
[BPF_REG_5] = 6,
/* Call saved registers */
[BPF_REG_6] = 7,
[BPF_REG_7] = 8,
[BPF_REG_8] = 9,
[BPF_REG_9] = 10,
/* BPF stack pointer */
[BPF_REG_FP] = 13,
/* Register for blinding */
[BPF_REG_AX] = 12,
/* Work registers for s390x backend */
[REG_W0] = 0,
[REG_W1] = 1,
[REG_L] = 11,
[REG_15] = 15,
};
static inline u32 reg(u32 dst_reg, u32 src_reg)
{
return reg2hex[dst_reg] << 4 | reg2hex[src_reg];
}
static inline u32 reg_high(u32 reg)
{
return reg2hex[reg] << 4;
}
static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
{
u32 r1 = reg2hex[b1];
if (r1 >= 6 && r1 <= 15 && !jit->seen_reg[r1])
jit->seen_reg[r1] = 1;
}
#define REG_SET_SEEN(b1) \
({ \
reg_set_seen(jit, b1); \
})
#define REG_SEEN(b1) jit->seen_reg[reg2hex[(b1)]]
/*
* EMIT macros for code generation
*/
#define _EMIT2(op) \
({ \
if (jit->prg_buf) \
*(u16 *) (jit->prg_buf + jit->prg) = (op); \
jit->prg += 2; \
})
#define EMIT2(op, b1, b2) \
({ \
_EMIT2((op) | reg(b1, b2)); \
REG_SET_SEEN(b1); \
REG_SET_SEEN(b2); \
})
#define _EMIT4(op) \
({ \
if (jit->prg_buf) \
*(u32 *) (jit->prg_buf + jit->prg) = (op); \
jit->prg += 4; \
})
#define EMIT4(op, b1, b2) \
({ \
_EMIT4((op) | reg(b1, b2)); \
REG_SET_SEEN(b1); \
REG_SET_SEEN(b2); \
})
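/*
 * Illustrative: EMIT4(0xb9040000, dst_reg, src_reg) emits
 * "lgr %dst,%src"; reg() packs the two register numbers into the
 * low-order byte of the 4-byte instruction word.
 */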
#define EMIT4_RRF(op, b1, b2, b3) \
({ \
_EMIT4((op) | reg_high(b3) << 8 | reg(b1, b2)); \
REG_SET_SEEN(b1); \
REG_SET_SEEN(b2); \
REG_SET_SEEN(b3); \
})
#define _EMIT4_DISP(op, disp) \
({ \
unsigned int __disp = (disp) & 0xfff; \
_EMIT4((op) | __disp); \
})
#define EMIT4_DISP(op, b1, b2, disp) \
({ \
_EMIT4_DISP((op) | reg_high(b1) << 16 | \
reg_high(b2) << 8, (disp)); \
REG_SET_SEEN(b1); \
REG_SET_SEEN(b2); \
})
#define EMIT4_IMM(op, b1, imm) \
({ \
unsigned int __imm = (imm) & 0xffff; \
_EMIT4((op) | reg_high(b1) << 16 | __imm); \
REG_SET_SEEN(b1); \
})
#define EMIT4_PCREL(op, pcrel) \
({ \
long __pcrel = ((pcrel) >> 1) & 0xffff; \
_EMIT4((op) | __pcrel); \
})
#define EMIT4_PCREL_RIC(op, mask, target) \
({ \
int __rel = ((target) - jit->prg) / 2; \
_EMIT4((op) | (mask) << 20 | (__rel & 0xffff)); \
})
#define _EMIT6(op1, op2) \
({ \
if (jit->prg_buf) { \
*(u32 *) (jit->prg_buf + jit->prg) = (op1); \
*(u16 *) (jit->prg_buf + jit->prg + 4) = (op2); \
} \
jit->prg += 6; \
})
#define _EMIT6_DISP(op1, op2, disp) \
({ \
unsigned int __disp = (disp) & 0xfff; \
_EMIT6((op1) | __disp, op2); \
})
#define _EMIT6_DISP_LH(op1, op2, disp) \
({ \
u32 _disp = (u32) (disp); \
unsigned int __disp_h = _disp & 0xff000; \
unsigned int __disp_l = _disp & 0x00fff; \
_EMIT6((op1) | __disp_l, (op2) | __disp_h >> 4); \
})
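/*
 * Illustrative: a 20-bit displacement of 0x12345 is split by
 * _EMIT6_DISP_LH() above into DL = 0x345 (low 12 bits, merged into
 * op1) and DH = 0x12 (high 8 bits, merged into op2), matching the
 * DL/DH fields of the RXY/RSY/SIY instruction formats.
 */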
#define EMIT6_DISP_LH(op1, op2, b1, b2, b3, disp) \
({ \
_EMIT6_DISP_LH((op1) | reg(b1, b2) << 16 | \
reg_high(b3) << 8, op2, disp); \
REG_SET_SEEN(b1); \
REG_SET_SEEN(b2); \
REG_SET_SEEN(b3); \
})
#define EMIT6_PCREL_RIEB(op1, op2, b1, b2, mask, target) \
({ \
unsigned int rel = (int)((target) - jit->prg) / 2; \
_EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff), \
(op2) | (mask) << 12); \
REG_SET_SEEN(b1); \
REG_SET_SEEN(b2); \
})
#define EMIT6_PCREL_RIEC(op1, op2, b1, imm, mask, target) \
({ \
unsigned int rel = (int)((target) - jit->prg) / 2; \
_EMIT6((op1) | (reg_high(b1) | (mask)) << 16 | \
(rel & 0xffff), (op2) | ((imm) & 0xff) << 8); \
REG_SET_SEEN(b1); \
BUILD_BUG_ON(((unsigned long) (imm)) > 0xff); \
})
#define EMIT6_PCREL(op1, op2, b1, b2, i, off, mask) \
({ \
int rel = (addrs[(i) + (off) + 1] - jit->prg) / 2; \
_EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff), (op2) | (mask));\
REG_SET_SEEN(b1); \
REG_SET_SEEN(b2); \
})
#define EMIT6_PCREL_RILB(op, b, target) \
({ \
unsigned int rel = (int)((target) - jit->prg) / 2; \
_EMIT6((op) | reg_high(b) << 16 | rel >> 16, rel & 0xffff);\
REG_SET_SEEN(b); \
})
#define EMIT6_PCREL_RIL(op, target) \
({ \
unsigned int rel = (int)((target) - jit->prg) / 2; \
_EMIT6((op) | rel >> 16, rel & 0xffff); \
})
#define EMIT6_PCREL_RILC(op, mask, target) \
({ \
EMIT6_PCREL_RIL((op) | (mask) << 20, (target)); \
})
#define _EMIT6_IMM(op, imm) \
({ \
unsigned int __imm = (imm); \
_EMIT6((op) | (__imm >> 16), __imm & 0xffff); \
})
#define EMIT6_IMM(op, b1, imm) \
({ \
_EMIT6_IMM((op) | reg_high(b1) << 16, imm); \
REG_SET_SEEN(b1); \
})
#define _EMIT_CONST_U32(val) \
({ \
unsigned int ret; \
ret = jit->lit32; \
if (jit->prg_buf) \
*(u32 *)(jit->prg_buf + jit->lit32) = (u32)(val);\
jit->lit32 += 4; \
ret; \
})
#define EMIT_CONST_U32(val) \
({ \
jit->seen |= SEEN_LITERAL; \
_EMIT_CONST_U32(val) - jit->base_ip; \
})
#define _EMIT_CONST_U64(val) \
({ \
unsigned int ret; \
ret = jit->lit64; \
if (jit->prg_buf) \
*(u64 *)(jit->prg_buf + jit->lit64) = (u64)(val);\
jit->lit64 += 8; \
ret; \
})
#define EMIT_CONST_U64(val) \
({ \
jit->seen |= SEEN_LITERAL; \
_EMIT_CONST_U64(val) - jit->base_ip; \
})
#define EMIT_ZERO(b1) \
({ \
if (!fp->aux->verifier_zext) { \
/* llgfr %dst,%dst (zero extend to 64 bit) */ \
EMIT4(0xb9160000, b1, b1); \
REG_SET_SEEN(b1); \
} \
})
/*
* Return whether this is the first pass. The first pass is special, since we
* don't know any sizes yet, and thus must be conservative.
*/
static bool is_first_pass(struct bpf_jit *jit)
{
return jit->size == 0;
}
/*
* Return whether this is the code generation pass. The code generation pass is
* special, since we should change as little as possible.
*/
static bool is_codegen_pass(struct bpf_jit *jit)
{
return jit->prg_buf;
}
/*
* Return whether "rel" can be encoded as a short PC-relative offset
*/
static bool is_valid_rel(int rel)
{
return rel >= -65536 && rel <= 65534;
}
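/*
 * Illustrative: the offset is stored in halfwords in a signed 16-bit
 * field, so the reachable byte range is 2 * [-32768, 32767], i.e.
 * [-65536, 65534].
 */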
/*
* Return whether "off" can be reached using a short PC-relative offset
*/
static bool can_use_rel(struct bpf_jit *jit, int off)
{
return is_valid_rel(off - jit->prg);
}
/*
* Return whether given displacement can be encoded using
* Long-Displacement Facility
*/
static bool is_valid_ldisp(int disp)
{
return disp >= -524288 && disp <= 524287;
}
/*
* Return whether the next 32-bit literal pool entry can be referenced using
* Long-Displacement Facility
*/
static bool can_use_ldisp_for_lit32(struct bpf_jit *jit)
{
return is_valid_ldisp(jit->lit32 - jit->base_ip);
}
/*
* Return whether the next 64-bit literal pool entry can be referenced using
* Long-Displacement Facility
*/
static bool can_use_ldisp_for_lit64(struct bpf_jit *jit)
{
return is_valid_ldisp(jit->lit64 - jit->base_ip);
}
/*
* Fill whole space with illegal instructions
*/
static void jit_fill_hole(void *area, unsigned int size)
{
memset(area, 0, size);
}
/*
* Save registers from "rs" (register start) to "re" (register end) on stack
*/
static void save_regs(struct bpf_jit *jit, u32 rs, u32 re)
{
u32 off = STK_OFF_R6 + (rs - 6) * 8;
if (rs == re)
/* stg %rs,off(%r15) */
_EMIT6(0xe300f000 | rs << 20 | off, 0x0024);
else
/* stmg %rs,%re,off(%r15) */
_EMIT6_DISP(0xeb00f000 | rs << 20 | re << 16, 0x0024, off);
}
/*
* Restore registers from "rs" (register start) to "re" (register end) on stack
*/
static void restore_regs(struct bpf_jit *jit, u32 rs, u32 re, u32 stack_depth)
{
u32 off = STK_OFF_R6 + (rs - 6) * 8;
if (jit->seen & SEEN_STACK)
off += STK_OFF + stack_depth;
if (rs == re)
/* lg %rs,off(%r15) */
_EMIT6(0xe300f000 | rs << 20 | off, 0x0004);
else
/* lmg %rs,%re,off(%r15) */
_EMIT6_DISP(0xeb00f000 | rs << 20 | re << 16, 0x0004, off);
}
/*
* Return first seen register (from start)
*/
static int get_start(struct bpf_jit *jit, int start)
{
int i;
for (i = start; i <= 15; i++) {
if (jit->seen_reg[i])
return i;
}
return 0;
}
/*
 * Return the end of the register chunk starting at "start"; a chunk only
 * ends at a gap of at least two consecutive unseen registers (gap >= 2)
*/
static int get_end(struct bpf_jit *jit, int start)
{
int i;
for (i = start; i < 15; i++) {
if (!jit->seen_reg[i] && !jit->seen_reg[i + 1])
return i - 1;
}
return jit->seen_reg[15] ? 15 : 14;
}
#define REGS_SAVE 1
#define REGS_RESTORE 0
/*
* Save and restore clobbered registers (6-15) on stack.
* We save/restore registers in chunks with gap >= 2 registers.
*/
static void save_restore_regs(struct bpf_jit *jit, int op, u32 stack_depth)
{
const int last = 15, save_restore_size = 6;
int re = 6, rs;
if (is_first_pass(jit)) {
/*
* We don't know yet which registers are used. Reserve space
* conservatively.
*/
jit->prg += (last - re + 1) * save_restore_size;
return;
}
do {
rs = get_start(jit, re);
if (!rs)
break;
re = get_end(jit, rs + 1);
if (op == REGS_SAVE)
save_regs(jit, rs, re);
else
restore_regs(jit, rs, re, stack_depth);
re++;
} while (re <= last);
}
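/*
 * Worked example (illustrative): with seen registers {6, 8, 12} the
 * loop above emits "stmg %r6,%r8" (the single-register gap at %r7 is
 * bridged, since a chunk only ends at a gap of two or more) followed
 * by "stg %r12".
 */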
static void bpf_skip(struct bpf_jit *jit, int size)
{
if (size >= 6 && !is_valid_rel(size)) {
/* brcl 0xf,size */
		EMIT6_PCREL_RIL(0xc0f40000, size);
size -= 6;
} else if (size >= 4 && is_valid_rel(size)) {
/* brc 0xf,size */
EMIT4_PCREL(0xa7f40000, size);
size -= 4;
}
while (size >= 2) {
/* bcr 0,%0 */
_EMIT2(0x0700);
size -= 2;
}
}
/*
* PLT for hotpatchable calls. The calling convention is the same as for the
* ftrace hotpatch trampolines: %r0 is return address, %r1 is clobbered.
*/
extern const char bpf_plt[];
extern const char bpf_plt_ret[];
extern const char bpf_plt_target[];
extern const char bpf_plt_end[];
#define BPF_PLT_SIZE 32
asm(
".pushsection .rodata\n"
" .balign 8\n"
"bpf_plt:\n"
" lgrl %r0,bpf_plt_ret\n"
" lgrl %r1,bpf_plt_target\n"
" br %r1\n"
" .balign 8\n"
"bpf_plt_ret: .quad 0\n"
"bpf_plt_target: .quad 0\n"
"bpf_plt_end:\n"
" .popsection\n"
);
static void bpf_jit_plt(void *plt, void *ret, void *target)
{
memcpy(plt, bpf_plt, BPF_PLT_SIZE);
*(void **)((char *)plt + (bpf_plt_ret - bpf_plt)) = ret;
*(void **)((char *)plt + (bpf_plt_target - bpf_plt)) = target ?: ret;
}
/*
* Emit function prologue
*
* Save registers and create stack frame if necessary.
* See stack frame layout description in "bpf_jit.h"!
*/
static void bpf_jit_prologue(struct bpf_jit *jit, struct bpf_prog *fp,
u32 stack_depth)
{
/* No-op for hotpatching */
/* brcl 0,prologue_plt */
EMIT6_PCREL_RILC(0xc0040000, 0, jit->prologue_plt);
jit->prologue_plt_ret = jit->prg;
if (fp->aux->func_idx == 0) {
/* Initialize the tail call counter in the main program. */
/* xc STK_OFF_TCCNT(4,%r15),STK_OFF_TCCNT(%r15) */
_EMIT6(0xd703f000 | STK_OFF_TCCNT, 0xf000 | STK_OFF_TCCNT);
} else {
/*
* Skip the tail call counter initialization in subprograms.
* Insert nops in order to have tail_call_start at a
* predictable offset.
*/
bpf_skip(jit, 6);
}
/* Tail calls have to skip above initialization */
jit->tail_call_start = jit->prg;
/* Save registers */
save_restore_regs(jit, REGS_SAVE, stack_depth);
/* Setup literal pool */
if (is_first_pass(jit) || (jit->seen & SEEN_LITERAL)) {
if (!is_first_pass(jit) &&
is_valid_ldisp(jit->size - (jit->prg + 2))) {
/* basr %l,0 */
EMIT2(0x0d00, REG_L, REG_0);
jit->base_ip = jit->prg;
} else {
/* larl %l,lit32_start */
EMIT6_PCREL_RILB(0xc0000000, REG_L, jit->lit32_start);
jit->base_ip = jit->lit32_start;
}
}
/* Setup stack and backchain */
if (is_first_pass(jit) || (jit->seen & SEEN_STACK)) {
if (is_first_pass(jit) || (jit->seen & SEEN_FUNC))
/* lgr %w1,%r15 (backchain) */
EMIT4(0xb9040000, REG_W1, REG_15);
/* la %bfp,STK_160_UNUSED(%r15) (BPF frame pointer) */
EMIT4_DISP(0x41000000, BPF_REG_FP, REG_15, STK_160_UNUSED);
/* aghi %r15,-STK_OFF */
EMIT4_IMM(0xa70b0000, REG_15, -(STK_OFF + stack_depth));
if (is_first_pass(jit) || (jit->seen & SEEN_FUNC))
/* stg %w1,152(%r15) (backchain) */
EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0,
REG_15, 152);
}
}
/*
* Emit an expoline for a jump that follows
*/
static void emit_expoline(struct bpf_jit *jit)
{
/* exrl %r0,.+10 */
EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
/* j . */
EMIT4_PCREL(0xa7f40000, 0);
}
/*
* Emit __s390_indirect_jump_r1 thunk if necessary
*/
static void emit_r1_thunk(struct bpf_jit *jit)
{
if (nospec_uses_trampoline()) {
jit->r1_thunk_ip = jit->prg;
emit_expoline(jit);
/* br %r1 */
_EMIT2(0x07f1);
}
}
/*
* Call r1 either directly or via __s390_indirect_jump_r1 thunk
*/
static void call_r1(struct bpf_jit *jit)
{
if (nospec_uses_trampoline())
/* brasl %r14,__s390_indirect_jump_r1 */
EMIT6_PCREL_RILB(0xc0050000, REG_14, jit->r1_thunk_ip);
else
/* basr %r14,%r1 */
EMIT2(0x0d00, REG_14, REG_1);
}
/*
* Function epilogue
*/
static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
{
jit->exit_ip = jit->prg;
/* Load exit code: lgr %r2,%b0 */
EMIT4(0xb9040000, REG_2, BPF_REG_0);
/* Restore registers */
save_restore_regs(jit, REGS_RESTORE, stack_depth);
if (nospec_uses_trampoline()) {
jit->r14_thunk_ip = jit->prg;
/* Generate __s390_indirect_jump_r14 thunk */
emit_expoline(jit);
}
/* br %r14 */
_EMIT2(0x07fe);
if (is_first_pass(jit) || (jit->seen & SEEN_FUNC))
emit_r1_thunk(jit);
jit->prg = ALIGN(jit->prg, 8);
jit->prologue_plt = jit->prg;
if (jit->prg_buf)
bpf_jit_plt(jit->prg_buf + jit->prg,
jit->prg_buf + jit->prologue_plt_ret, NULL);
jit->prg += BPF_PLT_SIZE;
}
static int get_probe_mem_regno(const u8 *insn)
{
/*
* insn must point to llgc, llgh, llgf or lg, which have destination
* register at the same position.
*/
if (insn[0] != 0xe3) /* common llgc, llgh, llgf and lg prefix */
return -1;
if (insn[5] != 0x90 && /* llgc */
insn[5] != 0x91 && /* llgh */
insn[5] != 0x16 && /* llgf */
insn[5] != 0x04) /* lg */
return -1;
return insn[1] >> 4;
}
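/*
 * Example (illustrative): for "lg %r7,off(%r2)" the RXY encoding
 * starts 0xe3 0x70 ..., so insn[1] >> 4 recovers destination
 * register 7.
 */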
bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs)
{
regs->psw.addr = extable_fixup(x);
regs->gprs[x->data] = 0;
return true;
}
static int bpf_jit_probe_mem(struct bpf_jit *jit, struct bpf_prog *fp,
int probe_prg, int nop_prg)
{
struct exception_table_entry *ex;
int reg, prg;
s64 delta;
u8 *insn;
int i;
if (!fp->aux->extable)
/* Do nothing during early JIT passes. */
return 0;
insn = jit->prg_buf + probe_prg;
reg = get_probe_mem_regno(insn);
if (WARN_ON_ONCE(reg < 0))
/* JIT bug - unexpected probe instruction. */
return -1;
if (WARN_ON_ONCE(probe_prg + insn_length(*insn) != nop_prg))
/* JIT bug - gap between probe and nop instructions. */
return -1;
for (i = 0; i < 2; i++) {
if (WARN_ON_ONCE(jit->excnt >= fp->aux->num_exentries))
/* Verifier bug - not enough entries. */
return -1;
ex = &fp->aux->extable[jit->excnt];
/* Add extable entries for probe and nop instructions. */
prg = i == 0 ? probe_prg : nop_prg;
delta = jit->prg_buf + prg - (u8 *)&ex->insn;
if (WARN_ON_ONCE(delta < INT_MIN || delta > INT_MAX))
/* JIT bug - code and extable must be close. */
return -1;
ex->insn = delta;
/*
* Always land on the nop. Note that extable infrastructure
* ignores fixup field, it is handled by ex_handler_bpf().
*/
delta = jit->prg_buf + nop_prg - (u8 *)&ex->fixup;
if (WARN_ON_ONCE(delta < INT_MIN || delta > INT_MAX))
/* JIT bug - landing pad and extable must be close. */
return -1;
ex->fixup = delta;
ex->type = EX_TYPE_BPF;
ex->data = reg;
jit->excnt++;
}
return 0;
}
/*
* Sign-extend the register if necessary
*/
static int sign_extend(struct bpf_jit *jit, int r, u8 size, u8 flags)
{
if (!(flags & BTF_FMODEL_SIGNED_ARG))
return 0;
switch (size) {
case 1:
/* lgbr %r,%r */
EMIT4(0xb9060000, r, r);
return 0;
case 2:
/* lghr %r,%r */
EMIT4(0xb9070000, r, r);
return 0;
case 4:
/* lgfr %r,%r */
EMIT4(0xb9140000, r, r);
return 0;
case 8:
return 0;
default:
return -1;
}
}
/*
* Compile one eBPF instruction into s390x code
*
* NOTE: Use noinline because for gcov (-fprofile-arcs) gcc allocates a lot of
* stack space for the large switch statement.
*/
static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
int i, bool extra_pass, u32 stack_depth)
{
struct bpf_insn *insn = &fp->insnsi[i];
u32 dst_reg = insn->dst_reg;
u32 src_reg = insn->src_reg;
int last, insn_count = 1;
u32 *addrs = jit->addrs;
s32 imm = insn->imm;
s16 off = insn->off;
int probe_prg = -1;
unsigned int mask;
int nop_prg;
int err;
if (BPF_CLASS(insn->code) == BPF_LDX &&
BPF_MODE(insn->code) == BPF_PROBE_MEM)
probe_prg = jit->prg;
switch (insn->code) {
/*
* BPF_MOV
*/
case BPF_ALU | BPF_MOV | BPF_X: /* dst = (u32) src */
/* llgfr %dst,%src */
EMIT4(0xb9160000, dst_reg, src_reg);
if (insn_is_zext(&insn[1]))
insn_count = 2;
break;
case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
/* lgr %dst,%src */
EMIT4(0xb9040000, dst_reg, src_reg);
break;
case BPF_ALU | BPF_MOV | BPF_K: /* dst = (u32) imm */
/* llilf %dst,imm */
EMIT6_IMM(0xc00f0000, dst_reg, imm);
if (insn_is_zext(&insn[1]))
insn_count = 2;
break;
case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = imm */
/* lgfi %dst,imm */
EMIT6_IMM(0xc0010000, dst_reg, imm);
break;
/*
* BPF_LD 64
*/
case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
{
/* 16 byte instruction that uses two 'struct bpf_insn' */
u64 imm64;
imm64 = (u64)(u32) insn[0].imm | ((u64)(u32) insn[1].imm) << 32;
/* lgrl %dst,imm */
EMIT6_PCREL_RILB(0xc4080000, dst_reg, _EMIT_CONST_U64(imm64));
insn_count = 2;
break;
}
/*
* BPF_ADD
*/
case BPF_ALU | BPF_ADD | BPF_X: /* dst = (u32) dst + (u32) src */
/* ar %dst,%src */
EMIT2(0x1a00, dst_reg, src_reg);
EMIT_ZERO(dst_reg);
break;
case BPF_ALU64 | BPF_ADD | BPF_X: /* dst = dst + src */
/* agr %dst,%src */
EMIT4(0xb9080000, dst_reg, src_reg);
break;
case BPF_ALU | BPF_ADD | BPF_K: /* dst = (u32) dst + (u32) imm */
if (imm != 0) {
/* alfi %dst,imm */
EMIT6_IMM(0xc20b0000, dst_reg, imm);
}
EMIT_ZERO(dst_reg);
break;
case BPF_ALU64 | BPF_ADD | BPF_K: /* dst = dst + imm */
if (!imm)
break;
/* agfi %dst,imm */
EMIT6_IMM(0xc2080000, dst_reg, imm);
break;
/*
* BPF_SUB
*/
case BPF_ALU | BPF_SUB | BPF_X: /* dst = (u32) dst - (u32) src */
/* sr %dst,%src */
EMIT2(0x1b00, dst_reg, src_reg);
EMIT_ZERO(dst_reg);
break;
case BPF_ALU64 | BPF_SUB | BPF_X: /* dst = dst - src */
/* sgr %dst,%src */
EMIT4(0xb9090000, dst_reg, src_reg);
break;
case BPF_ALU | BPF_SUB | BPF_K: /* dst = (u32) dst - (u32) imm */
if (imm != 0) {
/* alfi %dst,-imm */
EMIT6_IMM(0xc20b0000, dst_reg, -imm);
}
EMIT_ZERO(dst_reg);
break;
case BPF_ALU64 | BPF_SUB | BPF_K: /* dst = dst - imm */
if (!imm)
break;
if (imm == -0x80000000) {
/* algfi %dst,0x80000000 */
EMIT6_IMM(0xc20a0000, dst_reg, 0x80000000);
} else {
/* agfi %dst,-imm */
EMIT6_IMM(0xc2080000, dst_reg, -imm);
}
break;
/*
* BPF_MUL
*/
case BPF_ALU | BPF_MUL | BPF_X: /* dst = (u32) dst * (u32) src */
/* msr %dst,%src */
EMIT4(0xb2520000, dst_reg, src_reg);
EMIT_ZERO(dst_reg);
break;
case BPF_ALU64 | BPF_MUL | BPF_X: /* dst = dst * src */
/* msgr %dst,%src */
EMIT4(0xb90c0000, dst_reg, src_reg);
break;
case BPF_ALU | BPF_MUL | BPF_K: /* dst = (u32) dst * (u32) imm */
if (imm != 1) {
			/* msfi %dst,imm */
EMIT6_IMM(0xc2010000, dst_reg, imm);
}
EMIT_ZERO(dst_reg);
break;
case BPF_ALU64 | BPF_MUL | BPF_K: /* dst = dst * imm */
if (imm == 1)
break;
/* msgfi %dst,imm */
EMIT6_IMM(0xc2000000, dst_reg, imm);
break;
/*
* BPF_DIV / BPF_MOD
*/
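	/*
	 * The unsigned-divide instructions dlr/dlgr used below operate on
	 * the even/odd register pair %w0:%w1 and leave the remainder in
	 * %w0 and the quotient in %w1; this is why rc_reg selects %w1 for
	 * BPF_DIV and %w0 for BPF_MOD.
	 */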
case BPF_ALU | BPF_DIV | BPF_X: /* dst = (u32) dst / (u32) src */
case BPF_ALU | BPF_MOD | BPF_X: /* dst = (u32) dst % (u32) src */
{
int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
/* lhi %w0,0 */
EMIT4_IMM(0xa7080000, REG_W0, 0);
/* lr %w1,%dst */
EMIT2(0x1800, REG_W1, dst_reg);
/* dlr %w0,%src */
EMIT4(0xb9970000, REG_W0, src_reg);
/* llgfr %dst,%rc */
EMIT4(0xb9160000, dst_reg, rc_reg);
if (insn_is_zext(&insn[1]))
insn_count = 2;
break;
}
case BPF_ALU64 | BPF_DIV | BPF_X: /* dst = dst / src */
case BPF_ALU64 | BPF_MOD | BPF_X: /* dst = dst % src */
{
int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
/* lghi %w0,0 */
EMIT4_IMM(0xa7090000, REG_W0, 0);
/* lgr %w1,%dst */
EMIT4(0xb9040000, REG_W1, dst_reg);
		/* dlgr %w0,%src */
EMIT4(0xb9870000, REG_W0, src_reg);
/* lgr %dst,%rc */
EMIT4(0xb9040000, dst_reg, rc_reg);
break;
}
case BPF_ALU | BPF_DIV | BPF_K: /* dst = (u32) dst / (u32) imm */
case BPF_ALU | BPF_MOD | BPF_K: /* dst = (u32) dst % (u32) imm */
{
int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
if (imm == 1) {
if (BPF_OP(insn->code) == BPF_MOD)
				/* lghi %dst,0 */
EMIT4_IMM(0xa7090000, dst_reg, 0);
else
EMIT_ZERO(dst_reg);
break;
}
/* lhi %w0,0 */
EMIT4_IMM(0xa7080000, REG_W0, 0);
/* lr %w1,%dst */
EMIT2(0x1800, REG_W1, dst_reg);
if (!is_first_pass(jit) && can_use_ldisp_for_lit32(jit)) {
/* dl %w0,<d(imm)>(%l) */
EMIT6_DISP_LH(0xe3000000, 0x0097, REG_W0, REG_0, REG_L,
EMIT_CONST_U32(imm));
} else {
/* lgfrl %dst,imm */
EMIT6_PCREL_RILB(0xc40c0000, dst_reg,
_EMIT_CONST_U32(imm));
jit->seen |= SEEN_LITERAL;
/* dlr %w0,%dst */
EMIT4(0xb9970000, REG_W0, dst_reg);
}
/* llgfr %dst,%rc */
EMIT4(0xb9160000, dst_reg, rc_reg);
if (insn_is_zext(&insn[1]))
insn_count = 2;
break;
}
case BPF_ALU64 | BPF_DIV | BPF_K: /* dst = dst / imm */
case BPF_ALU64 | BPF_MOD | BPF_K: /* dst = dst % imm */
{
int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
if (imm == 1) {
if (BPF_OP(insn->code) == BPF_MOD)
				/* lghi %dst,0 */
EMIT4_IMM(0xa7090000, dst_reg, 0);
break;
}
/* lghi %w0,0 */
EMIT4_IMM(0xa7090000, REG_W0, 0);
/* lgr %w1,%dst */
EMIT4(0xb9040000, REG_W1, dst_reg);
if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) {
/* dlg %w0,<d(imm)>(%l) */
EMIT6_DISP_LH(0xe3000000, 0x0087, REG_W0, REG_0, REG_L,
EMIT_CONST_U64(imm));
} else {
/* lgrl %dst,imm */
EMIT6_PCREL_RILB(0xc4080000, dst_reg,
_EMIT_CONST_U64(imm));
jit->seen |= SEEN_LITERAL;
/* dlgr %w0,%dst */
EMIT4(0xb9870000, REG_W0, dst_reg);
}
/* lgr %dst,%rc */
EMIT4(0xb9040000, dst_reg, rc_reg);
break;
}
/*
* BPF_AND
*/
case BPF_ALU | BPF_AND | BPF_X: /* dst = (u32) dst & (u32) src */
/* nr %dst,%src */
EMIT2(0x1400, dst_reg, src_reg);
EMIT_ZERO(dst_reg);
break;
case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
/* ngr %dst,%src */
EMIT4(0xb9800000, dst_reg, src_reg);
break;
case BPF_ALU | BPF_AND | BPF_K: /* dst = (u32) dst & (u32) imm */
/* nilf %dst,imm */
EMIT6_IMM(0xc00b0000, dst_reg, imm);
EMIT_ZERO(dst_reg);
break;
case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) {
/* ng %dst,<d(imm)>(%l) */
EMIT6_DISP_LH(0xe3000000, 0x0080,
dst_reg, REG_0, REG_L,
EMIT_CONST_U64(imm));
} else {
/* lgrl %w0,imm */
EMIT6_PCREL_RILB(0xc4080000, REG_W0,
_EMIT_CONST_U64(imm));
jit->seen |= SEEN_LITERAL;
/* ngr %dst,%w0 */
EMIT4(0xb9800000, dst_reg, REG_W0);
}
break;
/*
* BPF_OR
*/
case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
/* or %dst,%src */
EMIT2(0x1600, dst_reg, src_reg);
EMIT_ZERO(dst_reg);
break;
case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
/* ogr %dst,%src */
EMIT4(0xb9810000, dst_reg, src_reg);
break;
case BPF_ALU | BPF_OR | BPF_K: /* dst = (u32) dst | (u32) imm */
/* oilf %dst,imm */
EMIT6_IMM(0xc00d0000, dst_reg, imm);
EMIT_ZERO(dst_reg);
break;
case BPF_ALU64 | BPF_OR | BPF_K: /* dst = dst | imm */
if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) {
/* og %dst,<d(imm)>(%l) */
EMIT6_DISP_LH(0xe3000000, 0x0081,
dst_reg, REG_0, REG_L,
EMIT_CONST_U64(imm));
} else {
/* lgrl %w0,imm */
EMIT6_PCREL_RILB(0xc4080000, REG_W0,
_EMIT_CONST_U64(imm));
jit->seen |= SEEN_LITERAL;
/* ogr %dst,%w0 */
EMIT4(0xb9810000, dst_reg, REG_W0);
}
break;
/*
* BPF_XOR
*/
case BPF_ALU | BPF_XOR | BPF_X: /* dst = (u32) dst ^ (u32) src */
/* xr %dst,%src */
EMIT2(0x1700, dst_reg, src_reg);
EMIT_ZERO(dst_reg);
break;
case BPF_ALU64 | BPF_XOR | BPF_X: /* dst = dst ^ src */
/* xgr %dst,%src */
EMIT4(0xb9820000, dst_reg, src_reg);
break;
case BPF_ALU | BPF_XOR | BPF_K: /* dst = (u32) dst ^ (u32) imm */
if (imm != 0) {
/* xilf %dst,imm */
EMIT6_IMM(0xc0070000, dst_reg, imm);
}
EMIT_ZERO(dst_reg);
break;
case BPF_ALU64 | BPF_XOR | BPF_K: /* dst = dst ^ imm */
if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) {
/* xg %dst,<d(imm)>(%l) */
EMIT6_DISP_LH(0xe3000000, 0x0082,
dst_reg, REG_0, REG_L,
EMIT_CONST_U64(imm));
} else {
/* lgrl %w0,imm */
EMIT6_PCREL_RILB(0xc4080000, REG_W0,
_EMIT_CONST_U64(imm));
jit->seen |= SEEN_LITERAL;
/* xgr %dst,%w0 */
EMIT4(0xb9820000, dst_reg, REG_W0);
}
break;
/*
* BPF_LSH
*/
case BPF_ALU | BPF_LSH | BPF_X: /* dst = (u32) dst << (u32) src */
/* sll %dst,0(%src) */
EMIT4_DISP(0x89000000, dst_reg, src_reg, 0);
EMIT_ZERO(dst_reg);
break;
case BPF_ALU64 | BPF_LSH | BPF_X: /* dst = dst << src */
/* sllg %dst,%dst,0(%src) */
EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, src_reg, 0);
break;
case BPF_ALU | BPF_LSH | BPF_K: /* dst = (u32) dst << (u32) imm */
if (imm != 0) {
/* sll %dst,imm(%r0) */
EMIT4_DISP(0x89000000, dst_reg, REG_0, imm);
}
EMIT_ZERO(dst_reg);
break;
case BPF_ALU64 | BPF_LSH | BPF_K: /* dst = dst << imm */
if (imm == 0)
break;
/* sllg %dst,%dst,imm(%r0) */
EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, REG_0, imm);
break;
/*
* BPF_RSH
*/
case BPF_ALU | BPF_RSH | BPF_X: /* dst = (u32) dst >> (u32) src */
/* srl %dst,0(%src) */
EMIT4_DISP(0x88000000, dst_reg, src_reg, 0);
EMIT_ZERO(dst_reg);
break;
case BPF_ALU64 | BPF_RSH | BPF_X: /* dst = dst >> src */
/* srlg %dst,%dst,0(%src) */
EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, src_reg, 0);
break;
case BPF_ALU | BPF_RSH | BPF_K: /* dst = (u32) dst >> (u32) imm */
if (imm != 0) {
/* srl %dst,imm(%r0) */
EMIT4_DISP(0x88000000, dst_reg, REG_0, imm);
}
EMIT_ZERO(dst_reg);
break;
case BPF_ALU64 | BPF_RSH | BPF_K: /* dst = dst >> imm */
if (imm == 0)
break;
/* srlg %dst,%dst,imm(%r0) */
EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, REG_0, imm);
break;
/*
* BPF_ARSH
*/
case BPF_ALU | BPF_ARSH | BPF_X: /* ((s32) dst) >>= src */
		/* sra %dst,0(%src) */
EMIT4_DISP(0x8a000000, dst_reg, src_reg, 0);
EMIT_ZERO(dst_reg);
break;
case BPF_ALU64 | BPF_ARSH | BPF_X: /* ((s64) dst) >>= src */
/* srag %dst,%dst,0(%src) */
EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, src_reg, 0);
break;
	case BPF_ALU | BPF_ARSH | BPF_K: /* ((s32) dst) >>= imm */
if (imm != 0) {
/* sra %dst,imm(%r0) */
EMIT4_DISP(0x8a000000, dst_reg, REG_0, imm);
}
EMIT_ZERO(dst_reg);
break;
case BPF_ALU64 | BPF_ARSH | BPF_K: /* ((s64) dst) >>= imm */
if (imm == 0)
break;
/* srag %dst,%dst,imm(%r0) */
EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, REG_0, imm);
break;
/*
* BPF_NEG
*/
case BPF_ALU | BPF_NEG: /* dst = (u32) -dst */
/* lcr %dst,%dst */
EMIT2(0x1300, dst_reg, dst_reg);
EMIT_ZERO(dst_reg);
break;
case BPF_ALU64 | BPF_NEG: /* dst = -dst */
/* lcgr %dst,%dst */
EMIT4(0xb9030000, dst_reg, dst_reg);
break;
/*
* BPF_FROM_BE/LE
*/
case BPF_ALU | BPF_END | BPF_FROM_BE:
/* s390 is big endian, therefore only clear high order bytes */
switch (imm) {
case 16: /* dst = (u16) cpu_to_be16(dst) */
/* llghr %dst,%dst */
EMIT4(0xb9850000, dst_reg, dst_reg);
if (insn_is_zext(&insn[1]))
insn_count = 2;
break;
case 32: /* dst = (u32) cpu_to_be32(dst) */
if (!fp->aux->verifier_zext)
/* llgfr %dst,%dst */
EMIT4(0xb9160000, dst_reg, dst_reg);
break;
case 64: /* dst = (u64) cpu_to_be64(dst) */
break;
}
break;
case BPF_ALU | BPF_END | BPF_FROM_LE:
switch (imm) {
case 16: /* dst = (u16) cpu_to_le16(dst) */
/* lrvr %dst,%dst */
EMIT4(0xb91f0000, dst_reg, dst_reg);
/* srl %dst,16(%r0) */
EMIT4_DISP(0x88000000, dst_reg, REG_0, 16);
/* llghr %dst,%dst */
EMIT4(0xb9850000, dst_reg, dst_reg);
if (insn_is_zext(&insn[1]))
insn_count = 2;
break;
case 32: /* dst = (u32) cpu_to_le32(dst) */
/* lrvr %dst,%dst */
EMIT4(0xb91f0000, dst_reg, dst_reg);
if (!fp->aux->verifier_zext)
/* llgfr %dst,%dst */
EMIT4(0xb9160000, dst_reg, dst_reg);
break;
case 64: /* dst = (u64) cpu_to_le64(dst) */
/* lrvgr %dst,%dst */
EMIT4(0xb90f0000, dst_reg, dst_reg);
break;
}
break;
/*
* BPF_NOSPEC (speculation barrier)
*/
case BPF_ST | BPF_NOSPEC:
break;
/*
* BPF_ST(X)
*/
case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src_reg */
/* stcy %src,off(%dst) */
EMIT6_DISP_LH(0xe3000000, 0x0072, src_reg, dst_reg, REG_0, off);
jit->seen |= SEEN_MEM;
break;
	case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
/* sthy %src,off(%dst) */
EMIT6_DISP_LH(0xe3000000, 0x0070, src_reg, dst_reg, REG_0, off);
jit->seen |= SEEN_MEM;
break;
case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
/* sty %src,off(%dst) */
EMIT6_DISP_LH(0xe3000000, 0x0050, src_reg, dst_reg, REG_0, off);
jit->seen |= SEEN_MEM;
break;
	case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
/* stg %src,off(%dst) */
EMIT6_DISP_LH(0xe3000000, 0x0024, src_reg, dst_reg, REG_0, off);
jit->seen |= SEEN_MEM;
break;
case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
/* lhi %w0,imm */
EMIT4_IMM(0xa7080000, REG_W0, (u8) imm);
/* stcy %w0,off(dst) */
EMIT6_DISP_LH(0xe3000000, 0x0072, REG_W0, dst_reg, REG_0, off);
jit->seen |= SEEN_MEM;
break;
	case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
/* lhi %w0,imm */
EMIT4_IMM(0xa7080000, REG_W0, (u16) imm);
/* sthy %w0,off(dst) */
EMIT6_DISP_LH(0xe3000000, 0x0070, REG_W0, dst_reg, REG_0, off);
jit->seen |= SEEN_MEM;
break;
case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
/* llilf %w0,imm */
EMIT6_IMM(0xc00f0000, REG_W0, (u32) imm);
/* sty %w0,off(%dst) */
EMIT6_DISP_LH(0xe3000000, 0x0050, REG_W0, dst_reg, REG_0, off);
jit->seen |= SEEN_MEM;
break;
case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
/* lgfi %w0,imm */
EMIT6_IMM(0xc0010000, REG_W0, imm);
/* stg %w0,off(%dst) */
EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W0, dst_reg, REG_0, off);
jit->seen |= SEEN_MEM;
break;
/*
* BPF_ATOMIC
*/
case BPF_STX | BPF_ATOMIC | BPF_DW:
case BPF_STX | BPF_ATOMIC | BPF_W:
{
bool is32 = BPF_SIZE(insn->code) == BPF_W;
switch (insn->imm) {
/* {op32|op64} {%w0|%src},%src,off(%dst) */
#define EMIT_ATOMIC(op32, op64) do { \
EMIT6_DISP_LH(0xeb000000, is32 ? (op32) : (op64), \
(insn->imm & BPF_FETCH) ? src_reg : REG_W0, \
src_reg, dst_reg, off); \
if (is32 && (insn->imm & BPF_FETCH)) \
EMIT_ZERO(src_reg); \
} while (0)
case BPF_ADD:
case BPF_ADD | BPF_FETCH:
/* {laal|laalg} */
EMIT_ATOMIC(0x00fa, 0x00ea);
break;
case BPF_AND:
case BPF_AND | BPF_FETCH:
/* {lan|lang} */
EMIT_ATOMIC(0x00f4, 0x00e4);
break;
case BPF_OR:
case BPF_OR | BPF_FETCH:
/* {lao|laog} */
EMIT_ATOMIC(0x00f6, 0x00e6);
break;
case BPF_XOR:
case BPF_XOR | BPF_FETCH:
/* {lax|laxg} */
EMIT_ATOMIC(0x00f7, 0x00e7);
break;
#undef EMIT_ATOMIC
case BPF_XCHG:
/* {ly|lg} %w0,off(%dst) */
EMIT6_DISP_LH(0xe3000000,
is32 ? 0x0058 : 0x0004, REG_W0, REG_0,
dst_reg, off);
/* 0: {csy|csg} %w0,%src,off(%dst) */
EMIT6_DISP_LH(0xeb000000, is32 ? 0x0014 : 0x0030,
REG_W0, src_reg, dst_reg, off);
/* brc 4,0b */
EMIT4_PCREL_RIC(0xa7040000, 4, jit->prg - 6);
/* {llgfr|lgr} %src,%w0 */
EMIT4(is32 ? 0xb9160000 : 0xb9040000, src_reg, REG_W0);
if (is32 && insn_is_zext(&insn[1]))
insn_count = 2;
break;
case BPF_CMPXCHG:
/* 0: {csy|csg} %b0,%src,off(%dst) */
EMIT6_DISP_LH(0xeb000000, is32 ? 0x0014 : 0x0030,
BPF_REG_0, src_reg, dst_reg, off);
break;
default:
pr_err("Unknown atomic operation %02x\n", insn->imm);
return -1;
}
jit->seen |= SEEN_MEM;
break;
}
/*
* BPF_LDX
*/
case BPF_LDX | BPF_MEM | BPF_B: /* dst = *(u8 *)(ul) (src + off) */
case BPF_LDX | BPF_PROBE_MEM | BPF_B:
/* llgc %dst,0(off,%src) */
EMIT6_DISP_LH(0xe3000000, 0x0090, dst_reg, src_reg, REG_0, off);
jit->seen |= SEEN_MEM;
if (insn_is_zext(&insn[1]))
insn_count = 2;
break;
case BPF_LDX | BPF_MEM | BPF_H: /* dst = *(u16 *)(ul) (src + off) */
case BPF_LDX | BPF_PROBE_MEM | BPF_H:
/* llgh %dst,0(off,%src) */
EMIT6_DISP_LH(0xe3000000, 0x0091, dst_reg, src_reg, REG_0, off);
jit->seen |= SEEN_MEM;
if (insn_is_zext(&insn[1]))
insn_count = 2;
break;
case BPF_LDX | BPF_MEM | BPF_W: /* dst = *(u32 *)(ul) (src + off) */
case BPF_LDX | BPF_PROBE_MEM | BPF_W:
/* llgf %dst,off(%src) */
jit->seen |= SEEN_MEM;
EMIT6_DISP_LH(0xe3000000, 0x0016, dst_reg, src_reg, REG_0, off);
if (insn_is_zext(&insn[1]))
insn_count = 2;
break;
case BPF_LDX | BPF_MEM | BPF_DW: /* dst = *(u64 *)(ul) (src + off) */
case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
/* lg %dst,0(off,%src) */
jit->seen |= SEEN_MEM;
EMIT6_DISP_LH(0xe3000000, 0x0004, dst_reg, src_reg, REG_0, off);
break;
/*
* BPF_JMP / CALL
*/
case BPF_JMP | BPF_CALL:
{
const struct btf_func_model *m;
bool func_addr_fixed;
int j, ret;
u64 func;
ret = bpf_jit_get_func_addr(fp, insn, extra_pass,
&func, &func_addr_fixed);
if (ret < 0)
return -1;
REG_SET_SEEN(BPF_REG_5);
jit->seen |= SEEN_FUNC;
/*
* Copy the tail call counter to where the callee expects it.
*
* Note 1: The callee can increment the tail call counter, but
* we do not load it back, since the x86 JIT does not do this
* either.
*
* Note 2: We assume that the verifier does not let us call the
* main program, which clears the tail call counter on entry.
*/
/* mvc STK_OFF_TCCNT(4,%r15),N(%r15) */
_EMIT6(0xd203f000 | STK_OFF_TCCNT,
0xf000 | (STK_OFF_TCCNT + STK_OFF + stack_depth));
/* Sign-extend the kfunc arguments. */
if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
m = bpf_jit_find_kfunc_model(fp, insn);
if (!m)
return -1;
for (j = 0; j < m->nr_args; j++) {
if (sign_extend(jit, BPF_REG_1 + j,
m->arg_size[j],
m->arg_flags[j]))
return -1;
}
}
/* lgrl %w1,func */
EMIT6_PCREL_RILB(0xc4080000, REG_W1, _EMIT_CONST_U64(func));
/* %r1() */
call_r1(jit);
/* lgr %b0,%r2: load return value into %b0 */
EMIT4(0xb9040000, BPF_REG_0, REG_2);
break;
}
case BPF_JMP | BPF_TAIL_CALL: {
int patch_1_clrj, patch_2_clij, patch_3_brc;
/*
* Implicit input:
* B1: pointer to ctx
* B2: pointer to bpf_array
* B3: index in bpf_array
*
* if (index >= array->map.max_entries)
* goto out;
*/
/* llgf %w1,map.max_entries(%b2) */
EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2,
offsetof(struct bpf_array, map.max_entries));
/* if ((u32)%b3 >= (u32)%w1) goto out; */
/* clrj %b3,%w1,0xa,out */
patch_1_clrj = jit->prg;
EMIT6_PCREL_RIEB(0xec000000, 0x0077, BPF_REG_3, REG_W1, 0xa,
jit->prg);
/*
* if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
* goto out;
*/
if (jit->seen & SEEN_STACK)
off = STK_OFF_TCCNT + STK_OFF + stack_depth;
else
off = STK_OFF_TCCNT;
/* lhi %w0,1 */
EMIT4_IMM(0xa7080000, REG_W0, 1);
/* laal %w1,%w0,off(%r15) */
EMIT6_DISP_LH(0xeb000000, 0x00fa, REG_W1, REG_W0, REG_15, off);
/* clij %w1,MAX_TAIL_CALL_CNT-1,0x2,out */
patch_2_clij = jit->prg;
EMIT6_PCREL_RIEC(0xec000000, 0x007f, REG_W1, MAX_TAIL_CALL_CNT - 1,
2, jit->prg);
/*
* prog = array->ptrs[index];
* if (prog == NULL)
* goto out;
*/
/* llgfr %r1,%b3: %r1 = (u32) index */
EMIT4(0xb9160000, REG_1, BPF_REG_3);
/* sllg %r1,%r1,3: %r1 *= 8 */
EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, REG_1, REG_0, 3);
/* ltg %r1,prog(%b2,%r1) */
EMIT6_DISP_LH(0xe3000000, 0x0002, REG_1, BPF_REG_2,
REG_1, offsetof(struct bpf_array, ptrs));
/* brc 0x8,out */
patch_3_brc = jit->prg;
EMIT4_PCREL_RIC(0xa7040000, 8, jit->prg);
/*
* Restore registers before calling function
*/
save_restore_regs(jit, REGS_RESTORE, stack_depth);
/*
* goto *(prog->bpf_func + tail_call_start);
*/
/* lg %r1,bpf_func(%r1) */
EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, REG_1, REG_0,
offsetof(struct bpf_prog, bpf_func));
if (nospec_uses_trampoline()) {
jit->seen |= SEEN_FUNC;
/* aghi %r1,tail_call_start */
EMIT4_IMM(0xa70b0000, REG_1, jit->tail_call_start);
/* brcl 0xf,__s390_indirect_jump_r1 */
EMIT6_PCREL_RILC(0xc0040000, 0xf, jit->r1_thunk_ip);
} else {
/* bc 0xf,tail_call_start(%r1) */
_EMIT4(0x47f01000 + jit->tail_call_start);
}
/* out: */
if (jit->prg_buf) {
*(u16 *)(jit->prg_buf + patch_1_clrj + 2) =
(jit->prg - patch_1_clrj) >> 1;
*(u16 *)(jit->prg_buf + patch_2_clij + 2) =
(jit->prg - patch_2_clij) >> 1;
*(u16 *)(jit->prg_buf + patch_3_brc + 2) =
(jit->prg - patch_3_brc) >> 1;
}
break;
}
case BPF_JMP | BPF_EXIT: /* return b0 */
last = (i == fp->len - 1) ? 1 : 0;
if (last)
break;
if (!is_first_pass(jit) && can_use_rel(jit, jit->exit_ip))
/* brc 0xf, <exit> */
EMIT4_PCREL_RIC(0xa7040000, 0xf, jit->exit_ip);
else
/* brcl 0xf, <exit> */
EMIT6_PCREL_RILC(0xc0040000, 0xf, jit->exit_ip);
break;
/*
* Branch relative (number of skipped instructions) to offset on
* condition.
*
* Condition code to mask mapping:
*
* CC | Description | Mask
* ------------------------------
* 0 | Operands equal | 8
* 1 | First operand low | 4
* 2 | First operand high | 2
* 3 | Unused | 1
*
* For s390x relative branches: ip = ip + off_bytes
* For BPF relative branches: insn = insn + off_insns + 1
*
 * For example, for s390x an offset of 0 branches to the branch
 * instruction itself (a loop), while for BPF an offset of 0
 * branches to the instruction following the branch.
*/
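	/*
	 * Worked example (illustration only, not from the original source):
	 * for the BPF instruction "if r1 > 5 goto +3" at index i, the mask
	 * below is jh (0x2000) and the emitted s390x branch targets
	 * addrs[i + 3 + 1], i.e. the native code of BPF instruction i + 4 -
	 * the "off_insns + 1" rule from above.
	 */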
case BPF_JMP | BPF_JA: /* if (true) */
mask = 0xf000; /* j */
goto branch_oc;
case BPF_JMP | BPF_JSGT | BPF_K: /* ((s64) dst > (s64) imm) */
case BPF_JMP32 | BPF_JSGT | BPF_K: /* ((s32) dst > (s32) imm) */
mask = 0x2000; /* jh */
goto branch_ks;
case BPF_JMP | BPF_JSLT | BPF_K: /* ((s64) dst < (s64) imm) */
case BPF_JMP32 | BPF_JSLT | BPF_K: /* ((s32) dst < (s32) imm) */
mask = 0x4000; /* jl */
goto branch_ks;
case BPF_JMP | BPF_JSGE | BPF_K: /* ((s64) dst >= (s64) imm) */
case BPF_JMP32 | BPF_JSGE | BPF_K: /* ((s32) dst >= (s32) imm) */
mask = 0xa000; /* jhe */
goto branch_ks;
case BPF_JMP | BPF_JSLE | BPF_K: /* ((s64) dst <= (s64) imm) */
case BPF_JMP32 | BPF_JSLE | BPF_K: /* ((s32) dst <= (s32) imm) */
mask = 0xc000; /* jle */
goto branch_ks;
case BPF_JMP | BPF_JGT | BPF_K: /* (dst_reg > imm) */
case BPF_JMP32 | BPF_JGT | BPF_K: /* ((u32) dst_reg > (u32) imm) */
mask = 0x2000; /* jh */
goto branch_ku;
case BPF_JMP | BPF_JLT | BPF_K: /* (dst_reg < imm) */
case BPF_JMP32 | BPF_JLT | BPF_K: /* ((u32) dst_reg < (u32) imm) */
mask = 0x4000; /* jl */
goto branch_ku;
case BPF_JMP | BPF_JGE | BPF_K: /* (dst_reg >= imm) */
case BPF_JMP32 | BPF_JGE | BPF_K: /* ((u32) dst_reg >= (u32) imm) */
mask = 0xa000; /* jhe */
goto branch_ku;
case BPF_JMP | BPF_JLE | BPF_K: /* (dst_reg <= imm) */
case BPF_JMP32 | BPF_JLE | BPF_K: /* ((u32) dst_reg <= (u32) imm) */
mask = 0xc000; /* jle */
goto branch_ku;
case BPF_JMP | BPF_JNE | BPF_K: /* (dst_reg != imm) */
case BPF_JMP32 | BPF_JNE | BPF_K: /* ((u32) dst_reg != (u32) imm) */
mask = 0x7000; /* jne */
goto branch_ku;
case BPF_JMP | BPF_JEQ | BPF_K: /* (dst_reg == imm) */
case BPF_JMP32 | BPF_JEQ | BPF_K: /* ((u32) dst_reg == (u32) imm) */
mask = 0x8000; /* je */
goto branch_ku;
case BPF_JMP | BPF_JSET | BPF_K: /* (dst_reg & imm) */
case BPF_JMP32 | BPF_JSET | BPF_K: /* ((u32) dst_reg & (u32) imm) */
mask = 0x7000; /* jnz */
if (BPF_CLASS(insn->code) == BPF_JMP32) {
/* llilf %w1,imm (load zero extend imm) */
EMIT6_IMM(0xc00f0000, REG_W1, imm);
/* nr %w1,%dst */
EMIT2(0x1400, REG_W1, dst_reg);
} else {
/* lgfi %w1,imm (load sign extend imm) */
EMIT6_IMM(0xc0010000, REG_W1, imm);
/* ngr %w1,%dst */
EMIT4(0xb9800000, REG_W1, dst_reg);
}
goto branch_oc;
case BPF_JMP | BPF_JSGT | BPF_X: /* ((s64) dst > (s64) src) */
case BPF_JMP32 | BPF_JSGT | BPF_X: /* ((s32) dst > (s32) src) */
mask = 0x2000; /* jh */
goto branch_xs;
case BPF_JMP | BPF_JSLT | BPF_X: /* ((s64) dst < (s64) src) */
case BPF_JMP32 | BPF_JSLT | BPF_X: /* ((s32) dst < (s32) src) */
mask = 0x4000; /* jl */
goto branch_xs;
case BPF_JMP | BPF_JSGE | BPF_X: /* ((s64) dst >= (s64) src) */
case BPF_JMP32 | BPF_JSGE | BPF_X: /* ((s32) dst >= (s32) src) */
mask = 0xa000; /* jhe */
goto branch_xs;
case BPF_JMP | BPF_JSLE | BPF_X: /* ((s64) dst <= (s64) src) */
case BPF_JMP32 | BPF_JSLE | BPF_X: /* ((s32) dst <= (s32) src) */
mask = 0xc000; /* jle */
goto branch_xs;
case BPF_JMP | BPF_JGT | BPF_X: /* (dst > src) */
case BPF_JMP32 | BPF_JGT | BPF_X: /* ((u32) dst > (u32) src) */
mask = 0x2000; /* jh */
goto branch_xu;
case BPF_JMP | BPF_JLT | BPF_X: /* (dst < src) */
case BPF_JMP32 | BPF_JLT | BPF_X: /* ((u32) dst < (u32) src) */
mask = 0x4000; /* jl */
goto branch_xu;
case BPF_JMP | BPF_JGE | BPF_X: /* (dst >= src) */
case BPF_JMP32 | BPF_JGE | BPF_X: /* ((u32) dst >= (u32) src) */
mask = 0xa000; /* jhe */
goto branch_xu;
case BPF_JMP | BPF_JLE | BPF_X: /* (dst <= src) */
case BPF_JMP32 | BPF_JLE | BPF_X: /* ((u32) dst <= (u32) src) */
mask = 0xc000; /* jle */
goto branch_xu;
case BPF_JMP | BPF_JNE | BPF_X: /* (dst != src) */
case BPF_JMP32 | BPF_JNE | BPF_X: /* ((u32) dst != (u32) src) */
mask = 0x7000; /* jne */
goto branch_xu;
case BPF_JMP | BPF_JEQ | BPF_X: /* (dst == src) */
case BPF_JMP32 | BPF_JEQ | BPF_X: /* ((u32) dst == (u32) src) */
mask = 0x8000; /* je */
goto branch_xu;
case BPF_JMP | BPF_JSET | BPF_X: /* (dst & src) */
case BPF_JMP32 | BPF_JSET | BPF_X: /* ((u32) dst & (u32) src) */
{
bool is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
mask = 0x7000; /* jnz */
/* nrk or ngrk %w1,%dst,%src */
EMIT4_RRF((is_jmp32 ? 0xb9f40000 : 0xb9e40000),
REG_W1, dst_reg, src_reg);
goto branch_oc;
branch_ks:
is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
/* cfi or cgfi %dst,imm */
EMIT6_IMM(is_jmp32 ? 0xc20d0000 : 0xc20c0000,
dst_reg, imm);
if (!is_first_pass(jit) &&
can_use_rel(jit, addrs[i + off + 1])) {
/* brc mask,off */
EMIT4_PCREL_RIC(0xa7040000,
mask >> 12, addrs[i + off + 1]);
} else {
/* brcl mask,off */
EMIT6_PCREL_RILC(0xc0040000,
mask >> 12, addrs[i + off + 1]);
}
break;
branch_ku:
/* lgfi %w1,imm (load sign extend imm) */
src_reg = REG_1;
EMIT6_IMM(0xc0010000, src_reg, imm);
goto branch_xu;
branch_xs:
is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
if (!is_first_pass(jit) &&
can_use_rel(jit, addrs[i + off + 1])) {
/* crj or cgrj %dst,%src,mask,off */
EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0076 : 0x0064),
dst_reg, src_reg, i, off, mask);
} else {
/* cr or cgr %dst,%src */
if (is_jmp32)
EMIT2(0x1900, dst_reg, src_reg);
else
EMIT4(0xb9200000, dst_reg, src_reg);
/* brcl mask,off */
EMIT6_PCREL_RILC(0xc0040000,
mask >> 12, addrs[i + off + 1]);
}
break;
branch_xu:
is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
if (!is_first_pass(jit) &&
can_use_rel(jit, addrs[i + off + 1])) {
/* clrj or clgrj %dst,%src,mask,off */
EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0077 : 0x0065),
dst_reg, src_reg, i, off, mask);
} else {
/* clr or clgr %dst,%src */
if (is_jmp32)
EMIT2(0x1500, dst_reg, src_reg);
else
EMIT4(0xb9210000, dst_reg, src_reg);
/* brcl mask,off */
EMIT6_PCREL_RILC(0xc0040000,
mask >> 12, addrs[i + off + 1]);
}
break;
branch_oc:
if (!is_first_pass(jit) &&
can_use_rel(jit, addrs[i + off + 1])) {
/* brc mask,off */
EMIT4_PCREL_RIC(0xa7040000,
mask >> 12, addrs[i + off + 1]);
} else {
/* brcl mask,off */
EMIT6_PCREL_RILC(0xc0040000,
mask >> 12, addrs[i + off + 1]);
}
break;
}
default: /* too complex, give up */
pr_err("Unknown opcode %02x\n", insn->code);
return -1;
}
if (probe_prg != -1) {
/*
* Handlers of certain exceptions leave psw.addr pointing to
* the instruction directly after the failing one. Therefore,
* create two exception table entries and also add a nop in
* case two probing instructions come directly after each
* other.
*/
nop_prg = jit->prg;
/* bcr 0,%0 */
_EMIT2(0x0700);
err = bpf_jit_probe_mem(jit, fp, probe_prg, nop_prg);
if (err < 0)
return err;
}
return insn_count;
}
/*
 * Return whether the new address of the i-th instruction satisfies the
 * invariants of the current pass
*/
static bool bpf_is_new_addr_sane(struct bpf_jit *jit, int i)
{
/* On the first pass anything goes */
if (is_first_pass(jit))
return true;
/* The codegen pass must not change anything */
if (is_codegen_pass(jit))
return jit->addrs[i] == jit->prg;
/* Passes in between must not increase code size */
return jit->addrs[i] >= jit->prg;
}
/*
* Update the address of i-th instruction
*/
static int bpf_set_addr(struct bpf_jit *jit, int i)
{
int delta;
if (is_codegen_pass(jit)) {
delta = jit->prg - jit->addrs[i];
if (delta < 0)
bpf_skip(jit, -delta);
}
if (WARN_ON_ONCE(!bpf_is_new_addr_sane(jit, i)))
return -1;
jit->addrs[i] = jit->prg;
return 0;
}
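/*
 * Descriptive note: across passes the code can only shrink, e.g. when a
 * 6-byte brcl can be replaced by a 4-byte brc once the branch target is
 * known to be in range (see the can_use_rel() callers above). The codegen
 * pass must not move code anymore, so bpf_set_addr() pads any remaining
 * shrinkage with nops via bpf_skip() before the address is pinned.
 */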
/*
* Compile eBPF program into s390x code
*/
static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp,
bool extra_pass, u32 stack_depth)
{
int i, insn_count, lit32_size, lit64_size;
jit->lit32 = jit->lit32_start;
jit->lit64 = jit->lit64_start;
jit->prg = 0;
jit->excnt = 0;
bpf_jit_prologue(jit, fp, stack_depth);
if (bpf_set_addr(jit, 0) < 0)
return -1;
for (i = 0; i < fp->len; i += insn_count) {
insn_count = bpf_jit_insn(jit, fp, i, extra_pass, stack_depth);
if (insn_count < 0)
return -1;
/* Next instruction address */
if (bpf_set_addr(jit, i + insn_count) < 0)
return -1;
}
bpf_jit_epilogue(jit, stack_depth);
lit32_size = jit->lit32 - jit->lit32_start;
lit64_size = jit->lit64 - jit->lit64_start;
jit->lit32_start = jit->prg;
if (lit32_size)
jit->lit32_start = ALIGN(jit->lit32_start, 4);
jit->lit64_start = jit->lit32_start + lit32_size;
if (lit64_size)
jit->lit64_start = ALIGN(jit->lit64_start, 8);
jit->size = jit->lit64_start + lit64_size;
jit->size_prg = jit->prg;
if (WARN_ON_ONCE(fp->aux->extable &&
jit->excnt != fp->aux->num_exentries))
		/* Verifier bug - entry count mismatch. */
return -1;
return 0;
}
bool bpf_jit_needs_zext(void)
{
return true;
}
struct s390_jit_data {
struct bpf_binary_header *header;
struct bpf_jit ctx;
int pass;
};
static struct bpf_binary_header *bpf_jit_alloc(struct bpf_jit *jit,
struct bpf_prog *fp)
{
struct bpf_binary_header *header;
u32 extable_size;
u32 code_size;
/* We need two entries per insn. */
fp->aux->num_exentries *= 2;
code_size = roundup(jit->size,
__alignof__(struct exception_table_entry));
extable_size = fp->aux->num_exentries *
sizeof(struct exception_table_entry);
header = bpf_jit_binary_alloc(code_size + extable_size, &jit->prg_buf,
8, jit_fill_hole);
if (!header)
return NULL;
fp->aux->extable = (struct exception_table_entry *)
(jit->prg_buf + code_size);
return header;
}
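/*
 * Descriptive sketch of the resulting allocation:
 *
 *	+--------------------+ <- jit->prg_buf
 *	| JITed code + lits  |  jit->size bytes
 *	+--------------------+
 *	| alignment padding  |  up to alignof(struct exception_table_entry)
 *	+--------------------+ <- fp->aux->extable
 *	| exception table    |  two entries per probing instruction
 *	+--------------------+
 */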
/*
* Compile eBPF program "fp"
*/
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
{
u32 stack_depth = round_up(fp->aux->stack_depth, 8);
struct bpf_prog *tmp, *orig_fp = fp;
struct bpf_binary_header *header;
struct s390_jit_data *jit_data;
bool tmp_blinded = false;
bool extra_pass = false;
struct bpf_jit jit;
int pass;
if (WARN_ON_ONCE(bpf_plt_end - bpf_plt != BPF_PLT_SIZE))
return orig_fp;
if (!fp->jit_requested)
return orig_fp;
tmp = bpf_jit_blind_constants(fp);
/*
* If blinding was requested and we failed during blinding,
* we must fall back to the interpreter.
*/
if (IS_ERR(tmp))
return orig_fp;
if (tmp != fp) {
tmp_blinded = true;
fp = tmp;
}
jit_data = fp->aux->jit_data;
if (!jit_data) {
jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
if (!jit_data) {
fp = orig_fp;
goto out;
}
fp->aux->jit_data = jit_data;
}
if (jit_data->ctx.addrs) {
jit = jit_data->ctx;
header = jit_data->header;
extra_pass = true;
pass = jit_data->pass + 1;
goto skip_init_ctx;
}
memset(&jit, 0, sizeof(jit));
jit.addrs = kvcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL);
if (jit.addrs == NULL) {
fp = orig_fp;
goto free_addrs;
}
/*
* Three initial passes:
* - 1/2: Determine clobbered registers
* - 3: Calculate program size and addrs array
*/
for (pass = 1; pass <= 3; pass++) {
if (bpf_jit_prog(&jit, fp, extra_pass, stack_depth)) {
fp = orig_fp;
goto free_addrs;
}
}
/*
* Final pass: Allocate and generate program
*/
header = bpf_jit_alloc(&jit, fp);
if (!header) {
fp = orig_fp;
goto free_addrs;
}
skip_init_ctx:
if (bpf_jit_prog(&jit, fp, extra_pass, stack_depth)) {
bpf_jit_binary_free(header);
fp = orig_fp;
goto free_addrs;
}
if (bpf_jit_enable > 1) {
bpf_jit_dump(fp->len, jit.size, pass, jit.prg_buf);
print_fn_code(jit.prg_buf, jit.size_prg);
}
if (!fp->is_func || extra_pass) {
bpf_jit_binary_lock_ro(header);
} else {
jit_data->header = header;
jit_data->ctx = jit;
jit_data->pass = pass;
}
fp->bpf_func = (void *) jit.prg_buf;
fp->jited = 1;
fp->jited_len = jit.size;
if (!fp->is_func || extra_pass) {
bpf_prog_fill_jited_linfo(fp, jit.addrs + 1);
free_addrs:
kvfree(jit.addrs);
kfree(jit_data);
fp->aux->jit_data = NULL;
}
out:
if (tmp_blinded)
bpf_jit_prog_release_other(fp, fp == orig_fp ?
tmp : orig_fp);
return fp;
}
bool bpf_jit_supports_kfunc_call(void)
{
return true;
}
bool bpf_jit_supports_far_kfunc_call(void)
{
return true;
}
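/*
 * Descriptive note for the poking code below: the patched site is a
 * 6-byte "brcl <mask>,<target>". Mask 0xf is an unconditional branch,
 * mask 0x0 is a nop, so a jump is switched on or off by rewriting the
 * single mask byte at ip + 1. Calls and out-of-range jumps go through a
 * small PLT whose target pointer is rewritten instead.
 */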
int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
void *old_addr, void *new_addr)
{
struct {
u16 opc;
s32 disp;
} __packed insn;
char expected_plt[BPF_PLT_SIZE];
char current_plt[BPF_PLT_SIZE];
char new_plt[BPF_PLT_SIZE];
char *plt;
char *ret;
int err;
/* Verify the branch to be patched. */
err = copy_from_kernel_nofault(&insn, ip, sizeof(insn));
if (err < 0)
return err;
if (insn.opc != (0xc004 | (old_addr ? 0xf0 : 0)))
return -EINVAL;
if (t == BPF_MOD_JUMP &&
insn.disp == ((char *)new_addr - (char *)ip) >> 1) {
/*
* The branch already points to the destination,
* there is no PLT.
*/
} else {
/* Verify the PLT. */
plt = (char *)ip + (insn.disp << 1);
err = copy_from_kernel_nofault(current_plt, plt, BPF_PLT_SIZE);
if (err < 0)
return err;
ret = (char *)ip + 6;
bpf_jit_plt(expected_plt, ret, old_addr);
if (memcmp(current_plt, expected_plt, BPF_PLT_SIZE))
return -EINVAL;
/* Adjust the call address. */
bpf_jit_plt(new_plt, ret, new_addr);
s390_kernel_write(plt + (bpf_plt_target - bpf_plt),
new_plt + (bpf_plt_target - bpf_plt),
sizeof(void *));
}
/* Adjust the mask of the branch. */
insn.opc = 0xc004 | (new_addr ? 0xf0 : 0);
s390_kernel_write((char *)ip + 1, (char *)&insn.opc + 1, 1);
/* Make the new code visible to the other CPUs. */
text_poke_sync_lock();
return 0;
}
struct bpf_tramp_jit {
struct bpf_jit common;
int orig_stack_args_off;/* Offset of arguments placed on stack by the
* func_addr's original caller
*/
int stack_size; /* Trampoline stack size */
int stack_args_off; /* Offset of stack arguments for calling
* func_addr, has to be at the top
*/
int reg_args_off; /* Offset of register arguments for calling
* func_addr
*/
int ip_off; /* For bpf_get_func_ip(), has to be at
* (ctx - 16)
*/
int arg_cnt_off; /* For bpf_get_func_arg_cnt(), has to be at
* (ctx - 8)
*/
int bpf_args_off; /* Offset of BPF_PROG context, which consists
* of BPF arguments followed by return value
*/
int retval_off; /* Offset of return value (see above) */
int r7_r8_off; /* Offset of saved %r7 and %r8, which are used
* for __bpf_prog_enter() return value and
* func_addr respectively
*/
int r14_off; /* Offset of saved %r14 */
int run_ctx_off; /* Offset of struct bpf_tramp_run_ctx */
int tccnt_off; /* Offset of saved tailcall counter */
int do_fexit; /* do_fexit: label */
};
static void load_imm64(struct bpf_jit *jit, int dst_reg, u64 val)
{
/* llihf %dst_reg,val_hi */
EMIT6_IMM(0xc00e0000, dst_reg, (val >> 32));
/* oilf %rdst_reg,val_lo */
EMIT6_IMM(0xc00d0000, dst_reg, val);
}
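/*
 * Example (illustration only): load_imm64(jit, REG_1, 0x1122334455667788)
 * emits "llihf %r1,0x11223344" (sets bits 0-31, zeroes bits 32-63)
 * followed by "oilf %r1,0x55667788" (ORs in bits 32-63).
 */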
static int invoke_bpf_prog(struct bpf_tramp_jit *tjit,
const struct btf_func_model *m,
struct bpf_tramp_link *tlink, bool save_ret)
{
struct bpf_jit *jit = &tjit->common;
int cookie_off = tjit->run_ctx_off +
offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
struct bpf_prog *p = tlink->link.prog;
int patch;
/*
* run_ctx.cookie = tlink->cookie;
*/
/* %r0 = tlink->cookie */
load_imm64(jit, REG_W0, tlink->cookie);
/* stg %r0,cookie_off(%r15) */
EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W0, REG_0, REG_15, cookie_off);
/*
* if ((start = __bpf_prog_enter(p, &run_ctx)) == 0)
* goto skip;
*/
/* %r1 = __bpf_prog_enter */
load_imm64(jit, REG_1, (u64)bpf_trampoline_enter(p));
/* %r2 = p */
load_imm64(jit, REG_2, (u64)p);
/* la %r3,run_ctx_off(%r15) */
EMIT4_DISP(0x41000000, REG_3, REG_15, tjit->run_ctx_off);
/* %r1() */
call_r1(jit);
/* ltgr %r7,%r2 */
EMIT4(0xb9020000, REG_7, REG_2);
/* brcl 8,skip */
patch = jit->prg;
EMIT6_PCREL_RILC(0xc0040000, 8, 0);
/*
* retval = bpf_func(args, p->insnsi);
*/
/* %r1 = p->bpf_func */
load_imm64(jit, REG_1, (u64)p->bpf_func);
/* la %r2,bpf_args_off(%r15) */
EMIT4_DISP(0x41000000, REG_2, REG_15, tjit->bpf_args_off);
/* %r3 = p->insnsi */
if (!p->jited)
load_imm64(jit, REG_3, (u64)p->insnsi);
/* %r1() */
call_r1(jit);
	if (save_ret) {
		if (sign_extend(jit, REG_2, m->ret_size, m->ret_flags))
			return -1;
		/* stg %r2,retval_off(%r15) */
		EMIT6_DISP_LH(0xe3000000, 0x0024, REG_2, REG_0, REG_15,
			      tjit->retval_off);
	}
/* skip: */
if (jit->prg_buf)
*(u32 *)&jit->prg_buf[patch + 2] = (jit->prg - patch) >> 1;
/*
* __bpf_prog_exit(p, start, &run_ctx);
*/
/* %r1 = __bpf_prog_exit */
load_imm64(jit, REG_1, (u64)bpf_trampoline_exit(p));
/* %r2 = p */
load_imm64(jit, REG_2, (u64)p);
/* lgr %r3,%r7 */
EMIT4(0xb9040000, REG_3, REG_7);
/* la %r4,run_ctx_off(%r15) */
EMIT4_DISP(0x41000000, REG_4, REG_15, tjit->run_ctx_off);
/* %r1() */
call_r1(jit);
return 0;
}
static int alloc_stack(struct bpf_tramp_jit *tjit, size_t size)
{
int stack_offset = tjit->stack_size;
tjit->stack_size += size;
return stack_offset;
}
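/*
 * Descriptive note: the returned offsets are displacements off %r15
 * after the trampoline prologue has lowered the stack pointer with
 * "aghi %r15,-stack_size" in __arch_prepare_bpf_trampoline().
 */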
/* ABI uses %r2 - %r6 for parameter passing. */
#define MAX_NR_REG_ARGS 5
/* The "L" field of the "mvc" instruction is 8 bits. */
#define MAX_MVC_SIZE 256
#define MAX_NR_STACK_ARGS (MAX_MVC_SIZE / sizeof(u64))
/* -mfentry generates a 6-byte nop on s390x. */
#define S390X_PATCH_SIZE 6
static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
struct bpf_tramp_jit *tjit,
const struct btf_func_model *m,
u32 flags,
struct bpf_tramp_links *tlinks,
void *func_addr)
{
struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
int nr_bpf_args, nr_reg_args, nr_stack_args;
struct bpf_jit *jit = &tjit->common;
int arg, bpf_arg_off;
int i, j;
	/* Support as many stack arguments as the "mvc" instruction can handle. */
nr_reg_args = min_t(int, m->nr_args, MAX_NR_REG_ARGS);
nr_stack_args = m->nr_args - nr_reg_args;
if (nr_stack_args > MAX_NR_STACK_ARGS)
return -ENOTSUPP;
/* Return to %r14, since func_addr and %r0 are not available. */
if (!func_addr && !(flags & BPF_TRAMP_F_ORIG_STACK))
flags |= BPF_TRAMP_F_SKIP_FRAME;
/*
* Compute how many arguments we need to pass to BPF programs.
* BPF ABI mirrors that of x86_64: arguments that are 16 bytes or
* smaller are packed into 1 or 2 registers; larger arguments are
* passed via pointers.
* In s390x ABI, arguments that are 8 bytes or smaller are packed into
* a register; larger arguments are passed via pointers.
* We need to deal with this difference.
*/
nr_bpf_args = 0;
for (i = 0; i < m->nr_args; i++) {
if (m->arg_size[i] <= 8)
nr_bpf_args += 1;
else if (m->arg_size[i] <= 16)
nr_bpf_args += 2;
else
return -ENOTSUPP;
}
/*
* Calculate the stack layout.
*/
/* Reserve STACK_FRAME_OVERHEAD bytes for the callees. */
tjit->stack_size = STACK_FRAME_OVERHEAD;
tjit->stack_args_off = alloc_stack(tjit, nr_stack_args * sizeof(u64));
tjit->reg_args_off = alloc_stack(tjit, nr_reg_args * sizeof(u64));
tjit->ip_off = alloc_stack(tjit, sizeof(u64));
tjit->arg_cnt_off = alloc_stack(tjit, sizeof(u64));
tjit->bpf_args_off = alloc_stack(tjit, nr_bpf_args * sizeof(u64));
tjit->retval_off = alloc_stack(tjit, sizeof(u64));
tjit->r7_r8_off = alloc_stack(tjit, 2 * sizeof(u64));
tjit->r14_off = alloc_stack(tjit, sizeof(u64));
tjit->run_ctx_off = alloc_stack(tjit,
sizeof(struct bpf_tramp_run_ctx));
tjit->tccnt_off = alloc_stack(tjit, sizeof(u64));
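	/*
	 * Descriptive sketch of the trampoline frame, low to high:
	 *
	 *	0		register save area for callees
	 *	stack_args_off	stack arguments for func_addr
	 *	reg_args_off	saved register arguments %r2-%rN
	 *	ip_off		func_addr for bpf_get_func_ip()
	 *	arg_cnt_off	argument count for bpf_get_func_arg_cnt()
	 *	bpf_args_off	BPF_PROG context: args, then return value
	 *	retval_off	return value
	 *	r7_r8_off	saved %r7 and %r8
	 *	r14_off		saved %r14
	 *	run_ctx_off	struct bpf_tramp_run_ctx
	 *	tccnt_off	saved tail call counter
	 */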
/* The caller has already reserved STACK_FRAME_OVERHEAD bytes. */
tjit->stack_size -= STACK_FRAME_OVERHEAD;
tjit->orig_stack_args_off = tjit->stack_size + STACK_FRAME_OVERHEAD;
/* aghi %r15,-stack_size */
EMIT4_IMM(0xa70b0000, REG_15, -tjit->stack_size);
/* mvc tccnt_off(4,%r15),stack_size+STK_OFF_TCCNT(%r15) */
_EMIT6(0xd203f000 | tjit->tccnt_off,
0xf000 | (tjit->stack_size + STK_OFF_TCCNT));
/* stmg %r2,%rN,fwd_reg_args_off(%r15) */
if (nr_reg_args)
EMIT6_DISP_LH(0xeb000000, 0x0024, REG_2,
REG_2 + (nr_reg_args - 1), REG_15,
tjit->reg_args_off);
for (i = 0, j = 0; i < m->nr_args; i++) {
if (i < MAX_NR_REG_ARGS)
arg = REG_2 + i;
else
arg = tjit->orig_stack_args_off +
(i - MAX_NR_REG_ARGS) * sizeof(u64);
bpf_arg_off = tjit->bpf_args_off + j * sizeof(u64);
if (m->arg_size[i] <= 8) {
if (i < MAX_NR_REG_ARGS)
/* stg %arg,bpf_arg_off(%r15) */
EMIT6_DISP_LH(0xe3000000, 0x0024, arg,
REG_0, REG_15, bpf_arg_off);
else
/* mvc bpf_arg_off(8,%r15),arg(%r15) */
_EMIT6(0xd207f000 | bpf_arg_off,
0xf000 | arg);
j += 1;
} else {
if (i < MAX_NR_REG_ARGS) {
/* mvc bpf_arg_off(16,%r15),0(%arg) */
_EMIT6(0xd20ff000 | bpf_arg_off,
reg2hex[arg] << 12);
} else {
/* lg %r1,arg(%r15) */
EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, REG_0,
REG_15, arg);
/* mvc bpf_arg_off(16,%r15),0(%r1) */
_EMIT6(0xd20ff000 | bpf_arg_off, 0x1000);
}
j += 2;
}
}
/* stmg %r7,%r8,r7_r8_off(%r15) */
EMIT6_DISP_LH(0xeb000000, 0x0024, REG_7, REG_8, REG_15,
tjit->r7_r8_off);
/* stg %r14,r14_off(%r15) */
EMIT6_DISP_LH(0xe3000000, 0x0024, REG_14, REG_0, REG_15, tjit->r14_off);
if (flags & BPF_TRAMP_F_ORIG_STACK) {
/*
* The ftrace trampoline puts the return address (which is the
* address of the original function + S390X_PATCH_SIZE) into
* %r0; see ftrace_shared_hotpatch_trampoline_br and
* ftrace_init_nop() for details.
*/
/* lgr %r8,%r0 */
EMIT4(0xb9040000, REG_8, REG_0);
} else {
/* %r8 = func_addr + S390X_PATCH_SIZE */
load_imm64(jit, REG_8, (u64)func_addr + S390X_PATCH_SIZE);
}
/*
* ip = func_addr;
* arg_cnt = m->nr_args;
*/
if (flags & BPF_TRAMP_F_IP_ARG) {
/* %r0 = func_addr */
load_imm64(jit, REG_0, (u64)func_addr);
/* stg %r0,ip_off(%r15) */
EMIT6_DISP_LH(0xe3000000, 0x0024, REG_0, REG_0, REG_15,
tjit->ip_off);
}
/* lghi %r0,nr_bpf_args */
EMIT4_IMM(0xa7090000, REG_0, nr_bpf_args);
/* stg %r0,arg_cnt_off(%r15) */
EMIT6_DISP_LH(0xe3000000, 0x0024, REG_0, REG_0, REG_15,
tjit->arg_cnt_off);
if (flags & BPF_TRAMP_F_CALL_ORIG) {
/*
* __bpf_tramp_enter(im);
*/
/* %r1 = __bpf_tramp_enter */
load_imm64(jit, REG_1, (u64)__bpf_tramp_enter);
/* %r2 = im */
load_imm64(jit, REG_2, (u64)im);
/* %r1() */
call_r1(jit);
}
for (i = 0; i < fentry->nr_links; i++)
if (invoke_bpf_prog(tjit, m, fentry->links[i],
flags & BPF_TRAMP_F_RET_FENTRY_RET))
return -EINVAL;
if (fmod_ret->nr_links) {
/*
* retval = 0;
*/
/* xc retval_off(8,%r15),retval_off(%r15) */
_EMIT6(0xd707f000 | tjit->retval_off,
0xf000 | tjit->retval_off);
for (i = 0; i < fmod_ret->nr_links; i++) {
if (invoke_bpf_prog(tjit, m, fmod_ret->links[i], true))
return -EINVAL;
/*
* if (retval)
* goto do_fexit;
*/
/* ltg %r0,retval_off(%r15) */
EMIT6_DISP_LH(0xe3000000, 0x0002, REG_0, REG_0, REG_15,
tjit->retval_off);
/* brcl 7,do_fexit */
EMIT6_PCREL_RILC(0xc0040000, 7, tjit->do_fexit);
}
}
if (flags & BPF_TRAMP_F_CALL_ORIG) {
/*
* retval = func_addr(args);
*/
/* lmg %r2,%rN,reg_args_off(%r15) */
if (nr_reg_args)
EMIT6_DISP_LH(0xeb000000, 0x0004, REG_2,
REG_2 + (nr_reg_args - 1), REG_15,
tjit->reg_args_off);
/* mvc stack_args_off(N,%r15),orig_stack_args_off(%r15) */
if (nr_stack_args)
_EMIT6(0xd200f000 |
(nr_stack_args * sizeof(u64) - 1) << 16 |
tjit->stack_args_off,
0xf000 | tjit->orig_stack_args_off);
/* mvc STK_OFF_TCCNT(4,%r15),tccnt_off(%r15) */
_EMIT6(0xd203f000 | STK_OFF_TCCNT, 0xf000 | tjit->tccnt_off);
/* lgr %r1,%r8 */
EMIT4(0xb9040000, REG_1, REG_8);
/* %r1() */
call_r1(jit);
/* stg %r2,retval_off(%r15) */
EMIT6_DISP_LH(0xe3000000, 0x0024, REG_2, REG_0, REG_15,
tjit->retval_off);
im->ip_after_call = jit->prg_buf + jit->prg;
/*
* The following nop will be patched by bpf_tramp_image_put().
*/
/* brcl 0,im->ip_epilogue */
EMIT6_PCREL_RILC(0xc0040000, 0, (u64)im->ip_epilogue);
}
/* do_fexit: */
tjit->do_fexit = jit->prg;
for (i = 0; i < fexit->nr_links; i++)
if (invoke_bpf_prog(tjit, m, fexit->links[i], false))
return -EINVAL;
if (flags & BPF_TRAMP_F_CALL_ORIG) {
im->ip_epilogue = jit->prg_buf + jit->prg;
/*
* __bpf_tramp_exit(im);
*/
/* %r1 = __bpf_tramp_exit */
load_imm64(jit, REG_1, (u64)__bpf_tramp_exit);
/* %r2 = im */
load_imm64(jit, REG_2, (u64)im);
/* %r1() */
call_r1(jit);
}
/* lmg %r2,%rN,reg_args_off(%r15) */
if ((flags & BPF_TRAMP_F_RESTORE_REGS) && nr_reg_args)
EMIT6_DISP_LH(0xeb000000, 0x0004, REG_2,
REG_2 + (nr_reg_args - 1), REG_15,
tjit->reg_args_off);
/* lgr %r1,%r8 */
if (!(flags & BPF_TRAMP_F_SKIP_FRAME))
EMIT4(0xb9040000, REG_1, REG_8);
/* lmg %r7,%r8,r7_r8_off(%r15) */
EMIT6_DISP_LH(0xeb000000, 0x0004, REG_7, REG_8, REG_15,
tjit->r7_r8_off);
/* lg %r14,r14_off(%r15) */
EMIT6_DISP_LH(0xe3000000, 0x0004, REG_14, REG_0, REG_15, tjit->r14_off);
/* lg %r2,retval_off(%r15) */
if (flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET))
EMIT6_DISP_LH(0xe3000000, 0x0004, REG_2, REG_0, REG_15,
tjit->retval_off);
/* mvc stack_size+STK_OFF_TCCNT(4,%r15),tccnt_off(%r15) */
_EMIT6(0xd203f000 | (tjit->stack_size + STK_OFF_TCCNT),
0xf000 | tjit->tccnt_off);
/* aghi %r15,stack_size */
EMIT4_IMM(0xa70b0000, REG_15, tjit->stack_size);
/* Emit an expoline for the following indirect jump. */
if (nospec_uses_trampoline())
emit_expoline(jit);
if (flags & BPF_TRAMP_F_SKIP_FRAME)
/* br %r14 */
_EMIT2(0x07fe);
else
/* br %r1 */
_EMIT2(0x07f1);
emit_r1_thunk(jit);
return 0;
}
int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image,
void *image_end, const struct btf_func_model *m,
u32 flags, struct bpf_tramp_links *tlinks,
void *func_addr)
{
struct bpf_tramp_jit tjit;
int ret;
int i;
for (i = 0; i < 2; i++) {
if (i == 0) {
/* Compute offsets, check whether the code fits. */
memset(&tjit, 0, sizeof(tjit));
} else {
/* Generate the code. */
tjit.common.prg = 0;
tjit.common.prg_buf = image;
}
ret = __arch_prepare_bpf_trampoline(im, &tjit, m, flags,
tlinks, func_addr);
if (ret < 0)
return ret;
if (tjit.common.prg > (char *)image_end - (char *)image)
/*
* Use the same error code as for exceeding
* BPF_MAX_TRAMP_LINKS.
*/
return -E2BIG;
}
return ret;
}
bool bpf_jit_supports_subprog_tailcalls(void)
{
return true;
}
| linux-master | arch/s390/net/bpf_jit_comp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* IBM System z PNET ID Support
*
* Copyright IBM Corp. 2018
*/
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/types.h>
#include <asm/ccwgroup.h>
#include <asm/ccwdev.h>
#include <asm/pnet.h>
#include <asm/ebcdic.h>
#define PNETIDS_LEN 64 /* Total utility string length in bytes
* to cover up to 4 PNETIDs of 16 bytes
* for up to 4 device ports
*/
#define MAX_PNETID_LEN 16 /* Max.length of a single port PNETID */
#define MAX_PNETID_PORTS (PNETIDS_LEN / MAX_PNETID_LEN)
/* Max. # of ports with a PNETID */
/*
* Get the PNETIDs from a device.
* s390 hardware supports the definition of a so-called Physical Network
 * Identifier (PNETID for short) per network device port. These PNETIDs can be
* used to identify network devices that are attached to the same physical
* network (broadcast domain).
*
* The device can be
* - a ccwgroup device with all bundled subchannels having the same PNETID
* - a PCI attached network device
*
* Returns:
* 0: PNETIDs extracted from device.
* -ENOMEM: No memory to extract utility string.
* -EOPNOTSUPP: Device type without utility string support
*/
static int pnet_ids_by_device(struct device *dev, u8 *pnetids)
{
memset(pnetids, 0, PNETIDS_LEN);
if (dev_is_ccwgroup(dev)) {
struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
u8 *util_str;
util_str = ccw_device_get_util_str(gdev->cdev[0], 0);
if (!util_str)
return -ENOMEM;
memcpy(pnetids, util_str, PNETIDS_LEN);
EBCASC(pnetids, PNETIDS_LEN);
kfree(util_str);
return 0;
}
if (dev_is_pci(dev)) {
struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
memcpy(pnetids, zdev->util_str, sizeof(zdev->util_str));
EBCASC(pnetids, sizeof(zdev->util_str));
return 0;
}
return -EOPNOTSUPP;
}
/*
* Extract the pnetid for a device port.
*
* Return 0 if a pnetid is found and -ENOENT otherwise.
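 *
 * Hypothetical usage sketch (the caller and its device pointer are
 * assumptions, not part of this file):
 *
 *	u8 pnetid[MAX_PNETID_LEN];
 *
 *	if (!pnet_id_by_dev_port(dev, 0, pnetid))
 *		pr_info("port 0 PNETID: %.16s\n", pnetid);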
*/
int pnet_id_by_dev_port(struct device *dev, unsigned short port, u8 *pnetid)
{
u8 pnetids[MAX_PNETID_PORTS][MAX_PNETID_LEN];
static const u8 zero[MAX_PNETID_LEN] = { 0 };
int rc = 0;
if (!dev || port >= MAX_PNETID_PORTS)
return -ENOENT;
if (!pnet_ids_by_device(dev, (u8 *)pnetids) &&
memcmp(pnetids[port], zero, MAX_PNETID_LEN))
memcpy(pnetid, pnetids[port], MAX_PNETID_LEN);
else
rc = -ENOENT;
return rc;
}
EXPORT_SYMBOL_GPL(pnet_id_by_dev_port);
MODULE_DESCRIPTION("pnetid determination from utility strings");
MODULE_LICENSE("GPL");
| linux-master | arch/s390/net/pnet.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/stdarg.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <asm/stacktrace.h>
#include <asm/boot_data.h>
#include <asm/lowcore.h>
#include <asm/setup.h>
#include <asm/sclp.h>
#include <asm/uv.h>
#include "boot.h"
const char hex_asc[] = "0123456789abcdef";
static char *as_hex(char *dst, unsigned long val, int pad)
{
char *p, *end = p = dst + max(pad, (int)__fls(val | 1) / 4 + 1);
for (*p-- = 0; p >= dst; val >>= 4)
*p-- = hex_asc[val & 0x0f];
return end;
}
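/*
 * Example (illustration only): as_hex(buf, 0x1f, 4) stores "001f" plus a
 * trailing NUL in buf and returns a pointer to that NUL, so calls can be
 * chained; with pad 0 only the minimum number of digits is emitted.
 */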
static char *symstart(char *p)
{
while (*p)
p--;
return p + 1;
}
static noinline char *findsym(unsigned long ip, unsigned short *off, unsigned short *len)
{
	/* symbol entries are of the form "10000 c4 startup\0" */
char *a = _decompressor_syms_start;
char *b = _decompressor_syms_end;
unsigned long start;
unsigned long size;
char *pivot;
char *endp;
while (a < b) {
pivot = symstart(a + (b - a) / 2);
start = simple_strtoull(pivot, &endp, 16);
size = simple_strtoull(endp + 1, &endp, 16);
if (ip < start) {
b = pivot;
continue;
}
if (ip > start + size) {
a = pivot + strlen(pivot) + 1;
continue;
}
*off = ip - start;
*len = size;
return endp + 1;
}
return NULL;
}
static noinline char *strsym(void *ip)
{
static char buf[64];
unsigned short off;
unsigned short len;
char *p;
p = findsym((unsigned long)ip, &off, &len);
if (p) {
strncpy(buf, p, sizeof(buf));
/* reserve 15 bytes for offset/len in symbol+0x1234/0x1234 */
p = buf + strnlen(buf, sizeof(buf) - 15);
strcpy(p, "+0x");
p = as_hex(p + 3, off, 0);
strcpy(p, "/0x");
as_hex(p + 3, len, 0);
} else {
as_hex(buf, (unsigned long)ip, 16);
}
return buf;
}
void decompressor_printk(const char *fmt, ...)
{
char buf[1024] = { 0 };
char *end = buf + sizeof(buf) - 1; /* make sure buf is 0 terminated */
unsigned long pad;
char *p = buf;
va_list args;
va_start(args, fmt);
for (; p < end && *fmt; fmt++) {
if (*fmt != '%') {
*p++ = *fmt;
continue;
}
pad = isdigit(*++fmt) ? simple_strtol(fmt, (char **)&fmt, 10) : 0;
switch (*fmt) {
case 's':
p = buf + strlcat(buf, va_arg(args, char *), sizeof(buf));
break;
case 'p':
if (*++fmt != 'S')
goto out;
p = buf + strlcat(buf, strsym(va_arg(args, void *)), sizeof(buf));
break;
case 'l':
if (*++fmt != 'x' || end - p <= max(sizeof(long) * 2, pad))
goto out;
p = as_hex(p, va_arg(args, unsigned long), pad);
break;
case 'x':
if (end - p <= max(sizeof(int) * 2, pad))
goto out;
p = as_hex(p, va_arg(args, unsigned int), pad);
break;
default:
goto out;
}
}
out:
va_end(args);
sclp_early_printk(buf);
}
void print_stacktrace(unsigned long sp)
{
struct stack_info boot_stack = { STACK_TYPE_TASK, (unsigned long)_stack_start,
(unsigned long)_stack_end };
bool first = true;
decompressor_printk("Call Trace:\n");
while (!(sp & 0x7) && on_stack(&boot_stack, sp, sizeof(struct stack_frame))) {
struct stack_frame *sf = (struct stack_frame *)sp;
decompressor_printk(first ? "(sp:%016lx [<%016lx>] %pS)\n" :
" sp:%016lx [<%016lx>] %pS\n",
sp, sf->gprs[8], (void *)sf->gprs[8]);
if (sf->back_chain <= sp)
break;
sp = sf->back_chain;
first = false;
}
}
void print_pgm_check_info(void)
{
unsigned long *gpregs = (unsigned long *)S390_lowcore.gpregs_save_area;
struct psw_bits *psw = &psw_bits(S390_lowcore.psw_save_area);
decompressor_printk("Linux version %s\n", kernel_version);
if (!is_prot_virt_guest() && early_command_line[0])
decompressor_printk("Kernel command line: %s\n", early_command_line);
decompressor_printk("Kernel fault: interruption code %04x ilc:%x\n",
S390_lowcore.pgm_code, S390_lowcore.pgm_ilc >> 1);
if (kaslr_enabled())
decompressor_printk("Kernel random base: %lx\n", __kaslr_offset);
decompressor_printk("PSW : %016lx %016lx (%pS)\n",
S390_lowcore.psw_save_area.mask,
S390_lowcore.psw_save_area.addr,
(void *)S390_lowcore.psw_save_area.addr);
decompressor_printk(
" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x P:%x AS:%x CC:%x PM:%x RI:%x EA:%x\n",
psw->per, psw->dat, psw->io, psw->ext, psw->key, psw->mcheck,
psw->wait, psw->pstate, psw->as, psw->cc, psw->pm, psw->ri,
psw->eaba);
decompressor_printk("GPRS: %016lx %016lx %016lx %016lx\n",
gpregs[0], gpregs[1], gpregs[2], gpregs[3]);
decompressor_printk(" %016lx %016lx %016lx %016lx\n",
gpregs[4], gpregs[5], gpregs[6], gpregs[7]);
decompressor_printk(" %016lx %016lx %016lx %016lx\n",
gpregs[8], gpregs[9], gpregs[10], gpregs[11]);
decompressor_printk(" %016lx %016lx %016lx %016lx\n",
gpregs[12], gpregs[13], gpregs[14], gpregs[15]);
print_stacktrace(S390_lowcore.gpregs_save_area[15]);
decompressor_printk("Last Breaking-Event-Address:\n");
decompressor_printk(" [<%016lx>] %pS\n", (unsigned long)S390_lowcore.pgm_last_break,
(void *)S390_lowcore.pgm_last_break);
}
| linux-master | arch/s390/boot/pgm_check_info.c |
// SPDX-License-Identifier: GPL-2.0
#include "../kernel/ipl_vmparm.c"
| linux-master | arch/s390/boot/ipl_vmparm.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ctype.h>
#include <linux/pgtable.h>
#include <asm/ebcdic.h>
#include <asm/sclp.h>
#include <asm/sections.h>
#include <asm/boot_data.h>
#include <asm/facility.h>
#include <asm/setup.h>
#include <asm/uv.h>
#include "boot.h"
struct parmarea parmarea __section(".parmarea") = {
.kernel_version = (unsigned long)kernel_version,
.max_command_line_size = COMMAND_LINE_SIZE,
.command_line = "root=/dev/ram0 ro",
};
char __bootdata(early_command_line)[COMMAND_LINE_SIZE];
unsigned int __bootdata_preserved(zlib_dfltcc_support) = ZLIB_DFLTCC_FULL;
struct ipl_parameter_block __bootdata_preserved(ipl_block);
int __bootdata_preserved(ipl_block_valid);
int __bootdata_preserved(__kaslr_enabled);
unsigned long vmalloc_size = VMALLOC_DEFAULT_SIZE;
unsigned long memory_limit;
int vmalloc_size_set;
static inline int __diag308(unsigned long subcode, void *addr)
{
unsigned long reg1, reg2;
union register_pair r1;
psw_t old;
r1.even = (unsigned long) addr;
r1.odd = 0;
asm volatile(
" mvc 0(16,%[psw_old]),0(%[psw_pgm])\n"
" epsw %[reg1],%[reg2]\n"
" st %[reg1],0(%[psw_pgm])\n"
" st %[reg2],4(%[psw_pgm])\n"
" larl %[reg1],1f\n"
" stg %[reg1],8(%[psw_pgm])\n"
" diag %[r1],%[subcode],0x308\n"
"1: mvc 0(16,%[psw_pgm]),0(%[psw_old])\n"
: [r1] "+&d" (r1.pair),
[reg1] "=&d" (reg1),
[reg2] "=&a" (reg2),
"+Q" (S390_lowcore.program_new_psw),
"=Q" (old)
: [subcode] "d" (subcode),
[psw_old] "a" (&old),
[psw_pgm] "a" (&S390_lowcore.program_new_psw)
: "cc", "memory");
return r1.odd;
}
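/*
 * Descriptive note: the inline assembly above makes diag 0x308 safe even
 * if the subcode is unsupported: it saves the program-check new PSW,
 * installs one pointing behind the diag instruction (label 1) and
 * restores the original afterwards, so a specification exception simply
 * falls through to the mvc at label 1.
 */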
void store_ipl_parmblock(void)
{
int rc;
rc = __diag308(DIAG308_STORE, &ipl_block);
if (rc == DIAG308_RC_OK &&
ipl_block.hdr.version <= IPL_MAX_SUPPORTED_VERSION)
ipl_block_valid = 1;
}
bool is_ipl_block_dump(void)
{
if (ipl_block.pb0_hdr.pbt == IPL_PBT_FCP &&
ipl_block.fcp.opt == IPL_PB0_FCP_OPT_DUMP)
return true;
if (ipl_block.pb0_hdr.pbt == IPL_PBT_NVME &&
ipl_block.nvme.opt == IPL_PB0_NVME_OPT_DUMP)
return true;
if (ipl_block.pb0_hdr.pbt == IPL_PBT_ECKD &&
ipl_block.eckd.opt == IPL_PB0_ECKD_OPT_DUMP)
return true;
return false;
}
static size_t scpdata_length(const u8 *buf, size_t count)
{
while (count) {
if (buf[count - 1] != '\0' && buf[count - 1] != ' ')
break;
count--;
}
return count;
}
static size_t ipl_block_get_ascii_scpdata(char *dest, size_t size,
const struct ipl_parameter_block *ipb)
{
const __u8 *scp_data;
__u32 scp_data_len;
int has_lowercase;
size_t count = 0;
size_t i;
switch (ipb->pb0_hdr.pbt) {
case IPL_PBT_FCP:
scp_data_len = ipb->fcp.scp_data_len;
scp_data = ipb->fcp.scp_data;
break;
case IPL_PBT_NVME:
scp_data_len = ipb->nvme.scp_data_len;
scp_data = ipb->nvme.scp_data;
break;
case IPL_PBT_ECKD:
scp_data_len = ipb->eckd.scp_data_len;
scp_data = ipb->eckd.scp_data;
break;
default:
goto out;
}
count = min(size - 1, scpdata_length(scp_data, scp_data_len));
if (!count)
goto out;
has_lowercase = 0;
for (i = 0; i < count; i++) {
if (!isascii(scp_data[i])) {
count = 0;
goto out;
}
if (!has_lowercase && islower(scp_data[i]))
has_lowercase = 1;
}
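	/*
	 * Descriptive note: data without any lowercase character is
	 * assumed to be uppercase-only input and is folded to lowercase
	 * below; mixed-case data is copied verbatim.
	 */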
if (has_lowercase)
memcpy(dest, scp_data, count);
else
for (i = 0; i < count; i++)
dest[i] = tolower(scp_data[i]);
out:
dest[count] = '\0';
return count;
}
static void append_ipl_block_parm(void)
{
char *parm, *delim;
size_t len, rc = 0;
len = strlen(early_command_line);
delim = early_command_line + len; /* '\0' character position */
parm = early_command_line + len + 1; /* append right after '\0' */
switch (ipl_block.pb0_hdr.pbt) {
case IPL_PBT_CCW:
rc = ipl_block_get_ascii_vmparm(
parm, COMMAND_LINE_SIZE - len - 1, &ipl_block);
break;
case IPL_PBT_FCP:
case IPL_PBT_NVME:
case IPL_PBT_ECKD:
rc = ipl_block_get_ascii_scpdata(
parm, COMMAND_LINE_SIZE - len - 1, &ipl_block);
break;
}
if (rc) {
if (*parm == '=')
memmove(early_command_line, parm + 1, rc);
else
*delim = ' '; /* replace '\0' with space */
}
}
static inline int has_ebcdic_char(const char *str)
{
int i;
for (i = 0; str[i]; i++)
if (str[i] & 0x80)
return 1;
return 0;
}
void setup_boot_command_line(void)
{
parmarea.command_line[COMMAND_LINE_SIZE - 1] = 0;
/* convert arch command line to ascii if necessary */
if (has_ebcdic_char(parmarea.command_line))
EBCASC(parmarea.command_line, COMMAND_LINE_SIZE);
/* copy arch command line */
strcpy(early_command_line, strim(parmarea.command_line));
/* append IPL PARM data to the boot command line */
if (!is_prot_virt_guest() && ipl_block_valid)
append_ipl_block_parm();
}
static void modify_facility(unsigned long nr, bool clear)
{
if (clear)
__clear_facility(nr, stfle_fac_list);
else
__set_facility(nr, stfle_fac_list);
}
static void check_cleared_facilities(void)
{
unsigned long als[] = { FACILITIES_ALS };
int i;
for (i = 0; i < ARRAY_SIZE(als); i++) {
if ((stfle_fac_list[i] & als[i]) != als[i]) {
sclp_early_printk("Warning: The Linux kernel requires facilities cleared via command line option\n");
print_missing_facilities();
break;
}
}
}
static void modify_fac_list(char *str)
{
unsigned long val, endval;
char *endp;
bool clear;
while (*str) {
clear = false;
if (*str == '!') {
clear = true;
str++;
}
val = simple_strtoull(str, &endp, 0);
if (str == endp)
break;
str = endp;
if (*str == '-') {
str++;
endval = simple_strtoull(str, &endp, 0);
if (str == endp)
break;
str = endp;
while (val <= endval) {
modify_facility(val, clear);
val++;
}
} else {
modify_facility(val, clear);
}
if (*str != ',')
break;
str++;
}
check_cleared_facilities();
}
static char command_line_buf[COMMAND_LINE_SIZE];
void parse_boot_command_line(void)
{
char *param, *val;
bool enabled;
char *args;
int rc;
__kaslr_enabled = IS_ENABLED(CONFIG_RANDOMIZE_BASE);
args = strcpy(command_line_buf, early_command_line);
while (*args) {
args = next_arg(args, ¶m, &val);
if (!strcmp(param, "mem") && val)
memory_limit = round_down(memparse(val, NULL), PAGE_SIZE);
if (!strcmp(param, "vmalloc") && val) {
vmalloc_size = round_up(memparse(val, NULL), PAGE_SIZE);
vmalloc_size_set = 1;
}
if (!strcmp(param, "dfltcc") && val) {
if (!strcmp(val, "off"))
zlib_dfltcc_support = ZLIB_DFLTCC_DISABLED;
else if (!strcmp(val, "on"))
zlib_dfltcc_support = ZLIB_DFLTCC_FULL;
else if (!strcmp(val, "def_only"))
zlib_dfltcc_support = ZLIB_DFLTCC_DEFLATE_ONLY;
else if (!strcmp(val, "inf_only"))
zlib_dfltcc_support = ZLIB_DFLTCC_INFLATE_ONLY;
else if (!strcmp(val, "always"))
zlib_dfltcc_support = ZLIB_DFLTCC_FULL_DEBUG;
}
if (!strcmp(param, "facilities") && val)
modify_fac_list(val);
if (!strcmp(param, "nokaslr"))
__kaslr_enabled = 0;
#if IS_ENABLED(CONFIG_KVM)
if (!strcmp(param, "prot_virt")) {
rc = kstrtobool(val, &enabled);
if (!rc && enabled)
prot_virt_host = 1;
}
#endif
}
}
| linux-master | arch/s390/boot/ipl_parm.c |
// SPDX-License-Identifier: GPL-2.0
#include "../../../lib/ctype.c"
| linux-master | arch/s390/boot/ctype.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/compat.h>
#include <linux/ptrace.h>
#include <asm/cio.h>
#include <asm/asm-offsets.h>
#include "boot.h"
#define CCW0(cmd, addr, cnt, flg) \
{ .cmd_code = cmd, .cda = addr, .count = cnt, .flags = flg, }
#define PSW_MASK_DISABLED (PSW_MASK_WAIT | PSW_MASK_EA | PSW_MASK_BA)
struct ipl_lowcore {
psw_t32 ipl_psw; /* 0x0000 */
struct ccw0 ccwpgm[2]; /* 0x0008 */
u8 fill[56]; /* 0x0018 */
struct ccw0 ccwpgmcc[20]; /* 0x0050 */
u8 pad_0xf0[0x01a0-0x00f0]; /* 0x00f0 */
psw_t restart_psw; /* 0x01a0 */
psw_t external_new_psw; /* 0x01b0 */
psw_t svc_new_psw; /* 0x01c0 */
psw_t program_new_psw; /* 0x01d0 */
psw_t mcck_new_psw; /* 0x01e0 */
psw_t io_new_psw; /* 0x01f0 */
};
/*
* Initial lowcore for IPL: the first 24 bytes are loaded by IPL to
* addresses 0-23 (a PSW and two CCWs). Bytes 24-79 are discarded.
* The next 160 bytes are loaded to addresses 0x18-0xb7. They form
* the continuation of the CCW program started by IPL and load the
* range 0x0f0-0x730 from the image to the range 0x0f0-0x730 in
* memory. At the end of the channel program the PSW at location 0 is
* loaded.
* Initial processing starts at 0x200 = iplstart.
*
 * The restart psw points to iplstart, which makes it possible to load a
 * kernel image into memory and start it by a psw restart on any cpu. All
 * other default new psw locations contain a disabled wait psw whose
 * address indicates which psw was loaded.
*
* Note that the 'file' utility can detect s390 kernel images. For
* that to succeed the two initial CCWs, and the 0x40 fill bytes must
* be present.
*/
static struct ipl_lowcore ipl_lowcore __used __section(".ipldata") = {
.ipl_psw = { .mask = PSW32_MASK_BASE, .addr = PSW32_ADDR_AMODE | IPL_START },
.ccwpgm = {
[ 0] = CCW0(CCW_CMD_READ_IPL, 0x018, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC),
[ 1] = CCW0(CCW_CMD_READ_IPL, 0x068, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC),
},
.fill = {
[ 0 ... 55] = 0x40,
},
.ccwpgmcc = {
[ 0] = CCW0(CCW_CMD_READ_IPL, 0x0f0, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC),
[ 1] = CCW0(CCW_CMD_READ_IPL, 0x140, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC),
[ 2] = CCW0(CCW_CMD_READ_IPL, 0x190, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC),
[ 3] = CCW0(CCW_CMD_READ_IPL, 0x1e0, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC),
[ 4] = CCW0(CCW_CMD_READ_IPL, 0x230, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC),
[ 5] = CCW0(CCW_CMD_READ_IPL, 0x280, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC),
[ 6] = CCW0(CCW_CMD_READ_IPL, 0x2d0, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC),
[ 7] = CCW0(CCW_CMD_READ_IPL, 0x320, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC),
[ 8] = CCW0(CCW_CMD_READ_IPL, 0x370, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC),
[ 9] = CCW0(CCW_CMD_READ_IPL, 0x3c0, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC),
[10] = CCW0(CCW_CMD_READ_IPL, 0x410, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC),
[11] = CCW0(CCW_CMD_READ_IPL, 0x460, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC),
[12] = CCW0(CCW_CMD_READ_IPL, 0x4b0, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC),
[13] = CCW0(CCW_CMD_READ_IPL, 0x500, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC),
[14] = CCW0(CCW_CMD_READ_IPL, 0x550, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC),
[15] = CCW0(CCW_CMD_READ_IPL, 0x5a0, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC),
[16] = CCW0(CCW_CMD_READ_IPL, 0x5f0, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC),
[17] = CCW0(CCW_CMD_READ_IPL, 0x640, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC),
[18] = CCW0(CCW_CMD_READ_IPL, 0x690, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC),
[19] = CCW0(CCW_CMD_READ_IPL, 0x6e0, 0x50, CCW_FLAG_SLI),
},
.restart_psw = { .mask = 0, .addr = IPL_START, },
.external_new_psw = { .mask = PSW_MASK_DISABLED, .addr = __LC_EXT_NEW_PSW, },
.svc_new_psw = { .mask = PSW_MASK_DISABLED, .addr = __LC_SVC_NEW_PSW, },
.program_new_psw = { .mask = PSW_MASK_DISABLED, .addr = __LC_PGM_NEW_PSW, },
.mcck_new_psw = { .mask = PSW_MASK_DISABLED, .addr = __LC_MCK_NEW_PSW, },
.io_new_psw = { .mask = PSW_MASK_DISABLED, .addr = __LC_IO_NEW_PSW, },
};
| linux-master | arch/s390/boot/ipl_data.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#undef CONFIG_KASAN
#undef CONFIG_KASAN_GENERIC
#include "../lib/string.c"
int strncmp(const char *cs, const char *ct, size_t count)
{
unsigned char c1, c2;
while (count) {
c1 = *cs++;
c2 = *ct++;
if (c1 != c2)
return c1 < c2 ? -1 : 1;
if (!c1)
break;
count--;
}
return 0;
}
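/*
 * Descriptive note: unlike the C library strncmp(), which may return any
 * negative or positive difference, this variant returns exactly -1, 0 or
 * 1; both behaviors satisfy the strncmp() contract.
 */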
char *skip_spaces(const char *str)
{
while (isspace(*str))
++str;
return (char *)str;
}
char *strim(char *s)
{
size_t size;
char *end;
size = strlen(s);
if (!size)
return s;
end = s + size - 1;
while (end >= s && isspace(*end))
end--;
*(end + 1) = '\0';
return skip_spaces(s);
}
/* Works only for digits and letters, but small and fast */
#define TOLOWER(x) ((x) | 0x20)
static unsigned int simple_guess_base(const char *cp)
{
if (cp[0] == '0') {
if (TOLOWER(cp[1]) == 'x' && isxdigit(cp[2]))
return 16;
else
return 8;
} else {
return 10;
}
}
/**
* simple_strtoull - convert a string to an unsigned long long
* @cp: The start of the string
* @endp: A pointer to the end of the parsed string will be placed here
* @base: The number base to use
*/
unsigned long long simple_strtoull(const char *cp, char **endp,
unsigned int base)
{
unsigned long long result = 0;
if (!base)
base = simple_guess_base(cp);
if (base == 16 && cp[0] == '0' && TOLOWER(cp[1]) == 'x')
cp += 2;
while (isxdigit(*cp)) {
unsigned int value;
value = isdigit(*cp) ? *cp - '0' : TOLOWER(*cp) - 'a' + 10;
if (value >= base)
break;
result = result * base + value;
cp++;
}
if (endp)
*endp = (char *)cp;
return result;
}
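/*
 * Example (illustration only): simple_strtoull("0x1f mem", &endp, 0)
 * returns 0x1f with *endp left pointing at the space - base 0
 * auto-detects 16 from the "0x" prefix, 8 from a leading '0', else 10.
 */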
long simple_strtol(const char *cp, char **endp, unsigned int base)
{
if (*cp == '-')
return -simple_strtoull(cp + 1, endp, base);
return simple_strtoull(cp, endp, base);
}
int kstrtobool(const char *s, bool *res)
{
if (!s)
return -EINVAL;
switch (s[0]) {
case 'y':
case 'Y':
case '1':
*res = true;
return 0;
case 'n':
case 'N':
case '0':
*res = false;
return 0;
case 'o':
case 'O':
switch (s[1]) {
case 'n':
case 'N':
*res = true;
return 0;
case 'f':
case 'F':
*res = false;
return 0;
default:
break;
}
default:
break;
}
return -EINVAL;
}
| linux-master | arch/s390/boot/string.c |
// SPDX-License-Identifier: GPL-2.0
#include "../kernel/ebcdic.c"
| linux-master | arch/s390/boot/ebcdic.c |
// SPDX-License-Identifier: GPL-2.0
#include "../kernel/machine_kexec_reloc.c"
| linux-master | arch/s390/boot/machine_kexec_reloc.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/sched/task.h>
#include <linux/pgtable.h>
#include <linux/kasan.h>
#include <asm/pgalloc.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/physmem_info.h>
#include <asm/maccess.h>
#include <asm/abs_lowcore.h>
#include "decompressor.h"
#include "boot.h"
unsigned long __bootdata_preserved(s390_invalid_asce);
#ifdef CONFIG_PROC_FS
atomic_long_t __bootdata_preserved(direct_pages_count[PG_DIRECT_MAP_MAX]);
#endif
#define init_mm (*(struct mm_struct *)vmlinux.init_mm_off)
#define swapper_pg_dir vmlinux.swapper_pg_dir_off
#define invalid_pg_dir vmlinux.invalid_pg_dir_off
enum populate_mode {
POPULATE_NONE,
POPULATE_DIRECT,
POPULATE_ABS_LOWCORE,
#ifdef CONFIG_KASAN
POPULATE_KASAN_MAP_SHADOW,
POPULATE_KASAN_ZERO_SHADOW,
POPULATE_KASAN_SHALLOW
#endif
};
static void pgtable_populate(unsigned long addr, unsigned long end, enum populate_mode mode);
#ifdef CONFIG_KASAN
#define kasan_early_shadow_page vmlinux.kasan_early_shadow_page_off
#define kasan_early_shadow_pte ((pte_t *)vmlinux.kasan_early_shadow_pte_off)
#define kasan_early_shadow_pmd ((pmd_t *)vmlinux.kasan_early_shadow_pmd_off)
#define kasan_early_shadow_pud ((pud_t *)vmlinux.kasan_early_shadow_pud_off)
#define kasan_early_shadow_p4d ((p4d_t *)vmlinux.kasan_early_shadow_p4d_off)
#define __sha(x) ((unsigned long)kasan_mem_to_shadow((void *)x))
static pte_t pte_z;
static inline void kasan_populate(unsigned long start, unsigned long end, enum populate_mode mode)
{
start = PAGE_ALIGN_DOWN(__sha(start));
end = PAGE_ALIGN(__sha(end));
pgtable_populate(start, end, mode);
}
static void kasan_populate_shadow(void)
{
pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
pud_t pud_z = __pud(__pa(kasan_early_shadow_pmd) | _REGION3_ENTRY);
p4d_t p4d_z = __p4d(__pa(kasan_early_shadow_pud) | _REGION2_ENTRY);
unsigned long untracked_end;
unsigned long start, end;
int i;
pte_z = __pte(__pa(kasan_early_shadow_page) | pgprot_val(PAGE_KERNEL_RO));
if (!machine.has_nx)
pte_z = clear_pte_bit(pte_z, __pgprot(_PAGE_NOEXEC));
crst_table_init((unsigned long *)kasan_early_shadow_p4d, p4d_val(p4d_z));
crst_table_init((unsigned long *)kasan_early_shadow_pud, pud_val(pud_z));
crst_table_init((unsigned long *)kasan_early_shadow_pmd, pmd_val(pmd_z));
memset64((u64 *)kasan_early_shadow_pte, pte_val(pte_z), PTRS_PER_PTE);
/*
* Current memory layout:
* +- 0 -------------+ +- shadow start -+
* |1:1 ident mapping| /|1/8 of ident map|
* | | / | |
* +-end of ident map+ / +----------------+
* | ... gap ... | / | kasan |
* | | / | zero page |
* +- vmalloc area -+ / | mapping |
* | vmalloc_size | / | (untracked) |
* +- modules vaddr -+ / +----------------+
* | 2Gb |/ | unmapped | allocated per module
* +- shadow start -+ +----------------+
* | 1/8 addr space | | zero pg mapping| (untracked)
* +- shadow end ----+---------+- shadow end ---+
*
* Current memory layout (KASAN_VMALLOC):
* +- 0 -------------+ +- shadow start -+
* |1:1 ident mapping| /|1/8 of ident map|
* | | / | |
* +-end of ident map+ / +----------------+
* | ... gap ... | / | kasan zero page| (untracked)
* | | / | mapping |
* +- vmalloc area -+ / +----------------+
* | vmalloc_size | / |shallow populate|
* +- modules vaddr -+ / +----------------+
* | 2Gb |/ |shallow populate|
* +- shadow start -+ +----------------+
* | 1/8 addr space | | zero pg mapping| (untracked)
* +- shadow end ----+---------+- shadow end ---+
*/
for_each_physmem_usable_range(i, &start, &end)
kasan_populate(start, end, POPULATE_KASAN_MAP_SHADOW);
if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
untracked_end = VMALLOC_START;
/* shallowly populate kasan shadow for vmalloc and modules */
kasan_populate(VMALLOC_START, MODULES_END, POPULATE_KASAN_SHALLOW);
} else {
untracked_end = MODULES_VADDR;
}
/* populate kasan shadow for untracked memory */
kasan_populate(ident_map_size, untracked_end, POPULATE_KASAN_ZERO_SHADOW);
kasan_populate(MODULES_END, _REGION1_SIZE, POPULATE_KASAN_ZERO_SHADOW);
}
static bool kasan_pgd_populate_zero_shadow(pgd_t *pgd, unsigned long addr,
unsigned long end, enum populate_mode mode)
{
if (mode == POPULATE_KASAN_ZERO_SHADOW &&
IS_ALIGNED(addr, PGDIR_SIZE) && end - addr >= PGDIR_SIZE) {
pgd_populate(&init_mm, pgd, kasan_early_shadow_p4d);
return true;
}
return false;
}
static bool kasan_p4d_populate_zero_shadow(p4d_t *p4d, unsigned long addr,
unsigned long end, enum populate_mode mode)
{
if (mode == POPULATE_KASAN_ZERO_SHADOW &&
IS_ALIGNED(addr, P4D_SIZE) && end - addr >= P4D_SIZE) {
p4d_populate(&init_mm, p4d, kasan_early_shadow_pud);
return true;
}
return false;
}
static bool kasan_pud_populate_zero_shadow(pud_t *pud, unsigned long addr,
unsigned long end, enum populate_mode mode)
{
if (mode == POPULATE_KASAN_ZERO_SHADOW &&
IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) {
pud_populate(&init_mm, pud, kasan_early_shadow_pmd);
return true;
}
return false;
}
static bool kasan_pmd_populate_zero_shadow(pmd_t *pmd, unsigned long addr,
unsigned long end, enum populate_mode mode)
{
if (mode == POPULATE_KASAN_ZERO_SHADOW &&
IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) {
pmd_populate(&init_mm, pmd, kasan_early_shadow_pte);
return true;
}
return false;
}
static bool kasan_pte_populate_zero_shadow(pte_t *pte, enum populate_mode mode)
{
pte_t entry;
if (mode == POPULATE_KASAN_ZERO_SHADOW) {
set_pte(pte, pte_z);
return true;
}
return false;
}
#else
static inline void kasan_populate_shadow(void) {}
static inline bool kasan_pgd_populate_zero_shadow(pgd_t *pgd, unsigned long addr,
unsigned long end, enum populate_mode mode)
{
return false;
}
static inline bool kasan_p4d_populate_zero_shadow(p4d_t *p4d, unsigned long addr,
unsigned long end, enum populate_mode mode)
{
return false;
}
static inline bool kasan_pud_populate_zero_shadow(pud_t *pud, unsigned long addr,
unsigned long end, enum populate_mode mode)
{
return false;
}
static inline bool kasan_pmd_populate_zero_shadow(pmd_t *pmd, unsigned long addr,
unsigned long end, enum populate_mode mode)
{
return false;
}
static bool kasan_pte_populate_zero_shadow(pte_t *pte, enum populate_mode mode)
{
return false;
}
#endif
/*
* Mimic virt_to_kpte() in lack of init_mm symbol. Skip pmd NULL check though.
*/
static inline pte_t *__virt_to_kpte(unsigned long va)
{
return pte_offset_kernel(pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va), va), va), va);
}
static void *boot_crst_alloc(unsigned long val)
{
unsigned long size = PAGE_SIZE << CRST_ALLOC_ORDER;
unsigned long *table;
table = (unsigned long *)physmem_alloc_top_down(RR_VMEM, size, size);
crst_table_init(table, val);
return table;
}
static pte_t *boot_pte_alloc(void)
{
static void *pte_leftover;
pte_t *pte;
/*
* handling pte_leftovers this way helps to avoid memory fragmentation
* during POPULATE_KASAN_MAP_SHADOW when EDAT is off
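	 *
	 * Descriptive detail: a pte table (_PAGE_TABLE_SIZE, 2KB on s390)
	 * is half of a 4KB page, so every allocated page is handed out as
	 * two page tables: the upper half now, the lower half on the next
	 * call.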
*/
if (!pte_leftover) {
pte_leftover = (void *)physmem_alloc_top_down(RR_VMEM, PAGE_SIZE, PAGE_SIZE);
pte = pte_leftover + _PAGE_TABLE_SIZE;
} else {
pte = pte_leftover;
pte_leftover = NULL;
}
memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
return pte;
}
static unsigned long _pa(unsigned long addr, unsigned long size, enum populate_mode mode)
{
switch (mode) {
case POPULATE_NONE:
return -1;
case POPULATE_DIRECT:
return addr;
case POPULATE_ABS_LOWCORE:
return __abs_lowcore_pa(addr);
#ifdef CONFIG_KASAN
case POPULATE_KASAN_MAP_SHADOW:
addr = physmem_alloc_top_down(RR_VMEM, size, size);
memset((void *)addr, 0, size);
return addr;
#endif
default:
return -1;
}
}
static bool can_large_pud(pud_t *pu_dir, unsigned long addr, unsigned long end)
{
return machine.has_edat2 &&
IS_ALIGNED(addr, PUD_SIZE) && (end - addr) >= PUD_SIZE;
}
static bool can_large_pmd(pmd_t *pm_dir, unsigned long addr, unsigned long end)
{
return machine.has_edat1 &&
IS_ALIGNED(addr, PMD_SIZE) && (end - addr) >= PMD_SIZE;
}
static void pgtable_pte_populate(pmd_t *pmd, unsigned long addr, unsigned long end,
enum populate_mode mode)
{
unsigned long pages = 0;
pte_t *pte, entry;
pte = pte_offset_kernel(pmd, addr);
for (; addr < end; addr += PAGE_SIZE, pte++) {
if (pte_none(*pte)) {
if (kasan_pte_populate_zero_shadow(pte, mode))
continue;
entry = __pte(_pa(addr, PAGE_SIZE, mode));
entry = set_pte_bit(entry, PAGE_KERNEL);
if (!machine.has_nx)
entry = clear_pte_bit(entry, __pgprot(_PAGE_NOEXEC));
set_pte(pte, entry);
pages++;
}
}
if (mode == POPULATE_DIRECT)
update_page_count(PG_DIRECT_MAP_4K, pages);
}
static void pgtable_pmd_populate(pud_t *pud, unsigned long addr, unsigned long end,
enum populate_mode mode)
{
unsigned long next, pages = 0;
pmd_t *pmd, entry;
pte_t *pte;
pmd = pmd_offset(pud, addr);
for (; addr < end; addr = next, pmd++) {
next = pmd_addr_end(addr, end);
if (pmd_none(*pmd)) {
if (kasan_pmd_populate_zero_shadow(pmd, addr, next, mode))
continue;
if (can_large_pmd(pmd, addr, next)) {
entry = __pmd(_pa(addr, _SEGMENT_SIZE, mode));
entry = set_pmd_bit(entry, SEGMENT_KERNEL);
if (!machine.has_nx)
entry = clear_pmd_bit(entry, __pgprot(_SEGMENT_ENTRY_NOEXEC));
set_pmd(pmd, entry);
pages++;
continue;
}
pte = boot_pte_alloc();
pmd_populate(&init_mm, pmd, pte);
} else if (pmd_large(*pmd)) {
continue;
}
pgtable_pte_populate(pmd, addr, next, mode);
}
if (mode == POPULATE_DIRECT)
update_page_count(PG_DIRECT_MAP_1M, pages);
}
static void pgtable_pud_populate(p4d_t *p4d, unsigned long addr, unsigned long end,
enum populate_mode mode)
{
unsigned long next, pages = 0;
pud_t *pud, entry;
pmd_t *pmd;
pud = pud_offset(p4d, addr);
for (; addr < end; addr = next, pud++) {
next = pud_addr_end(addr, end);
if (pud_none(*pud)) {
if (kasan_pud_populate_zero_shadow(pud, addr, next, mode))
continue;
if (can_large_pud(pud, addr, next)) {
entry = __pud(_pa(addr, _REGION3_SIZE, mode));
entry = set_pud_bit(entry, REGION3_KERNEL);
if (!machine.has_nx)
entry = clear_pud_bit(entry, __pgprot(_REGION_ENTRY_NOEXEC));
set_pud(pud, entry);
pages++;
continue;
}
pmd = boot_crst_alloc(_SEGMENT_ENTRY_EMPTY);
pud_populate(&init_mm, pud, pmd);
} else if (pud_large(*pud)) {
continue;
}
pgtable_pmd_populate(pud, addr, next, mode);
}
if (mode == POPULATE_DIRECT)
update_page_count(PG_DIRECT_MAP_2G, pages);
}
static void pgtable_p4d_populate(pgd_t *pgd, unsigned long addr, unsigned long end,
enum populate_mode mode)
{
unsigned long next;
p4d_t *p4d;
pud_t *pud;
p4d = p4d_offset(pgd, addr);
for (; addr < end; addr = next, p4d++) {
next = p4d_addr_end(addr, end);
if (p4d_none(*p4d)) {
if (kasan_p4d_populate_zero_shadow(p4d, addr, next, mode))
continue;
pud = boot_crst_alloc(_REGION3_ENTRY_EMPTY);
p4d_populate(&init_mm, p4d, pud);
}
pgtable_pud_populate(p4d, addr, next, mode);
}
}
static void pgtable_populate(unsigned long addr, unsigned long end, enum populate_mode mode)
{
unsigned long next;
pgd_t *pgd;
p4d_t *p4d;
pgd = pgd_offset(&init_mm, addr);
for (; addr < end; addr = next, pgd++) {
next = pgd_addr_end(addr, end);
if (pgd_none(*pgd)) {
if (kasan_pgd_populate_zero_shadow(pgd, addr, next, mode))
continue;
p4d = boot_crst_alloc(_REGION2_ENTRY_EMPTY);
pgd_populate(&init_mm, pgd, p4d);
}
#ifdef CONFIG_KASAN
if (mode == POPULATE_KASAN_SHALLOW)
continue;
#endif
pgtable_p4d_populate(pgd, addr, next, mode);
}
}
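/*
 * Taken together, the populate helpers above implement a standard
 * five-level page table walk: pgtable_populate() iterates pgd entries and
 * descends through p4d, pud and pmd to pte level, allocating intermediate
 * tables on demand and installing large pud/pmd mappings when EDAT2/EDAT1
 * are available and the remaining range is suitably sized and aligned.
 */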
void setup_vmem(unsigned long asce_limit)
{
unsigned long start, end;
unsigned long asce_type;
unsigned long asce_bits;
int i;
if (asce_limit == _REGION1_SIZE) {
asce_type = _REGION2_ENTRY_EMPTY;
asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
} else {
asce_type = _REGION3_ENTRY_EMPTY;
asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
}
s390_invalid_asce = invalid_pg_dir | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
crst_table_init((unsigned long *)swapper_pg_dir, asce_type);
crst_table_init((unsigned long *)invalid_pg_dir, _REGION3_ENTRY_EMPTY);
/*
 * To allow prefixing, the lowcore must be mapped with 4KB pages.
 * To prevent creation of a large page at address 0, first map
* the lowcore and create the identity mapping only afterwards.
*/
pgtable_populate(0, sizeof(struct lowcore), POPULATE_DIRECT);
for_each_physmem_usable_range(i, &start, &end)
pgtable_populate(start, end, POPULATE_DIRECT);
pgtable_populate(__abs_lowcore, __abs_lowcore + sizeof(struct lowcore),
POPULATE_ABS_LOWCORE);
pgtable_populate(__memcpy_real_area, __memcpy_real_area + PAGE_SIZE,
POPULATE_NONE);
memcpy_real_ptep = __virt_to_kpte(__memcpy_real_area);
kasan_populate_shadow();
S390_lowcore.kernel_asce = swapper_pg_dir | asce_bits;
S390_lowcore.user_asce = s390_invalid_asce;
__ctl_load(S390_lowcore.kernel_asce, 1, 1);
__ctl_load(S390_lowcore.user_asce, 7, 7);
__ctl_load(S390_lowcore.kernel_asce, 13, 13);
init_mm.context.asce = S390_lowcore.kernel_asce;
}
| linux-master | arch/s390/boot/vmem.c |
// SPDX-License-Identifier: GPL-2.0
#include "boot.h"
#include "../../../drivers/s390/char/sclp_early_core.c"
/* SCLP early buffer must stay page-aligned and below 2GB */
static char __sclp_early_sccb[EXT_SCCB_READ_SCP] __aligned(PAGE_SIZE);
void sclp_early_setup_buffer(void)
{
sclp_early_set_buffer(&__sclp_early_sccb);
}
| linux-master | arch/s390/boot/sclp_early_core.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2016
*/
#include <linux/kernel.h>
#include <asm/processor.h>
#include <asm/facility.h>
#include <asm/lowcore.h>
#include <asm/sclp.h>
#include "boot.h"
/*
* The code within this file will be called very early. It may _not_
* access anything within the bss section, since that is not cleared
* yet and may contain data (e.g. initrd) that must be saved by other
* code.
* For temporary objects the stack (16k) should be used.
*/
static unsigned long als[] = { FACILITIES_ALS };
static void u16_to_hex(char *str, u16 val)
{
int i, num;
for (i = 1; i <= 4; i++) {
num = (val >> (16 - 4 * i)) & 0xf;
if (num >= 10)
num += 7;
*str++ = '0' + num;
}
*str = '\0';
}
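/*
 * Worked example for u16_to_hex() above, assuming val = 0x2A7F: the loop
 * extracts one nibble per iteration, most significant first. For the
 * second nibble num is 10; adding 7 skips the seven ASCII characters
 * between '9' and 'A', so '0' + 17 yields 'A'. The result is "2A7F".
 */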
static void print_machine_type(void)
{
static char mach_str[80] = "Detected machine-type number: ";
char type_str[5];
struct cpuid id;
get_cpu_id(&id);
u16_to_hex(type_str, id.machine);
strcat(mach_str, type_str);
strcat(mach_str, "\n");
sclp_early_printk(mach_str);
}
static void u16_to_decimal(char *str, u16 val)
{
int div = 1;
while (div * 10 <= val)
div *= 10;
while (div) {
*str++ = '0' + val / div;
val %= div;
div /= 10;
}
*str = '\0';
}
void print_missing_facilities(void)
{
static char als_str[80] = "Missing facilities: ";
unsigned long val;
char val_str[6];
int i, j, first;
first = 1;
for (i = 0; i < ARRAY_SIZE(als); i++) {
val = ~stfle_fac_list[i] & als[i];
for (j = 0; j < BITS_PER_LONG; j++) {
if (!(val & (1UL << (BITS_PER_LONG - 1 - j))))
continue;
if (!first)
strcat(als_str, ",");
/*
* Make sure we stay within one line. Consider that
* each facility bit adds up to five characters and
 * z/VM adds a four-character prefix.
*/
if (strlen(als_str) > 70) {
strcat(als_str, "\n");
sclp_early_printk(als_str);
*als_str = '\0';
}
u16_to_decimal(val_str, i * BITS_PER_LONG + j);
strcat(als_str, val_str);
first = 0;
}
}
strcat(als_str, "\n");
sclp_early_printk(als_str);
}
static void facility_mismatch(void)
{
sclp_early_printk("The Linux kernel requires more recent processor hardware\n");
print_machine_type();
print_missing_facilities();
sclp_early_printk("See Principles of Operations for facility bits\n");
disabled_wait();
}
void verify_facilities(void)
{
int i;
__stfle(stfle_fac_list, ARRAY_SIZE(stfle_fac_list));
for (i = 0; i < ARRAY_SIZE(als); i++) {
if ((stfle_fac_list[i] & als[i]) != als[i])
facility_mismatch();
}
}
| linux-master | arch/s390/boot/als.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Definitions and wrapper functions for kernel decompressor
*
* Copyright IBM Corp. 2010
*
* Author(s): Martin Schwidefsky <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/string.h>
#include <asm/page.h>
#include "decompressor.h"
#include "boot.h"
/*
* gzip declarations
*/
#define STATIC static
#undef memset
#undef memcpy
#undef memmove
#define memmove memmove
#define memzero(s, n) memset((s), 0, (n))
#if defined(CONFIG_KERNEL_BZIP2)
#define BOOT_HEAP_SIZE 0x400000
#elif defined(CONFIG_KERNEL_ZSTD)
#define BOOT_HEAP_SIZE 0x30000
#else
#define BOOT_HEAP_SIZE 0x10000
#endif
static unsigned long free_mem_ptr = (unsigned long) _end;
static unsigned long free_mem_end_ptr = (unsigned long) _end + BOOT_HEAP_SIZE;
#ifdef CONFIG_KERNEL_GZIP
#include "../../../../lib/decompress_inflate.c"
#endif
#ifdef CONFIG_KERNEL_BZIP2
#include "../../../../lib/decompress_bunzip2.c"
#endif
#ifdef CONFIG_KERNEL_LZ4
#include "../../../../lib/decompress_unlz4.c"
#endif
#ifdef CONFIG_KERNEL_LZMA
#include "../../../../lib/decompress_unlzma.c"
#endif
#ifdef CONFIG_KERNEL_LZO
#include "../../../../lib/decompress_unlzo.c"
#endif
#ifdef CONFIG_KERNEL_XZ
#include "../../../../lib/decompress_unxz.c"
#endif
#ifdef CONFIG_KERNEL_ZSTD
#include "../../../../lib/decompress_unzstd.c"
#endif
#define decompress_offset ALIGN((unsigned long)_end + BOOT_HEAP_SIZE, PAGE_SIZE)
unsigned long mem_safe_offset(void)
{
/*
	 * due to the 4MB BOOT_HEAP_SIZE for bzip2,
	 * 'decompress_offset + vmlinux.image_size' could be larger than the
	 * kernel at its final position plus its .bss, so take the larger of the two
*/
return max(decompress_offset + vmlinux.image_size,
vmlinux.default_lma + vmlinux.image_size + vmlinux.bss_size);
}
void *decompress_kernel(void)
{
void *output = (void *)decompress_offset;
__decompress(_compressed_start, _compressed_end - _compressed_start,
NULL, NULL, output, vmlinux.image_size, NULL, error);
return output;
}
| linux-master | arch/s390/boot/decompressor.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/ctype.h>
#include <asm/ebcdic.h>
#include <asm/sclp.h>
#include <asm/sections.h>
#include <asm/boot_data.h>
#include <asm/physmem_info.h>
#include <uapi/asm/ipl.h>
#include "boot.h"
int __bootdata_preserved(ipl_secure_flag);
unsigned long __bootdata_preserved(ipl_cert_list_addr);
unsigned long __bootdata_preserved(ipl_cert_list_size);
unsigned long __bootdata(early_ipl_comp_list_addr);
unsigned long __bootdata(early_ipl_comp_list_size);
static struct ipl_rb_certificates *certs;
static struct ipl_rb_components *comps;
static bool ipl_report_needs_saving;
#define for_each_rb_entry(entry, rb) \
for (entry = rb->entries; \
(void *) entry + sizeof(*entry) <= (void *) rb + rb->len; \
entry++)
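/*
 * Note on for_each_rb_entry() above: the loop condition only advances while
 * one more fixed-size entry still fits completely within the rb->len bytes
 * of the surrounding report block, so a truncated trailing entry is never
 * dereferenced.
 */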
static unsigned long get_cert_comp_list_size(void)
{
struct ipl_rb_certificate_entry *cert;
struct ipl_rb_component_entry *comp;
size_t size;
/*
* Find the length for the IPL report boot data
*/
early_ipl_comp_list_size = 0;
for_each_rb_entry(comp, comps)
early_ipl_comp_list_size += sizeof(*comp);
ipl_cert_list_size = 0;
for_each_rb_entry(cert, certs)
ipl_cert_list_size += sizeof(unsigned int) + cert->len;
return ipl_cert_list_size + early_ipl_comp_list_size;
}
bool ipl_report_certs_intersects(unsigned long addr, unsigned long size,
unsigned long *intersection_start)
{
struct ipl_rb_certificate_entry *cert;
if (!ipl_report_needs_saving)
return false;
for_each_rb_entry(cert, certs) {
if (intersects(addr, size, cert->addr, cert->len)) {
*intersection_start = cert->addr;
return true;
}
}
return false;
}
static void copy_components_bootdata(void)
{
struct ipl_rb_component_entry *comp, *ptr;
ptr = (struct ipl_rb_component_entry *) early_ipl_comp_list_addr;
for_each_rb_entry(comp, comps)
memcpy(ptr++, comp, sizeof(*ptr));
}
static void copy_certificates_bootdata(void)
{
struct ipl_rb_certificate_entry *cert;
void *ptr;
ptr = (void *) ipl_cert_list_addr;
for_each_rb_entry(cert, certs) {
*(unsigned int *) ptr = cert->len;
ptr += sizeof(unsigned int);
memcpy(ptr, (void *) cert->addr, cert->len);
ptr += cert->len;
}
}
int read_ipl_report(void)
{
struct ipl_pl_hdr *pl_hdr;
struct ipl_rl_hdr *rl_hdr;
struct ipl_rb_hdr *rb_hdr;
unsigned long tmp;
void *rl_end;
/*
 * Check if there is an IPL report by looking at the copy
* of the IPL parameter information block.
*/
if (!ipl_block_valid ||
!(ipl_block.hdr.flags & IPL_PL_FLAG_IPLSR))
return -1;
ipl_secure_flag = !!(ipl_block.hdr.flags & IPL_PL_FLAG_SIPL);
/*
 * There is an IPL report. To find it, load the pointer to the
* IPL parameter information block from lowcore and skip past
* the IPL parameter list, then align the address to a double
* word boundary.
*/
tmp = (unsigned long) S390_lowcore.ipl_parmblock_ptr;
pl_hdr = (struct ipl_pl_hdr *) tmp;
tmp = (tmp + pl_hdr->len + 7) & -8UL;
rl_hdr = (struct ipl_rl_hdr *) tmp;
/* Walk through the IPL report blocks in the IPL Report list */
certs = NULL;
comps = NULL;
rl_end = (void *) rl_hdr + rl_hdr->len;
rb_hdr = (void *) rl_hdr + sizeof(*rl_hdr);
while ((void *) rb_hdr + sizeof(*rb_hdr) < rl_end &&
(void *) rb_hdr + rb_hdr->len <= rl_end) {
switch (rb_hdr->rbt) {
case IPL_RBT_CERTIFICATES:
certs = (struct ipl_rb_certificates *) rb_hdr;
break;
case IPL_RBT_COMPONENTS:
comps = (struct ipl_rb_components *) rb_hdr;
break;
default:
break;
}
rb_hdr = (void *) rb_hdr + rb_hdr->len;
}
/*
* With either the component list or the certificate list
 * missing, the kernel will stay ignorant of secure IPL.
*/
if (!comps || !certs) {
certs = NULL;
return -1;
}
ipl_report_needs_saving = true;
physmem_reserve(RR_IPLREPORT, (unsigned long)pl_hdr,
(unsigned long)rl_end - (unsigned long)pl_hdr);
return 0;
}
void save_ipl_cert_comp_list(void)
{
unsigned long size;
if (!ipl_report_needs_saving)
return;
size = get_cert_comp_list_size();
early_ipl_comp_list_addr = physmem_alloc_top_down(RR_CERT_COMP_LIST, size, sizeof(int));
ipl_cert_list_addr = early_ipl_comp_list_addr + early_ipl_comp_list_size;
copy_components_bootdata();
copy_certificates_bootdata();
physmem_free(RR_IPLREPORT);
ipl_report_needs_saving = false;
}
| linux-master | arch/s390/boot/ipl_report.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/processor.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <asm/physmem_info.h>
#include <asm/stacktrace.h>
#include <asm/boot_data.h>
#include <asm/sparsemem.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sclp.h>
#include <asm/uv.h>
#include "decompressor.h"
#include "boot.h"
struct physmem_info __bootdata(physmem_info);
static unsigned int physmem_alloc_ranges;
static unsigned long physmem_alloc_pos;
/* up to 256 storage elements, 1020 subincrements each */
#define ENTRIES_EXTENDED_MAX \
(256 * (1020 / 2) * sizeof(struct physmem_range))
static struct physmem_range *__get_physmem_range_ptr(u32 n)
{
if (n < MEM_INLINED_ENTRIES)
return &physmem_info.online[n];
if (unlikely(!physmem_info.online_extended)) {
physmem_info.online_extended = (struct physmem_range *)physmem_alloc_range(
RR_MEM_DETECT_EXTENDED, ENTRIES_EXTENDED_MAX, sizeof(long), 0,
physmem_alloc_pos, true);
}
return &physmem_info.online_extended[n - MEM_INLINED_ENTRIES];
}
/*
 * Sequential calls to add_physmem_online_range() with adjacent memory
 * ranges are merged together into a single memory range.
*/
void add_physmem_online_range(u64 start, u64 end)
{
struct physmem_range *range;
if (physmem_info.range_count) {
range = __get_physmem_range_ptr(physmem_info.range_count - 1);
if (range->end == start) {
range->end = end;
return;
}
}
range = __get_physmem_range_ptr(physmem_info.range_count);
range->start = start;
range->end = end;
physmem_info.range_count++;
}
static int __diag260(unsigned long rx1, unsigned long rx2)
{
unsigned long reg1, reg2, ry;
union register_pair rx;
psw_t old;
int rc;
rx.even = rx1;
rx.odd = rx2;
ry = 0x10; /* storage configuration */
rc = -1; /* fail */
asm volatile(
" mvc 0(16,%[psw_old]),0(%[psw_pgm])\n"
" epsw %[reg1],%[reg2]\n"
" st %[reg1],0(%[psw_pgm])\n"
" st %[reg2],4(%[psw_pgm])\n"
" larl %[reg1],1f\n"
" stg %[reg1],8(%[psw_pgm])\n"
" diag %[rx],%[ry],0x260\n"
" ipm %[rc]\n"
" srl %[rc],28\n"
"1: mvc 0(16,%[psw_pgm]),0(%[psw_old])\n"
: [reg1] "=&d" (reg1),
[reg2] "=&a" (reg2),
[rc] "+&d" (rc),
[ry] "+&d" (ry),
"+Q" (S390_lowcore.program_new_psw),
"=Q" (old)
: [rx] "d" (rx.pair),
[psw_old] "a" (&old),
[psw_pgm] "a" (&S390_lowcore.program_new_psw)
: "cc", "memory");
return rc == 0 ? ry : -1;
}
static int diag260(void)
{
int rc, i;
struct {
unsigned long start;
unsigned long end;
	} storage_extents[8] __aligned(16); /* VM supports up to 8 extents */
memset(storage_extents, 0, sizeof(storage_extents));
rc = __diag260((unsigned long)storage_extents, sizeof(storage_extents));
if (rc == -1)
return -1;
for (i = 0; i < min_t(int, rc, ARRAY_SIZE(storage_extents)); i++)
add_physmem_online_range(storage_extents[i].start, storage_extents[i].end + 1);
return 0;
}
static int tprot(unsigned long addr)
{
unsigned long reg1, reg2;
int rc = -EFAULT;
psw_t old;
asm volatile(
" mvc 0(16,%[psw_old]),0(%[psw_pgm])\n"
" epsw %[reg1],%[reg2]\n"
" st %[reg1],0(%[psw_pgm])\n"
" st %[reg2],4(%[psw_pgm])\n"
" larl %[reg1],1f\n"
" stg %[reg1],8(%[psw_pgm])\n"
" tprot 0(%[addr]),0\n"
" ipm %[rc]\n"
" srl %[rc],28\n"
"1: mvc 0(16,%[psw_pgm]),0(%[psw_old])\n"
: [reg1] "=&d" (reg1),
[reg2] "=&a" (reg2),
[rc] "+&d" (rc),
"=Q" (S390_lowcore.program_new_psw.addr),
"=Q" (old)
: [psw_old] "a" (&old),
[psw_pgm] "a" (&S390_lowcore.program_new_psw),
[addr] "a" (addr)
: "cc", "memory");
return rc;
}
static unsigned long search_mem_end(void)
{
unsigned long range = 1 << (MAX_PHYSMEM_BITS - 20); /* in 1MB blocks */
unsigned long offset = 0;
unsigned long pivot;
while (range > 1) {
range >>= 1;
pivot = offset + range;
if (!tprot(pivot << 20))
offset = pivot;
}
return (offset + 1) << 20;
}
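/*
 * Illustrative note (not part of the original file): search_mem_end() is a
 * probe-based binary search over 1MB blocks. Each tprot probe that succeeds
 * moves "offset" up to the pivot, each failing probe keeps it, and halving
 * "range" every step converges on the last accessible block; the returned
 * value is the address just past it.
 */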
unsigned long detect_max_physmem_end(void)
{
unsigned long max_physmem_end = 0;
if (!sclp_early_get_memsize(&max_physmem_end)) {
physmem_info.info_source = MEM_DETECT_SCLP_READ_INFO;
} else {
max_physmem_end = search_mem_end();
physmem_info.info_source = MEM_DETECT_BIN_SEARCH;
}
return max_physmem_end;
}
void detect_physmem_online_ranges(unsigned long max_physmem_end)
{
if (!sclp_early_read_storage_info()) {
physmem_info.info_source = MEM_DETECT_SCLP_STOR_INFO;
} else if (!diag260()) {
physmem_info.info_source = MEM_DETECT_DIAG260;
} else if (max_physmem_end) {
add_physmem_online_range(0, max_physmem_end);
}
}
void physmem_set_usable_limit(unsigned long limit)
{
physmem_info.usable = limit;
physmem_alloc_pos = limit;
}
static void die_oom(unsigned long size, unsigned long align, unsigned long min, unsigned long max)
{
unsigned long start, end, total_mem = 0, total_reserved_mem = 0;
struct reserved_range *range;
enum reserved_range_type t;
int i;
decompressor_printk("Linux version %s\n", kernel_version);
if (!is_prot_virt_guest() && early_command_line[0])
decompressor_printk("Kernel command line: %s\n", early_command_line);
decompressor_printk("Out of memory allocating %lx bytes %lx aligned in range %lx:%lx\n",
size, align, min, max);
decompressor_printk("Reserved memory ranges:\n");
for_each_physmem_reserved_range(t, range, &start, &end) {
decompressor_printk("%016lx %016lx %s\n", start, end, get_rr_type_name(t));
total_reserved_mem += end - start;
}
decompressor_printk("Usable online memory ranges (info source: %s [%x]):\n",
get_physmem_info_source(), physmem_info.info_source);
for_each_physmem_usable_range(i, &start, &end) {
decompressor_printk("%016lx %016lx\n", start, end);
total_mem += end - start;
}
decompressor_printk("Usable online memory total: %lx Reserved: %lx Free: %lx\n",
total_mem, total_reserved_mem,
total_mem > total_reserved_mem ? total_mem - total_reserved_mem : 0);
print_stacktrace(current_frame_address());
sclp_early_printk("\n\n -- System halted\n");
disabled_wait();
}
void physmem_reserve(enum reserved_range_type type, unsigned long addr, unsigned long size)
{
physmem_info.reserved[type].start = addr;
physmem_info.reserved[type].end = addr + size;
}
void physmem_free(enum reserved_range_type type)
{
physmem_info.reserved[type].start = 0;
physmem_info.reserved[type].end = 0;
}
static bool __physmem_alloc_intersects(unsigned long addr, unsigned long size,
unsigned long *intersection_start)
{
unsigned long res_addr, res_size;
int t;
for (t = 0; t < RR_MAX; t++) {
if (!get_physmem_reserved(t, &res_addr, &res_size))
continue;
if (intersects(addr, size, res_addr, res_size)) {
*intersection_start = res_addr;
return true;
}
}
return ipl_report_certs_intersects(addr, size, intersection_start);
}
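/*
 * Scan the usable online memory ranges top down for a block of the
 * requested size and alignment within [min, max]. Whenever a candidate
 * block intersects a reserved range, the search position is lowered to
 * the start of that reservation and the scan is retried; ranges that are
 * exhausted are dropped so that the number of ranges still worth looking
 * at can be handed back via ranges_left.
 */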
static unsigned long __physmem_alloc_range(unsigned long size, unsigned long align,
unsigned long min, unsigned long max,
unsigned int from_ranges, unsigned int *ranges_left,
bool die_on_oom)
{
unsigned int nranges = from_ranges ?: physmem_info.range_count;
unsigned long range_start, range_end;
unsigned long intersection_start;
unsigned long addr, pos = max;
align = max(align, 8UL);
while (nranges) {
__get_physmem_range(nranges - 1, &range_start, &range_end, false);
pos = min(range_end, pos);
if (round_up(min, align) + size > pos)
break;
addr = round_down(pos - size, align);
if (range_start > addr) {
nranges--;
continue;
}
if (__physmem_alloc_intersects(addr, size, &intersection_start)) {
pos = intersection_start;
continue;
}
if (ranges_left)
*ranges_left = nranges;
return addr;
}
if (die_on_oom)
die_oom(size, align, min, max);
return 0;
}
unsigned long physmem_alloc_range(enum reserved_range_type type, unsigned long size,
unsigned long align, unsigned long min, unsigned long max,
bool die_on_oom)
{
unsigned long addr;
max = min(max, physmem_alloc_pos);
addr = __physmem_alloc_range(size, align, min, max, 0, NULL, die_on_oom);
if (addr)
physmem_reserve(type, addr, size);
return addr;
}
unsigned long physmem_alloc_top_down(enum reserved_range_type type, unsigned long size,
unsigned long align)
{
struct reserved_range *range = &physmem_info.reserved[type];
struct reserved_range *new_range;
unsigned int ranges_left;
unsigned long addr;
addr = __physmem_alloc_range(size, align, 0, physmem_alloc_pos, physmem_alloc_ranges,
&ranges_left, true);
	/* not a consecutive allocation of the same type, or the very first allocation */
if (range->start != addr + size) {
if (range->end) {
physmem_alloc_pos = __physmem_alloc_range(
sizeof(struct reserved_range), 0, 0, physmem_alloc_pos,
physmem_alloc_ranges, &ranges_left, true);
new_range = (struct reserved_range *)physmem_alloc_pos;
*new_range = *range;
range->chain = new_range;
addr = __physmem_alloc_range(size, align, 0, physmem_alloc_pos,
ranges_left, &ranges_left, true);
}
range->end = addr + size;
}
range->start = addr;
physmem_alloc_pos = addr;
physmem_alloc_ranges = ranges_left;
return addr;
}
unsigned long get_physmem_alloc_pos(void)
{
return physmem_alloc_pos;
}
| linux-master | arch/s390/boot/physmem_info.c |
// SPDX-License-Identifier: GPL-2.0
#include <generated/utsversion.h>
#include <generated/utsrelease.h>
#include <generated/compile.h>
#include "boot.h"
const char kernel_version[] = UTS_RELEASE
" (" LINUX_COMPILE_BY "@" LINUX_COMPILE_HOST ") " UTS_VERSION;
| linux-master | arch/s390/boot/version.c |
// SPDX-License-Identifier: GPL-2.0
#include "../../../lib/cmdline.c"
| linux-master | arch/s390/boot/cmdline.c |
// SPDX-License-Identifier: GPL-2.0
#include <asm/uv.h>
#include <asm/boot_data.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include "boot.h"
#include "uv.h"
/* will be used in arch/s390/kernel/uv.c */
#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
int __bootdata_preserved(prot_virt_guest);
#endif
#if IS_ENABLED(CONFIG_KVM)
int __bootdata_preserved(prot_virt_host);
#endif
struct uv_info __bootdata_preserved(uv_info);
void uv_query_info(void)
{
struct uv_cb_qui uvcb = {
.header.cmd = UVC_CMD_QUI,
.header.len = sizeof(uvcb)
};
if (!test_facility(158))
return;
/* rc==0x100 means that there is additional data we do not process */
if (uv_call(0, (uint64_t)&uvcb) && uvcb.header.rc != 0x100)
return;
if (IS_ENABLED(CONFIG_KVM)) {
memcpy(uv_info.inst_calls_list, uvcb.inst_calls_list, sizeof(uv_info.inst_calls_list));
uv_info.uv_base_stor_len = uvcb.uv_base_stor_len;
uv_info.guest_base_stor_len = uvcb.conf_base_phys_stor_len;
uv_info.guest_virt_base_stor_len = uvcb.conf_base_virt_stor_len;
uv_info.guest_virt_var_stor_len = uvcb.conf_virt_var_stor_len;
uv_info.guest_cpu_stor_len = uvcb.cpu_stor_len;
uv_info.max_sec_stor_addr = ALIGN(uvcb.max_guest_stor_addr, PAGE_SIZE);
uv_info.max_num_sec_conf = uvcb.max_num_sec_conf;
uv_info.max_guest_cpu_id = uvcb.max_guest_cpu_id;
uv_info.uv_feature_indications = uvcb.uv_feature_indications;
uv_info.supp_se_hdr_ver = uvcb.supp_se_hdr_versions;
uv_info.supp_se_hdr_pcf = uvcb.supp_se_hdr_pcf;
uv_info.conf_dump_storage_state_len = uvcb.conf_dump_storage_state_len;
uv_info.conf_dump_finalize_len = uvcb.conf_dump_finalize_len;
uv_info.supp_att_req_hdr_ver = uvcb.supp_att_req_hdr_ver;
uv_info.supp_att_pflags = uvcb.supp_att_pflags;
uv_info.supp_add_secret_req_ver = uvcb.supp_add_secret_req_ver;
uv_info.supp_add_secret_pcf = uvcb.supp_add_secret_pcf;
uv_info.supp_secret_types = uvcb.supp_secret_types;
uv_info.max_secrets = uvcb.max_secrets;
}
#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
if (test_bit_inv(BIT_UVC_CMD_SET_SHARED_ACCESS, (unsigned long *)uvcb.inst_calls_list) &&
test_bit_inv(BIT_UVC_CMD_REMOVE_SHARED_ACCESS, (unsigned long *)uvcb.inst_calls_list))
prot_virt_guest = 1;
#endif
}
#if IS_ENABLED(CONFIG_KVM)
unsigned long adjust_to_uv_max(unsigned long limit)
{
if (is_prot_virt_host() && uv_info.max_sec_stor_addr)
limit = min_t(unsigned long, limit, uv_info.max_sec_stor_addr);
return limit;
}
static int is_prot_virt_host_capable(void)
{
/* disable if no prot_virt=1 given on command-line */
if (!is_prot_virt_host())
return 0;
/* disable if protected guest virtualization is enabled */
if (is_prot_virt_guest())
return 0;
/* disable if no hardware support */
if (!test_facility(158))
return 0;
/* disable if kdump */
if (oldmem_data.start)
return 0;
/* disable if stand-alone dump */
if (ipl_block_valid && is_ipl_block_dump())
return 0;
return 1;
}
void sanitize_prot_virt_host(void)
{
prot_virt_host = is_prot_virt_host_capable();
}
#endif
| linux-master | arch/s390/boot/uv.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2019
*/
#include <linux/pgtable.h>
#include <asm/physmem_info.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/sclp.h>
#include <asm/kasan.h>
#include "decompressor.h"
#include "boot.h"
#define PRNG_MODE_TDES 1
#define PRNG_MODE_SHA512 2
#define PRNG_MODE_TRNG 3
struct prno_parm {
u32 res;
u32 reseed_counter;
u64 stream_bytes;
u8 V[112];
u8 C[112];
};
struct prng_parm {
u8 parm_block[32];
u32 reseed_counter;
u64 byte_counter;
};
static int check_prng(void)
{
if (!cpacf_query_func(CPACF_KMC, CPACF_KMC_PRNG)) {
sclp_early_printk("KASLR disabled: CPU has no PRNG\n");
return 0;
}
if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG))
return PRNG_MODE_TRNG;
if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_SHA512_DRNG_GEN))
return PRNG_MODE_SHA512;
else
return PRNG_MODE_TDES;
}
static int get_random(unsigned long limit, unsigned long *value)
{
struct prng_parm prng = {
/* initial parameter block for tdes mode, copied from libica */
.parm_block = {
0x0F, 0x2B, 0x8E, 0x63, 0x8C, 0x8E, 0xD2, 0x52,
0x64, 0xB7, 0xA0, 0x7B, 0x75, 0x28, 0xB8, 0xF4,
0x75, 0x5F, 0xD2, 0xA6, 0x8D, 0x97, 0x11, 0xFF,
0x49, 0xD8, 0x23, 0xF3, 0x7E, 0x21, 0xEC, 0xA0
},
};
unsigned long seed, random;
struct prno_parm prno;
__u64 entropy[4];
int mode, i;
mode = check_prng();
seed = get_tod_clock_fast();
switch (mode) {
case PRNG_MODE_TRNG:
cpacf_trng(NULL, 0, (u8 *) &random, sizeof(random));
break;
case PRNG_MODE_SHA512:
cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED, &prno, NULL, 0,
(u8 *) &seed, sizeof(seed));
cpacf_prno(CPACF_PRNO_SHA512_DRNG_GEN, &prno, (u8 *) &random,
sizeof(random), NULL, 0);
break;
case PRNG_MODE_TDES:
/* add entropy */
*(unsigned long *) prng.parm_block ^= seed;
for (i = 0; i < 16; i++) {
cpacf_kmc(CPACF_KMC_PRNG, prng.parm_block,
(u8 *) entropy, (u8 *) entropy,
sizeof(entropy));
memcpy(prng.parm_block, entropy, sizeof(entropy));
}
random = seed;
cpacf_kmc(CPACF_KMC_PRNG, prng.parm_block, (u8 *) &random,
(u8 *) &random, sizeof(random));
break;
default:
return -1;
}
*value = random % limit;
return 0;
}
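/*
 * sort_reserved_ranges() below is a plain insertion sort by ascending
 * range start address; with at most RR_MAX entries this is cheap.
 */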
static void sort_reserved_ranges(struct reserved_range *res, unsigned long size)
{
struct reserved_range tmp;
int i, j;
for (i = 1; i < size; i++) {
tmp = res[i];
for (j = i - 1; j >= 0 && res[j].start > tmp.start; j--)
res[j + 1] = res[j];
res[j + 1] = tmp;
}
}
static unsigned long iterate_valid_positions(unsigned long size, unsigned long align,
unsigned long _min, unsigned long _max,
struct reserved_range *res, size_t res_count,
bool pos_count, unsigned long find_pos)
{
unsigned long start, end, tmp_end, range_pos, pos = 0;
struct reserved_range *res_end = res + res_count;
struct reserved_range *skip_res;
int i;
align = max(align, 8UL);
_min = round_up(_min, align);
for_each_physmem_usable_range(i, &start, &end) {
if (_min >= end)
continue;
start = round_up(start, align);
if (start >= _max)
break;
start = max(_min, start);
end = min(_max, end);
while (start + size <= end) {
/* skip reserved ranges below the start */
while (res && res->end <= start) {
res++;
if (res >= res_end)
res = NULL;
}
skip_res = NULL;
tmp_end = end;
/* has intersecting reserved range */
if (res && res->start < end) {
skip_res = res;
tmp_end = res->start;
}
if (start + size <= tmp_end) {
range_pos = (tmp_end - start - size) / align + 1;
if (pos_count) {
pos += range_pos;
} else {
if (range_pos >= find_pos)
return start + (find_pos - 1) * align;
find_pos -= range_pos;
}
}
if (!skip_res)
break;
start = round_up(skip_res->end, align);
}
}
return pos_count ? pos : 0;
}
/*
 * Two types of decompressor memory allocations/reserves are handled
 * differently.
 *
 * "Static" or "single" allocations are done via physmem_alloc_range() and
 * physmem_reserve(), and they are listed in physmem_info.reserved[]. Each
 * "static" allocation type can have only one allocation and cannot have
 * chains.
*
* On the other hand, "dynamic" or "repetitive" allocations are done via
* physmem_alloc_top_down(). These allocations are tightly packed together
* top down from the end of online memory. physmem_alloc_pos represents
* current position where those allocations start.
*
* Functions randomize_within_range() and iterate_valid_positions()
* only consider "dynamic" allocations by never looking above
* physmem_alloc_pos. "Static" allocations, however, are explicitly
* considered by checking the "res" (reserves) array. The first
* reserved_range of a "dynamic" allocation may also be checked along the
* way, but it will always be above the maximum value anyway.
*/
unsigned long randomize_within_range(unsigned long size, unsigned long align,
unsigned long min, unsigned long max)
{
struct reserved_range res[RR_MAX];
unsigned long max_pos, pos;
memcpy(res, physmem_info.reserved, sizeof(res));
sort_reserved_ranges(res, ARRAY_SIZE(res));
max = min(max, get_physmem_alloc_pos());
max_pos = iterate_valid_positions(size, align, min, max, res, ARRAY_SIZE(res), true, 0);
if (!max_pos)
return 0;
if (get_random(max_pos, &pos))
return 0;
return iterate_valid_positions(size, align, min, max, res, ARRAY_SIZE(res), false, pos + 1);
}
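/*
 * Illustrative sketch (not part of the original file): the two-pass scheme
 * above selects uniformly among all valid placements without storing them.
 * Pass one (pos_count == true) merely counts the candidates, a random index
 * is drawn, and pass two replays the same enumeration to return the chosen
 * position. A minimal userspace analogue over a plain array, with all
 * names being assumptions for illustration only:
 */
#if 0
#include <stddef.h>

/*
 * counting == 1: return how many elements equal "want".
 * counting == 0: return the array index of the find_idx'th match.
 */
static size_t count_or_find(const int *v, size_t n, int want,
			    int counting, size_t find_idx)
{
	size_t seen = 0;
	size_t i;

	for (i = 0; i < n; i++) {
		if (v[i] != want)
			continue;
		if (!counting && seen == find_idx)
			return i;
		seen++;
	}
	return seen;
}
#endif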
| linux-master | arch/s390/boot/kaslr.c |
// SPDX-License-Identifier: GPL-2.0
#include "../../../../lib/clz_ctz.c"
| linux-master | arch/s390/boot/clz_ctz.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/string.h>
#include <linux/elf.h>
#include <asm/boot_data.h>
#include <asm/sections.h>
#include <asm/maccess.h>
#include <asm/cpu_mf.h>
#include <asm/setup.h>
#include <asm/kasan.h>
#include <asm/kexec.h>
#include <asm/sclp.h>
#include <asm/diag.h>
#include <asm/uv.h>
#include <asm/abs_lowcore.h>
#include <asm/physmem_info.h>
#include "decompressor.h"
#include "boot.h"
#include "uv.h"
unsigned long __bootdata_preserved(__kaslr_offset);
unsigned long __bootdata_preserved(__abs_lowcore);
unsigned long __bootdata_preserved(__memcpy_real_area);
pte_t *__bootdata_preserved(memcpy_real_ptep);
unsigned long __bootdata_preserved(VMALLOC_START);
unsigned long __bootdata_preserved(VMALLOC_END);
struct page *__bootdata_preserved(vmemmap);
unsigned long __bootdata_preserved(vmemmap_size);
unsigned long __bootdata_preserved(MODULES_VADDR);
unsigned long __bootdata_preserved(MODULES_END);
unsigned long __bootdata_preserved(max_mappable);
unsigned long __bootdata(ident_map_size);
u64 __bootdata_preserved(stfle_fac_list[16]);
u64 __bootdata_preserved(alt_stfle_fac_list[16]);
struct oldmem_data __bootdata_preserved(oldmem_data);
struct machine_info machine;
void error(char *x)
{
sclp_early_printk("\n\n");
sclp_early_printk(x);
sclp_early_printk("\n\n -- System halted");
disabled_wait();
}
static void detect_facilities(void)
{
if (test_facility(8)) {
machine.has_edat1 = 1;
__ctl_set_bit(0, 23);
}
if (test_facility(78))
machine.has_edat2 = 1;
if (test_facility(130))
machine.has_nx = 1;
}
static void setup_lpp(void)
{
S390_lowcore.current_pid = 0;
S390_lowcore.lpp = LPP_MAGIC;
if (test_facility(40))
lpp(&S390_lowcore.lpp);
}
#ifdef CONFIG_KERNEL_UNCOMPRESSED
unsigned long mem_safe_offset(void)
{
return vmlinux.default_lma + vmlinux.image_size + vmlinux.bss_size;
}
#endif
static void rescue_initrd(unsigned long min, unsigned long max)
{
unsigned long old_addr, addr, size;
if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
return;
if (!get_physmem_reserved(RR_INITRD, &addr, &size))
return;
if (addr >= min && addr + size <= max)
return;
old_addr = addr;
physmem_free(RR_INITRD);
addr = physmem_alloc_top_down(RR_INITRD, size, 0);
memmove((void *)addr, (void *)old_addr, size);
}
static void copy_bootdata(void)
{
if (__boot_data_end - __boot_data_start != vmlinux.bootdata_size)
error(".boot.data section size mismatch");
memcpy((void *)vmlinux.bootdata_off, __boot_data_start, vmlinux.bootdata_size);
if (__boot_data_preserved_end - __boot_data_preserved_start != vmlinux.bootdata_preserved_size)
error(".boot.preserved.data section size mismatch");
memcpy((void *)vmlinux.bootdata_preserved_off, __boot_data_preserved_start, vmlinux.bootdata_preserved_size);
}
static void handle_relocs(unsigned long offset)
{
Elf64_Rela *rela_start, *rela_end, *rela;
int r_type, r_sym, rc;
Elf64_Addr loc, val;
Elf64_Sym *dynsym;
rela_start = (Elf64_Rela *) vmlinux.rela_dyn_start;
rela_end = (Elf64_Rela *) vmlinux.rela_dyn_end;
dynsym = (Elf64_Sym *) vmlinux.dynsym_start;
for (rela = rela_start; rela < rela_end; rela++) {
loc = rela->r_offset + offset;
val = rela->r_addend;
r_sym = ELF64_R_SYM(rela->r_info);
if (r_sym) {
if (dynsym[r_sym].st_shndx != SHN_UNDEF)
val += dynsym[r_sym].st_value + offset;
} else {
/*
* 0 == undefined symbol table index (STN_UNDEF),
* used for R_390_RELATIVE, only add KASLR offset
*/
val += offset;
}
r_type = ELF64_R_TYPE(rela->r_info);
rc = arch_kexec_do_relocs(r_type, (void *) loc, val, 0);
if (rc)
error("Unknown relocation type");
}
}
/*
* Merge information from several sources into a single ident_map_size value.
* "ident_map_size" represents the upper limit of physical memory we may ever
 * reach. It might cover not only online memory but also standby (offline)
 * memory. "ident_map_size" could be lower than the actual standby or even
 * online memory present, due to limiting factors. We should never go above this limit.
* It is the size of our identity mapping.
*
* Consider the following factors:
* 1. max_physmem_end - end of physical memory online or standby.
* Always >= end of the last online memory range (get_physmem_online_end()).
* 2. CONFIG_MAX_PHYSMEM_BITS - the maximum size of physical memory the
* kernel is able to support.
* 3. "mem=" kernel command line option which limits physical memory usage.
* 4. OLDMEM_BASE which is a kdump memory limit when the kernel is executed as
* crash kernel.
* 5. "hsa" size which is a memory limit when the kernel is executed during
* zfcp/nvme dump.
*/
static void setup_ident_map_size(unsigned long max_physmem_end)
{
unsigned long hsa_size;
ident_map_size = max_physmem_end;
if (memory_limit)
ident_map_size = min(ident_map_size, memory_limit);
ident_map_size = min(ident_map_size, 1UL << MAX_PHYSMEM_BITS);
#ifdef CONFIG_CRASH_DUMP
if (oldmem_data.start) {
__kaslr_enabled = 0;
ident_map_size = min(ident_map_size, oldmem_data.size);
} else if (ipl_block_valid && is_ipl_block_dump()) {
__kaslr_enabled = 0;
if (!sclp_early_get_hsa_size(&hsa_size) && hsa_size)
ident_map_size = min(ident_map_size, hsa_size);
}
#endif
}
static unsigned long setup_kernel_memory_layout(void)
{
unsigned long vmemmap_start;
unsigned long asce_limit;
unsigned long rte_size;
unsigned long pages;
unsigned long vsize;
unsigned long vmax;
pages = ident_map_size / PAGE_SIZE;
/* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
vmemmap_size = SECTION_ALIGN_UP(pages) * sizeof(struct page);
/* choose kernel address space layout: 4 or 3 levels. */
vsize = round_up(ident_map_size, _REGION3_SIZE) + vmemmap_size +
MODULES_LEN + MEMCPY_REAL_SIZE + ABS_LOWCORE_MAP_SIZE;
vsize = size_add(vsize, vmalloc_size);
if (IS_ENABLED(CONFIG_KASAN) || (vsize > _REGION2_SIZE)) {
asce_limit = _REGION1_SIZE;
rte_size = _REGION2_SIZE;
} else {
asce_limit = _REGION2_SIZE;
rte_size = _REGION3_SIZE;
}
/*
	 * Force modules and the vmalloc area under the ultravisor
	 * secure storage limit, so that any vmalloc allocation
	 * we do can be used to back secure guest storage.
*/
vmax = adjust_to_uv_max(asce_limit);
#ifdef CONFIG_KASAN
/* force vmalloc and modules below kasan shadow */
vmax = min(vmax, KASAN_SHADOW_START);
#endif
__memcpy_real_area = round_down(vmax - MEMCPY_REAL_SIZE, PAGE_SIZE);
__abs_lowcore = round_down(__memcpy_real_area - ABS_LOWCORE_MAP_SIZE,
sizeof(struct lowcore));
MODULES_END = round_down(__abs_lowcore, _SEGMENT_SIZE);
MODULES_VADDR = MODULES_END - MODULES_LEN;
VMALLOC_END = MODULES_VADDR;
	/* allow the vmalloc area to occupy up to about 1/2 of the remaining virtual space */
vmalloc_size = min(vmalloc_size, round_down(VMALLOC_END / 2, _REGION3_SIZE));
VMALLOC_START = VMALLOC_END - vmalloc_size;
/* split remaining virtual space between 1:1 mapping & vmemmap array */
pages = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
pages = SECTION_ALIGN_UP(pages);
/* keep vmemmap_start aligned to a top level region table entry */
vmemmap_start = round_down(VMALLOC_START - pages * sizeof(struct page), rte_size);
vmemmap_start = min(vmemmap_start, 1UL << MAX_PHYSMEM_BITS);
/* maximum mappable address as seen by arch_get_mappable_range() */
max_mappable = vmemmap_start;
	/* make sure the identity map doesn't overlap with vmemmap */
ident_map_size = min(ident_map_size, vmemmap_start);
vmemmap_size = SECTION_ALIGN_UP(ident_map_size / PAGE_SIZE) * sizeof(struct page);
	/* make sure vmemmap doesn't overlap with the vmalloc area */
VMALLOC_START = max(vmemmap_start + vmemmap_size, VMALLOC_START);
vmemmap = (struct page *)vmemmap_start;
return asce_limit;
}
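/*
 * Worked example for the split above: each page of identity mapping costs
 * PAGE_SIZE bytes of virtual space plus sizeof(struct page) bytes of
 * vmemmap, hence pages = VMALLOC_START / (PAGE_SIZE + sizeof(struct page)).
 * Assuming 4K pages and a typical 64-byte struct page, roughly 64 / 4160,
 * i.e. about 1.5% of the remaining virtual space, is set aside for the
 * vmemmap array.
 */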
/*
* This function clears the BSS section of the decompressed Linux kernel and NOT the decompressor's.
*/
static void clear_bss_section(unsigned long vmlinux_lma)
{
memset((void *)vmlinux_lma + vmlinux.image_size, 0, vmlinux.bss_size);
}
/*
* Set vmalloc area size to an 8th of (potential) physical memory
* size, unless size has been set by kernel command line parameter.
*/
static void setup_vmalloc_size(void)
{
unsigned long size;
if (vmalloc_size_set)
return;
size = round_up(ident_map_size / 8, _SEGMENT_SIZE);
vmalloc_size = max(size, vmalloc_size);
}
static void offset_vmlinux_info(unsigned long offset)
{
*(unsigned long *)(&vmlinux.entry) += offset;
vmlinux.bootdata_off += offset;
vmlinux.bootdata_preserved_off += offset;
vmlinux.rela_dyn_start += offset;
vmlinux.rela_dyn_end += offset;
vmlinux.dynsym_start += offset;
vmlinux.init_mm_off += offset;
vmlinux.swapper_pg_dir_off += offset;
vmlinux.invalid_pg_dir_off += offset;
#ifdef CONFIG_KASAN
vmlinux.kasan_early_shadow_page_off += offset;
vmlinux.kasan_early_shadow_pte_off += offset;
vmlinux.kasan_early_shadow_pmd_off += offset;
vmlinux.kasan_early_shadow_pud_off += offset;
vmlinux.kasan_early_shadow_p4d_off += offset;
#endif
}
void startup_kernel(void)
{
unsigned long max_physmem_end;
unsigned long vmlinux_lma = 0;
unsigned long amode31_lma = 0;
unsigned long asce_limit;
unsigned long safe_addr;
void *img;
psw_t psw;
setup_lpp();
safe_addr = mem_safe_offset();
/*
	 * Reserve the decompressor memory together with the decompression heap
	 * and buffer, as well as memory which might be occupied by the
	 * uncompressed kernel at the default 1MB position (if KASLR is off or
	 * has failed).
*/
physmem_reserve(RR_DECOMPRESSOR, 0, safe_addr);
if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && parmarea.initrd_size)
physmem_reserve(RR_INITRD, parmarea.initrd_start, parmarea.initrd_size);
oldmem_data.start = parmarea.oldmem_base;
oldmem_data.size = parmarea.oldmem_size;
store_ipl_parmblock();
read_ipl_report();
uv_query_info();
sclp_early_read_info();
setup_boot_command_line();
parse_boot_command_line();
detect_facilities();
sanitize_prot_virt_host();
max_physmem_end = detect_max_physmem_end();
setup_ident_map_size(max_physmem_end);
setup_vmalloc_size();
asce_limit = setup_kernel_memory_layout();
/* got final ident_map_size, physmem allocations could be performed now */
physmem_set_usable_limit(ident_map_size);
detect_physmem_online_ranges(max_physmem_end);
save_ipl_cert_comp_list();
rescue_initrd(safe_addr, ident_map_size);
if (kaslr_enabled()) {
vmlinux_lma = randomize_within_range(vmlinux.image_size + vmlinux.bss_size,
THREAD_SIZE, vmlinux.default_lma,
ident_map_size);
if (vmlinux_lma) {
__kaslr_offset = vmlinux_lma - vmlinux.default_lma;
offset_vmlinux_info(__kaslr_offset);
}
}
vmlinux_lma = vmlinux_lma ?: vmlinux.default_lma;
physmem_reserve(RR_VMLINUX, vmlinux_lma, vmlinux.image_size + vmlinux.bss_size);
if (!IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED)) {
img = decompress_kernel();
memmove((void *)vmlinux_lma, img, vmlinux.image_size);
} else if (__kaslr_offset) {
img = (void *)vmlinux.default_lma;
memmove((void *)vmlinux_lma, img, vmlinux.image_size);
memset(img, 0, vmlinux.image_size);
}
/* vmlinux decompression is done, shrink reserved low memory */
physmem_reserve(RR_DECOMPRESSOR, 0, (unsigned long)_decompressor_end);
if (kaslr_enabled())
amode31_lma = randomize_within_range(vmlinux.amode31_size, PAGE_SIZE, 0, SZ_2G);
amode31_lma = amode31_lma ?: vmlinux.default_lma - vmlinux.amode31_size;
physmem_reserve(RR_AMODE31, amode31_lma, vmlinux.amode31_size);
/*
* The order of the following operations is important:
*
	 * - handle_relocs() must follow clear_bss_section() to establish static
	 *   memory references to data in .bss to be used by setup_vmem()
	 *   (i.e. init_mm.pgd)
	 *
	 * - setup_vmem() must follow handle_relocs() to be able to use
	 *   static memory references to data in .bss (i.e. init_mm.pgd)
*
* - copy_bootdata() must follow setup_vmem() to propagate changes to
* bootdata made by setup_vmem()
*/
clear_bss_section(vmlinux_lma);
handle_relocs(__kaslr_offset);
setup_vmem(asce_limit);
copy_bootdata();
/*
* Save KASLR offset for early dumps, before vmcore_info is set.
	 * Mark it as odd to distinguish it from a real vmcore_info pointer.
*/
S390_lowcore.vmcore_info = __kaslr_offset ? __kaslr_offset | 0x1UL : 0;
/*
* Jump to the decompressed kernel entry point and switch DAT mode on.
*/
psw.addr = vmlinux.entry;
psw.mask = PSW_KERNEL_BITS;
__load_psw(psw);
}
| linux-master | arch/s390/boot/startup.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Data gathering module for Linux-VM Monitor Stream, Stage 1.
* Collects misc. OS related data (CPU utilization, running processes).
*
* Copyright IBM Corp. 2003, 2006
*
* Author: Gerald Schaefer <[email protected]>
*/
#define KMSG_COMPONENT "appldata"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/netdevice.h>
#include <linux/sched.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/stat.h>
#include <asm/appldata.h>
#include <asm/smp.h>
#include "appldata.h"
/*
* OS data
*
* This is accessed as binary data by z/VM. If changes to it can't be avoided,
* the structure version (product ID, see appldata_base.c) needs to be changed
* as well and all documentation and z/VM applications using it must be
* updated.
*/
struct appldata_os_per_cpu {
u32 per_cpu_user; /* timer ticks spent in user mode */
u32 per_cpu_nice; /* ... spent with modified priority */
u32 per_cpu_system; /* ... spent in kernel mode */
u32 per_cpu_idle; /* ... spent in idle mode */
/* New in 2.6 */
u32 per_cpu_irq; /* ... spent in interrupts */
u32 per_cpu_softirq; /* ... spent in softirqs */
u32 per_cpu_iowait; /* ... spent while waiting for I/O */
/* New in modification level 01 */
u32 per_cpu_steal; /* ... stolen by hypervisor */
u32 cpu_id; /* number of this CPU */
} __attribute__((packed));
struct appldata_os_data {
u64 timestamp;
u32 sync_count_1; /* after VM collected the record data, */
u32 sync_count_2; /* sync_count_1 and sync_count_2 should be the
same. If not, the record has been updated on
the Linux side while VM was collecting the
(possibly corrupt) data */
u32 nr_cpus; /* number of (virtual) CPUs */
u32 per_cpu_size; /* size of the per-cpu data struct */
u32 cpu_offset; /* offset of the first per-cpu data struct */
u32 nr_running; /* number of runnable threads */
u32 nr_threads; /* number of threads */
u32 avenrun[3]; /* average nr. of running processes during */
/* the last 1, 5 and 15 minutes */
/* New in 2.6 */
u32 nr_iowait; /* number of blocked threads
(waiting for I/O) */
/* per cpu data */
struct appldata_os_per_cpu os_cpu[];
} __attribute__((packed));
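/*
 * Illustrative sketch (not part of the original file): sync_count_1 and
 * sync_count_2 form a seqlock-like consistency check for the z/VM reader.
 * A hypothetical reader retries the copy until both counters in its
 * snapshot match, which guarantees the record was not being updated
 * mid-copy ("rec" points at the live record; copy_record() is an assumed
 * helper, for illustration only):
 */
#if 0
struct appldata_os_data snapshot;

do {
	copy_record(&snapshot, rec);
} while (snapshot.sync_count_1 != snapshot.sync_count_2);
#endif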
static struct appldata_os_data *appldata_os_data;
static struct appldata_ops ops = {
.name = "os",
.record_nr = APPLDATA_RECORD_OS_ID,
.owner = THIS_MODULE,
.mod_lvl = {0xF0, 0xF1}, /* EBCDIC "01" */
};
/*
* appldata_get_os_data()
*
* gather OS data
*/
static void appldata_get_os_data(void *data)
{
int i, j, rc;
struct appldata_os_data *os_data;
unsigned int new_size;
os_data = data;
os_data->sync_count_1++;
os_data->nr_threads = nr_threads;
os_data->nr_running = nr_running();
os_data->nr_iowait = nr_iowait();
os_data->avenrun[0] = avenrun[0] + (FIXED_1/200);
os_data->avenrun[1] = avenrun[1] + (FIXED_1/200);
os_data->avenrun[2] = avenrun[2] + (FIXED_1/200);
j = 0;
for_each_online_cpu(i) {
os_data->os_cpu[j].per_cpu_user =
nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_USER]);
os_data->os_cpu[j].per_cpu_nice =
nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_NICE]);
os_data->os_cpu[j].per_cpu_system =
nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM]);
os_data->os_cpu[j].per_cpu_idle =
nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IDLE]);
os_data->os_cpu[j].per_cpu_irq =
nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IRQ]);
os_data->os_cpu[j].per_cpu_softirq =
nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ]);
os_data->os_cpu[j].per_cpu_iowait =
nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IOWAIT]);
os_data->os_cpu[j].per_cpu_steal =
nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_STEAL]);
os_data->os_cpu[j].cpu_id = i;
j++;
}
os_data->nr_cpus = j;
new_size = struct_size(os_data, os_cpu, os_data->nr_cpus);
if (ops.size != new_size) {
if (ops.active) {
rc = appldata_diag(APPLDATA_RECORD_OS_ID,
APPLDATA_START_INTERVAL_REC,
(unsigned long) ops.data, new_size,
ops.mod_lvl);
if (rc != 0)
pr_err("Starting a new OS data collection "
"failed with rc=%d\n", rc);
rc = appldata_diag(APPLDATA_RECORD_OS_ID,
APPLDATA_STOP_REC,
(unsigned long) ops.data, ops.size,
ops.mod_lvl);
if (rc != 0)
pr_err("Stopping a faulty OS data "
"collection failed with rc=%d\n", rc);
}
ops.size = new_size;
}
os_data->timestamp = get_tod_clock();
os_data->sync_count_2++;
}
/*
* appldata_os_init()
*
* init data, register ops
*/
static int __init appldata_os_init(void)
{
int rc, max_size;
max_size = struct_size(appldata_os_data, os_cpu, num_possible_cpus());
if (max_size > APPLDATA_MAX_REC_SIZE) {
pr_err("Maximum OS record size %i exceeds the maximum "
"record size %i\n", max_size, APPLDATA_MAX_REC_SIZE);
rc = -ENOMEM;
goto out;
}
appldata_os_data = kzalloc(max_size, GFP_KERNEL | GFP_DMA);
if (appldata_os_data == NULL) {
rc = -ENOMEM;
goto out;
}
appldata_os_data->per_cpu_size = sizeof(struct appldata_os_per_cpu);
appldata_os_data->cpu_offset = offsetof(struct appldata_os_data,
os_cpu);
ops.data = appldata_os_data;
ops.callback = &appldata_get_os_data;
rc = appldata_register_ops(&ops);
if (rc != 0)
kfree(appldata_os_data);
out:
return rc;
}
/*
* appldata_os_exit()
*
* unregister ops
*/
static void __exit appldata_os_exit(void)
{
appldata_unregister_ops(&ops);
kfree(appldata_os_data);
}
module_init(appldata_os_init);
module_exit(appldata_os_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Gerald Schaefer");
MODULE_DESCRIPTION("Linux-VM Monitor Stream, OS statistics");
| linux-master | arch/s390/appldata/appldata_os.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Data gathering module for Linux-VM Monitor Stream, Stage 1.
* Collects accumulated network statistics (Packets received/transmitted,
* dropped, errors, ...).
*
* Copyright IBM Corp. 2003, 2006
*
* Author: Gerald Schaefer <[email protected]>
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include "appldata.h"
/*
* Network data
*
* This is accessed as binary data by z/VM. If changes to it can't be avoided,
* the structure version (product ID, see appldata_base.c) needs to be changed
* as well and all documentation and z/VM applications using it must be updated.
*/
struct appldata_net_sum_data {
u64 timestamp;
u32 sync_count_1; /* after VM collected the record data, */
u32 sync_count_2; /* sync_count_1 and sync_count_2 should be the
same. If not, the record has been updated on
the Linux side while VM was collecting the
(possibly corrupt) data */
u32 nr_interfaces; /* nr. of network interfaces being monitored */
u32 padding; /* next value is 64-bit aligned, so these */
			/* 4 bytes would be padded out by the compiler */
u64 rx_packets; /* total packets received */
u64 tx_packets; /* total packets transmitted */
u64 rx_bytes; /* total bytes received */
u64 tx_bytes; /* total bytes transmitted */
u64 rx_errors; /* bad packets received */
u64 tx_errors; /* packet transmit problems */
u64 rx_dropped; /* no space in linux buffers */
u64 tx_dropped; /* no space available in linux */
u64 collisions; /* collisions while transmitting */
} __packed;
/*
* appldata_get_net_sum_data()
*
* gather accumulated network statistics
*/
static void appldata_get_net_sum_data(void *data)
{
int i;
struct appldata_net_sum_data *net_data;
struct net_device *dev;
unsigned long rx_packets, tx_packets, rx_bytes, tx_bytes, rx_errors,
tx_errors, rx_dropped, tx_dropped, collisions;
net_data = data;
net_data->sync_count_1++;
i = 0;
rx_packets = 0;
tx_packets = 0;
rx_bytes = 0;
tx_bytes = 0;
rx_errors = 0;
tx_errors = 0;
rx_dropped = 0;
tx_dropped = 0;
collisions = 0;
rcu_read_lock();
for_each_netdev_rcu(&init_net, dev) {
const struct rtnl_link_stats64 *stats;
struct rtnl_link_stats64 temp;
stats = dev_get_stats(dev, &temp);
rx_packets += stats->rx_packets;
tx_packets += stats->tx_packets;
rx_bytes += stats->rx_bytes;
tx_bytes += stats->tx_bytes;
rx_errors += stats->rx_errors;
tx_errors += stats->tx_errors;
rx_dropped += stats->rx_dropped;
tx_dropped += stats->tx_dropped;
collisions += stats->collisions;
i++;
}
rcu_read_unlock();
net_data->nr_interfaces = i;
net_data->rx_packets = rx_packets;
net_data->tx_packets = tx_packets;
net_data->rx_bytes = rx_bytes;
net_data->tx_bytes = tx_bytes;
net_data->rx_errors = rx_errors;
net_data->tx_errors = tx_errors;
net_data->rx_dropped = rx_dropped;
net_data->tx_dropped = tx_dropped;
net_data->collisions = collisions;
net_data->timestamp = get_tod_clock();
net_data->sync_count_2++;
}
static struct appldata_ops ops = {
.name = "net_sum",
.record_nr = APPLDATA_RECORD_NET_SUM_ID,
.size = sizeof(struct appldata_net_sum_data),
.callback = &appldata_get_net_sum_data,
.owner = THIS_MODULE,
.mod_lvl = {0xF0, 0xF0}, /* EBCDIC "00" */
};
/*
* appldata_net_init()
*
* init data, register ops
*/
static int __init appldata_net_init(void)
{
int ret;
ops.data = kzalloc(sizeof(struct appldata_net_sum_data), GFP_KERNEL);
if (!ops.data)
return -ENOMEM;
ret = appldata_register_ops(&ops);
if (ret)
kfree(ops.data);
return ret;
}
/*
* appldata_net_exit()
*
* unregister ops
*/
static void __exit appldata_net_exit(void)
{
appldata_unregister_ops(&ops);
kfree(ops.data);
}
module_init(appldata_net_init);
module_exit(appldata_net_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Gerald Schaefer");
MODULE_DESCRIPTION("Linux-VM Monitor Stream, accumulated network statistics");
| linux-master | arch/s390/appldata/appldata_net_sum.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Data gathering module for Linux-VM Monitor Stream, Stage 1.
* Collects data related to memory management.
*
* Copyright IBM Corp. 2003, 2006
*
* Author: Gerald Schaefer <[email protected]>
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/io.h>
#include "appldata.h"
#define P2K(x) ((x) << (PAGE_SHIFT - 10)) /* Converts #Pages to KB */
/*
* Memory data
*
* This is accessed as binary data by z/VM. If changes to it can't be avoided,
* the structure version (product ID, see appldata_base.c) needs to be changed
* as well and all documentation and z/VM applications using it must be
* updated.
*/
struct appldata_mem_data {
u64 timestamp;
u32 sync_count_1; /* after VM collected the record data, */
u32 sync_count_2; /* sync_count_1 and sync_count_2 should be the
same. If not, the record has been updated on
the Linux side while VM was collecting the
(possibly corrupt) data */
u64 pgpgin; /* data read from disk */
u64 pgpgout; /* data written to disk */
u64 pswpin; /* pages swapped in */
u64 pswpout; /* pages swapped out */
u64 sharedram; /* sharedram is currently set to 0 */
u64 totalram; /* total main memory size */
u64 freeram; /* free main memory size */
u64 totalhigh; /* total high memory size */
u64 freehigh; /* free high memory size */
u64 bufferram; /* memory reserved for buffers, free cache */
u64 cached; /* size of (used) cache, w/o buffers */
u64 totalswap; /* total swap space size */
u64 freeswap; /* free swap space */
// New in 2.6 -->
u64 pgalloc; /* page allocations */
u64 pgfault; /* page faults (major+minor) */
u64 pgmajfault; /* page faults (major only) */
// <-- New in 2.6
} __packed;
/*
* appldata_get_mem_data()
*
* gather memory data
*/
static void appldata_get_mem_data(void *data)
{
/*
	 * Don't put large structures on the stack; we are serialized
	 * through the appldata_ops_mutex and can therefore use static data.
*/
static struct sysinfo val;
unsigned long ev[NR_VM_EVENT_ITEMS];
struct appldata_mem_data *mem_data;
mem_data = data;
mem_data->sync_count_1++;
all_vm_events(ev);
mem_data->pgpgin = ev[PGPGIN] >> 1;
mem_data->pgpgout = ev[PGPGOUT] >> 1;
mem_data->pswpin = ev[PSWPIN];
mem_data->pswpout = ev[PSWPOUT];
mem_data->pgalloc = ev[PGALLOC_NORMAL];
mem_data->pgalloc += ev[PGALLOC_DMA];
mem_data->pgfault = ev[PGFAULT];
mem_data->pgmajfault = ev[PGMAJFAULT];
si_meminfo(&val);
mem_data->sharedram = val.sharedram;
mem_data->totalram = P2K(val.totalram);
mem_data->freeram = P2K(val.freeram);
mem_data->totalhigh = P2K(val.totalhigh);
mem_data->freehigh = P2K(val.freehigh);
mem_data->bufferram = P2K(val.bufferram);
mem_data->cached = P2K(global_node_page_state(NR_FILE_PAGES)
- val.bufferram);
si_swapinfo(&val);
mem_data->totalswap = P2K(val.totalswap);
mem_data->freeswap = P2K(val.freeswap);
mem_data->timestamp = get_tod_clock();
mem_data->sync_count_2++;
}
static struct appldata_ops ops = {
.name = "mem",
.record_nr = APPLDATA_RECORD_MEM_ID,
.size = sizeof(struct appldata_mem_data),
.callback = &appldata_get_mem_data,
.owner = THIS_MODULE,
.mod_lvl = {0xF0, 0xF0}, /* EBCDIC "00" */
};
/*
* appldata_mem_init()
*
* init_data, register ops
*/
static int __init appldata_mem_init(void)
{
int ret;
ops.data = kzalloc(sizeof(struct appldata_mem_data), GFP_KERNEL);
if (!ops.data)
return -ENOMEM;
ret = appldata_register_ops(&ops);
if (ret)
kfree(ops.data);
return ret;
}
/*
* appldata_mem_exit()
*
* unregister ops
*/
static void __exit appldata_mem_exit(void)
{
appldata_unregister_ops(&ops);
kfree(ops.data);
}
module_init(appldata_mem_init);
module_exit(appldata_mem_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Gerald Schaefer");
MODULE_DESCRIPTION("Linux-VM Monitor Stream, MEMORY statistics");
| linux-master | arch/s390/appldata/appldata_mem.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Base infrastructure for Linux-z/VM Monitor Stream, Stage 1.
* Exports appldata_register_ops() and appldata_unregister_ops() for the
* data gathering modules.
*
* Copyright IBM Corp. 2003, 2009
*
* Author: Gerald Schaefer <[email protected]>
*/
#define KMSG_COMPONENT "appldata"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/sched/stat.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/workqueue.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <asm/appldata.h>
#include <asm/vtimer.h>
#include <asm/smp.h>
#include "appldata.h"
#define APPLDATA_CPU_INTERVAL 10000 /* default (CPU) time for
sampling interval in
milliseconds */
#define TOD_MICRO 0x01000 /* nr. of TOD clock units
for 1 microsecond */
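/*
 * Worked example: the default interval of 10000ms converts to
 * 10000 * 1000 microseconds * 0x1000 TOD units per microsecond,
 * i.e. 10^7 * 4096 = 40,960,000,000 TOD clock units.
 */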
/*
* /proc entries (sysctl)
*/
static const char appldata_proc_name[APPLDATA_PROC_NAME_LENGTH] = "appldata";
static int appldata_timer_handler(struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos);
static int appldata_interval_handler(struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos);
static struct ctl_table_header *appldata_sysctl_header;
static struct ctl_table appldata_table[] = {
{
.procname = "timer",
.mode = S_IRUGO | S_IWUSR,
.proc_handler = appldata_timer_handler,
},
{
.procname = "interval",
.mode = S_IRUGO | S_IWUSR,
.proc_handler = appldata_interval_handler,
},
{ },
};
/*
* Timer
*/
static struct vtimer_list appldata_timer;
static DEFINE_SPINLOCK(appldata_timer_lock);
static int appldata_interval = APPLDATA_CPU_INTERVAL;
static int appldata_timer_active;
/*
* Work queue
*/
static struct workqueue_struct *appldata_wq;
static void appldata_work_fn(struct work_struct *work);
static DECLARE_WORK(appldata_work, appldata_work_fn);
/*
* Ops list
*/
static DEFINE_MUTEX(appldata_ops_mutex);
static LIST_HEAD(appldata_ops_list);
/*************************** timer, work, DIAG *******************************/
/*
* appldata_timer_function()
*
* schedule work and reschedule timer
*/
static void appldata_timer_function(unsigned long data)
{
queue_work(appldata_wq, (struct work_struct *) data);
}
/*
* appldata_work_fn()
*
* call data gathering function for each (active) module
*/
static void appldata_work_fn(struct work_struct *work)
{
struct list_head *lh;
struct appldata_ops *ops;
mutex_lock(&appldata_ops_mutex);
list_for_each(lh, &appldata_ops_list) {
ops = list_entry(lh, struct appldata_ops, list);
if (ops->active == 1) {
ops->callback(ops->data);
}
}
mutex_unlock(&appldata_ops_mutex);
}
static struct appldata_product_id appldata_id = {
.prod_nr = {0xD3, 0xC9, 0xD5, 0xE4,
0xE7, 0xD2, 0xD9}, /* "LINUXKR" */
.prod_fn = 0xD5D3, /* "NL" */
.version_nr = 0xF2F6, /* "26" */
.release_nr = 0xF0F1, /* "01" */
};
/*
* appldata_diag()
*
* prepare parameter list, issue DIAG 0xDC
*/
int appldata_diag(char record_nr, u16 function, unsigned long buffer,
u16 length, char *mod_lvl)
{
struct appldata_parameter_list *parm_list;
struct appldata_product_id *id;
int rc;
parm_list = kmalloc(sizeof(*parm_list), GFP_KERNEL);
id = kmemdup(&appldata_id, sizeof(appldata_id), GFP_KERNEL);
rc = -ENOMEM;
if (parm_list && id) {
id->record_nr = record_nr;
id->mod_lvl = (mod_lvl[0]) << 8 | mod_lvl[1];
rc = appldata_asm(parm_list, id, function,
(void *) buffer, length);
}
kfree(id);
kfree(parm_list);
return rc;
}
/************************ timer, work, DIAG <END> ****************************/
/****************************** /proc stuff **********************************/
#define APPLDATA_ADD_TIMER 0
#define APPLDATA_DEL_TIMER 1
#define APPLDATA_MOD_TIMER 2
/*
* __appldata_vtimer_setup()
*
* Add, delete or modify virtual timers on all online cpus.
* The caller needs to get the appldata_timer_lock spinlock.
*/
static void __appldata_vtimer_setup(int cmd)
{
u64 timer_interval = (u64) appldata_interval * 1000 * TOD_MICRO;
switch (cmd) {
case APPLDATA_ADD_TIMER:
if (appldata_timer_active)
break;
appldata_timer.expires = timer_interval;
add_virt_timer_periodic(&appldata_timer);
appldata_timer_active = 1;
break;
case APPLDATA_DEL_TIMER:
del_virt_timer(&appldata_timer);
if (!appldata_timer_active)
break;
appldata_timer_active = 0;
break;
case APPLDATA_MOD_TIMER:
if (!appldata_timer_active)
break;
mod_virt_timer_periodic(&appldata_timer, timer_interval);
}
}
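/*
 * Worked example for the conversion above: with the default
 * appldata_interval of 10000 ms, timer_interval becomes
 * 10000 * 1000 * 0x1000 = 40,960,000,000 TOD clock units, i.e. ten
 * seconds expressed in 1/4096-microsecond TOD units.
 */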
/*
* appldata_timer_handler()
*
* Start/Stop timer, show status of timer (0 = not active, 1 = active)
*/
static int
appldata_timer_handler(struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int timer_active = appldata_timer_active;
int rc;
struct ctl_table ctl_entry = {
.procname = ctl->procname,
.data = &timer_active,
.maxlen = sizeof(int),
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
};
rc = proc_douintvec_minmax(&ctl_entry, write, buffer, lenp, ppos);
if (rc < 0 || !write)
return rc;
spin_lock(&appldata_timer_lock);
if (timer_active)
__appldata_vtimer_setup(APPLDATA_ADD_TIMER);
else
__appldata_vtimer_setup(APPLDATA_DEL_TIMER);
spin_unlock(&appldata_timer_lock);
return 0;
}
/*
* appldata_interval_handler()
*
* Set (CPU) timer interval for collection of data (in milliseconds), show
* current timer interval.
*/
static int
appldata_interval_handler(struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int interval = appldata_interval;
int rc;
struct ctl_table ctl_entry = {
.procname = ctl->procname,
.data = &interval,
.maxlen = sizeof(int),
.extra1 = SYSCTL_ONE,
};
rc = proc_dointvec_minmax(&ctl_entry, write, buffer, lenp, ppos);
if (rc < 0 || !write)
return rc;
spin_lock(&appldata_timer_lock);
appldata_interval = interval;
__appldata_vtimer_setup(APPLDATA_MOD_TIMER);
spin_unlock(&appldata_timer_lock);
return 0;
}
/*
* appldata_generic_handler()
*
* Generic start/stop monitoring and DIAG, show status of
* monitoring (0 = not in process, 1 = in process)
*/
static int
appldata_generic_handler(struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct appldata_ops *ops = NULL, *tmp_ops;
struct list_head *lh;
int rc, found;
int active;
struct ctl_table ctl_entry = {
.data = &active,
.maxlen = sizeof(int),
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
};
found = 0;
mutex_lock(&appldata_ops_mutex);
list_for_each(lh, &appldata_ops_list) {
tmp_ops = list_entry(lh, struct appldata_ops, list);
if (&tmp_ops->ctl_table[0] == ctl) {
found = 1;
}
}
if (!found) {
mutex_unlock(&appldata_ops_mutex);
return -ENODEV;
}
ops = ctl->data;
if (!try_module_get(ops->owner)) { // protect this function
mutex_unlock(&appldata_ops_mutex);
return -ENODEV;
}
mutex_unlock(&appldata_ops_mutex);
active = ops->active;
rc = proc_douintvec_minmax(&ctl_entry, write, buffer, lenp, ppos);
if (rc < 0 || !write) {
module_put(ops->owner);
return rc;
}
mutex_lock(&appldata_ops_mutex);
if (active && (ops->active == 0)) {
// protect work queue callback
if (!try_module_get(ops->owner)) {
mutex_unlock(&appldata_ops_mutex);
module_put(ops->owner);
return -ENODEV;
}
ops->callback(ops->data); // init record
rc = appldata_diag(ops->record_nr,
APPLDATA_START_INTERVAL_REC,
(unsigned long) ops->data, ops->size,
ops->mod_lvl);
if (rc != 0) {
pr_err("Starting the data collection for %s "
"failed with rc=%d\n", ops->name, rc);
module_put(ops->owner);
} else
ops->active = 1;
} else if (!active && (ops->active == 1)) {
ops->active = 0;
rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC,
(unsigned long) ops->data, ops->size,
ops->mod_lvl);
if (rc != 0)
pr_err("Stopping the data collection for %s "
"failed with rc=%d\n", ops->name, rc);
module_put(ops->owner);
}
mutex_unlock(&appldata_ops_mutex);
module_put(ops->owner);
return 0;
}
/*************************** /proc stuff <END> *******************************/
/************************* module-ops management *****************************/
/*
* appldata_register_ops()
*
* update ops list, register /proc/sys entries
*/
int appldata_register_ops(struct appldata_ops *ops)
{
if (ops->size > APPLDATA_MAX_REC_SIZE)
return -EINVAL;
/* The last entry must be an empty one */
ops->ctl_table = kcalloc(2, sizeof(struct ctl_table), GFP_KERNEL);
if (!ops->ctl_table)
return -ENOMEM;
mutex_lock(&appldata_ops_mutex);
list_add(&ops->list, &appldata_ops_list);
mutex_unlock(&appldata_ops_mutex);
ops->ctl_table[0].procname = ops->name;
ops->ctl_table[0].mode = S_IRUGO | S_IWUSR;
ops->ctl_table[0].proc_handler = appldata_generic_handler;
ops->ctl_table[0].data = ops;
ops->sysctl_header = register_sysctl_sz(appldata_proc_name, ops->ctl_table, 1);
if (!ops->sysctl_header)
goto out;
return 0;
out:
mutex_lock(&appldata_ops_mutex);
list_del(&ops->list);
mutex_unlock(&appldata_ops_mutex);
kfree(ops->ctl_table);
return -ENOMEM;
}
/*
* appldata_unregister_ops()
*
* update ops list, unregister /proc entries, stop DIAG if necessary
*/
void appldata_unregister_ops(struct appldata_ops *ops)
{
mutex_lock(&appldata_ops_mutex);
list_del(&ops->list);
mutex_unlock(&appldata_ops_mutex);
unregister_sysctl_table(ops->sysctl_header);
kfree(ops->ctl_table);
}
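/*
 * Usage sketch for a data gathering module (illustrative; the record
 * number, data struct and callback are hypothetical, the field names
 * follow struct appldata_ops):
 *
 *	static struct appldata_ops my_ops = {
 *		.name      = "mem",		// -> /proc/sys/appldata/mem
 *		.record_nr = APPLDATA_RECORD_MEM_ID,
 *		.size      = sizeof(struct appldata_mem_data),
 *		.callback  = appldata_get_mem_data,
 *		.data      = &appldata_mem_data,
 *		.mod_lvl   = {0xF0, 0xF0},	// EBCDIC "00"
 *		.owner     = THIS_MODULE,
 *	};
 *
 * After appldata_register_ops(&my_ops), writing 1 to the new sysctl file
 * starts the DIAG 0xDC record and the work queue invokes ->callback(->data)
 * on every timer tick until the file is reset to 0 or
 * appldata_unregister_ops() is called.
 */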
/********************** module-ops management <END> **************************/
/******************************* init / exit *********************************/
/*
* appldata_init()
*
* init timer, register /proc entries
*/
static int __init appldata_init(void)
{
init_virt_timer(&appldata_timer);
appldata_timer.function = appldata_timer_function;
appldata_timer.data = (unsigned long) &appldata_work;
appldata_wq = alloc_ordered_workqueue("appldata", 0);
if (!appldata_wq)
return -ENOMEM;
appldata_sysctl_header = register_sysctl(appldata_proc_name, appldata_table);
return 0;
}
__initcall(appldata_init);
/**************************** init / exit <END> ******************************/
EXPORT_SYMBOL_GPL(appldata_register_ops);
EXPORT_SYMBOL_GPL(appldata_unregister_ops);
EXPORT_SYMBOL_GPL(appldata_diag);
#ifdef CONFIG_SWAP
EXPORT_SYMBOL_GPL(si_swapinfo);
#endif
EXPORT_SYMBOL_GPL(nr_threads);
EXPORT_SYMBOL_GPL(nr_running);
EXPORT_SYMBOL_GPL(nr_iowait);
| linux-master | arch/s390/appldata/appldata_base.c |
// SPDX-License-Identifier: GPL-2.0
/*
* handling privileged instructions
*
* Copyright IBM Corp. 2008, 2020
*
* Author(s): Carsten Otte <[email protected]>
* Christian Borntraeger <[email protected]>
*/
#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/mm_types.h>
#include <linux/pgtable.h>
#include <linux/io.h>
#include <asm/asm-offsets.h>
#include <asm/facility.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/page-states.h>
#include <asm/gmap.h>
#include <asm/ptrace.h>
#include <asm/sclp.h>
#include <asm/ap.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"
static int handle_ri(struct kvm_vcpu *vcpu)
{
vcpu->stat.instruction_ri++;
if (test_kvm_facility(vcpu->kvm, 64)) {
VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (lazy)");
vcpu->arch.sie_block->ecb3 |= ECB3_RI;
kvm_s390_retry_instr(vcpu);
return 0;
} else
return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
}
int kvm_s390_handle_aa(struct kvm_vcpu *vcpu)
{
if ((vcpu->arch.sie_block->ipa & 0xf) <= 4)
return handle_ri(vcpu);
else
return -EOPNOTSUPP;
}
static int handle_gs(struct kvm_vcpu *vcpu)
{
vcpu->stat.instruction_gs++;
if (test_kvm_facility(vcpu->kvm, 133)) {
VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (lazy)");
preempt_disable();
__ctl_set_bit(2, 4);
current->thread.gs_cb = (struct gs_cb *)&vcpu->run->s.regs.gscb;
restore_gs_cb(current->thread.gs_cb);
preempt_enable();
vcpu->arch.sie_block->ecb |= ECB_GS;
vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
vcpu->arch.gs_enabled = 1;
kvm_s390_retry_instr(vcpu);
return 0;
} else
return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
}
int kvm_s390_handle_e3(struct kvm_vcpu *vcpu)
{
int code = vcpu->arch.sie_block->ipb & 0xff;
if (code == 0x49 || code == 0x4d)
return handle_gs(vcpu);
else
return -EOPNOTSUPP;
}
/* Handle SCK (SET CLOCK) interception */
static int handle_set_clock(struct kvm_vcpu *vcpu)
{
struct kvm_s390_vm_tod_clock gtod = { 0 };
int rc;
u8 ar;
u64 op2;
vcpu->stat.instruction_sck++;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
op2 = kvm_s390_get_base_disp_s(vcpu, &ar);
if (op2 & 7) /* Operand must be on a doubleword boundary */
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, op2, ar, &gtod.tod, sizeof(gtod.tod));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", gtod.tod);
/*
* To set the TOD clock the kvm lock must be taken, but the vcpu lock
* is already held in handle_set_clock. The usual lock order is the
* opposite. As SCK is deprecated and should not be used in several
* cases, for example when the multiple epoch facility or TOD clock
* steering facility is installed (see Principles of Operation), a
* slow path can be used. If the lock can not be taken via try_lock,
* the instruction will be retried via -EAGAIN at a later point in
* time.
*/
	if (!kvm_s390_try_set_tod_clock(vcpu->kvm, &gtod)) {
kvm_s390_retry_instr(vcpu);
return -EAGAIN;
}
kvm_s390_set_psw_cc(vcpu, 0);
return 0;
}
static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
u64 operand2;
u32 address;
int rc;
u8 ar;
vcpu->stat.instruction_spx++;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);
/* must be word boundary */
if (operand2 & 3)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
/* get the value */
rc = read_guest(vcpu, operand2, ar, &address, sizeof(address));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
address &= 0x7fffe000u;
/*
* Make sure the new value is valid memory. We only need to check the
* first page, since address is 8k aligned and memory pieces are always
* at least 1MB aligned and have at least a size of 1MB.
*/
if (kvm_is_error_gpa(vcpu->kvm, address))
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
kvm_s390_set_prefix(vcpu, address);
trace_kvm_s390_handle_prefix(vcpu, 1, address);
return 0;
}
static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
u64 operand2;
u32 address;
int rc;
u8 ar;
vcpu->stat.instruction_stpx++;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);
/* must be word boundary */
if (operand2 & 3)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
address = kvm_s390_get_prefix(vcpu);
/* get the value */
rc = write_guest(vcpu, operand2, ar, &address, sizeof(address));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
VCPU_EVENT(vcpu, 3, "STPX: storing prefix 0x%x into 0x%llx", address, operand2);
trace_kvm_s390_handle_prefix(vcpu, 0, address);
return 0;
}
static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
u16 vcpu_id = vcpu->vcpu_id;
u64 ga;
int rc;
u8 ar;
vcpu->stat.instruction_stap++;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
ga = kvm_s390_get_base_disp_s(vcpu, &ar);
if (ga & 1)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
rc = write_guest(vcpu, ga, ar, &vcpu_id, sizeof(vcpu_id));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
VCPU_EVENT(vcpu, 3, "STAP: storing cpu address (%u) to 0x%llx", vcpu_id, ga);
trace_kvm_s390_handle_stap(vcpu, ga);
return 0;
}
int kvm_s390_skey_check_enable(struct kvm_vcpu *vcpu)
{
int rc;
trace_kvm_s390_skey_related_inst(vcpu);
/* Already enabled? */
if (vcpu->arch.skey_enabled)
return 0;
rc = s390_enable_skey();
VCPU_EVENT(vcpu, 3, "enabling storage keys for guest: %d", rc);
if (rc)
return rc;
if (kvm_s390_test_cpuflags(vcpu, CPUSTAT_KSS))
kvm_s390_clear_cpuflags(vcpu, CPUSTAT_KSS);
if (!vcpu->kvm->arch.use_skf)
vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
else
vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
vcpu->arch.skey_enabled = true;
return 0;
}
static int try_handle_skey(struct kvm_vcpu *vcpu)
{
int rc;
rc = kvm_s390_skey_check_enable(vcpu);
if (rc)
return rc;
if (vcpu->kvm->arch.use_skf) {
/* with storage-key facility, SIE interprets it for us */
kvm_s390_retry_instr(vcpu);
VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
return -EAGAIN;
}
return 0;
}
static int handle_iske(struct kvm_vcpu *vcpu)
{
unsigned long gaddr, vmaddr;
unsigned char key;
int reg1, reg2;
bool unlocked;
int rc;
vcpu->stat.instruction_iske++;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
rc = try_handle_skey(vcpu);
if (rc)
return rc != -EAGAIN ? rc : 0;
	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
gaddr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
gaddr = kvm_s390_logical_to_effective(vcpu, gaddr);
gaddr = kvm_s390_real_to_abs(vcpu, gaddr);
vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(gaddr));
if (kvm_is_error_hva(vmaddr))
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
retry:
unlocked = false;
mmap_read_lock(current->mm);
rc = get_guest_storage_key(current->mm, vmaddr, &key);
if (rc) {
rc = fixup_user_fault(current->mm, vmaddr,
FAULT_FLAG_WRITE, &unlocked);
if (!rc) {
mmap_read_unlock(current->mm);
goto retry;
}
}
mmap_read_unlock(current->mm);
if (rc == -EFAULT)
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
if (rc < 0)
return rc;
vcpu->run->s.regs.gprs[reg1] &= ~0xff;
vcpu->run->s.regs.gprs[reg1] |= key;
return 0;
}
static int handle_rrbe(struct kvm_vcpu *vcpu)
{
unsigned long vmaddr, gaddr;
int reg1, reg2;
bool unlocked;
int rc;
vcpu->stat.instruction_rrbe++;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
rc = try_handle_skey(vcpu);
if (rc)
return rc != -EAGAIN ? rc : 0;
	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
gaddr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
gaddr = kvm_s390_logical_to_effective(vcpu, gaddr);
gaddr = kvm_s390_real_to_abs(vcpu, gaddr);
vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(gaddr));
if (kvm_is_error_hva(vmaddr))
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
retry:
unlocked = false;
mmap_read_lock(current->mm);
rc = reset_guest_reference_bit(current->mm, vmaddr);
if (rc < 0) {
rc = fixup_user_fault(current->mm, vmaddr,
FAULT_FLAG_WRITE, &unlocked);
if (!rc) {
mmap_read_unlock(current->mm);
goto retry;
}
}
mmap_read_unlock(current->mm);
if (rc == -EFAULT)
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
if (rc < 0)
return rc;
kvm_s390_set_psw_cc(vcpu, rc);
return 0;
}
#define SSKE_NQ 0x8
#define SSKE_MR 0x4
#define SSKE_MC 0x2
#define SSKE_MB 0x1
static int handle_sske(struct kvm_vcpu *vcpu)
{
unsigned char m3 = vcpu->arch.sie_block->ipb >> 28;
unsigned long start, end;
unsigned char key, oldkey;
int reg1, reg2;
bool unlocked;
int rc;
vcpu->stat.instruction_sske++;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
rc = try_handle_skey(vcpu);
if (rc)
return rc != -EAGAIN ? rc : 0;
if (!test_kvm_facility(vcpu->kvm, 8))
m3 &= ~SSKE_MB;
if (!test_kvm_facility(vcpu->kvm, 10))
m3 &= ~(SSKE_MC | SSKE_MR);
if (!test_kvm_facility(vcpu->kvm, 14))
m3 &= ~SSKE_NQ;
	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
key = vcpu->run->s.regs.gprs[reg1] & 0xfe;
start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
start = kvm_s390_logical_to_effective(vcpu, start);
if (m3 & SSKE_MB) {
/* start already designates an absolute address */
end = (start + _SEGMENT_SIZE) & ~(_SEGMENT_SIZE - 1);
} else {
start = kvm_s390_real_to_abs(vcpu, start);
end = start + PAGE_SIZE;
}
while (start != end) {
unsigned long vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
unlocked = false;
if (kvm_is_error_hva(vmaddr))
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
mmap_read_lock(current->mm);
rc = cond_set_guest_storage_key(current->mm, vmaddr, key, &oldkey,
m3 & SSKE_NQ, m3 & SSKE_MR,
m3 & SSKE_MC);
if (rc < 0) {
rc = fixup_user_fault(current->mm, vmaddr,
FAULT_FLAG_WRITE, &unlocked);
rc = !rc ? -EAGAIN : rc;
}
mmap_read_unlock(current->mm);
if (rc == -EFAULT)
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
if (rc == -EAGAIN)
continue;
if (rc < 0)
return rc;
start += PAGE_SIZE;
}
if (m3 & (SSKE_MC | SSKE_MR)) {
if (m3 & SSKE_MB) {
/* skey in reg1 is unpredictable */
kvm_s390_set_psw_cc(vcpu, 3);
} else {
kvm_s390_set_psw_cc(vcpu, rc);
vcpu->run->s.regs.gprs[reg1] &= ~0xff00UL;
vcpu->run->s.regs.gprs[reg1] |= (u64) oldkey << 8;
}
}
if (m3 & SSKE_MB) {
if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT)
vcpu->run->s.regs.gprs[reg2] &= ~PAGE_MASK;
else
vcpu->run->s.regs.gprs[reg2] &= ~0xfffff000UL;
end = kvm_s390_logical_to_effective(vcpu, end);
vcpu->run->s.regs.gprs[reg2] |= end;
}
return 0;
}
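/*
 * Worked example (illustrative): m3 = SSKE_MB | SSKE_MR makes the loop
 * above walk all 4K pages from the start address to the end of its 1M
 * segment; because MR is combined with MB, the old key is unpredictable
 * and cc 3 is set instead of returning the previous key in reg1.
 */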
static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
{
vcpu->stat.instruction_ipte_interlock++;
if (psw_bits(vcpu->arch.sie_block->gpsw).pstate)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu->kvm));
kvm_s390_retry_instr(vcpu);
VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
return 0;
}
static int handle_test_block(struct kvm_vcpu *vcpu)
{
gpa_t addr;
int reg2;
vcpu->stat.instruction_tb++;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
addr = kvm_s390_logical_to_effective(vcpu, addr);
if (kvm_s390_check_low_addr_prot_real(vcpu, addr))
return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
addr = kvm_s390_real_to_abs(vcpu, addr);
if (kvm_is_error_gpa(vcpu->kvm, addr))
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
/*
* We don't expect errors on modern systems, and do not care
* about storage keys (yet), so let's just clear the page.
*/
if (kvm_clear_guest(vcpu->kvm, addr, PAGE_SIZE))
return -EFAULT;
kvm_s390_set_psw_cc(vcpu, 0);
vcpu->run->s.regs.gprs[0] = 0;
return 0;
}
static int handle_tpi(struct kvm_vcpu *vcpu)
{
struct kvm_s390_interrupt_info *inti;
unsigned long len;
u32 tpi_data[3];
int rc;
u64 addr;
u8 ar;
vcpu->stat.instruction_tpi++;
addr = kvm_s390_get_base_disp_s(vcpu, &ar);
if (addr & 3)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
if (!inti) {
kvm_s390_set_psw_cc(vcpu, 0);
return 0;
}
tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
tpi_data[1] = inti->io.io_int_parm;
tpi_data[2] = inti->io.io_int_word;
if (addr) {
/*
* Store the two-word I/O interruption code into the
* provided area.
*/
len = sizeof(tpi_data) - 4;
rc = write_guest(vcpu, addr, ar, &tpi_data, len);
if (rc) {
rc = kvm_s390_inject_prog_cond(vcpu, rc);
goto reinject_interrupt;
}
} else {
/*
* Store the three-word I/O interruption code into
* the appropriate lowcore area.
*/
len = sizeof(tpi_data);
if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) {
/* failed writes to the low core are not recoverable */
rc = -EFAULT;
goto reinject_interrupt;
}
}
/* irq was successfully handed to the guest */
kfree(inti);
kvm_s390_set_psw_cc(vcpu, 1);
return 0;
reinject_interrupt:
/*
* If we encounter a problem storing the interruption code, the
* instruction is suppressed from the guest's view: reinject the
* interrupt.
*/
if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) {
kfree(inti);
rc = -EFAULT;
}
/* don't set the cc, a pgm irq was injected or we drop to user space */
return rc ? -EFAULT : 0;
}
static int handle_tsch(struct kvm_vcpu *vcpu)
{
struct kvm_s390_interrupt_info *inti = NULL;
const u64 isc_mask = 0xffUL << 24; /* all iscs set */
vcpu->stat.instruction_tsch++;
/* a valid schid has at least one bit set */
if (vcpu->run->s.regs.gprs[1])
inti = kvm_s390_get_io_int(vcpu->kvm, isc_mask,
vcpu->run->s.regs.gprs[1]);
/*
* Prepare exit to userspace.
* We indicate whether we dequeued a pending I/O interrupt
* so that userspace can re-inject it if the instruction gets
* a program check. While this may re-order the pending I/O
* interrupts, this is no problem since the priority is kept
* intact.
*/
vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
vcpu->run->s390_tsch.dequeued = !!inti;
if (inti) {
vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
}
vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
kfree(inti);
return -EREMOTE;
}
static int handle_io_inst(struct kvm_vcpu *vcpu)
{
VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
if (vcpu->kvm->arch.css_support) {
/*
* Most I/O instructions will be handled by userspace.
* Exceptions are tpi and the interrupt portion of tsch.
*/
if (vcpu->arch.sie_block->ipa == 0xb236)
return handle_tpi(vcpu);
if (vcpu->arch.sie_block->ipa == 0xb235)
return handle_tsch(vcpu);
/* Handle in userspace. */
vcpu->stat.instruction_io_other++;
return -EOPNOTSUPP;
} else {
/*
* Set condition code 3 to stop the guest from issuing channel
* I/O instructions.
*/
kvm_s390_set_psw_cc(vcpu, 3);
return 0;
}
}
/*
* handle_pqap: Handling pqap interception
 * @vcpu: the vcpu that issued the pqap instruction
*
* We now support PQAP/AQIC instructions and we need to correctly
* answer the guest even if no dedicated driver's hook is available.
*
* The intercepting code calls a dedicated callback for this instruction
* if a driver did register one in the CRYPTO satellite of the
* SIE block.
*
 * If no callback is available, the queues are not available; return
 * response code 0x01 to the guest and set CC to 3.
 * Else return the response code returned by the callback.
*/
static int handle_pqap(struct kvm_vcpu *vcpu)
{
struct ap_queue_status status = {};
crypto_hook pqap_hook;
unsigned long reg0;
int ret;
uint8_t fc;
	/* Verify that the AP instructions are available */
if (!ap_instructions_available())
return -EOPNOTSUPP;
/* Verify that the guest is allowed to use AP instructions */
if (!(vcpu->arch.sie_block->eca & ECA_APIE))
return -EOPNOTSUPP;
/*
* The only possibly intercepted functions when AP instructions are
	 * available for the guest are AQIC and TAPQ with the t bit set.
	 * Since we do not set IC.3 (FIII), we currently only intercept
	 * the AQIC function code.
* Note: running nested under z/VM can result in intercepts for other
* function codes, e.g. PQAP(QCI). We do not support this and bail out.
*/
reg0 = vcpu->run->s.regs.gprs[0];
fc = (reg0 >> 24) & 0xff;
if (fc != 0x03)
return -EOPNOTSUPP;
/* PQAP instruction is allowed for guest kernel only */
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
/* Common PQAP instruction specification exceptions */
/* bits 41-47 must all be zeros */
if (reg0 & 0x007f0000UL)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	/* APFT not installed and T bit set */
if (!test_kvm_facility(vcpu->kvm, 15) && (reg0 & 0x00800000UL))
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	/* APXA not installed and APID >= 64 or APQI >= 16 */
if (!(vcpu->kvm->arch.crypto.crycbd & 0x02) && (reg0 & 0x0000c0f0UL))
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
/* AQIC function code specific exception */
/* facility 65 not present for AQIC function code */
if (!test_kvm_facility(vcpu->kvm, 65))
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
/*
* If the hook callback is registered, there will be a pointer to the
* hook function pointer in the kvm_s390_crypto structure. Lock the
* owner, retrieve the hook function pointer and call the hook.
*/
down_read(&vcpu->kvm->arch.crypto.pqap_hook_rwsem);
if (vcpu->kvm->arch.crypto.pqap_hook) {
pqap_hook = *vcpu->kvm->arch.crypto.pqap_hook;
ret = pqap_hook(vcpu);
if (!ret && vcpu->run->s.regs.gprs[1] & 0x00ff0000)
kvm_s390_set_psw_cc(vcpu, 3);
up_read(&vcpu->kvm->arch.crypto.pqap_hook_rwsem);
return ret;
}
up_read(&vcpu->kvm->arch.crypto.pqap_hook_rwsem);
/*
* A vfio_driver must register a hook.
* No hook means no driver to enable the SIE CRYCB and no queues.
* We send this response to the guest.
*/
status.response_code = 0x01;
memcpy(&vcpu->run->s.regs.gprs[1], &status, sizeof(status));
kvm_s390_set_psw_cc(vcpu, 3);
return 0;
}
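/*
 * Illustrative decoding of register 0 for the checks above (the value is
 * hypothetical): reg0 = 0x0000000003004105 yields fc = 0x03 (AQIC), bits
 * 41-47 (mask 0x007f0000) zero, APID = 0x41 and APQI = 0x05. Without APXA
 * (crycbd bit 0x02 unset), APID 0x41 trips the 0x0000c0f0 check
 * (APID >= 64) and a specification exception is injected before the hook
 * is ever consulted.
 */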
static int handle_stfl(struct kvm_vcpu *vcpu)
{
int rc;
unsigned int fac;
vcpu->stat.instruction_stfl++;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
/*
* We need to shift the lower 32 facility bits (bit 0-31) from a u64
* into a u32 memory representation. They will remain bits 0-31.
*/
fac = *vcpu->kvm->arch.model.fac_list >> 32;
rc = write_guest_lc(vcpu, offsetof(struct lowcore, stfl_fac_list),
&fac, sizeof(fac));
if (rc)
return rc;
VCPU_EVENT(vcpu, 3, "STFL: store facility list 0x%x", fac);
trace_kvm_s390_handle_stfl(vcpu, fac);
return 0;
}
#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL
int is_valid_psw(psw_t *psw)
{
if (psw->mask & PSW_MASK_UNASSIGNED)
return 0;
if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
if (psw->addr & ~PSW_ADDR_31)
return 0;
}
if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
return 0;
if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
return 0;
if (psw->addr & 1)
return 0;
return 1;
}
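/*
 * Worked examples (illustrative): in 31-bit mode (EA=0, BA=1) an address
 * of 0x80000000 has a bit above PSW_ADDR_31 set, so the PSW is rejected;
 * a mask with EA=1 but BA=0 is an invalid addressing mode combination and
 * is rejected as well, as is any odd instruction address.
 */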
int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
psw_compat_t new_psw;
u64 addr;
int rc;
u8 ar;
vcpu->stat.instruction_lpsw++;
if (gpsw->mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
addr = kvm_s390_get_base_disp_s(vcpu, &ar);
if (addr & 7)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
if (!(new_psw.mask & PSW32_MASK_BASE))
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
if (!is_valid_psw(gpsw))
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
return 0;
}
static int handle_lpswe(struct kvm_vcpu *vcpu)
{
psw_t new_psw;
u64 addr;
int rc;
u8 ar;
vcpu->stat.instruction_lpswe++;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
addr = kvm_s390_get_base_disp_s(vcpu, &ar);
if (addr & 7)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
vcpu->arch.sie_block->gpsw = new_psw;
if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
return 0;
}
static int handle_stidp(struct kvm_vcpu *vcpu)
{
u64 stidp_data = vcpu->kvm->arch.model.cpuid;
u64 operand2;
int rc;
u8 ar;
vcpu->stat.instruction_stidp++;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);
if (operand2 & 7)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
rc = write_guest(vcpu, operand2, ar, &stidp_data, sizeof(stidp_data));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
VCPU_EVENT(vcpu, 3, "STIDP: store cpu id 0x%llx", stidp_data);
return 0;
}
static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
int cpus = 0;
int n;
cpus = atomic_read(&vcpu->kvm->online_vcpus);
/* deal with other level 3 hypervisors */
if (stsi(mem, 3, 2, 2))
mem->count = 0;
if (mem->count < 8)
mem->count++;
for (n = mem->count - 1; n > 0 ; n--)
memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));
memset(&mem->vm[0], 0, sizeof(mem->vm[0]));
mem->vm[0].cpus_total = cpus;
mem->vm[0].cpus_configured = cpus;
mem->vm[0].cpus_standby = 0;
mem->vm[0].cpus_reserved = 0;
mem->vm[0].caf = 1000;
memcpy(mem->vm[0].name, "KVMguest", 8);
ASCEBC(mem->vm[0].name, 8);
memcpy(mem->vm[0].cpi, "KVM/Linux ", 16);
ASCEBC(mem->vm[0].cpi, 16);
}
static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, u8 ar,
u8 fc, u8 sel1, u16 sel2)
{
vcpu->run->exit_reason = KVM_EXIT_S390_STSI;
vcpu->run->s390_stsi.addr = addr;
vcpu->run->s390_stsi.ar = ar;
vcpu->run->s390_stsi.fc = fc;
vcpu->run->s390_stsi.sel1 = sel1;
vcpu->run->s390_stsi.sel2 = sel2;
}
static int handle_stsi(struct kvm_vcpu *vcpu)
{
int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
unsigned long mem = 0;
u64 operand2;
int rc = 0;
u8 ar;
vcpu->stat.instruction_stsi++;
VCPU_EVENT(vcpu, 3, "STSI: fc: %u sel1: %u sel2: %u", fc, sel1, sel2);
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	/* Bail out on forbidden function codes */
if (fc > 3 && fc != 15)
goto out_no_data;
/*
* fc 15 is provided only with
* - PTF/CPU topology support through facility 15
* - KVM_CAP_S390_USER_STSI
*/
if (fc == 15 && (!test_kvm_facility(vcpu->kvm, 11) ||
!vcpu->kvm->arch.user_stsi))
goto out_no_data;
if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
|| vcpu->run->s.regs.gprs[1] & 0xffff0000)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
if (fc == 0) {
vcpu->run->s.regs.gprs[0] = 3 << 28;
kvm_s390_set_psw_cc(vcpu, 0);
return 0;
}
operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);
if (!kvm_s390_pv_cpu_is_protected(vcpu) && (operand2 & 0xfff))
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
switch (fc) {
case 1: /* same handling for 1 and 2 */
case 2:
mem = get_zeroed_page(GFP_KERNEL_ACCOUNT);
if (!mem)
goto out_no_data;
if (stsi((void *) mem, fc, sel1, sel2))
goto out_no_data;
break;
case 3:
if (sel1 != 2 || sel2 != 2)
goto out_no_data;
mem = get_zeroed_page(GFP_KERNEL_ACCOUNT);
if (!mem)
goto out_no_data;
handle_stsi_3_2_2(vcpu, (void *) mem);
break;
case 15: /* fc 15 is fully handled in userspace */
insert_stsi_usr_data(vcpu, operand2, ar, fc, sel1, sel2);
trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
return -EREMOTE;
}
if (kvm_s390_pv_cpu_is_protected(vcpu)) {
memcpy(sida_addr(vcpu->arch.sie_block), (void *)mem, PAGE_SIZE);
rc = 0;
} else {
rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE);
}
if (rc) {
rc = kvm_s390_inject_prog_cond(vcpu, rc);
goto out;
}
if (vcpu->kvm->arch.user_stsi) {
insert_stsi_usr_data(vcpu, operand2, ar, fc, sel1, sel2);
rc = -EREMOTE;
}
trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
free_page(mem);
kvm_s390_set_psw_cc(vcpu, 0);
vcpu->run->s.regs.gprs[0] = 0;
return rc;
out_no_data:
kvm_s390_set_psw_cc(vcpu, 3);
out:
free_page(mem);
return rc;
}
int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
switch (vcpu->arch.sie_block->ipa & 0x00ff) {
case 0x02:
return handle_stidp(vcpu);
case 0x04:
return handle_set_clock(vcpu);
case 0x10:
return handle_set_prefix(vcpu);
case 0x11:
return handle_store_prefix(vcpu);
case 0x12:
return handle_store_cpu_address(vcpu);
case 0x14:
return kvm_s390_handle_vsie(vcpu);
case 0x21:
case 0x50:
return handle_ipte_interlock(vcpu);
case 0x29:
return handle_iske(vcpu);
case 0x2a:
return handle_rrbe(vcpu);
case 0x2b:
return handle_sske(vcpu);
case 0x2c:
return handle_test_block(vcpu);
case 0x30:
case 0x31:
case 0x32:
case 0x33:
case 0x34:
case 0x35:
case 0x36:
case 0x37:
case 0x38:
case 0x39:
case 0x3a:
case 0x3b:
case 0x3c:
case 0x5f:
case 0x74:
case 0x76:
return handle_io_inst(vcpu);
case 0x56:
return handle_sthyi(vcpu);
case 0x7d:
return handle_stsi(vcpu);
case 0xaf:
return handle_pqap(vcpu);
case 0xb1:
return handle_stfl(vcpu);
case 0xb2:
return handle_lpswe(vcpu);
default:
return -EOPNOTSUPP;
}
}
static int handle_epsw(struct kvm_vcpu *vcpu)
{
int reg1, reg2;
vcpu->stat.instruction_epsw++;
	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
/* This basically extracts the mask half of the psw. */
vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
if (reg2) {
vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
vcpu->run->s.regs.gprs[reg2] |=
vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
}
return 0;
}
#define PFMF_RESERVED 0xfffc0101UL
#define PFMF_SK 0x00020000UL
#define PFMF_CF 0x00010000UL
#define PFMF_UI 0x00008000UL
#define PFMF_FSC 0x00007000UL
#define PFMF_NQ 0x00000800UL
#define PFMF_MR 0x00000400UL
#define PFMF_MC 0x00000200UL
#define PFMF_KEY 0x000000feUL
static int handle_pfmf(struct kvm_vcpu *vcpu)
{
bool mr = false, mc = false, nq;
int reg1, reg2;
unsigned long start, end;
unsigned char key;
vcpu->stat.instruction_pfmf++;
	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
if (!test_kvm_facility(vcpu->kvm, 8))
return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
/* Only provide non-quiescing support if enabled for the guest */
if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ &&
!test_kvm_facility(vcpu->kvm, 14))
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
/* Only provide conditional-SSKE support if enabled for the guest */
if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK &&
test_kvm_facility(vcpu->kvm, 10)) {
mr = vcpu->run->s.regs.gprs[reg1] & PFMF_MR;
mc = vcpu->run->s.regs.gprs[reg1] & PFMF_MC;
}
nq = vcpu->run->s.regs.gprs[reg1] & PFMF_NQ;
key = vcpu->run->s.regs.gprs[reg1] & PFMF_KEY;
start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
start = kvm_s390_logical_to_effective(vcpu, start);
if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
if (kvm_s390_check_low_addr_prot_real(vcpu, start))
return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
}
switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
case 0x00000000:
/* only 4k frames specify a real address */
start = kvm_s390_real_to_abs(vcpu, start);
end = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1);
break;
case 0x00001000:
end = (start + _SEGMENT_SIZE) & ~(_SEGMENT_SIZE - 1);
break;
case 0x00002000:
		/*
		 * only support 2G frame size if EDAT2 is available and we
		 * are not in 24-bit addressing mode
		 */
if (!test_kvm_facility(vcpu->kvm, 78) ||
psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_24BIT)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
end = (start + _REGION3_SIZE) & ~(_REGION3_SIZE - 1);
break;
default:
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
}
while (start != end) {
unsigned long vmaddr;
bool unlocked = false;
/* Translate guest address to host address */
vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
if (kvm_is_error_hva(vmaddr))
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
if (kvm_clear_guest(vcpu->kvm, start, PAGE_SIZE))
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
}
if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
int rc = kvm_s390_skey_check_enable(vcpu);
if (rc)
return rc;
mmap_read_lock(current->mm);
rc = cond_set_guest_storage_key(current->mm, vmaddr,
key, NULL, nq, mr, mc);
if (rc < 0) {
rc = fixup_user_fault(current->mm, vmaddr,
FAULT_FLAG_WRITE, &unlocked);
rc = !rc ? -EAGAIN : rc;
}
mmap_read_unlock(current->mm);
if (rc == -EFAULT)
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
if (rc == -EAGAIN)
continue;
if (rc < 0)
return rc;
}
start += PAGE_SIZE;
}
if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT) {
vcpu->run->s.regs.gprs[reg2] = end;
} else {
vcpu->run->s.regs.gprs[reg2] &= ~0xffffffffUL;
end = kvm_s390_logical_to_effective(vcpu, end);
vcpu->run->s.regs.gprs[reg2] |= end;
}
}
return 0;
}
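/*
 * Worked example (illustrative): gprs[reg1] = PFMF_CF | PFMF_SK | 0x30
 * with FSC = 0 clears the single 4K frame designated by gprs[reg2] and
 * sets its storage key to 0x30 (ACC = 3, fetch-protection bit clear);
 * because FSC is zero, gprs[reg2] is left unmodified.
 */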
/*
* Must be called with relevant read locks held (kvm->mm->mmap_lock, kvm->srcu)
*/
static inline int __do_essa(struct kvm_vcpu *vcpu, const int orc)
{
int r1, r2, nappended, entries;
unsigned long gfn, hva, res, pgstev, ptev;
unsigned long *cbrlo;
/*
* We don't need to set SD.FPF.SK to 1 here, because if we have a
* machine check here we either handle it or crash
*/
kvm_s390_get_regs_rre(vcpu, &r1, &r2);
gfn = vcpu->run->s.regs.gprs[r2] >> PAGE_SHIFT;
hva = gfn_to_hva(vcpu->kvm, gfn);
entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
if (kvm_is_error_hva(hva))
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
nappended = pgste_perform_essa(vcpu->kvm->mm, hva, orc, &ptev, &pgstev);
if (nappended < 0) {
res = orc ? 0x10 : 0;
vcpu->run->s.regs.gprs[r1] = res; /* Exception Indication */
return 0;
}
res = (pgstev & _PGSTE_GPS_USAGE_MASK) >> 22;
/*
* Set the block-content state part of the result. 0 means resident, so
* nothing to do if the page is valid. 2 is for preserved pages
* (non-present and non-zero), and 3 for zero pages (non-present and
* zero).
*/
if (ptev & _PAGE_INVALID) {
res |= 2;
if (pgstev & _PGSTE_GPS_ZERO)
res |= 1;
}
if (pgstev & _PGSTE_GPS_NODAT)
res |= 0x20;
vcpu->run->s.regs.gprs[r1] = res;
/*
* It is possible that all the normal 511 slots were full, in which case
* we will now write in the 512th slot, which is reserved for host use.
* In both cases we let the normal essa handling code process all the
* slots, including the reserved one, if needed.
*/
if (nappended > 0) {
cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo & PAGE_MASK);
cbrlo[entries] = gfn << PAGE_SHIFT;
}
if (orc) {
struct kvm_memory_slot *ms = gfn_to_memslot(vcpu->kvm, gfn);
/* Increment only if we are really flipping the bit */
if (ms && !test_and_set_bit(gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
atomic64_inc(&vcpu->kvm->arch.cmma_dirty_pages);
}
return nappended;
}
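/*
 * Worked example for the result assembled above: the PGSTE usage state
 * lands in the low bits of res; a non-resident page then sets bit value 2
 * and, if it is also a zero page, bit value 1, so a preserved page reports
 * usage | 2 and a zero page usage | 3. With orc != 0, a NODAT page
 * additionally sets 0x20.
 */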
static int handle_essa(struct kvm_vcpu *vcpu)
{
/* entries expected to be 1FF */
int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
unsigned long *cbrlo;
struct gmap *gmap;
int i, orc;
VCPU_EVENT(vcpu, 4, "ESSA: release %d pages", entries);
gmap = vcpu->arch.gmap;
vcpu->stat.instruction_essa++;
if (!vcpu->kvm->arch.use_cmma)
return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
/* Check for invalid operation request code */
orc = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
/* ORCs 0-6 are always valid */
if (orc > (test_kvm_facility(vcpu->kvm, 147) ? ESSA_SET_STABLE_NODAT
: ESSA_SET_STABLE_IF_RESIDENT))
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
if (!vcpu->kvm->arch.migration_mode) {
/*
* CMMA is enabled in the KVM settings, but is disabled in
* the SIE block and in the mm_context, and we are not doing
* a migration. Enable CMMA in the mm_context.
* Since we need to take a write lock to write to the context
* to avoid races with storage keys handling, we check if the
* value really needs to be written to; if the value is
* already correct, we do nothing and avoid the lock.
*/
if (vcpu->kvm->mm->context.uses_cmm == 0) {
mmap_write_lock(vcpu->kvm->mm);
vcpu->kvm->mm->context.uses_cmm = 1;
mmap_write_unlock(vcpu->kvm->mm);
}
/*
* If we are here, we are supposed to have CMMA enabled in
* the SIE block. Enabling CMMA works on a per-CPU basis,
* while the context use_cmma flag is per process.
* It's possible that the context flag is enabled and the
* SIE flag is not, so we set the flag always; if it was
* already set, nothing changes, otherwise we enable it
* on this CPU too.
*/
vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
/* Retry the ESSA instruction */
kvm_s390_retry_instr(vcpu);
} else {
int srcu_idx;
mmap_read_lock(vcpu->kvm->mm);
srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
i = __do_essa(vcpu, orc);
srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
mmap_read_unlock(vcpu->kvm->mm);
if (i < 0)
return i;
/* Account for the possible extra cbrl entry */
entries += i;
}
vcpu->arch.sie_block->cbrlo &= PAGE_MASK; /* reset nceo */
cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
mmap_read_lock(gmap->mm);
for (i = 0; i < entries; ++i)
__gmap_zap(gmap, cbrlo[i]);
mmap_read_unlock(gmap->mm);
return 0;
}
int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
switch (vcpu->arch.sie_block->ipa & 0x00ff) {
case 0x8a:
case 0x8e:
case 0x8f:
return handle_ipte_interlock(vcpu);
case 0x8d:
return handle_epsw(vcpu);
case 0xab:
return handle_essa(vcpu);
case 0xaf:
return handle_pfmf(vcpu);
default:
return -EOPNOTSUPP;
}
}
int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
{
int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
int reg, rc, nr_regs;
u32 ctl_array[16];
u64 ga;
u8 ar;
vcpu->stat.instruction_lctl++;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
ga = kvm_s390_get_base_disp_rs(vcpu, &ar);
if (ga & 3)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
VCPU_EVENT(vcpu, 4, "LCTL: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);
nr_regs = ((reg3 - reg1) & 0xf) + 1;
rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
reg = reg1;
nr_regs = 0;
do {
vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
vcpu->arch.sie_block->gcr[reg] |= ctl_array[nr_regs++];
if (reg == reg3)
break;
reg = (reg + 1) % 16;
} while (1);
kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
return 0;
}
int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
{
int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
int reg, rc, nr_regs;
u32 ctl_array[16];
u64 ga;
u8 ar;
vcpu->stat.instruction_stctl++;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
ga = kvm_s390_get_base_disp_rs(vcpu, &ar);
if (ga & 3)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
VCPU_EVENT(vcpu, 4, "STCTL r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga);
reg = reg1;
nr_regs = 0;
do {
ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
if (reg == reg3)
break;
reg = (reg + 1) % 16;
} while (1);
rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}
static int handle_lctlg(struct kvm_vcpu *vcpu)
{
int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
int reg, rc, nr_regs;
u64 ctl_array[16];
u64 ga;
u8 ar;
vcpu->stat.instruction_lctlg++;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);
if (ga & 7)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
VCPU_EVENT(vcpu, 4, "LCTLG: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);
nr_regs = ((reg3 - reg1) & 0xf) + 1;
rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
reg = reg1;
nr_regs = 0;
do {
vcpu->arch.sie_block->gcr[reg] = ctl_array[nr_regs++];
if (reg == reg3)
break;
reg = (reg + 1) % 16;
} while (1);
kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
return 0;
}
static int handle_stctg(struct kvm_vcpu *vcpu)
{
int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
int reg, rc, nr_regs;
u64 ctl_array[16];
u64 ga;
u8 ar;
vcpu->stat.instruction_stctg++;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);
if (ga & 7)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
VCPU_EVENT(vcpu, 4, "STCTG r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga);
reg = reg1;
nr_regs = 0;
do {
ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
if (reg == reg3)
break;
reg = (reg + 1) % 16;
} while (1);
rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}
int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
{
switch (vcpu->arch.sie_block->ipb & 0x000000ff) {
case 0x25:
return handle_stctg(vcpu);
case 0x2f:
return handle_lctlg(vcpu);
case 0x60:
case 0x61:
case 0x62:
return handle_ri(vcpu);
default:
return -EOPNOTSUPP;
}
}
static int handle_tprot(struct kvm_vcpu *vcpu)
{
u64 address, operand2;
unsigned long gpa;
u8 access_key;
bool writable;
int ret, cc;
u8 ar;
vcpu->stat.instruction_tprot++;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
kvm_s390_get_base_disp_sse(vcpu, &address, &operand2, &ar, NULL);
access_key = (operand2 & 0xf0) >> 4;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
ipte_lock(vcpu->kvm);
ret = guest_translate_address_with_key(vcpu, address, ar, &gpa,
GACC_STORE, access_key);
if (ret == 0) {
gfn_to_hva_prot(vcpu->kvm, gpa_to_gfn(gpa), &writable);
} else if (ret == PGM_PROTECTION) {
writable = false;
/* Write protected? Try again with read-only... */
ret = guest_translate_address_with_key(vcpu, address, ar, &gpa,
GACC_FETCH, access_key);
}
if (ret >= 0) {
cc = -1;
/* Fetching permitted; storing permitted */
if (ret == 0 && writable)
cc = 0;
/* Fetching permitted; storing not permitted */
else if (ret == 0 && !writable)
cc = 1;
/* Fetching not permitted; storing not permitted */
else if (ret == PGM_PROTECTION)
cc = 2;
/* Translation not available */
else if (ret != PGM_ADDRESSING && ret != PGM_TRANSLATION_SPEC)
cc = 3;
if (cc != -1) {
kvm_s390_set_psw_cc(vcpu, cc);
ret = 0;
} else {
ret = kvm_s390_inject_program_int(vcpu, ret);
}
}
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
ipte_unlock(vcpu->kvm);
return ret;
}
int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
switch (vcpu->arch.sie_block->ipa & 0x00ff) {
case 0x01:
return handle_tprot(vcpu);
default:
return -EOPNOTSUPP;
}
}
static int handle_sckpf(struct kvm_vcpu *vcpu)
{
u32 value;
vcpu->stat.instruction_sckpf++;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
return kvm_s390_inject_program_int(vcpu,
PGM_SPECIFICATION);
value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
vcpu->arch.sie_block->todpr = value;
return 0;
}
static int handle_ptff(struct kvm_vcpu *vcpu)
{
vcpu->stat.instruction_ptff++;
/* we don't emulate any control instructions yet */
kvm_s390_set_psw_cc(vcpu, 3);
return 0;
}
int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
switch (vcpu->arch.sie_block->ipa & 0x00ff) {
case 0x04:
return handle_ptff(vcpu);
case 0x07:
return handle_sckpf(vcpu);
default:
return -EOPNOTSUPP;
}
}
| linux-master | arch/s390/kvm/priv.c |
// SPDX-License-Identifier: GPL-2.0
/*
* handling kvm guest interrupts
*
* Copyright IBM Corp. 2008, 2020
*
* Author(s): Carsten Otte <[email protected]>
*/
#define KMSG_COMPONENT "kvm-s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/mmu_context.h>
#include <linux/nospec.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
#include <asm/dis.h>
#include <linux/uaccess.h>
#include <asm/sclp.h>
#include <asm/isc.h>
#include <asm/gmap.h>
#include <asm/switch_to.h>
#include <asm/nmi.h>
#include <asm/airq.h>
#include <asm/tpi.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"
#include "pci.h"
#define PFAULT_INIT 0x0600
#define PFAULT_DONE 0x0680
#define VIRTIO_PARAM 0x0d00
static struct kvm_s390_gib *gib;
/* handle external calls via sigp interpretation facility */
static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id)
{
int c, scn;
if (!kvm_s390_test_cpuflags(vcpu, CPUSTAT_ECALL_PEND))
return 0;
BUG_ON(!kvm_s390_use_sca_entries());
read_lock(&vcpu->kvm->arch.sca_lock);
if (vcpu->kvm->arch.use_esca) {
struct esca_block *sca = vcpu->kvm->arch.sca;
union esca_sigp_ctrl sigp_ctrl =
sca->cpu[vcpu->vcpu_id].sigp_ctrl;
c = sigp_ctrl.c;
scn = sigp_ctrl.scn;
} else {
struct bsca_block *sca = vcpu->kvm->arch.sca;
union bsca_sigp_ctrl sigp_ctrl =
sca->cpu[vcpu->vcpu_id].sigp_ctrl;
c = sigp_ctrl.c;
scn = sigp_ctrl.scn;
}
read_unlock(&vcpu->kvm->arch.sca_lock);
if (src_id)
*src_id = scn;
return c;
}
static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
{
int expect, rc;
BUG_ON(!kvm_s390_use_sca_entries());
read_lock(&vcpu->kvm->arch.sca_lock);
if (vcpu->kvm->arch.use_esca) {
struct esca_block *sca = vcpu->kvm->arch.sca;
union esca_sigp_ctrl *sigp_ctrl =
&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
union esca_sigp_ctrl new_val = {0}, old_val;
old_val = READ_ONCE(*sigp_ctrl);
new_val.scn = src_id;
new_val.c = 1;
old_val.c = 0;
expect = old_val.value;
rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
} else {
struct bsca_block *sca = vcpu->kvm->arch.sca;
union bsca_sigp_ctrl *sigp_ctrl =
&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
union bsca_sigp_ctrl new_val = {0}, old_val;
old_val = READ_ONCE(*sigp_ctrl);
new_val.scn = src_id;
new_val.c = 1;
old_val.c = 0;
expect = old_val.value;
rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
}
read_unlock(&vcpu->kvm->arch.sca_lock);
if (rc != expect) {
/* another external call is pending */
return -EBUSY;
}
kvm_s390_set_cpuflags(vcpu, CPUSTAT_ECALL_PEND);
return 0;
}
static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
{
int rc, expect;
if (!kvm_s390_use_sca_entries())
return;
kvm_s390_clear_cpuflags(vcpu, CPUSTAT_ECALL_PEND);
read_lock(&vcpu->kvm->arch.sca_lock);
if (vcpu->kvm->arch.use_esca) {
struct esca_block *sca = vcpu->kvm->arch.sca;
union esca_sigp_ctrl *sigp_ctrl =
&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
union esca_sigp_ctrl old;
old = READ_ONCE(*sigp_ctrl);
expect = old.value;
rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
} else {
struct bsca_block *sca = vcpu->kvm->arch.sca;
union bsca_sigp_ctrl *sigp_ctrl =
&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
union bsca_sigp_ctrl old;
old = READ_ONCE(*sigp_ctrl);
expect = old.value;
rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
}
read_unlock(&vcpu->kvm->arch.sca_lock);
WARN_ON(rc != expect); /* cannot clear? */
}
int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}
static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
{
return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
}
static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
{
return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
}
static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
return psw_extint_disabled(vcpu) &&
psw_ioint_disabled(vcpu) &&
psw_mchk_disabled(vcpu);
}
static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
{
if (psw_extint_disabled(vcpu) ||
!(vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SUBMASK))
return 0;
if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu))
/* No timer interrupts when single stepping */
return 0;
return 1;
}
static int ckc_irq_pending(struct kvm_vcpu *vcpu)
{
const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
const u64 ckc = vcpu->arch.sie_block->ckc;
if (vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SIGN) {
if ((s64)ckc >= (s64)now)
return 0;
} else if (ckc >= now) {
return 0;
}
return ckc_interrupts_enabled(vcpu);
}
static int cpu_timer_interrupts_enabled(struct kvm_vcpu *vcpu)
{
return !psw_extint_disabled(vcpu) &&
(vcpu->arch.sie_block->gcr[0] & CR0_CPU_TIMER_SUBMASK);
}
static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu)
{
if (!cpu_timer_interrupts_enabled(vcpu))
return 0;
return kvm_s390_get_cpu_timer(vcpu) >> 63;
}
static uint64_t isc_to_isc_bits(int isc)
{
return (0x80 >> isc) << 24;
}
static inline u32 isc_to_int_word(u8 isc)
{
return ((u32)isc << 27) | 0x80000000;
}
static inline u8 int_word_to_isc(u32 int_word)
{
return (int_word & 0x38000000) >> 27;
}
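/*
 * Worked example: for isc = 3, isc_to_isc_bits() yields
 * (0x80 >> 3) << 24 = 0x10000000 (the CR6 subclass mask bit) and
 * isc_to_int_word() yields (3 << 27) | 0x80000000 = 0x98000000;
 * int_word_to_isc(0x98000000) recovers 3 again.
 */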
/*
* To use atomic bitmap functions, we have to provide a bitmap address
* that is u64 aligned. However, the ipm might be u32 aligned.
* Therefore, we logically start the bitmap at the very beginning of the
* struct and fixup the bit number.
*/
#define IPM_BIT_OFFSET (offsetof(struct kvm_s390_gisa, ipm) * BITS_PER_BYTE)
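/*
 * Example: the first long word of the GISA holds the 4-byte next alert
 * address followed by the ipm byte, so IPM_BIT_OFFSET is 32 and
 * gisa_set_ipm_gisc(gisa, 5) below sets inverted (big-endian) bit 32 + 5
 * relative to the start of the structure.
 */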
/**
* gisa_set_iam - change the GISA interruption alert mask
*
* @gisa: gisa to operate on
* @iam: new IAM value to use
*
* Change the IAM atomically with the next alert address and the IPM
* of the GISA if the GISA is not part of the GIB alert list. All three
* fields are located in the first long word of the GISA.
*
* Returns: 0 on success
* -EBUSY in case the gisa is part of the alert list
*/
static inline int gisa_set_iam(struct kvm_s390_gisa *gisa, u8 iam)
{
u64 word, _word;
do {
word = READ_ONCE(gisa->u64.word[0]);
if ((u64)gisa != word >> 32)
return -EBUSY;
_word = (word & ~0xffUL) | iam;
} while (cmpxchg(&gisa->u64.word[0], word, _word) != word);
return 0;
}
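/*
 * For reference, the first long word manipulated here and in
 * gisa_clear_ipm()/gisa_get_ipm_or_restore_iam() below decomposes as
 *
 *	word >> 32		next alert address
 *	(word >> 24) & 0xff	IPM (interruption pending mask)
 *	word & 0xff		IAM (interruption alert mask)
 *
 * which is why a single cmpxchg() can update the IAM while verifying that
 * the GISA is not queued on the GIB alert list.
 */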
/**
* gisa_clear_ipm - clear the GISA interruption pending mask
*
* @gisa: gisa to operate on
*
* Clear the IPM atomically with the next alert address and the IAM
* of the GISA unconditionally. All three fields are located in the
* first long word of the GISA.
*/
static inline void gisa_clear_ipm(struct kvm_s390_gisa *gisa)
{
u64 word, _word;
do {
word = READ_ONCE(gisa->u64.word[0]);
_word = word & ~(0xffUL << 24);
} while (cmpxchg(&gisa->u64.word[0], word, _word) != word);
}
/**
* gisa_get_ipm_or_restore_iam - return IPM or restore GISA IAM
*
* @gi: gisa interrupt struct to work on
*
* Atomically restores the interruption alert mask if none of the
* relevant ISCs are pending and return the IPM.
*
* Returns: the relevant pending ISCs
*/
static inline u8 gisa_get_ipm_or_restore_iam(struct kvm_s390_gisa_interrupt *gi)
{
u8 pending_mask, alert_mask;
u64 word, _word;
do {
word = READ_ONCE(gi->origin->u64.word[0]);
alert_mask = READ_ONCE(gi->alert.mask);
pending_mask = (u8)(word >> 24) & alert_mask;
if (pending_mask)
return pending_mask;
_word = (word & ~0xffUL) | alert_mask;
} while (cmpxchg(&gi->origin->u64.word[0], word, _word) != word);
return 0;
}
static inline int gisa_in_alert_list(struct kvm_s390_gisa *gisa)
{
return READ_ONCE(gisa->next_alert) != (u32)virt_to_phys(gisa);
}
static inline void gisa_set_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
{
set_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
}
static inline u8 gisa_get_ipm(struct kvm_s390_gisa *gisa)
{
return READ_ONCE(gisa->ipm);
}
static inline int gisa_tac_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
{
return test_and_clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
}
static inline unsigned long pending_irqs_no_gisa(struct kvm_vcpu *vcpu)
{
unsigned long pending = vcpu->kvm->arch.float_int.pending_irqs |
vcpu->arch.local_int.pending_irqs;
pending &= ~vcpu->kvm->arch.float_int.masked_irqs;
return pending;
}
static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu)
{
struct kvm_s390_gisa_interrupt *gi = &vcpu->kvm->arch.gisa_int;
unsigned long pending_mask;
pending_mask = pending_irqs_no_gisa(vcpu);
if (gi->origin)
pending_mask |= gisa_get_ipm(gi->origin) << IRQ_PEND_IO_ISC_7;
return pending_mask;
}
static inline int isc_to_irq_type(unsigned long isc)
{
return IRQ_PEND_IO_ISC_0 - isc;
}
static inline int irq_type_to_isc(unsigned long irq_type)
{
return IRQ_PEND_IO_ISC_0 - irq_type;
}
static unsigned long disable_iscs(struct kvm_vcpu *vcpu,
unsigned long active_mask)
{
int i;
for (i = 0; i <= MAX_ISC; i++)
if (!(vcpu->arch.sie_block->gcr[6] & isc_to_isc_bits(i)))
active_mask &= ~(1UL << (isc_to_irq_type(i)));
return active_mask;
}
static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
{
unsigned long active_mask;
active_mask = pending_irqs(vcpu);
if (!active_mask)
return 0;
if (psw_extint_disabled(vcpu))
active_mask &= ~IRQ_PEND_EXT_MASK;
if (psw_ioint_disabled(vcpu))
active_mask &= ~IRQ_PEND_IO_MASK;
else
active_mask = disable_iscs(vcpu, active_mask);
if (!(vcpu->arch.sie_block->gcr[0] & CR0_EXTERNAL_CALL_SUBMASK))
__clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask);
if (!(vcpu->arch.sie_block->gcr[0] & CR0_EMERGENCY_SIGNAL_SUBMASK))
__clear_bit(IRQ_PEND_EXT_EMERGENCY, &active_mask);
if (!(vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SUBMASK))
__clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask);
if (!(vcpu->arch.sie_block->gcr[0] & CR0_CPU_TIMER_SUBMASK))
__clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask);
if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK)) {
__clear_bit(IRQ_PEND_EXT_SERVICE, &active_mask);
__clear_bit(IRQ_PEND_EXT_SERVICE_EV, &active_mask);
}
if (psw_mchk_disabled(vcpu))
active_mask &= ~IRQ_PEND_MCHK_MASK;
/* PV guest cpus can have a single interruption injected at a time. */
if (kvm_s390_pv_cpu_get_handle(vcpu) &&
vcpu->arch.sie_block->iictl != IICTL_CODE_NONE)
active_mask &= ~(IRQ_PEND_EXT_II_MASK |
IRQ_PEND_IO_MASK |
IRQ_PEND_MCHK_MASK);
/*
	 * Check cr14 of both the floating and the local interrupt because
	 * bit IRQ_PEND_MCHK_REP could be set in either case.
*/
if (!(vcpu->arch.sie_block->gcr[14] &
(vcpu->kvm->arch.float_int.mchk.cr14 |
vcpu->arch.local_int.irq.mchk.cr14)))
__clear_bit(IRQ_PEND_MCHK_REP, &active_mask);
/*
* STOP irqs will never be actively delivered. They are triggered via
* intercept requests and cleared when the stop intercept is performed.
*/
__clear_bit(IRQ_PEND_SIGP_STOP, &active_mask);
return active_mask;
}
static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
set_bit(vcpu->vcpu_idx, vcpu->kvm->arch.idle_mask);
}
static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT);
clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.idle_mask);
}
static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IO_INT | CPUSTAT_EXT_INT |
CPUSTAT_STOP_INT);
vcpu->arch.sie_block->lctl = 0x0000;
vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);
if (guestdbg_enabled(vcpu)) {
vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 |
LCTL_CR10 | LCTL_CR11);
vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
}
}
static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
{
if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_IO_MASK))
return;
if (psw_ioint_disabled(vcpu))
kvm_s390_set_cpuflags(vcpu, CPUSTAT_IO_INT);
else
vcpu->arch.sie_block->lctl |= LCTL_CR6;
}
static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
{
if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_EXT_MASK))
return;
if (psw_extint_disabled(vcpu))
kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
else
vcpu->arch.sie_block->lctl |= LCTL_CR0;
}
static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
{
if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_MCHK_MASK))
return;
if (psw_mchk_disabled(vcpu))
vcpu->arch.sie_block->ictl |= ICTL_LPSW;
else
vcpu->arch.sie_block->lctl |= LCTL_CR14;
}
static void set_intercept_indicators_stop(struct kvm_vcpu *vcpu)
{
if (kvm_s390_is_stop_irq_pending(vcpu))
kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
}
/* Set interception request for non-deliverable interrupts */
static void set_intercept_indicators(struct kvm_vcpu *vcpu)
{
set_intercept_indicators_io(vcpu);
set_intercept_indicators_ext(vcpu);
set_intercept_indicators_mchk(vcpu);
set_intercept_indicators_stop(vcpu);
}
static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
int rc = 0;
vcpu->stat.deliver_cputm++;
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
0, 0);
if (kvm_s390_pv_cpu_is_protected(vcpu)) {
vcpu->arch.sie_block->iictl = IICTL_CODE_EXT;
vcpu->arch.sie_block->eic = EXT_IRQ_CPU_TIMER;
} else {
rc = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
(u16 *)__LC_EXT_INT_CODE);
rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
}
clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
return rc ? -EFAULT : 0;
}
static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
int rc = 0;
vcpu->stat.deliver_ckc++;
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
0, 0);
if (kvm_s390_pv_cpu_is_protected(vcpu)) {
vcpu->arch.sie_block->iictl = IICTL_CODE_EXT;
vcpu->arch.sie_block->eic = EXT_IRQ_CLK_COMP;
} else {
rc = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP,
(u16 __user *)__LC_EXT_INT_CODE);
rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
}
clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
return rc ? -EFAULT : 0;
}
static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
struct kvm_s390_ext_info ext;
int rc;
spin_lock(&li->lock);
ext = li->irq.ext;
clear_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
li->irq.ext.ext_params2 = 0;
spin_unlock(&li->lock);
VCPU_EVENT(vcpu, 4, "deliver: pfault init token 0x%llx",
ext.ext_params2);
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
KVM_S390_INT_PFAULT_INIT,
0, ext.ext_params2);
rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *) __LC_EXT_INT_CODE);
rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR);
rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
rc |= put_guest_lc(vcpu, ext.ext_params2, (u64 *) __LC_EXT_PARAMS2);
return rc ? -EFAULT : 0;
}
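/*
 * Write the machine check payload (MCIC, register save areas and,
 * where available, the extended save area) into the guest lowcore and
 * exchange the PSWs. For protected guests only the SIE injection
 * controls are set up; the ultravisor provides the payload.
 */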
static int __write_machine_check(struct kvm_vcpu *vcpu,
struct kvm_s390_mchk_info *mchk)
{
unsigned long ext_sa_addr;
unsigned long lc;
freg_t fprs[NUM_FPRS];
union mci mci;
int rc;
/*
* All other possible payload for a machine check (e.g. the register
* contents in the save area) will be handled by the ultravisor, as
	 * the hypervisor does not have the needed information for
* protected guests.
*/
if (kvm_s390_pv_cpu_is_protected(vcpu)) {
vcpu->arch.sie_block->iictl = IICTL_CODE_MCHK;
vcpu->arch.sie_block->mcic = mchk->mcic;
vcpu->arch.sie_block->faddr = mchk->failing_storage_address;
vcpu->arch.sie_block->edc = mchk->ext_damage_code;
return 0;
}
mci.val = mchk->mcic;
/* take care of lazy register loading */
save_fpu_regs();
save_access_regs(vcpu->run->s.regs.acrs);
if (MACHINE_HAS_GS && vcpu->arch.gs_enabled)
save_gs_cb(current->thread.gs_cb);
/* Extended save area */
rc = read_guest_lc(vcpu, __LC_MCESAD, &ext_sa_addr,
sizeof(unsigned long));
/* Only bits 0 through 63-LC are used for address formation */
lc = ext_sa_addr & MCESA_LC_MASK;
if (test_kvm_facility(vcpu->kvm, 133)) {
switch (lc) {
case 0:
case 10:
ext_sa_addr &= ~0x3ffUL;
break;
case 11:
ext_sa_addr &= ~0x7ffUL;
break;
case 12:
ext_sa_addr &= ~0xfffUL;
break;
default:
ext_sa_addr = 0;
break;
}
} else {
ext_sa_addr &= ~0x3ffUL;
}
if (!rc && mci.vr && ext_sa_addr && test_kvm_facility(vcpu->kvm, 129)) {
if (write_guest_abs(vcpu, ext_sa_addr, vcpu->run->s.regs.vrs,
512))
mci.vr = 0;
} else {
mci.vr = 0;
}
if (!rc && mci.gs && ext_sa_addr && test_kvm_facility(vcpu->kvm, 133)
&& (lc == 11 || lc == 12)) {
if (write_guest_abs(vcpu, ext_sa_addr + 1024,
&vcpu->run->s.regs.gscb, 32))
mci.gs = 0;
} else {
mci.gs = 0;
}
/* General interruption information */
rc |= put_guest_lc(vcpu, 1, (u8 __user *) __LC_AR_MODE_ID);
rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
rc |= put_guest_lc(vcpu, mci.val, (u64 __user *) __LC_MCCK_CODE);
/* Register-save areas */
if (MACHINE_HAS_VX) {
convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA, fprs, 128);
} else {
rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA,
vcpu->run->s.regs.fprs, 128);
}
rc |= write_guest_lc(vcpu, __LC_GPREGS_SAVE_AREA,
vcpu->run->s.regs.gprs, 128);
rc |= put_guest_lc(vcpu, current->thread.fpu.fpc,
(u32 __user *) __LC_FP_CREG_SAVE_AREA);
rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->todpr,
(u32 __user *) __LC_TOD_PROGREG_SAVE_AREA);
rc |= put_guest_lc(vcpu, kvm_s390_get_cpu_timer(vcpu),
(u64 __user *) __LC_CPU_TIMER_SAVE_AREA);
rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->ckc >> 8,
(u64 __user *) __LC_CLOCK_COMP_SAVE_AREA);
rc |= write_guest_lc(vcpu, __LC_AREGS_SAVE_AREA,
&vcpu->run->s.regs.acrs, 64);
rc |= write_guest_lc(vcpu, __LC_CREGS_SAVE_AREA,
&vcpu->arch.sie_block->gcr, 128);
/* Extended interruption information */
rc |= put_guest_lc(vcpu, mchk->ext_damage_code,
(u32 __user *) __LC_EXT_DAMAGE_CODE);
rc |= put_guest_lc(vcpu, mchk->failing_storage_address,
(u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA, &mchk->fixed_logout,
sizeof(mchk->fixed_logout));
return rc ? -EFAULT : 0;
}
static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
{
struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
struct kvm_s390_mchk_info mchk = {};
int deliver = 0;
int rc = 0;
spin_lock(&fi->lock);
spin_lock(&li->lock);
if (test_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs) ||
test_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs)) {
/*
* If there was an exigent machine check pending, then any
* repressible machine checks that might have been pending
* are indicated along with it, so always clear bits for
* repressible and exigent interrupts
*/
mchk = li->irq.mchk;
clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
memset(&li->irq.mchk, 0, sizeof(mchk));
deliver = 1;
}
/*
* We indicate floating repressible conditions along with
* other pending conditions. Channel Report Pending and Channel
* Subsystem damage are the only two and are indicated by
* bits in mcic and masked in cr14.
*/
if (test_and_clear_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
mchk.mcic |= fi->mchk.mcic;
mchk.cr14 |= fi->mchk.cr14;
memset(&fi->mchk, 0, sizeof(mchk));
deliver = 1;
}
spin_unlock(&li->lock);
spin_unlock(&fi->lock);
if (deliver) {
VCPU_EVENT(vcpu, 3, "deliver: machine check mcic 0x%llx",
mchk.mcic);
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
KVM_S390_MCHK,
mchk.cr14, mchk.mcic);
vcpu->stat.deliver_machine_check++;
rc = __write_machine_check(vcpu, &mchk);
}
return rc;
}
static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
int rc = 0;
VCPU_EVENT(vcpu, 3, "%s", "deliver: cpu restart");
vcpu->stat.deliver_restart_signal++;
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);
if (kvm_s390_pv_cpu_is_protected(vcpu)) {
vcpu->arch.sie_block->iictl = IICTL_CODE_RESTART;
} else {
rc = write_guest_lc(vcpu,
offsetof(struct lowcore, restart_old_psw),
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
rc |= read_guest_lc(vcpu, offsetof(struct lowcore, restart_psw),
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
}
clear_bit(IRQ_PEND_RESTART, &li->pending_irqs);
return rc ? -EFAULT : 0;
}
static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
struct kvm_s390_prefix_info prefix;
spin_lock(&li->lock);
prefix = li->irq.prefix;
li->irq.prefix.address = 0;
clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
spin_unlock(&li->lock);
vcpu->stat.deliver_prefix_signal++;
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
KVM_S390_SIGP_SET_PREFIX,
prefix.address, 0);
kvm_s390_set_prefix(vcpu, prefix.address);
return 0;
}
static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
int rc;
int cpu_addr;
spin_lock(&li->lock);
cpu_addr = find_first_bit(li->sigp_emerg_pending, KVM_MAX_VCPUS);
clear_bit(cpu_addr, li->sigp_emerg_pending);
if (bitmap_empty(li->sigp_emerg_pending, KVM_MAX_VCPUS))
clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
spin_unlock(&li->lock);
VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp emerg");
vcpu->stat.deliver_emergency_signal++;
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
cpu_addr, 0);
if (kvm_s390_pv_cpu_is_protected(vcpu)) {
vcpu->arch.sie_block->iictl = IICTL_CODE_EXT;
vcpu->arch.sie_block->eic = EXT_IRQ_EMERGENCY_SIG;
vcpu->arch.sie_block->extcpuaddr = cpu_addr;
return 0;
}
rc = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG,
(u16 *)__LC_EXT_INT_CODE);
rc |= put_guest_lc(vcpu, cpu_addr, (u16 *)__LC_EXT_CPU_ADDR);
rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
return rc ? -EFAULT : 0;
}
static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
struct kvm_s390_extcall_info extcall;
int rc;
spin_lock(&li->lock);
extcall = li->irq.extcall;
li->irq.extcall.code = 0;
clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
spin_unlock(&li->lock);
VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp ext call");
vcpu->stat.deliver_external_call++;
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
KVM_S390_INT_EXTERNAL_CALL,
extcall.code, 0);
if (kvm_s390_pv_cpu_is_protected(vcpu)) {
vcpu->arch.sie_block->iictl = IICTL_CODE_EXT;
vcpu->arch.sie_block->eic = EXT_IRQ_EXTERNAL_CALL;
vcpu->arch.sie_block->extcpuaddr = extcall.code;
return 0;
}
rc = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL,
(u16 *)__LC_EXT_INT_CODE);
rc |= put_guest_lc(vcpu, extcall.code, (u16 *)__LC_EXT_CPU_ADDR);
rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw,
sizeof(psw_t));
return rc ? -EFAULT : 0;
}
static int __deliver_prog_pv(struct kvm_vcpu *vcpu, u16 code)
{
switch (code) {
case PGM_SPECIFICATION:
vcpu->arch.sie_block->iictl = IICTL_CODE_SPECIFICATION;
break;
case PGM_OPERAND:
vcpu->arch.sie_block->iictl = IICTL_CODE_OPERAND;
break;
default:
return -EINVAL;
}
return 0;
}
static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
struct kvm_s390_pgm_info pgm_info;
int rc = 0, nullifying = false;
u16 ilen;
spin_lock(&li->lock);
pgm_info = li->irq.pgm;
clear_bit(IRQ_PEND_PROG, &li->pending_irqs);
memset(&li->irq.pgm, 0, sizeof(pgm_info));
spin_unlock(&li->lock);
ilen = pgm_info.flags & KVM_S390_PGM_FLAGS_ILC_MASK;
VCPU_EVENT(vcpu, 3, "deliver: program irq code 0x%x, ilen:%d",
pgm_info.code, ilen);
vcpu->stat.deliver_program++;
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
pgm_info.code, 0);
/* PER is handled by the ultravisor */
if (kvm_s390_pv_cpu_is_protected(vcpu))
return __deliver_prog_pv(vcpu, pgm_info.code & ~PGM_PER);
switch (pgm_info.code & ~PGM_PER) {
case PGM_AFX_TRANSLATION:
case PGM_ASX_TRANSLATION:
case PGM_EX_TRANSLATION:
case PGM_LFX_TRANSLATION:
case PGM_LSTE_SEQUENCE:
case PGM_LSX_TRANSLATION:
case PGM_LX_TRANSLATION:
case PGM_PRIMARY_AUTHORITY:
case PGM_SECONDARY_AUTHORITY:
nullifying = true;
fallthrough;
case PGM_SPACE_SWITCH:
rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
(u64 *)__LC_TRANS_EXC_CODE);
break;
case PGM_ALEN_TRANSLATION:
case PGM_ALE_SEQUENCE:
case PGM_ASTE_INSTANCE:
case PGM_ASTE_SEQUENCE:
case PGM_ASTE_VALIDITY:
case PGM_EXTENDED_AUTHORITY:
rc = put_guest_lc(vcpu, pgm_info.exc_access_id,
(u8 *)__LC_EXC_ACCESS_ID);
nullifying = true;
break;
case PGM_ASCE_TYPE:
case PGM_PAGE_TRANSLATION:
case PGM_REGION_FIRST_TRANS:
case PGM_REGION_SECOND_TRANS:
case PGM_REGION_THIRD_TRANS:
case PGM_SEGMENT_TRANSLATION:
rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
(u64 *)__LC_TRANS_EXC_CODE);
rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
(u8 *)__LC_EXC_ACCESS_ID);
rc |= put_guest_lc(vcpu, pgm_info.op_access_id,
(u8 *)__LC_OP_ACCESS_ID);
nullifying = true;
break;
case PGM_MONITOR:
rc = put_guest_lc(vcpu, pgm_info.mon_class_nr,
(u16 *)__LC_MON_CLASS_NR);
rc |= put_guest_lc(vcpu, pgm_info.mon_code,
(u64 *)__LC_MON_CODE);
break;
case PGM_VECTOR_PROCESSING:
case PGM_DATA:
rc = put_guest_lc(vcpu, pgm_info.data_exc_code,
(u32 *)__LC_DATA_EXC_CODE);
break;
case PGM_PROTECTION:
rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
(u64 *)__LC_TRANS_EXC_CODE);
rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
(u8 *)__LC_EXC_ACCESS_ID);
break;
case PGM_STACK_FULL:
case PGM_STACK_EMPTY:
case PGM_STACK_SPECIFICATION:
case PGM_STACK_TYPE:
case PGM_STACK_OPERATION:
case PGM_TRACE_TABEL:
case PGM_CRYPTO_OPERATION:
nullifying = true;
break;
}
if (pgm_info.code & PGM_PER) {
rc |= put_guest_lc(vcpu, pgm_info.per_code,
(u8 *) __LC_PER_CODE);
rc |= put_guest_lc(vcpu, pgm_info.per_atmid,
(u8 *)__LC_PER_ATMID);
rc |= put_guest_lc(vcpu, pgm_info.per_address,
(u64 *) __LC_PER_ADDRESS);
rc |= put_guest_lc(vcpu, pgm_info.per_access_id,
(u8 *) __LC_PER_ACCESS_ID);
}
if (nullifying && !(pgm_info.flags & KVM_S390_PGM_FLAGS_NO_REWIND))
kvm_s390_rewind_psw(vcpu, ilen);
/* bit 1+2 of the target are the ilc, so we can directly use ilen */
rc |= put_guest_lc(vcpu, ilen, (u16 *) __LC_PGM_ILC);
rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->gbea,
(u64 *) __LC_PGM_LAST_BREAK);
rc |= put_guest_lc(vcpu, pgm_info.code,
(u16 *)__LC_PGM_INT_CODE);
rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
return rc ? -EFAULT : 0;
}
#define SCCB_MASK 0xFFFFFFF8
#define SCCB_EVENT_PENDING 0x3
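/*
 * Present a service-signal external interrupt with the given
 * parameter, which is either an 8-byte-aligned SCCB address (see
 * SCCB_MASK) or the event-pending indication.
 */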
static int write_sclp(struct kvm_vcpu *vcpu, u32 parm)
{
int rc;
if (kvm_s390_pv_cpu_get_handle(vcpu)) {
vcpu->arch.sie_block->iictl = IICTL_CODE_EXT;
vcpu->arch.sie_block->eic = EXT_IRQ_SERVICE_SIG;
vcpu->arch.sie_block->eiparams = parm;
return 0;
}
rc = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE);
rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
rc |= put_guest_lc(vcpu, parm,
(u32 *)__LC_EXT_PARAMS);
return rc ? -EFAULT : 0;
}
static int __must_check __deliver_service(struct kvm_vcpu *vcpu)
{
struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
struct kvm_s390_ext_info ext;
spin_lock(&fi->lock);
if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->masked_irqs) ||
!(test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs))) {
spin_unlock(&fi->lock);
return 0;
}
ext = fi->srv_signal;
memset(&fi->srv_signal, 0, sizeof(ext));
clear_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
clear_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs);
if (kvm_s390_pv_cpu_is_protected(vcpu))
set_bit(IRQ_PEND_EXT_SERVICE, &fi->masked_irqs);
spin_unlock(&fi->lock);
VCPU_EVENT(vcpu, 4, "deliver: sclp parameter 0x%x",
ext.ext_params);
vcpu->stat.deliver_service_signal++;
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE,
ext.ext_params, 0);
return write_sclp(vcpu, ext.ext_params);
}
static int __must_check __deliver_service_ev(struct kvm_vcpu *vcpu)
{
struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
struct kvm_s390_ext_info ext;
spin_lock(&fi->lock);
if (!(test_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs))) {
spin_unlock(&fi->lock);
return 0;
}
ext = fi->srv_signal;
/* only clear the event bit */
fi->srv_signal.ext_params &= ~SCCB_EVENT_PENDING;
clear_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs);
spin_unlock(&fi->lock);
VCPU_EVENT(vcpu, 4, "%s", "deliver: sclp parameter event");
vcpu->stat.deliver_service_signal++;
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE,
ext.ext_params, 0);
return write_sclp(vcpu, SCCB_EVENT_PENDING);
}
static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu)
{
struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
struct kvm_s390_interrupt_info *inti;
int rc = 0;
spin_lock(&fi->lock);
inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_PFAULT],
struct kvm_s390_interrupt_info,
list);
if (inti) {
list_del(&inti->list);
fi->counters[FIRQ_CNTR_PFAULT] -= 1;
}
if (list_empty(&fi->lists[FIRQ_LIST_PFAULT]))
clear_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
spin_unlock(&fi->lock);
if (inti) {
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
KVM_S390_INT_PFAULT_DONE, 0,
inti->ext.ext_params2);
VCPU_EVENT(vcpu, 4, "deliver: pfault done token 0x%llx",
inti->ext.ext_params2);
rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
(u16 *)__LC_EXT_INT_CODE);
rc |= put_guest_lc(vcpu, PFAULT_DONE,
(u16 *)__LC_EXT_CPU_ADDR);
rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
&vcpu->arch.sie_block->gpsw,
sizeof(psw_t));
rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
&vcpu->arch.sie_block->gpsw,
sizeof(psw_t));
rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
(u64 *)__LC_EXT_PARAMS2);
kfree(inti);
}
return rc ? -EFAULT : 0;
}
static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu)
{
struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
struct kvm_s390_interrupt_info *inti;
int rc = 0;
spin_lock(&fi->lock);
inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_VIRTIO],
struct kvm_s390_interrupt_info,
list);
if (inti) {
VCPU_EVENT(vcpu, 4,
"deliver: virtio parm: 0x%x,parm64: 0x%llx",
inti->ext.ext_params, inti->ext.ext_params2);
vcpu->stat.deliver_virtio++;
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
inti->type,
inti->ext.ext_params,
inti->ext.ext_params2);
list_del(&inti->list);
fi->counters[FIRQ_CNTR_VIRTIO] -= 1;
}
if (list_empty(&fi->lists[FIRQ_LIST_VIRTIO]))
clear_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
spin_unlock(&fi->lock);
if (inti) {
rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
(u16 *)__LC_EXT_INT_CODE);
rc |= put_guest_lc(vcpu, VIRTIO_PARAM,
(u16 *)__LC_EXT_CPU_ADDR);
rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
&vcpu->arch.sie_block->gpsw,
sizeof(psw_t));
rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
&vcpu->arch.sie_block->gpsw,
sizeof(psw_t));
rc |= put_guest_lc(vcpu, inti->ext.ext_params,
(u32 *)__LC_EXT_PARAMS);
rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
(u64 *)__LC_EXT_PARAMS2);
kfree(inti);
}
return rc ? -EFAULT : 0;
}
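/*
 * Write one I/O interrupt into the guest lowcore and exchange the
 * PSWs, or set up the SIE injection controls for protected guests.
 */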
static int __do_deliver_io(struct kvm_vcpu *vcpu, struct kvm_s390_io_info *io)
{
int rc;
if (kvm_s390_pv_cpu_is_protected(vcpu)) {
vcpu->arch.sie_block->iictl = IICTL_CODE_IO;
vcpu->arch.sie_block->subchannel_id = io->subchannel_id;
vcpu->arch.sie_block->subchannel_nr = io->subchannel_nr;
vcpu->arch.sie_block->io_int_parm = io->io_int_parm;
vcpu->arch.sie_block->io_int_word = io->io_int_word;
return 0;
}
rc = put_guest_lc(vcpu, io->subchannel_id, (u16 *)__LC_SUBCHANNEL_ID);
rc |= put_guest_lc(vcpu, io->subchannel_nr, (u16 *)__LC_SUBCHANNEL_NR);
rc |= put_guest_lc(vcpu, io->io_int_parm, (u32 *)__LC_IO_INT_PARM);
rc |= put_guest_lc(vcpu, io->io_int_word, (u32 *)__LC_IO_INT_WORD);
rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
&vcpu->arch.sie_block->gpsw,
sizeof(psw_t));
rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
&vcpu->arch.sie_block->gpsw,
sizeof(psw_t));
return rc ? -EFAULT : 0;
}
static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
unsigned long irq_type)
{
struct list_head *isc_list;
struct kvm_s390_float_interrupt *fi;
struct kvm_s390_gisa_interrupt *gi = &vcpu->kvm->arch.gisa_int;
struct kvm_s390_interrupt_info *inti = NULL;
struct kvm_s390_io_info io;
u32 isc;
int rc = 0;
fi = &vcpu->kvm->arch.float_int;
spin_lock(&fi->lock);
isc = irq_type_to_isc(irq_type);
isc_list = &fi->lists[isc];
inti = list_first_entry_or_null(isc_list,
struct kvm_s390_interrupt_info,
list);
if (inti) {
if (inti->type & KVM_S390_INT_IO_AI_MASK)
VCPU_EVENT(vcpu, 4, "%s", "deliver: I/O (AI)");
else
VCPU_EVENT(vcpu, 4, "deliver: I/O %x ss %x schid %04x",
inti->io.subchannel_id >> 8,
inti->io.subchannel_id >> 1 & 0x3,
inti->io.subchannel_nr);
vcpu->stat.deliver_io++;
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
inti->type,
((__u32)inti->io.subchannel_id << 16) |
inti->io.subchannel_nr,
((__u64)inti->io.io_int_parm << 32) |
inti->io.io_int_word);
list_del(&inti->list);
fi->counters[FIRQ_CNTR_IO] -= 1;
}
if (list_empty(isc_list))
clear_bit(irq_type, &fi->pending_irqs);
spin_unlock(&fi->lock);
if (inti) {
rc = __do_deliver_io(vcpu, &(inti->io));
kfree(inti);
goto out;
}
if (gi->origin && gisa_tac_ipm_gisc(gi->origin, isc)) {
/*
		 * In case an adapter interrupt was not delivered
		 * in SIE context, KVM will handle the delivery.
*/
VCPU_EVENT(vcpu, 4, "%s isc %u", "deliver: I/O (AI/gisa)", isc);
memset(&io, 0, sizeof(io));
io.io_int_word = isc_to_int_word(isc);
vcpu->stat.deliver_io++;
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
KVM_S390_INT_IO(1, 0, 0, 0),
((__u32)io.subchannel_id << 16) |
io.subchannel_nr,
((__u64)io.io_int_parm << 32) |
io.io_int_word);
rc = __do_deliver_io(vcpu, &io);
}
out:
return rc;
}
/* Check whether an external call is pending (deliverable or not) */
int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
if (!sclp.has_sigpif)
return test_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
return sca_ext_call_pending(vcpu, NULL);
}
int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
{
if (deliverable_irqs(vcpu))
return 1;
if (kvm_cpu_has_pending_timer(vcpu))
return 1;
/* external call pending and deliverable */
if (kvm_s390_ext_call_pending(vcpu) &&
!psw_extint_disabled(vcpu) &&
(vcpu->arch.sie_block->gcr[0] & CR0_EXTERNAL_CALL_SUBMASK))
return 1;
if (!exclude_stop && kvm_s390_is_stop_irq_pending(vcpu))
return 1;
return 0;
}
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
return ckc_irq_pending(vcpu) || cpu_timer_irq_pending(vcpu);
}
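/*
 * Compute how long the vcpu may sleep: the minimum of the time until
 * the clock comparator fires and the remaining CPU timer, considering
 * only the timers whose interrupts are currently enabled. Returns 0
 * if one of them has already expired.
 */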
static u64 __calculate_sltime(struct kvm_vcpu *vcpu)
{
const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
const u64 ckc = vcpu->arch.sie_block->ckc;
u64 cputm, sltime = 0;
if (ckc_interrupts_enabled(vcpu)) {
if (vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SIGN) {
if ((s64)now < (s64)ckc)
sltime = tod_to_ns((s64)ckc - (s64)now);
} else if (now < ckc) {
sltime = tod_to_ns(ckc - now);
}
/* already expired */
if (!sltime)
return 0;
if (cpu_timer_interrupts_enabled(vcpu)) {
cputm = kvm_s390_get_cpu_timer(vcpu);
/* already expired? */
if (cputm >> 63)
return 0;
return min_t(u64, sltime, tod_to_ns(cputm));
}
} else if (cpu_timer_interrupts_enabled(vcpu)) {
sltime = kvm_s390_get_cpu_timer(vcpu);
/* already expired? */
if (sltime >> 63)
return 0;
}
return sltime;
}
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
struct kvm_s390_gisa_interrupt *gi = &vcpu->kvm->arch.gisa_int;
u64 sltime;
vcpu->stat.exit_wait_state++;
/* fast path */
if (kvm_arch_vcpu_runnable(vcpu))
return 0;
if (psw_interrupts_disabled(vcpu)) {
VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
return -EOPNOTSUPP; /* disabled wait */
}
if (gi->origin &&
(gisa_get_ipm_or_restore_iam(gi) &
vcpu->arch.sie_block->gcr[6] >> 24))
return 0;
if (!ckc_interrupts_enabled(vcpu) &&
!cpu_timer_interrupts_enabled(vcpu)) {
VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
__set_cpu_idle(vcpu);
goto no_timer;
}
sltime = __calculate_sltime(vcpu);
if (!sltime)
return 0;
__set_cpu_idle(vcpu);
hrtimer_start(&vcpu->arch.ckc_timer, sltime, HRTIMER_MODE_REL);
VCPU_EVENT(vcpu, 4, "enabled wait: %llu ns", sltime);
no_timer:
kvm_vcpu_srcu_read_unlock(vcpu);
kvm_vcpu_halt(vcpu);
vcpu->valid_wakeup = false;
__unset_cpu_idle(vcpu);
kvm_vcpu_srcu_read_lock(vcpu);
hrtimer_cancel(&vcpu->arch.ckc_timer);
return 0;
}
void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
{
vcpu->valid_wakeup = true;
kvm_vcpu_wake_up(vcpu);
/*
* The VCPU might not be sleeping but rather executing VSIE. Let's
* kick it, so it leaves the SIE to process the request.
*/
kvm_s390_vsie_kick(vcpu);
}
enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
struct kvm_vcpu *vcpu;
u64 sltime;
vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
sltime = __calculate_sltime(vcpu);
/*
* If the monotonic clock runs faster than the tod clock we might be
* woken up too early and have to go back to sleep to avoid deadlocks.
*/
if (sltime && hrtimer_forward_now(timer, ns_to_ktime(sltime)))
return HRTIMER_RESTART;
kvm_s390_vcpu_wakeup(vcpu);
return HRTIMER_NORESTART;
}
void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
spin_lock(&li->lock);
li->pending_irqs = 0;
bitmap_zero(li->sigp_emerg_pending, KVM_MAX_VCPUS);
memset(&li->irq, 0, sizeof(li->irq));
spin_unlock(&li->lock);
sca_clear_ext_call(vcpu);
}
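/*
 * Deliver all deliverable interrupts in priority order; the pending
 * bits are numbered in reverse priority order, hence find_last_bit()
 * below. Clock comparator and CPU timer conditions are re-evaluated
 * first, and intercept indicators are set for whatever remains
 * pending but undeliverable.
 */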
int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
int rc = 0;
bool delivered = false;
unsigned long irq_type;
unsigned long irqs;
__reset_intercept_indicators(vcpu);
/* pending ckc conditions might have been invalidated */
clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
if (ckc_irq_pending(vcpu))
set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
/* pending cpu timer conditions might have been invalidated */
clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
if (cpu_timer_irq_pending(vcpu))
set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
while ((irqs = deliverable_irqs(vcpu)) && !rc) {
/* bits are in the reverse order of interrupt priority */
irq_type = find_last_bit(&irqs, IRQ_PEND_COUNT);
switch (irq_type) {
case IRQ_PEND_IO_ISC_0:
case IRQ_PEND_IO_ISC_1:
case IRQ_PEND_IO_ISC_2:
case IRQ_PEND_IO_ISC_3:
case IRQ_PEND_IO_ISC_4:
case IRQ_PEND_IO_ISC_5:
case IRQ_PEND_IO_ISC_6:
case IRQ_PEND_IO_ISC_7:
rc = __deliver_io(vcpu, irq_type);
break;
case IRQ_PEND_MCHK_EX:
case IRQ_PEND_MCHK_REP:
rc = __deliver_machine_check(vcpu);
break;
case IRQ_PEND_PROG:
rc = __deliver_prog(vcpu);
break;
case IRQ_PEND_EXT_EMERGENCY:
rc = __deliver_emergency_signal(vcpu);
break;
case IRQ_PEND_EXT_EXTERNAL:
rc = __deliver_external_call(vcpu);
break;
case IRQ_PEND_EXT_CLOCK_COMP:
rc = __deliver_ckc(vcpu);
break;
case IRQ_PEND_EXT_CPU_TIMER:
rc = __deliver_cpu_timer(vcpu);
break;
case IRQ_PEND_RESTART:
rc = __deliver_restart(vcpu);
break;
case IRQ_PEND_SET_PREFIX:
rc = __deliver_set_prefix(vcpu);
break;
case IRQ_PEND_PFAULT_INIT:
rc = __deliver_pfault_init(vcpu);
break;
case IRQ_PEND_EXT_SERVICE:
rc = __deliver_service(vcpu);
break;
case IRQ_PEND_EXT_SERVICE_EV:
rc = __deliver_service_ev(vcpu);
break;
case IRQ_PEND_PFAULT_DONE:
rc = __deliver_pfault_done(vcpu);
break;
case IRQ_PEND_VIRTIO:
rc = __deliver_virtio(vcpu);
break;
default:
WARN_ONCE(1, "Unknown pending irq type %ld", irq_type);
clear_bit(irq_type, &li->pending_irqs);
}
delivered |= !rc;
}
/*
* We delivered at least one interrupt and modified the PC. Force a
* singlestep event now.
*/
if (delivered && guestdbg_sstep_enabled(vcpu)) {
struct kvm_debug_exit_arch *debug_exit = &vcpu->run->debug.arch;
debug_exit->addr = vcpu->arch.sie_block->gpsw.addr;
debug_exit->type = KVM_SINGLESTEP;
vcpu->guest_debug |= KVM_GUESTDBG_EXIT_PENDING;
}
set_intercept_indicators(vcpu);
return rc;
}
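/*
 * Record a program interrupt for later delivery. A PER event and a
 * concurrent non-PER condition are merged into a single pending
 * interrupt, with each injection updating only its own fields.
 */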
static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
vcpu->stat.inject_program++;
VCPU_EVENT(vcpu, 3, "inject: program irq code 0x%x", irq->u.pgm.code);
trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
irq->u.pgm.code, 0);
if (!(irq->u.pgm.flags & KVM_S390_PGM_FLAGS_ILC_VALID)) {
/* auto detection if no valid ILC was given */
irq->u.pgm.flags &= ~KVM_S390_PGM_FLAGS_ILC_MASK;
irq->u.pgm.flags |= kvm_s390_get_ilen(vcpu);
irq->u.pgm.flags |= KVM_S390_PGM_FLAGS_ILC_VALID;
}
if (irq->u.pgm.code == PGM_PER) {
li->irq.pgm.code |= PGM_PER;
li->irq.pgm.flags = irq->u.pgm.flags;
/* only modify PER related information */
li->irq.pgm.per_address = irq->u.pgm.per_address;
li->irq.pgm.per_code = irq->u.pgm.per_code;
li->irq.pgm.per_atmid = irq->u.pgm.per_atmid;
li->irq.pgm.per_access_id = irq->u.pgm.per_access_id;
} else if (!(irq->u.pgm.code & PGM_PER)) {
li->irq.pgm.code = (li->irq.pgm.code & PGM_PER) |
irq->u.pgm.code;
li->irq.pgm.flags = irq->u.pgm.flags;
/* only modify non-PER information */
li->irq.pgm.trans_exc_code = irq->u.pgm.trans_exc_code;
li->irq.pgm.mon_code = irq->u.pgm.mon_code;
li->irq.pgm.data_exc_code = irq->u.pgm.data_exc_code;
li->irq.pgm.mon_class_nr = irq->u.pgm.mon_class_nr;
li->irq.pgm.exc_access_id = irq->u.pgm.exc_access_id;
li->irq.pgm.op_access_id = irq->u.pgm.op_access_id;
} else {
li->irq.pgm = irq->u.pgm;
}
set_bit(IRQ_PEND_PROG, &li->pending_irqs);
return 0;
}
static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
vcpu->stat.inject_pfault_init++;
VCPU_EVENT(vcpu, 4, "inject: pfault init parameter block at 0x%llx",
irq->u.ext.ext_params2);
trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT,
irq->u.ext.ext_params,
irq->u.ext.ext_params2);
li->irq.ext = irq->u.ext;
set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
return 0;
}
static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
struct kvm_s390_extcall_info *extcall = &li->irq.extcall;
uint16_t src_id = irq->u.extcall.code;
vcpu->stat.inject_external_call++;
VCPU_EVENT(vcpu, 4, "inject: external call source-cpu:%u",
src_id);
trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL,
src_id, 0);
/* sending vcpu invalid */
if (kvm_get_vcpu_by_id(vcpu->kvm, src_id) == NULL)
return -EINVAL;
if (sclp.has_sigpif && !kvm_s390_pv_cpu_get_handle(vcpu))
return sca_inject_ext_call(vcpu, src_id);
if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
return -EBUSY;
*extcall = irq->u.extcall;
kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
return 0;
}
static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
struct kvm_s390_prefix_info *prefix = &li->irq.prefix;
vcpu->stat.inject_set_prefix++;
VCPU_EVENT(vcpu, 3, "inject: set prefix to %x",
irq->u.prefix.address);
trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX,
irq->u.prefix.address, 0);
if (!is_vcpu_stopped(vcpu))
return -EBUSY;
*prefix = irq->u.prefix;
set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
return 0;
}
#define KVM_S390_STOP_SUPP_FLAGS (KVM_S390_STOP_FLAG_STORE_STATUS)
static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
struct kvm_s390_stop_info *stop = &li->irq.stop;
int rc = 0;
vcpu->stat.inject_stop_signal++;
trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0);
if (irq->u.stop.flags & ~KVM_S390_STOP_SUPP_FLAGS)
return -EINVAL;
if (is_vcpu_stopped(vcpu)) {
if (irq->u.stop.flags & KVM_S390_STOP_FLAG_STORE_STATUS)
rc = kvm_s390_store_status_unloaded(vcpu,
KVM_S390_STORE_STATUS_NOADDR);
return rc;
}
if (test_and_set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs))
return -EBUSY;
stop->flags = irq->u.stop.flags;
kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
return 0;
}
static int __inject_sigp_restart(struct kvm_vcpu *vcpu)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
vcpu->stat.inject_restart++;
VCPU_EVENT(vcpu, 3, "%s", "inject: restart int");
trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);
set_bit(IRQ_PEND_RESTART, &li->pending_irqs);
return 0;
}
static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
struct kvm_s390_irq *irq)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
vcpu->stat.inject_emergency_signal++;
VCPU_EVENT(vcpu, 4, "inject: emergency from cpu %u",
irq->u.emerg.code);
trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
irq->u.emerg.code, 0);
/* sending vcpu invalid */
if (kvm_get_vcpu_by_id(vcpu->kvm, irq->u.emerg.code) == NULL)
return -EINVAL;
set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
return 0;
}
static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
struct kvm_s390_mchk_info *mchk = &li->irq.mchk;
vcpu->stat.inject_mchk++;
VCPU_EVENT(vcpu, 3, "inject: machine check mcic 0x%llx",
irq->u.mchk.mcic);
trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0,
irq->u.mchk.mcic);
/*
* Because repressible machine checks can be indicated along with
* exigent machine checks (PoP, Chapter 11, Interruption action)
* we need to combine cr14, mcic and external damage code.
* Failing storage address and the logout area should not be or'ed
* together, we just indicate the last occurrence of the corresponding
* machine check
*/
mchk->cr14 |= irq->u.mchk.cr14;
mchk->mcic |= irq->u.mchk.mcic;
mchk->ext_damage_code |= irq->u.mchk.ext_damage_code;
mchk->failing_storage_address = irq->u.mchk.failing_storage_address;
memcpy(&mchk->fixed_logout, &irq->u.mchk.fixed_logout,
sizeof(mchk->fixed_logout));
if (mchk->mcic & MCHK_EX_MASK)
set_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
else if (mchk->mcic & MCHK_REP_MASK)
set_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
return 0;
}
static int __inject_ckc(struct kvm_vcpu *vcpu)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
vcpu->stat.inject_ckc++;
VCPU_EVENT(vcpu, 3, "%s", "inject: clock comparator external");
trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
0, 0);
set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
return 0;
}
static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
vcpu->stat.inject_cputm++;
VCPU_EVENT(vcpu, 3, "%s", "inject: cpu timer external");
trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
0, 0);
set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
return 0;
}
static struct kvm_s390_interrupt_info *get_io_int(struct kvm *kvm,
int isc, u32 schid)
{
struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
struct list_head *isc_list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
struct kvm_s390_interrupt_info *iter;
u16 id = (schid & 0xffff0000U) >> 16;
u16 nr = schid & 0x0000ffffU;
spin_lock(&fi->lock);
list_for_each_entry(iter, isc_list, list) {
if (schid && (id != iter->io.subchannel_id ||
nr != iter->io.subchannel_nr))
continue;
/* found an appropriate entry */
list_del_init(&iter->list);
fi->counters[FIRQ_CNTR_IO] -= 1;
if (list_empty(isc_list))
clear_bit(isc_to_irq_type(isc), &fi->pending_irqs);
spin_unlock(&fi->lock);
return iter;
}
spin_unlock(&fi->lock);
return NULL;
}
static struct kvm_s390_interrupt_info *get_top_io_int(struct kvm *kvm,
u64 isc_mask, u32 schid)
{
struct kvm_s390_interrupt_info *inti = NULL;
int isc;
for (isc = 0; isc <= MAX_ISC && !inti; isc++) {
if (isc_mask & isc_to_isc_bits(isc))
inti = get_io_int(kvm, isc, schid);
}
return inti;
}
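/*
 * Find, test and clear the highest priority (i.e. lowest numbered)
 * ISC with a pending adapter interrupt in the GISA IPM, honoring the
 * isc mask. Returns the ISC, or -EINVAL if there is none or a
 * specific subchannel was requested (the GISA carries no subchannel
 * information).
 */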
static int get_top_gisa_isc(struct kvm *kvm, u64 isc_mask, u32 schid)
{
struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
unsigned long active_mask;
int isc;
if (schid)
goto out;
if (!gi->origin)
goto out;
active_mask = (isc_mask & gisa_get_ipm(gi->origin) << 24) << 32;
while (active_mask) {
isc = __fls(active_mask) ^ (BITS_PER_LONG - 1);
if (gisa_tac_ipm_gisc(gi->origin, isc))
return isc;
clear_bit_inv(isc, &active_mask);
}
out:
return -EINVAL;
}
/*
* Dequeue and return an I/O interrupt matching any of the interruption
* subclasses as designated by the isc mask in cr6 and the schid (if != 0).
* Take into account the interrupts pending in the interrupt list and in GISA.
*
* Note that for a guest that does not enable I/O interrupts
* but relies on TPI, a flood of classic interrupts may starve
* out adapter interrupts on the same isc. Linux does not do
* that, and it is possible to work around the issue by configuring
* different iscs for classic and adapter interrupts in the guest,
* but we may want to revisit this in the future.
*/
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
u64 isc_mask, u32 schid)
{
struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
struct kvm_s390_interrupt_info *inti, *tmp_inti;
int isc;
inti = get_top_io_int(kvm, isc_mask, schid);
isc = get_top_gisa_isc(kvm, isc_mask, schid);
if (isc < 0)
/* no AI in GISA */
goto out;
if (!inti)
/* AI in GISA but no classical IO int */
goto gisa_out;
/* both types of interrupts present */
if (int_word_to_isc(inti->io.io_int_word) <= isc) {
/* classical IO int with higher priority */
gisa_set_ipm_gisc(gi->origin, isc);
goto out;
}
gisa_out:
tmp_inti = kzalloc(sizeof(*inti), GFP_KERNEL_ACCOUNT);
if (tmp_inti) {
tmp_inti->type = KVM_S390_INT_IO(1, 0, 0, 0);
tmp_inti->io.io_int_word = isc_to_int_word(isc);
if (inti)
kvm_s390_reinject_io_int(kvm, inti);
inti = tmp_inti;
} else
gisa_set_ipm_gisc(gi->origin, isc);
out:
return inti;
}
static int __inject_service(struct kvm *kvm,
struct kvm_s390_interrupt_info *inti)
{
struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
kvm->stat.inject_service_signal++;
spin_lock(&fi->lock);
fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_EVENT_PENDING;
/* We always allow events, track them separately from the sccb ints */
if (fi->srv_signal.ext_params & SCCB_EVENT_PENDING)
set_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs);
/*
* Early versions of the QEMU s390 bios will inject several
	 * service interrupts one after another without handling a
* condition code indicating busy.
* We will silently ignore those superfluous sccb values.
* A future version of QEMU will take care of serialization
* of servc requests
*/
if (fi->srv_signal.ext_params & SCCB_MASK)
goto out;
fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_MASK;
set_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
out:
spin_unlock(&fi->lock);
kfree(inti);
return 0;
}
static int __inject_virtio(struct kvm *kvm,
struct kvm_s390_interrupt_info *inti)
{
struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
kvm->stat.inject_virtio++;
spin_lock(&fi->lock);
if (fi->counters[FIRQ_CNTR_VIRTIO] >= KVM_S390_MAX_VIRTIO_IRQS) {
spin_unlock(&fi->lock);
return -EBUSY;
}
fi->counters[FIRQ_CNTR_VIRTIO] += 1;
list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_VIRTIO]);
set_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
spin_unlock(&fi->lock);
return 0;
}
static int __inject_pfault_done(struct kvm *kvm,
struct kvm_s390_interrupt_info *inti)
{
struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
kvm->stat.inject_pfault_done++;
spin_lock(&fi->lock);
if (fi->counters[FIRQ_CNTR_PFAULT] >=
(ASYNC_PF_PER_VCPU * KVM_MAX_VCPUS)) {
spin_unlock(&fi->lock);
return -EBUSY;
}
fi->counters[FIRQ_CNTR_PFAULT] += 1;
list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_PFAULT]);
set_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
spin_unlock(&fi->lock);
return 0;
}
#define CR_PENDING_SUBCLASS 28
static int __inject_float_mchk(struct kvm *kvm,
struct kvm_s390_interrupt_info *inti)
{
struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
kvm->stat.inject_float_mchk++;
spin_lock(&fi->lock);
fi->mchk.cr14 |= inti->mchk.cr14 & (1UL << CR_PENDING_SUBCLASS);
fi->mchk.mcic |= inti->mchk.mcic;
set_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs);
spin_unlock(&fi->lock);
kfree(inti);
return 0;
}
static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
{
struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
struct kvm_s390_float_interrupt *fi;
struct list_head *list;
int isc;
kvm->stat.inject_io++;
isc = int_word_to_isc(inti->io.io_int_word);
/*
* We do not use the lock checking variant as this is just a
* performance optimization and we do not hold the lock here.
* This is ok as the code will pick interrupts from both "lists"
* for delivery.
*/
if (gi->origin && inti->type & KVM_S390_INT_IO_AI_MASK) {
VM_EVENT(kvm, 4, "%s isc %1u", "inject: I/O (AI/gisa)", isc);
gisa_set_ipm_gisc(gi->origin, isc);
kfree(inti);
return 0;
}
fi = &kvm->arch.float_int;
spin_lock(&fi->lock);
if (fi->counters[FIRQ_CNTR_IO] >= KVM_S390_MAX_FLOAT_IRQS) {
spin_unlock(&fi->lock);
return -EBUSY;
}
fi->counters[FIRQ_CNTR_IO] += 1;
if (inti->type & KVM_S390_INT_IO_AI_MASK)
VM_EVENT(kvm, 4, "%s", "inject: I/O (AI)");
else
VM_EVENT(kvm, 4, "inject: I/O %x ss %x schid %04x",
inti->io.subchannel_id >> 8,
inti->io.subchannel_id >> 1 & 0x3,
inti->io.subchannel_nr);
list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
list_add_tail(&inti->list, list);
set_bit(isc_to_irq_type(isc), &fi->pending_irqs);
spin_unlock(&fi->lock);
return 0;
}
/*
* Find a destination VCPU for a floating irq and kick it.
*/
static void __floating_irq_kick(struct kvm *kvm, u64 type)
{
struct kvm_vcpu *dst_vcpu;
int sigcpu, online_vcpus, nr_tries = 0;
online_vcpus = atomic_read(&kvm->online_vcpus);
if (!online_vcpus)
return;
/* find idle VCPUs first, then round robin */
sigcpu = find_first_bit(kvm->arch.idle_mask, online_vcpus);
if (sigcpu == online_vcpus) {
do {
sigcpu = kvm->arch.float_int.next_rr_cpu++;
kvm->arch.float_int.next_rr_cpu %= online_vcpus;
/* avoid endless loops if all vcpus are stopped */
if (nr_tries++ >= online_vcpus)
return;
} while (is_vcpu_stopped(kvm_get_vcpu(kvm, sigcpu)));
}
dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
/* make the VCPU drop out of the SIE, or wake it up if sleeping */
switch (type) {
case KVM_S390_MCHK:
kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_STOP_INT);
break;
case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
if (!(type & KVM_S390_INT_IO_AI_MASK &&
kvm->arch.gisa_int.origin) ||
kvm_s390_pv_cpu_get_handle(dst_vcpu))
kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_IO_INT);
break;
default:
kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_EXT_INT);
break;
}
kvm_s390_vcpu_wakeup(dst_vcpu);
}
static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
{
u64 type = READ_ONCE(inti->type);
int rc;
switch (type) {
case KVM_S390_MCHK:
rc = __inject_float_mchk(kvm, inti);
break;
case KVM_S390_INT_VIRTIO:
rc = __inject_virtio(kvm, inti);
break;
case KVM_S390_INT_SERVICE:
rc = __inject_service(kvm, inti);
break;
case KVM_S390_INT_PFAULT_DONE:
rc = __inject_pfault_done(kvm, inti);
break;
case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
rc = __inject_io(kvm, inti);
break;
default:
rc = -EINVAL;
}
if (rc)
return rc;
__floating_irq_kick(kvm, type);
return 0;
}
int kvm_s390_inject_vm(struct kvm *kvm,
struct kvm_s390_interrupt *s390int)
{
struct kvm_s390_interrupt_info *inti;
int rc;
inti = kzalloc(sizeof(*inti), GFP_KERNEL_ACCOUNT);
if (!inti)
return -ENOMEM;
inti->type = s390int->type;
switch (inti->type) {
case KVM_S390_INT_VIRTIO:
VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
s390int->parm, s390int->parm64);
inti->ext.ext_params = s390int->parm;
inti->ext.ext_params2 = s390int->parm64;
break;
case KVM_S390_INT_SERVICE:
VM_EVENT(kvm, 4, "inject: sclp parm:%x", s390int->parm);
inti->ext.ext_params = s390int->parm;
break;
case KVM_S390_INT_PFAULT_DONE:
inti->ext.ext_params2 = s390int->parm64;
break;
case KVM_S390_MCHK:
VM_EVENT(kvm, 3, "inject: machine check mcic 0x%llx",
s390int->parm64);
inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
inti->mchk.mcic = s390int->parm64;
break;
case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
inti->io.subchannel_id = s390int->parm >> 16;
inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
inti->io.io_int_parm = s390int->parm64 >> 32;
inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
break;
default:
kfree(inti);
return -EINVAL;
}
trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
2);
rc = __inject_vm(kvm, inti);
if (rc)
kfree(inti);
return rc;
}
int kvm_s390_reinject_io_int(struct kvm *kvm,
struct kvm_s390_interrupt_info *inti)
{
return __inject_vm(kvm, inti);
}
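/*
 * Translate the legacy struct kvm_s390_interrupt format into a
 * struct kvm_s390_irq, checking that the parameters fit the target
 * fields.
 */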
int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
struct kvm_s390_irq *irq)
{
irq->type = s390int->type;
switch (irq->type) {
case KVM_S390_PROGRAM_INT:
if (s390int->parm & 0xffff0000)
return -EINVAL;
irq->u.pgm.code = s390int->parm;
break;
case KVM_S390_SIGP_SET_PREFIX:
irq->u.prefix.address = s390int->parm;
break;
case KVM_S390_SIGP_STOP:
irq->u.stop.flags = s390int->parm;
break;
case KVM_S390_INT_EXTERNAL_CALL:
if (s390int->parm & 0xffff0000)
return -EINVAL;
irq->u.extcall.code = s390int->parm;
break;
case KVM_S390_INT_EMERGENCY:
if (s390int->parm & 0xffff0000)
return -EINVAL;
irq->u.emerg.code = s390int->parm;
break;
case KVM_S390_MCHK:
irq->u.mchk.mcic = s390int->parm64;
break;
case KVM_S390_INT_PFAULT_INIT:
irq->u.ext.ext_params = s390int->parm;
irq->u.ext.ext_params2 = s390int->parm64;
break;
case KVM_S390_RESTART:
case KVM_S390_INT_CLOCK_COMP:
case KVM_S390_INT_CPU_TIMER:
break;
default:
return -EINVAL;
}
return 0;
}
int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
}
int kvm_s390_is_restart_irq_pending(struct kvm_vcpu *vcpu)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
return test_bit(IRQ_PEND_RESTART, &li->pending_irqs);
}
void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
spin_lock(&li->lock);
li->irq.stop.flags = 0;
clear_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
spin_unlock(&li->lock);
}
static int do_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
int rc;
switch (irq->type) {
case KVM_S390_PROGRAM_INT:
rc = __inject_prog(vcpu, irq);
break;
case KVM_S390_SIGP_SET_PREFIX:
rc = __inject_set_prefix(vcpu, irq);
break;
case KVM_S390_SIGP_STOP:
rc = __inject_sigp_stop(vcpu, irq);
break;
case KVM_S390_RESTART:
rc = __inject_sigp_restart(vcpu);
break;
case KVM_S390_INT_CLOCK_COMP:
rc = __inject_ckc(vcpu);
break;
case KVM_S390_INT_CPU_TIMER:
rc = __inject_cpu_timer(vcpu);
break;
case KVM_S390_INT_EXTERNAL_CALL:
rc = __inject_extcall(vcpu, irq);
break;
case KVM_S390_INT_EMERGENCY:
rc = __inject_sigp_emergency(vcpu, irq);
break;
case KVM_S390_MCHK:
rc = __inject_mchk(vcpu, irq);
break;
case KVM_S390_INT_PFAULT_INIT:
rc = __inject_pfault_init(vcpu, irq);
break;
case KVM_S390_INT_VIRTIO:
case KVM_S390_INT_SERVICE:
case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
default:
rc = -EINVAL;
}
return rc;
}
int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
int rc;
spin_lock(&li->lock);
rc = do_inject_vcpu(vcpu, irq);
spin_unlock(&li->lock);
if (!rc)
kvm_s390_vcpu_wakeup(vcpu);
return rc;
}
static inline void clear_irq_list(struct list_head *_list)
{
struct kvm_s390_interrupt_info *inti, *n;
list_for_each_entry_safe(inti, n, _list, list) {
list_del(&inti->list);
kfree(inti);
}
}
static void inti_to_irq(struct kvm_s390_interrupt_info *inti,
struct kvm_s390_irq *irq)
{
irq->type = inti->type;
switch (inti->type) {
case KVM_S390_INT_PFAULT_INIT:
case KVM_S390_INT_PFAULT_DONE:
case KVM_S390_INT_VIRTIO:
irq->u.ext = inti->ext;
break;
case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
irq->u.io = inti->io;
break;
}
}
void kvm_s390_clear_float_irqs(struct kvm *kvm)
{
struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
int i;
mutex_lock(&kvm->lock);
if (!kvm_s390_pv_is_protected(kvm))
fi->masked_irqs = 0;
mutex_unlock(&kvm->lock);
spin_lock(&fi->lock);
fi->pending_irqs = 0;
memset(&fi->srv_signal, 0, sizeof(fi->srv_signal));
memset(&fi->mchk, 0, sizeof(fi->mchk));
for (i = 0; i < FIRQ_LIST_COUNT; i++)
clear_irq_list(&fi->lists[i]);
for (i = 0; i < FIRQ_MAX_COUNT; i++)
fi->counters[i] = 0;
spin_unlock(&fi->lock);
kvm_s390_gisa_clear(kvm);
}
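/*
 * Copy all pending floating interrupts, including adapter interrupts
 * pending in the GISA, into a userspace buffer. Returns the number of
 * interrupts copied, or -ENOMEM if the buffer is too small, so that
 * userspace can retry with a bigger one.
 */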
static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
{
struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
struct kvm_s390_interrupt_info *inti;
struct kvm_s390_float_interrupt *fi;
struct kvm_s390_irq *buf;
struct kvm_s390_irq *irq;
int max_irqs;
int ret = 0;
int n = 0;
int i;
if (len > KVM_S390_FLIC_MAX_BUFFER || len == 0)
return -EINVAL;
/*
* We are already using -ENOMEM to signal
* userspace it may retry with a bigger buffer,
* so we need to use something else for this case
*/
buf = vzalloc(len);
if (!buf)
return -ENOBUFS;
max_irqs = len / sizeof(struct kvm_s390_irq);
if (gi->origin && gisa_get_ipm(gi->origin)) {
for (i = 0; i <= MAX_ISC; i++) {
if (n == max_irqs) {
/* signal userspace to try again */
ret = -ENOMEM;
goto out_nolock;
}
if (gisa_tac_ipm_gisc(gi->origin, i)) {
irq = (struct kvm_s390_irq *) &buf[n];
irq->type = KVM_S390_INT_IO(1, 0, 0, 0);
irq->u.io.io_int_word = isc_to_int_word(i);
n++;
}
}
}
fi = &kvm->arch.float_int;
spin_lock(&fi->lock);
for (i = 0; i < FIRQ_LIST_COUNT; i++) {
list_for_each_entry(inti, &fi->lists[i], list) {
if (n == max_irqs) {
/* signal userspace to try again */
ret = -ENOMEM;
goto out;
}
inti_to_irq(inti, &buf[n]);
n++;
}
}
if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs) ||
test_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs)) {
if (n == max_irqs) {
/* signal userspace to try again */
ret = -ENOMEM;
goto out;
}
irq = (struct kvm_s390_irq *) &buf[n];
irq->type = KVM_S390_INT_SERVICE;
irq->u.ext = fi->srv_signal;
n++;
}
if (test_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
if (n == max_irqs) {
/* signal userspace to try again */
ret = -ENOMEM;
goto out;
}
irq = (struct kvm_s390_irq *) &buf[n];
irq->type = KVM_S390_MCHK;
irq->u.mchk = fi->mchk;
n++;
}
out:
spin_unlock(&fi->lock);
out_nolock:
if (!ret && n > 0) {
if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n))
ret = -EFAULT;
}
vfree(buf);
return ret < 0 ? ret : n;
}
static int flic_ais_mode_get_all(struct kvm *kvm, struct kvm_device_attr *attr)
{
struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
struct kvm_s390_ais_all ais;
if (attr->attr < sizeof(ais))
return -EINVAL;
if (!test_kvm_facility(kvm, 72))
return -EOPNOTSUPP;
mutex_lock(&fi->ais_lock);
ais.simm = fi->simm;
ais.nimm = fi->nimm;
mutex_unlock(&fi->ais_lock);
if (copy_to_user((void __user *)attr->addr, &ais, sizeof(ais)))
return -EFAULT;
return 0;
}
static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
int r;
switch (attr->group) {
case KVM_DEV_FLIC_GET_ALL_IRQS:
r = get_all_floating_irqs(dev->kvm, (u8 __user *) attr->addr,
attr->attr);
break;
case KVM_DEV_FLIC_AISM_ALL:
r = flic_ais_mode_get_all(dev->kvm, attr);
break;
default:
r = -EINVAL;
}
return r;
}
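/*
 * Fetch the interrupt type first, then copy only the union member
 * that is valid for that type from the userspace struct kvm_s390_irq.
 */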
static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti,
u64 addr)
{
struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
void *target = NULL;
void __user *source;
u64 size;
if (get_user(inti->type, (u64 __user *)addr))
return -EFAULT;
switch (inti->type) {
case KVM_S390_INT_PFAULT_INIT:
case KVM_S390_INT_PFAULT_DONE:
case KVM_S390_INT_VIRTIO:
case KVM_S390_INT_SERVICE:
target = (void *) &inti->ext;
source = &uptr->u.ext;
size = sizeof(inti->ext);
break;
case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
target = (void *) &inti->io;
source = &uptr->u.io;
size = sizeof(inti->io);
break;
case KVM_S390_MCHK:
target = (void *) &inti->mchk;
source = &uptr->u.mchk;
size = sizeof(inti->mchk);
break;
default:
return -EINVAL;
}
if (copy_from_user(target, source, size))
return -EFAULT;
return 0;
}
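/*
 * Inject a userspace-provided array of floating interrupts, one
 * struct kvm_s390_irq at a time, stopping at the first error.
 */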
static int enqueue_floating_irq(struct kvm_device *dev,
struct kvm_device_attr *attr)
{
struct kvm_s390_interrupt_info *inti = NULL;
int r = 0;
int len = attr->attr;
if (len % sizeof(struct kvm_s390_irq) != 0)
return -EINVAL;
	if (len > KVM_S390_FLIC_MAX_BUFFER)
return -EINVAL;
while (len >= sizeof(struct kvm_s390_irq)) {
inti = kzalloc(sizeof(*inti), GFP_KERNEL_ACCOUNT);
if (!inti)
return -ENOMEM;
r = copy_irq_from_user(inti, attr->addr);
if (r) {
kfree(inti);
return r;
}
r = __inject_vm(dev->kvm, inti);
if (r) {
kfree(inti);
return r;
}
len -= sizeof(struct kvm_s390_irq);
attr->addr += sizeof(struct kvm_s390_irq);
}
return r;
}
static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id)
{
if (id >= MAX_S390_IO_ADAPTERS)
return NULL;
id = array_index_nospec(id, MAX_S390_IO_ADAPTERS);
return kvm->arch.adapters[id];
}
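/*
 * Create an I/O adapter from the user-supplied description and enter
 * it into the per-VM adapter table, rejecting out-of-range and
 * duplicate adapter ids.
 */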
static int register_io_adapter(struct kvm_device *dev,
struct kvm_device_attr *attr)
{
struct s390_io_adapter *adapter;
struct kvm_s390_io_adapter adapter_info;
if (copy_from_user(&adapter_info,
(void __user *)attr->addr, sizeof(adapter_info)))
return -EFAULT;
if (adapter_info.id >= MAX_S390_IO_ADAPTERS)
return -EINVAL;
adapter_info.id = array_index_nospec(adapter_info.id,
MAX_S390_IO_ADAPTERS);
if (dev->kvm->arch.adapters[adapter_info.id] != NULL)
return -EINVAL;
adapter = kzalloc(sizeof(*adapter), GFP_KERNEL_ACCOUNT);
if (!adapter)
return -ENOMEM;
adapter->id = adapter_info.id;
adapter->isc = adapter_info.isc;
adapter->maskable = adapter_info.maskable;
adapter->masked = false;
adapter->swap = adapter_info.swap;
adapter->suppressible = (adapter_info.flags) &
KVM_S390_ADAPTER_SUPPRESSIBLE;
dev->kvm->arch.adapters[adapter->id] = adapter;
return 0;
}
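/*
 * Matching userspace sketch (again assuming a flic fd and hypothetical
 * values); the adapter description is passed by pointer in attr.addr:
 *
 *	struct kvm_s390_io_adapter ad = {
 *		.id = 5,
 *		.isc = 3,
 *		.maskable = 1,
 *	};
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_FLIC_ADAPTER_REGISTER,
 *		.addr = (__u64)&ad,
 *	};
 *	ioctl(flic_fd, KVM_SET_DEVICE_ATTR, &attr);
 */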
int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked)
{
int ret;
struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
if (!adapter || !adapter->maskable)
return -EINVAL;
ret = adapter->masked;
adapter->masked = masked;
return ret;
}
void kvm_s390_destroy_adapters(struct kvm *kvm)
{
int i;
for (i = 0; i < MAX_S390_IO_ADAPTERS; i++)
kfree(kvm->arch.adapters[i]);
}
static int modify_io_adapter(struct kvm_device *dev,
struct kvm_device_attr *attr)
{
struct kvm_s390_io_adapter_req req;
struct s390_io_adapter *adapter;
int ret;
if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
return -EFAULT;
adapter = get_io_adapter(dev->kvm, req.id);
if (!adapter)
return -EINVAL;
switch (req.type) {
case KVM_S390_IO_ADAPTER_MASK:
ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask);
if (ret > 0)
ret = 0;
break;
/*
* The following operations are no longer needed and therefore no-ops.
* The gpa to hva translation is done when an IRQ route is set up. The
* set_irq code uses get_user_pages_remote() to do the actual write.
*/
case KVM_S390_IO_ADAPTER_MAP:
case KVM_S390_IO_ADAPTER_UNMAP:
ret = 0;
break;
default:
ret = -EINVAL;
}
return ret;
}
static int clear_io_irq(struct kvm *kvm, struct kvm_device_attr *attr)
{
const u64 isc_mask = 0xffUL << 24; /* all iscs set */
u32 schid;
if (attr->flags)
return -EINVAL;
if (attr->attr != sizeof(schid))
return -EINVAL;
if (copy_from_user(&schid, (void __user *) attr->addr, sizeof(schid)))
return -EFAULT;
if (!schid)
return -EINVAL;
kfree(kvm_s390_get_io_int(kvm, isc_mask, schid));
/*
* If userspace is conforming to the architecture, we can have at most
* one pending I/O interrupt per subchannel, so this is effectively a
* clear all.
*/
return 0;
}
static int modify_ais_mode(struct kvm *kvm, struct kvm_device_attr *attr)
{
struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
struct kvm_s390_ais_req req;
int ret = 0;
if (!test_kvm_facility(kvm, 72))
return -EOPNOTSUPP;
if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
return -EFAULT;
if (req.isc > MAX_ISC)
return -EINVAL;
trace_kvm_s390_modify_ais_mode(req.isc,
(fi->simm & AIS_MODE_MASK(req.isc)) ?
(fi->nimm & AIS_MODE_MASK(req.isc)) ?
2 : KVM_S390_AIS_MODE_SINGLE :
KVM_S390_AIS_MODE_ALL, req.mode);
mutex_lock(&fi->ais_lock);
switch (req.mode) {
case KVM_S390_AIS_MODE_ALL:
fi->simm &= ~AIS_MODE_MASK(req.isc);
fi->nimm &= ~AIS_MODE_MASK(req.isc);
break;
case KVM_S390_AIS_MODE_SINGLE:
fi->simm |= AIS_MODE_MASK(req.isc);
fi->nimm &= ~AIS_MODE_MASK(req.isc);
break;
default:
ret = -EINVAL;
}
mutex_unlock(&fi->ais_lock);
return ret;
}
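/*
 * AIS state per ISC, as encoded by the simm/nimm bit pairs above:
 * simm=0/nimm=0 is all-interruptions mode, simm=1/nimm=0 is
 * single-interruption mode, and simm=1/nimm=1 means a single
 * interruption was already delivered and further airqs for this ISC
 * are suppressed until the mode is reset.
 */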
static int kvm_s390_inject_airq(struct kvm *kvm,
struct s390_io_adapter *adapter)
{
struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
struct kvm_s390_interrupt s390int = {
.type = KVM_S390_INT_IO(1, 0, 0, 0),
.parm = 0,
.parm64 = isc_to_int_word(adapter->isc),
};
int ret = 0;
if (!test_kvm_facility(kvm, 72) || !adapter->suppressible)
return kvm_s390_inject_vm(kvm, &s390int);
mutex_lock(&fi->ais_lock);
if (fi->nimm & AIS_MODE_MASK(adapter->isc)) {
trace_kvm_s390_airq_suppressed(adapter->id, adapter->isc);
goto out;
}
ret = kvm_s390_inject_vm(kvm, &s390int);
if (!ret && (fi->simm & AIS_MODE_MASK(adapter->isc))) {
fi->nimm |= AIS_MODE_MASK(adapter->isc);
trace_kvm_s390_modify_ais_mode(adapter->isc,
KVM_S390_AIS_MODE_SINGLE, 2);
}
out:
mutex_unlock(&fi->ais_lock);
return ret;
}
static int flic_inject_airq(struct kvm *kvm, struct kvm_device_attr *attr)
{
unsigned int id = attr->attr;
struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
if (!adapter)
return -EINVAL;
return kvm_s390_inject_airq(kvm, adapter);
}
static int flic_ais_mode_set_all(struct kvm *kvm, struct kvm_device_attr *attr)
{
struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
struct kvm_s390_ais_all ais;
if (!test_kvm_facility(kvm, 72))
return -EOPNOTSUPP;
if (copy_from_user(&ais, (void __user *)attr->addr, sizeof(ais)))
return -EFAULT;
mutex_lock(&fi->ais_lock);
fi->simm = ais.simm;
fi->nimm = ais.nimm;
mutex_unlock(&fi->ais_lock);
return 0;
}
static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
int r = 0;
unsigned long i;
struct kvm_vcpu *vcpu;
switch (attr->group) {
case KVM_DEV_FLIC_ENQUEUE:
r = enqueue_floating_irq(dev, attr);
break;
case KVM_DEV_FLIC_CLEAR_IRQS:
kvm_s390_clear_float_irqs(dev->kvm);
break;
case KVM_DEV_FLIC_APF_ENABLE:
dev->kvm->arch.gmap->pfault_enabled = 1;
break;
case KVM_DEV_FLIC_APF_DISABLE_WAIT:
dev->kvm->arch.gmap->pfault_enabled = 0;
/*
		 * Make sure no async faults are in transition when
		 * clearing the queues, so we don't need to worry
		 * about late-arriving workers.
*/
synchronize_srcu(&dev->kvm->srcu);
kvm_for_each_vcpu(i, vcpu, dev->kvm)
kvm_clear_async_pf_completion_queue(vcpu);
break;
case KVM_DEV_FLIC_ADAPTER_REGISTER:
r = register_io_adapter(dev, attr);
break;
case KVM_DEV_FLIC_ADAPTER_MODIFY:
r = modify_io_adapter(dev, attr);
break;
case KVM_DEV_FLIC_CLEAR_IO_IRQ:
r = clear_io_irq(dev->kvm, attr);
break;
case KVM_DEV_FLIC_AISM:
r = modify_ais_mode(dev->kvm, attr);
break;
case KVM_DEV_FLIC_AIRQ_INJECT:
r = flic_inject_airq(dev->kvm, attr);
break;
case KVM_DEV_FLIC_AISM_ALL:
r = flic_ais_mode_set_all(dev->kvm, attr);
break;
default:
r = -EINVAL;
}
return r;
}
static int flic_has_attr(struct kvm_device *dev,
struct kvm_device_attr *attr)
{
switch (attr->group) {
case KVM_DEV_FLIC_GET_ALL_IRQS:
case KVM_DEV_FLIC_ENQUEUE:
case KVM_DEV_FLIC_CLEAR_IRQS:
case KVM_DEV_FLIC_APF_ENABLE:
case KVM_DEV_FLIC_APF_DISABLE_WAIT:
case KVM_DEV_FLIC_ADAPTER_REGISTER:
case KVM_DEV_FLIC_ADAPTER_MODIFY:
case KVM_DEV_FLIC_CLEAR_IO_IRQ:
case KVM_DEV_FLIC_AISM:
case KVM_DEV_FLIC_AIRQ_INJECT:
case KVM_DEV_FLIC_AISM_ALL:
return 0;
}
return -ENXIO;
}
static int flic_create(struct kvm_device *dev, u32 type)
{
if (!dev)
return -EINVAL;
if (dev->kvm->arch.flic)
return -EINVAL;
dev->kvm->arch.flic = dev;
return 0;
}
static void flic_destroy(struct kvm_device *dev)
{
dev->kvm->arch.flic = NULL;
kfree(dev);
}
/* s390 floating irq controller (flic) */
struct kvm_device_ops kvm_flic_ops = {
.name = "kvm-flic",
.get_attr = flic_get_attr,
.set_attr = flic_set_attr,
.has_attr = flic_has_attr,
.create = flic_create,
.destroy = flic_destroy,
};
static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
{
unsigned long bit;
bit = bit_nr + (addr % PAGE_SIZE) * 8;
return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
}
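/*
 * Worked example for the numbering above: an indicator byte at page
 * offset 2 with bit_nr 3 yields bit = 3 + 2 * 8 = 19; with swap (MSB-0
 * guest bitmaps on a 64-bit host) the value handed to set_bit() is
 * 19 ^ 63 = 44.
 */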
static struct page *get_map_page(struct kvm *kvm, u64 uaddr)
{
struct page *page = NULL;
mmap_read_lock(kvm->mm);
get_user_pages_remote(kvm->mm, uaddr, 1, FOLL_WRITE,
&page, NULL);
mmap_read_unlock(kvm->mm);
return page;
}
static int adapter_indicators_set(struct kvm *kvm,
struct s390_io_adapter *adapter,
struct kvm_s390_adapter_int *adapter_int)
{
unsigned long bit;
int summary_set, idx;
struct page *ind_page, *summary_page;
void *map;
ind_page = get_map_page(kvm, adapter_int->ind_addr);
if (!ind_page)
return -1;
summary_page = get_map_page(kvm, adapter_int->summary_addr);
if (!summary_page) {
put_page(ind_page);
return -1;
}
idx = srcu_read_lock(&kvm->srcu);
map = page_address(ind_page);
bit = get_ind_bit(adapter_int->ind_addr,
adapter_int->ind_offset, adapter->swap);
set_bit(bit, map);
mark_page_dirty(kvm, adapter_int->ind_addr >> PAGE_SHIFT);
set_page_dirty_lock(ind_page);
map = page_address(summary_page);
bit = get_ind_bit(adapter_int->summary_addr,
adapter_int->summary_offset, adapter->swap);
summary_set = test_and_set_bit(bit, map);
mark_page_dirty(kvm, adapter_int->summary_addr >> PAGE_SHIFT);
set_page_dirty_lock(summary_page);
srcu_read_unlock(&kvm->srcu, idx);
put_page(ind_page);
put_page(summary_page);
return summary_set ? 0 : 1;
}
/*
* < 0 - not injected due to error
* = 0 - coalesced, summary indicator already active
* > 0 - injected interrupt
*/
static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
struct kvm *kvm, int irq_source_id, int level,
bool line_status)
{
int ret;
struct s390_io_adapter *adapter;
/* We're only interested in the 0->1 transition. */
if (!level)
return 0;
adapter = get_io_adapter(kvm, e->adapter.adapter_id);
if (!adapter)
return -1;
ret = adapter_indicators_set(kvm, adapter, &e->adapter);
if ((ret > 0) && !adapter->masked) {
ret = kvm_s390_inject_airq(kvm, adapter);
if (ret == 0)
ret = 1;
}
return ret;
}
/*
* Inject the machine check to the guest.
*/
void kvm_s390_reinject_machine_check(struct kvm_vcpu *vcpu,
struct mcck_volatile_info *mcck_info)
{
struct kvm_s390_interrupt_info inti;
struct kvm_s390_irq irq;
struct kvm_s390_mchk_info *mchk;
union mci mci;
__u64 cr14 = 0; /* upper bits are not used */
int rc;
mci.val = mcck_info->mcic;
if (mci.sr)
cr14 |= CR14_RECOVERY_SUBMASK;
if (mci.dg)
cr14 |= CR14_DEGRADATION_SUBMASK;
if (mci.w)
cr14 |= CR14_WARNING_SUBMASK;
mchk = mci.ck ? &inti.mchk : &irq.u.mchk;
mchk->cr14 = cr14;
mchk->mcic = mcck_info->mcic;
mchk->ext_damage_code = mcck_info->ext_damage_code;
mchk->failing_storage_address = mcck_info->failing_storage_address;
if (mci.ck) {
/* Inject the floating machine check */
inti.type = KVM_S390_MCHK;
rc = __inject_vm(vcpu->kvm, &inti);
} else {
/* Inject the machine check to specified vcpu */
irq.type = KVM_S390_MCHK;
rc = kvm_s390_inject_vcpu(vcpu, &irq);
}
WARN_ON_ONCE(rc);
}
int kvm_set_routing_entry(struct kvm *kvm,
struct kvm_kernel_irq_routing_entry *e,
const struct kvm_irq_routing_entry *ue)
{
u64 uaddr;
switch (ue->type) {
/* we store the userspace addresses instead of the guest addresses */
case KVM_IRQ_ROUTING_S390_ADAPTER:
e->set = set_adapter_int;
uaddr = gmap_translate(kvm->arch.gmap, ue->u.adapter.summary_addr);
if (uaddr == -EFAULT)
return -EFAULT;
e->adapter.summary_addr = uaddr;
uaddr = gmap_translate(kvm->arch.gmap, ue->u.adapter.ind_addr);
if (uaddr == -EFAULT)
return -EFAULT;
e->adapter.ind_addr = uaddr;
e->adapter.summary_offset = ue->u.adapter.summary_offset;
e->adapter.ind_offset = ue->u.adapter.ind_offset;
e->adapter.adapter_id = ue->u.adapter.adapter_id;
return 0;
default:
return -EINVAL;
}
}
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
int irq_source_id, int level, bool line_status)
{
return -EINVAL;
}
int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu, void __user *irqstate, int len)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
struct kvm_s390_irq *buf;
int r = 0;
int n;
buf = vmalloc(len);
if (!buf)
return -ENOMEM;
if (copy_from_user((void *) buf, irqstate, len)) {
r = -EFAULT;
goto out_free;
}
/*
* Don't allow setting the interrupt state
* when there are already interrupts pending
*/
spin_lock(&li->lock);
if (li->pending_irqs) {
r = -EBUSY;
goto out_unlock;
}
for (n = 0; n < len / sizeof(*buf); n++) {
r = do_inject_vcpu(vcpu, &buf[n]);
if (r)
break;
}
out_unlock:
spin_unlock(&li->lock);
out_free:
vfree(buf);
return r;
}
static void store_local_irq(struct kvm_s390_local_interrupt *li,
struct kvm_s390_irq *irq,
unsigned long irq_type)
{
switch (irq_type) {
case IRQ_PEND_MCHK_EX:
case IRQ_PEND_MCHK_REP:
irq->type = KVM_S390_MCHK;
irq->u.mchk = li->irq.mchk;
break;
case IRQ_PEND_PROG:
irq->type = KVM_S390_PROGRAM_INT;
irq->u.pgm = li->irq.pgm;
break;
case IRQ_PEND_PFAULT_INIT:
irq->type = KVM_S390_INT_PFAULT_INIT;
irq->u.ext = li->irq.ext;
break;
case IRQ_PEND_EXT_EXTERNAL:
irq->type = KVM_S390_INT_EXTERNAL_CALL;
irq->u.extcall = li->irq.extcall;
break;
case IRQ_PEND_EXT_CLOCK_COMP:
irq->type = KVM_S390_INT_CLOCK_COMP;
break;
case IRQ_PEND_EXT_CPU_TIMER:
irq->type = KVM_S390_INT_CPU_TIMER;
break;
case IRQ_PEND_SIGP_STOP:
irq->type = KVM_S390_SIGP_STOP;
irq->u.stop = li->irq.stop;
break;
case IRQ_PEND_RESTART:
irq->type = KVM_S390_RESTART;
break;
case IRQ_PEND_SET_PREFIX:
irq->type = KVM_S390_SIGP_SET_PREFIX;
irq->u.prefix = li->irq.prefix;
break;
}
}
int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
{
int scn;
DECLARE_BITMAP(sigp_emerg_pending, KVM_MAX_VCPUS);
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
unsigned long pending_irqs;
struct kvm_s390_irq irq;
unsigned long irq_type;
int cpuaddr;
int n = 0;
spin_lock(&li->lock);
pending_irqs = li->pending_irqs;
memcpy(&sigp_emerg_pending, &li->sigp_emerg_pending,
sizeof(sigp_emerg_pending));
spin_unlock(&li->lock);
for_each_set_bit(irq_type, &pending_irqs, IRQ_PEND_COUNT) {
memset(&irq, 0, sizeof(irq));
if (irq_type == IRQ_PEND_EXT_EMERGENCY)
continue;
if (n + sizeof(irq) > len)
return -ENOBUFS;
store_local_irq(&vcpu->arch.local_int, &irq, irq_type);
if (copy_to_user(&buf[n], &irq, sizeof(irq)))
return -EFAULT;
n += sizeof(irq);
}
if (test_bit(IRQ_PEND_EXT_EMERGENCY, &pending_irqs)) {
for_each_set_bit(cpuaddr, sigp_emerg_pending, KVM_MAX_VCPUS) {
memset(&irq, 0, sizeof(irq));
if (n + sizeof(irq) > len)
return -ENOBUFS;
irq.type = KVM_S390_INT_EMERGENCY;
irq.u.emerg.code = cpuaddr;
if (copy_to_user(&buf[n], &irq, sizeof(irq)))
return -EFAULT;
n += sizeof(irq);
}
}
if (sca_ext_call_pending(vcpu, &scn)) {
if (n + sizeof(irq) > len)
return -ENOBUFS;
memset(&irq, 0, sizeof(irq));
irq.type = KVM_S390_INT_EXTERNAL_CALL;
irq.u.extcall.code = scn;
if (copy_to_user(&buf[n], &irq, sizeof(irq)))
return -EFAULT;
n += sizeof(irq);
}
return n;
}
static void __airqs_kick_single_vcpu(struct kvm *kvm, u8 deliverable_mask)
{
int vcpu_idx, online_vcpus = atomic_read(&kvm->online_vcpus);
struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
struct kvm_vcpu *vcpu;
u8 vcpu_isc_mask;
for_each_set_bit(vcpu_idx, kvm->arch.idle_mask, online_vcpus) {
vcpu = kvm_get_vcpu(kvm, vcpu_idx);
if (psw_ioint_disabled(vcpu))
continue;
vcpu_isc_mask = (u8)(vcpu->arch.sie_block->gcr[6] >> 24);
if (deliverable_mask & vcpu_isc_mask) {
			/* recently kicked but not yet running */
if (test_and_set_bit(vcpu_idx, gi->kicked_mask))
return;
kvm_s390_vcpu_wakeup(vcpu);
return;
}
}
}
static enum hrtimer_restart gisa_vcpu_kicker(struct hrtimer *timer)
{
struct kvm_s390_gisa_interrupt *gi =
container_of(timer, struct kvm_s390_gisa_interrupt, timer);
struct kvm *kvm =
container_of(gi->origin, struct sie_page2, gisa)->kvm;
u8 pending_mask;
pending_mask = gisa_get_ipm_or_restore_iam(gi);
if (pending_mask) {
__airqs_kick_single_vcpu(kvm, pending_mask);
hrtimer_forward_now(timer, ns_to_ktime(gi->expires));
return HRTIMER_RESTART;
}
return HRTIMER_NORESTART;
}
#define NULL_GISA_ADDR 0x00000000UL
#define NONE_GISA_ADDR 0x00000001UL
#define GISA_ADDR_MASK 0xfffff000UL
static void process_gib_alert_list(void)
{
struct kvm_s390_gisa_interrupt *gi;
u32 final, gisa_phys, origin = 0UL;
struct kvm_s390_gisa *gisa;
struct kvm *kvm;
do {
/*
* If the NONE_GISA_ADDR is still stored in the alert list
* origin, we will leave the outer loop. No further GISA has
* been added to the alert list by millicode while processing
* the current alert list.
*/
final = (origin & NONE_GISA_ADDR);
/*
* Cut off the alert list and store the NONE_GISA_ADDR in the
* alert list origin to avoid further GAL interruptions.
		 * A new alert list can be built up by millicode in parallel
* for guests not in the yet cut-off alert list. When in the
* final loop, store the NULL_GISA_ADDR instead. This will re-
* enable GAL interruptions on the host again.
*/
origin = xchg(&gib->alert_list_origin,
(!final) ? NONE_GISA_ADDR : NULL_GISA_ADDR);
/*
* Loop through the just cut-off alert list and start the
* gisa timers to kick idle vcpus to consume the pending
* interruptions asap.
*/
while (origin & GISA_ADDR_MASK) {
gisa_phys = origin;
gisa = phys_to_virt(gisa_phys);
origin = gisa->next_alert;
gisa->next_alert = gisa_phys;
kvm = container_of(gisa, struct sie_page2, gisa)->kvm;
gi = &kvm->arch.gisa_int;
if (hrtimer_active(&gi->timer))
hrtimer_cancel(&gi->timer);
hrtimer_start(&gi->timer, 0, HRTIMER_MODE_REL);
}
} while (!final);
}
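/*
 * Illustrative trace (hypothetical addresses): with GISAs
 * 0x2000 -> 0x3000 -> NONE_GISA_ADDR queued, the first iteration xchgs
 * NONE_GISA_ADDR into the list origin and walks 0x2000 and 0x3000,
 * arming each GISA's kick timer and marking it "not in list" by
 * pointing next_alert back at the GISA itself. origin is then left as
 * NONE_GISA_ADDR, so the second iteration computes final != 0, stores
 * NULL_GISA_ADDR (re-enabling GAL interruptions), drains any GISAs
 * alerted in between and terminates.
 */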
void kvm_s390_gisa_clear(struct kvm *kvm)
{
struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
if (!gi->origin)
return;
gisa_clear_ipm(gi->origin);
VM_EVENT(kvm, 3, "gisa 0x%pK cleared", gi->origin);
}
void kvm_s390_gisa_init(struct kvm *kvm)
{
struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
if (!css_general_characteristics.aiv)
return;
gi->origin = &kvm->arch.sie_page2->gisa;
gi->alert.mask = 0;
spin_lock_init(&gi->alert.ref_lock);
gi->expires = 50 * 1000; /* 50 usec */
hrtimer_init(&gi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
gi->timer.function = gisa_vcpu_kicker;
memset(gi->origin, 0, sizeof(struct kvm_s390_gisa));
gi->origin->next_alert = (u32)virt_to_phys(gi->origin);
VM_EVENT(kvm, 3, "gisa 0x%pK initialized", gi->origin);
}
void kvm_s390_gisa_enable(struct kvm *kvm)
{
struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
struct kvm_vcpu *vcpu;
unsigned long i;
u32 gisa_desc;
if (gi->origin)
return;
kvm_s390_gisa_init(kvm);
gisa_desc = kvm_s390_get_gisa_desc(kvm);
if (!gisa_desc)
return;
kvm_for_each_vcpu(i, vcpu, kvm) {
mutex_lock(&vcpu->mutex);
vcpu->arch.sie_block->gd = gisa_desc;
vcpu->arch.sie_block->eca |= ECA_AIV;
VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
mutex_unlock(&vcpu->mutex);
}
}
void kvm_s390_gisa_destroy(struct kvm *kvm)
{
struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
struct kvm_s390_gisa *gisa = gi->origin;
if (!gi->origin)
return;
if (gi->alert.mask)
KVM_EVENT(3, "vm 0x%pK has unexpected iam 0x%02x",
kvm, gi->alert.mask);
while (gisa_in_alert_list(gi->origin))
cpu_relax();
hrtimer_cancel(&gi->timer);
gi->origin = NULL;
VM_EVENT(kvm, 3, "gisa 0x%pK destroyed", gisa);
}
void kvm_s390_gisa_disable(struct kvm *kvm)
{
struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
struct kvm_vcpu *vcpu;
unsigned long i;
if (!gi->origin)
return;
kvm_for_each_vcpu(i, vcpu, kvm) {
mutex_lock(&vcpu->mutex);
vcpu->arch.sie_block->eca &= ~ECA_AIV;
vcpu->arch.sie_block->gd = 0U;
mutex_unlock(&vcpu->mutex);
VCPU_EVENT(vcpu, 3, "AIV disabled for cpu %03u", vcpu->vcpu_id);
}
kvm_s390_gisa_destroy(kvm);
}
/**
* kvm_s390_gisc_register - register a guest ISC
*
* @kvm: the kernel vm to work with
* @gisc: the guest interruption sub class to register
*
 * The function extends the vm specific alert mask by the given ISC.
* The effective IAM mask in the GISA is updated as well
* in case the GISA is not part of the GIB alert list.
* It will be updated latest when the IAM gets restored
* by gisa_get_ipm_or_restore_iam().
*
* Returns: the nonspecific ISC (NISC) the gib alert mechanism
* has registered with the channel subsystem.
* -ENODEV in case the vm uses no GISA
* -ERANGE in case the guest ISC is invalid
*/
int kvm_s390_gisc_register(struct kvm *kvm, u32 gisc)
{
struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
if (!gi->origin)
return -ENODEV;
if (gisc > MAX_ISC)
return -ERANGE;
spin_lock(&gi->alert.ref_lock);
gi->alert.ref_count[gisc]++;
if (gi->alert.ref_count[gisc] == 1) {
gi->alert.mask |= 0x80 >> gisc;
gisa_set_iam(gi->origin, gi->alert.mask);
}
spin_unlock(&gi->alert.ref_lock);
return gib->nisc;
}
EXPORT_SYMBOL_GPL(kvm_s390_gisc_register);
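/*
 * Usage sketch for an assumed in-kernel caller that forwards adapter
 * interrupts through the GISA: register the guest ISC once, program the
 * returned NISC into the adapter and drop the reference on teardown:
 *
 *	int nisc = kvm_s390_gisc_register(kvm, gisc);
 *	if (nisc < 0)
 *		return nisc;
 *	...
 *	kvm_s390_gisc_unregister(kvm, gisc);
 */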
/**
* kvm_s390_gisc_unregister - unregister a guest ISC
*
* @kvm: the kernel vm to work with
 * @gisc: the guest interruption sub class to unregister
*
 * The function reduces the vm specific alert mask by the given ISC.
* The effective IAM mask in the GISA is updated as well
* in case the GISA is not part of the GIB alert list.
* It will be updated latest when the IAM gets restored
* by gisa_get_ipm_or_restore_iam().
*
 * Returns: 0 in case of success
* -ENODEV in case the vm uses no GISA
* -ERANGE in case the guest ISC is invalid
* -EINVAL in case the guest ISC is not registered
*/
int kvm_s390_gisc_unregister(struct kvm *kvm, u32 gisc)
{
struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
int rc = 0;
if (!gi->origin)
return -ENODEV;
if (gisc > MAX_ISC)
return -ERANGE;
spin_lock(&gi->alert.ref_lock);
if (gi->alert.ref_count[gisc] == 0) {
rc = -EINVAL;
goto out;
}
gi->alert.ref_count[gisc]--;
if (gi->alert.ref_count[gisc] == 0) {
gi->alert.mask &= ~(0x80 >> gisc);
gisa_set_iam(gi->origin, gi->alert.mask);
}
out:
spin_unlock(&gi->alert.ref_lock);
return rc;
}
EXPORT_SYMBOL_GPL(kvm_s390_gisc_unregister);
static void aen_host_forward(unsigned long si)
{
struct kvm_s390_gisa_interrupt *gi;
struct zpci_gaite *gaite;
struct kvm *kvm;
gaite = (struct zpci_gaite *)aift->gait +
(si * sizeof(struct zpci_gaite));
if (gaite->count == 0)
return;
if (gaite->aisb != 0)
set_bit_inv(gaite->aisbo, phys_to_virt(gaite->aisb));
kvm = kvm_s390_pci_si_to_kvm(aift, si);
if (!kvm)
return;
gi = &kvm->arch.gisa_int;
if (!(gi->origin->g1.simm & AIS_MODE_MASK(gaite->gisc)) ||
!(gi->origin->g1.nimm & AIS_MODE_MASK(gaite->gisc))) {
gisa_set_ipm_gisc(gi->origin, gaite->gisc);
if (hrtimer_active(&gi->timer))
hrtimer_cancel(&gi->timer);
hrtimer_start(&gi->timer, 0, HRTIMER_MODE_REL);
kvm->stat.aen_forward++;
}
}
static void aen_process_gait(u8 isc)
{
bool found = false, first = true;
union zpci_sic_iib iib = {{0}};
unsigned long si, flags;
spin_lock_irqsave(&aift->gait_lock, flags);
if (!aift->gait) {
spin_unlock_irqrestore(&aift->gait_lock, flags);
return;
}
for (si = 0;;) {
/* Scan adapter summary indicator bit vector */
si = airq_iv_scan(aift->sbv, si, airq_iv_end(aift->sbv));
if (si == -1UL) {
if (first || found) {
/* Re-enable interrupts. */
zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, isc,
&iib);
first = found = false;
} else {
/* Interrupts on and all bits processed */
break;
}
found = false;
si = 0;
/* Scan again after re-enabling interrupts */
continue;
}
found = true;
aen_host_forward(si);
}
spin_unlock_irqrestore(&aift->gait_lock, flags);
}
static void gib_alert_irq_handler(struct airq_struct *airq,
struct tpi_info *tpi_info)
{
struct tpi_adapter_info *info = (struct tpi_adapter_info *)tpi_info;
inc_irq_stat(IRQIO_GAL);
if ((info->forward || info->error) &&
IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM)) {
aen_process_gait(info->isc);
if (info->aism != 0)
process_gib_alert_list();
} else {
process_gib_alert_list();
}
}
static struct airq_struct gib_alert_irq = {
.handler = gib_alert_irq_handler,
};
void kvm_s390_gib_destroy(void)
{
if (!gib)
return;
if (kvm_s390_pci_interp_allowed() && aift) {
mutex_lock(&aift->aift_lock);
kvm_s390_pci_aen_exit();
mutex_unlock(&aift->aift_lock);
}
chsc_sgib(0);
unregister_adapter_interrupt(&gib_alert_irq);
free_page((unsigned long)gib);
gib = NULL;
}
int __init kvm_s390_gib_init(u8 nisc)
{
u32 gib_origin;
int rc = 0;
if (!css_general_characteristics.aiv) {
KVM_EVENT(3, "%s", "gib not initialized, no AIV facility");
goto out;
}
gib = (struct kvm_s390_gib *)get_zeroed_page(GFP_KERNEL_ACCOUNT | GFP_DMA);
if (!gib) {
rc = -ENOMEM;
goto out;
}
gib_alert_irq.isc = nisc;
if (register_adapter_interrupt(&gib_alert_irq)) {
pr_err("Registering the GIB alert interruption handler failed\n");
rc = -EIO;
goto out_free_gib;
}
/* adapter interrupts used for AP (applicable here) don't use the LSI */
*gib_alert_irq.lsi_ptr = 0xff;
gib->nisc = nisc;
gib_origin = virt_to_phys(gib);
if (chsc_sgib(gib_origin)) {
pr_err("Associating the GIB with the AIV facility failed\n");
free_page((unsigned long)gib);
gib = NULL;
rc = -EIO;
goto out_unreg_gal;
}
if (kvm_s390_pci_interp_allowed()) {
if (kvm_s390_pci_aen_init(nisc)) {
pr_err("Initializing AEN for PCI failed\n");
rc = -EIO;
goto out_unreg_gal;
}
}
KVM_EVENT(3, "gib 0x%pK (nisc=%d) initialized", gib, gib->nisc);
goto out;
out_unreg_gal:
unregister_adapter_interrupt(&gib_alert_irq);
out_free_gib:
free_page((unsigned long)gib);
gib = NULL;
out:
return rc;
}
| linux-master | arch/s390/kvm/interrupt.c |
// SPDX-License-Identifier: GPL-2.0
/*
* kvm guest debug support
*
* Copyright IBM Corp. 2014
*
* Author(s): David Hildenbrand <[email protected]>
*/
#include <linux/kvm_host.h>
#include <linux/errno.h>
#include "kvm-s390.h"
#include "gaccess.h"
/*
* Extends the address range given by *start and *stop to include the address
* range starting with estart and the length len. Takes care of overflowing
* intervals and tries to minimize the overall interval size.
*/
static void extend_address_range(u64 *start, u64 *stop, u64 estart, int len)
{
u64 estop;
if (len > 0)
len--;
else
len = 0;
estop = estart + len;
/* 0-0 range represents "not set" */
if ((*start == 0) && (*stop == 0)) {
*start = estart;
*stop = estop;
} else if (*start <= *stop) {
/* increase the existing range */
if (estart < *start)
*start = estart;
if (estop > *stop)
*stop = estop;
} else {
/* "overflowing" interval, whereby *stop > *start */
if (estart <= *stop) {
if (estop > *stop)
*stop = estop;
} else if (estop > *start) {
if (estart < *start)
*start = estart;
}
/* minimize the range */
else if ((estop - *stop) < (*start - estart))
*stop = estop;
else
*start = estart;
}
}
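/*
 * Worked examples (hypothetical addresses): a regular range with
 * *start = 0x1000 and *stop = 0x1fff extended by estart = 0x800,
 * len = 0x100 becomes 0x800-0x1fff. An "overflowing" range with
 * *start = 0xff00 and *stop = 0x100 extended by estart = 0x200,
 * len = 8 touches neither end, so the smaller growth wins:
 * estop - *stop = 0x107 < *start - estart = 0xfd00, giving
 * 0xff00-0x207.
 */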
#define MAX_INST_SIZE 6
static void enable_all_hw_bp(struct kvm_vcpu *vcpu)
{
unsigned long start, len;
u64 *cr9 = &vcpu->arch.sie_block->gcr[9];
u64 *cr10 = &vcpu->arch.sie_block->gcr[10];
u64 *cr11 = &vcpu->arch.sie_block->gcr[11];
int i;
if (vcpu->arch.guestdbg.nr_hw_bp <= 0 ||
vcpu->arch.guestdbg.hw_bp_info == NULL)
return;
/*
* If the guest is not interested in branching events, we can safely
* limit them to the PER address range.
*/
if (!(*cr9 & PER_EVENT_BRANCH))
*cr9 |= PER_CONTROL_BRANCH_ADDRESS;
*cr9 |= PER_EVENT_IFETCH | PER_EVENT_BRANCH;
for (i = 0; i < vcpu->arch.guestdbg.nr_hw_bp; i++) {
start = vcpu->arch.guestdbg.hw_bp_info[i].addr;
len = vcpu->arch.guestdbg.hw_bp_info[i].len;
/*
* The instruction in front of the desired bp has to
* report instruction-fetching events
*/
if (start < MAX_INST_SIZE) {
len += start;
start = 0;
} else {
start -= MAX_INST_SIZE;
len += MAX_INST_SIZE;
}
extend_address_range(cr10, cr11, start, len);
}
}
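/*
 * Example of the backward extension above (hypothetical address): a
 * breakpoint at 0x5000 with len 1 yields start = 0x4ffa and len = 7,
 * so the PER range covers 0x4ffa-0x5000 and even the longest (6 byte)
 * instruction right in front of the breakpoint reports an
 * instruction-fetch event.
 */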
static void enable_all_hw_wp(struct kvm_vcpu *vcpu)
{
unsigned long start, len;
u64 *cr9 = &vcpu->arch.sie_block->gcr[9];
u64 *cr10 = &vcpu->arch.sie_block->gcr[10];
u64 *cr11 = &vcpu->arch.sie_block->gcr[11];
int i;
if (vcpu->arch.guestdbg.nr_hw_wp <= 0 ||
vcpu->arch.guestdbg.hw_wp_info == NULL)
return;
/* if host uses storage alternation for special address
* spaces, enable all events and give all to the guest */
if (*cr9 & PER_EVENT_STORE && *cr9 & PER_CONTROL_ALTERATION) {
*cr9 &= ~PER_CONTROL_ALTERATION;
*cr10 = 0;
*cr11 = -1UL;
} else {
*cr9 &= ~PER_CONTROL_ALTERATION;
*cr9 |= PER_EVENT_STORE;
for (i = 0; i < vcpu->arch.guestdbg.nr_hw_wp; i++) {
start = vcpu->arch.guestdbg.hw_wp_info[i].addr;
len = vcpu->arch.guestdbg.hw_wp_info[i].len;
extend_address_range(cr10, cr11, start, len);
}
}
}
void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu)
{
vcpu->arch.guestdbg.cr0 = vcpu->arch.sie_block->gcr[0];
vcpu->arch.guestdbg.cr9 = vcpu->arch.sie_block->gcr[9];
vcpu->arch.guestdbg.cr10 = vcpu->arch.sie_block->gcr[10];
vcpu->arch.guestdbg.cr11 = vcpu->arch.sie_block->gcr[11];
}
void kvm_s390_restore_guest_per_regs(struct kvm_vcpu *vcpu)
{
vcpu->arch.sie_block->gcr[0] = vcpu->arch.guestdbg.cr0;
vcpu->arch.sie_block->gcr[9] = vcpu->arch.guestdbg.cr9;
vcpu->arch.sie_block->gcr[10] = vcpu->arch.guestdbg.cr10;
vcpu->arch.sie_block->gcr[11] = vcpu->arch.guestdbg.cr11;
}
void kvm_s390_patch_guest_per_regs(struct kvm_vcpu *vcpu)
{
/*
	 * TODO: only set these controls if the guest PSW has PER enabled,
	 * otherwise use 0s! This reduces the amount of reported events.
* Need to intercept all psw changes!
*/
if (guestdbg_sstep_enabled(vcpu)) {
/* disable timer (clock-comparator) interrupts */
vcpu->arch.sie_block->gcr[0] &= ~CR0_CLOCK_COMPARATOR_SUBMASK;
vcpu->arch.sie_block->gcr[9] |= PER_EVENT_IFETCH;
vcpu->arch.sie_block->gcr[10] = 0;
vcpu->arch.sie_block->gcr[11] = -1UL;
}
if (guestdbg_hw_bp_enabled(vcpu)) {
enable_all_hw_bp(vcpu);
enable_all_hw_wp(vcpu);
}
/* TODO: Instruction-fetching-nullification not allowed for now */
if (vcpu->arch.sie_block->gcr[9] & PER_EVENT_NULLIFICATION)
vcpu->arch.sie_block->gcr[9] &= ~PER_EVENT_NULLIFICATION;
}
#define MAX_WP_SIZE 100
static int __import_wp_info(struct kvm_vcpu *vcpu,
struct kvm_hw_breakpoint *bp_data,
struct kvm_hw_wp_info_arch *wp_info)
{
int ret = 0;
wp_info->len = bp_data->len;
wp_info->addr = bp_data->addr;
wp_info->phys_addr = bp_data->phys_addr;
wp_info->old_data = NULL;
if (wp_info->len < 0 || wp_info->len > MAX_WP_SIZE)
return -EINVAL;
wp_info->old_data = kmalloc(bp_data->len, GFP_KERNEL_ACCOUNT);
if (!wp_info->old_data)
return -ENOMEM;
	/* try to back up the original value */
ret = read_guest_abs(vcpu, wp_info->phys_addr, wp_info->old_data,
wp_info->len);
if (ret) {
kfree(wp_info->old_data);
wp_info->old_data = NULL;
}
return ret;
}
#define MAX_BP_COUNT 50
int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu,
struct kvm_guest_debug *dbg)
{
int ret = 0, nr_wp = 0, nr_bp = 0, i;
struct kvm_hw_breakpoint *bp_data = NULL;
struct kvm_hw_wp_info_arch *wp_info = NULL;
struct kvm_hw_bp_info_arch *bp_info = NULL;
if (dbg->arch.nr_hw_bp <= 0 || !dbg->arch.hw_bp)
return 0;
else if (dbg->arch.nr_hw_bp > MAX_BP_COUNT)
return -EINVAL;
bp_data = memdup_user(dbg->arch.hw_bp,
sizeof(*bp_data) * dbg->arch.nr_hw_bp);
if (IS_ERR(bp_data))
return PTR_ERR(bp_data);
for (i = 0; i < dbg->arch.nr_hw_bp; i++) {
switch (bp_data[i].type) {
case KVM_HW_WP_WRITE:
nr_wp++;
break;
case KVM_HW_BP:
nr_bp++;
break;
default:
break;
}
}
if (nr_wp > 0) {
wp_info = kmalloc_array(nr_wp,
sizeof(*wp_info),
GFP_KERNEL_ACCOUNT);
if (!wp_info) {
ret = -ENOMEM;
goto error;
}
}
if (nr_bp > 0) {
bp_info = kmalloc_array(nr_bp,
sizeof(*bp_info),
GFP_KERNEL_ACCOUNT);
if (!bp_info) {
ret = -ENOMEM;
goto error;
}
}
for (nr_wp = 0, nr_bp = 0, i = 0; i < dbg->arch.nr_hw_bp; i++) {
switch (bp_data[i].type) {
case KVM_HW_WP_WRITE:
ret = __import_wp_info(vcpu, &bp_data[i],
&wp_info[nr_wp]);
if (ret)
goto error;
nr_wp++;
break;
case KVM_HW_BP:
bp_info[nr_bp].len = bp_data[i].len;
bp_info[nr_bp].addr = bp_data[i].addr;
nr_bp++;
break;
}
}
vcpu->arch.guestdbg.nr_hw_bp = nr_bp;
vcpu->arch.guestdbg.hw_bp_info = bp_info;
vcpu->arch.guestdbg.nr_hw_wp = nr_wp;
vcpu->arch.guestdbg.hw_wp_info = wp_info;
return 0;
error:
kfree(bp_data);
kfree(wp_info);
kfree(bp_info);
return ret;
}
void kvm_s390_clear_bp_data(struct kvm_vcpu *vcpu)
{
int i;
struct kvm_hw_wp_info_arch *hw_wp_info = NULL;
for (i = 0; i < vcpu->arch.guestdbg.nr_hw_wp; i++) {
hw_wp_info = &vcpu->arch.guestdbg.hw_wp_info[i];
kfree(hw_wp_info->old_data);
hw_wp_info->old_data = NULL;
}
kfree(vcpu->arch.guestdbg.hw_wp_info);
vcpu->arch.guestdbg.hw_wp_info = NULL;
kfree(vcpu->arch.guestdbg.hw_bp_info);
vcpu->arch.guestdbg.hw_bp_info = NULL;
vcpu->arch.guestdbg.nr_hw_wp = 0;
vcpu->arch.guestdbg.nr_hw_bp = 0;
}
static inline int in_addr_range(u64 addr, u64 a, u64 b)
{
if (a <= b)
return (addr >= a) && (addr <= b);
else
/* "overflowing" interval */
return (addr >= a) || (addr <= b);
}
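/*
 * E.g. for a wrapped range with a = 0xff00 and b = 0x100 (a > b), both
 * addr = 0xff80 and addr = 0x80 are inside, while addr = 0x8000 is not.
 */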
#define end_of_range(bp_info) (bp_info->addr + bp_info->len - 1)
static struct kvm_hw_bp_info_arch *find_hw_bp(struct kvm_vcpu *vcpu,
unsigned long addr)
{
struct kvm_hw_bp_info_arch *bp_info = vcpu->arch.guestdbg.hw_bp_info;
int i;
if (vcpu->arch.guestdbg.nr_hw_bp == 0)
return NULL;
for (i = 0; i < vcpu->arch.guestdbg.nr_hw_bp; i++) {
/* addr is directly the start or in the range of a bp */
if (addr == bp_info->addr)
goto found;
if (bp_info->len > 0 &&
in_addr_range(addr, bp_info->addr, end_of_range(bp_info)))
goto found;
bp_info++;
}
return NULL;
found:
return bp_info;
}
static struct kvm_hw_wp_info_arch *any_wp_changed(struct kvm_vcpu *vcpu)
{
int i;
struct kvm_hw_wp_info_arch *wp_info = NULL;
void *temp = NULL;
if (vcpu->arch.guestdbg.nr_hw_wp == 0)
return NULL;
for (i = 0; i < vcpu->arch.guestdbg.nr_hw_wp; i++) {
wp_info = &vcpu->arch.guestdbg.hw_wp_info[i];
if (!wp_info || !wp_info->old_data || wp_info->len <= 0)
continue;
temp = kmalloc(wp_info->len, GFP_KERNEL_ACCOUNT);
if (!temp)
continue;
/* refetch the wp data and compare it to the old value */
if (!read_guest_abs(vcpu, wp_info->phys_addr, temp,
wp_info->len)) {
if (memcmp(temp, wp_info->old_data, wp_info->len)) {
kfree(temp);
return wp_info;
}
}
kfree(temp);
temp = NULL;
}
return NULL;
}
void kvm_s390_prepare_debug_exit(struct kvm_vcpu *vcpu)
{
vcpu->run->exit_reason = KVM_EXIT_DEBUG;
vcpu->guest_debug &= ~KVM_GUESTDBG_EXIT_PENDING;
}
#define PER_CODE_MASK (PER_EVENT_MASK >> 24)
#define PER_CODE_BRANCH (PER_EVENT_BRANCH >> 24)
#define PER_CODE_IFETCH (PER_EVENT_IFETCH >> 24)
#define PER_CODE_STORE (PER_EVENT_STORE >> 24)
#define PER_CODE_STORE_REAL (PER_EVENT_STORE_REAL >> 24)
#define per_bp_event(code) \
(code & (PER_CODE_IFETCH | PER_CODE_BRANCH))
#define per_write_wp_event(code) \
(code & (PER_CODE_STORE | PER_CODE_STORE_REAL))
static int debug_exit_required(struct kvm_vcpu *vcpu, u8 perc,
unsigned long peraddr)
{
struct kvm_debug_exit_arch *debug_exit = &vcpu->run->debug.arch;
struct kvm_hw_wp_info_arch *wp_info = NULL;
struct kvm_hw_bp_info_arch *bp_info = NULL;
unsigned long addr = vcpu->arch.sie_block->gpsw.addr;
if (guestdbg_hw_bp_enabled(vcpu)) {
if (per_write_wp_event(perc) &&
vcpu->arch.guestdbg.nr_hw_wp > 0) {
wp_info = any_wp_changed(vcpu);
if (wp_info) {
debug_exit->addr = wp_info->addr;
debug_exit->type = KVM_HW_WP_WRITE;
goto exit_required;
}
}
if (per_bp_event(perc) &&
vcpu->arch.guestdbg.nr_hw_bp > 0) {
bp_info = find_hw_bp(vcpu, addr);
/* remove duplicate events if PC==PER address */
if (bp_info && (addr != peraddr)) {
debug_exit->addr = addr;
debug_exit->type = KVM_HW_BP;
vcpu->arch.guestdbg.last_bp = addr;
goto exit_required;
}
/* breakpoint missed */
bp_info = find_hw_bp(vcpu, peraddr);
if (bp_info && vcpu->arch.guestdbg.last_bp != peraddr) {
debug_exit->addr = peraddr;
debug_exit->type = KVM_HW_BP;
goto exit_required;
}
}
}
if (guestdbg_sstep_enabled(vcpu) && per_bp_event(perc)) {
debug_exit->addr = addr;
debug_exit->type = KVM_SINGLESTEP;
goto exit_required;
}
return 0;
exit_required:
return 1;
}
static int per_fetched_addr(struct kvm_vcpu *vcpu, unsigned long *addr)
{
u8 exec_ilen = 0;
u16 opcode[3];
int rc;
if (vcpu->arch.sie_block->icptcode == ICPT_PROGI) {
/* PER address references the fetched or the execute instr */
*addr = vcpu->arch.sie_block->peraddr;
/*
* Manually detect if we have an EXECUTE instruction. As
* instructions are always 2 byte aligned we can read the
* first two bytes unconditionally
*/
rc = read_guest_instr(vcpu, *addr, &opcode, 2);
if (rc)
return rc;
if (opcode[0] >> 8 == 0x44)
exec_ilen = 4;
if ((opcode[0] & 0xff0f) == 0xc600)
exec_ilen = 6;
} else {
/* instr was suppressed, calculate the responsible instr */
*addr = __rewind_psw(vcpu->arch.sie_block->gpsw,
kvm_s390_get_ilen(vcpu));
if (vcpu->arch.sie_block->icptstatus & 0x01) {
exec_ilen = (vcpu->arch.sie_block->icptstatus & 0x60) >> 4;
if (!exec_ilen)
exec_ilen = 4;
}
}
if (exec_ilen) {
/* read the complete EXECUTE instr to detect the fetched addr */
rc = read_guest_instr(vcpu, *addr, &opcode, exec_ilen);
if (rc)
return rc;
if (exec_ilen == 6) {
/* EXECUTE RELATIVE LONG - RIL-b format */
s32 rl = *((s32 *) (opcode + 1));
/* rl is a _signed_ 32 bit value specifying halfwords */
*addr += (u64)(s64) rl * 2;
} else {
/* EXECUTE - RX-a format */
u32 base = (opcode[1] & 0xf000) >> 12;
u32 disp = opcode[1] & 0x0fff;
u32 index = opcode[0] & 0x000f;
*addr = base ? vcpu->run->s.regs.gprs[base] : 0;
*addr += index ? vcpu->run->s.regs.gprs[index] : 0;
*addr += disp;
}
*addr = kvm_s390_logical_to_effective(vcpu, *addr);
}
return 0;
}
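/*
 * Worked example for the decoding above (hypothetical register
 * contents): for an EXECUTE encoded as 0x4415 0x2100 (RX-a format),
 * index = 5, base = 2 and disp = 0x100, so the fetched address is
 * gprs[2] + gprs[5] + 0x100. For EXECUTE RELATIVE LONG with rl = -2,
 * the target lies 4 bytes before the EXRL instruction itself.
 */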
#define guest_per_enabled(vcpu) \
(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER)
int kvm_s390_handle_per_ifetch_icpt(struct kvm_vcpu *vcpu)
{
const u64 cr10 = vcpu->arch.sie_block->gcr[10];
const u64 cr11 = vcpu->arch.sie_block->gcr[11];
const u8 ilen = kvm_s390_get_ilen(vcpu);
struct kvm_s390_pgm_info pgm_info = {
.code = PGM_PER,
.per_code = PER_CODE_IFETCH,
.per_address = __rewind_psw(vcpu->arch.sie_block->gpsw, ilen),
};
unsigned long fetched_addr;
int rc;
/*
* The PSW points to the next instruction, therefore the intercepted
* instruction generated a PER i-fetch event. PER address therefore
* points at the previous PSW address (could be an EXECUTE function).
*/
if (!guestdbg_enabled(vcpu))
return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
if (debug_exit_required(vcpu, pgm_info.per_code, pgm_info.per_address))
vcpu->guest_debug |= KVM_GUESTDBG_EXIT_PENDING;
if (!guest_per_enabled(vcpu) ||
!(vcpu->arch.sie_block->gcr[9] & PER_EVENT_IFETCH))
return 0;
rc = per_fetched_addr(vcpu, &fetched_addr);
if (rc < 0)
return rc;
if (rc)
/* instruction-fetching exceptions */
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
if (in_addr_range(fetched_addr, cr10, cr11))
return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
return 0;
}
static int filter_guest_per_event(struct kvm_vcpu *vcpu)
{
const u8 perc = vcpu->arch.sie_block->perc;
u64 addr = vcpu->arch.sie_block->gpsw.addr;
u64 cr9 = vcpu->arch.sie_block->gcr[9];
u64 cr10 = vcpu->arch.sie_block->gcr[10];
u64 cr11 = vcpu->arch.sie_block->gcr[11];
/* filter all events, demanded by the guest */
u8 guest_perc = perc & (cr9 >> 24) & PER_CODE_MASK;
unsigned long fetched_addr;
int rc;
if (!guest_per_enabled(vcpu))
guest_perc = 0;
/* filter "successful-branching" events */
if (guest_perc & PER_CODE_BRANCH &&
cr9 & PER_CONTROL_BRANCH_ADDRESS &&
!in_addr_range(addr, cr10, cr11))
guest_perc &= ~PER_CODE_BRANCH;
/* filter "instruction-fetching" events */
if (guest_perc & PER_CODE_IFETCH) {
rc = per_fetched_addr(vcpu, &fetched_addr);
if (rc < 0)
return rc;
/*
* Don't inject an irq on exceptions. This would make handling
* on icpt code 8 very complex (as PSW was already rewound).
*/
if (rc || !in_addr_range(fetched_addr, cr10, cr11))
guest_perc &= ~PER_CODE_IFETCH;
}
/* All other PER events will be given to the guest */
/* TODO: Check altered address/address space */
vcpu->arch.sie_block->perc = guest_perc;
if (!guest_perc)
vcpu->arch.sie_block->iprcc &= ~PGM_PER;
return 0;
}
#define pssec(vcpu) (vcpu->arch.sie_block->gcr[1] & _ASCE_SPACE_SWITCH)
#define hssec(vcpu) (vcpu->arch.sie_block->gcr[13] & _ASCE_SPACE_SWITCH)
#define old_ssec(vcpu) ((vcpu->arch.sie_block->tecmc >> 31) & 0x1)
#define old_as_is_home(vcpu) !(vcpu->arch.sie_block->tecmc & 0xffff)
int kvm_s390_handle_per_event(struct kvm_vcpu *vcpu)
{
int rc, new_as;
if (debug_exit_required(vcpu, vcpu->arch.sie_block->perc,
vcpu->arch.sie_block->peraddr))
vcpu->guest_debug |= KVM_GUESTDBG_EXIT_PENDING;
rc = filter_guest_per_event(vcpu);
if (rc)
return rc;
/*
* Only RP, SAC, SACF, PT, PTI, PR, PC instructions can trigger
* a space-switch event. PER events enforce space-switch events
* for these instructions. So if no PER event for the guest is left,
* we might have to filter the space-switch element out, too.
*/
if (vcpu->arch.sie_block->iprcc == PGM_SPACE_SWITCH) {
vcpu->arch.sie_block->iprcc = 0;
new_as = psw_bits(vcpu->arch.sie_block->gpsw).as;
/*
* If the AS changed from / to home, we had RP, SAC or SACF
* instruction. Check primary and home space-switch-event
* controls. (theoretically home -> home produced no event)
*/
if (((new_as == PSW_BITS_AS_HOME) ^ old_as_is_home(vcpu)) &&
(pssec(vcpu) || hssec(vcpu)))
vcpu->arch.sie_block->iprcc = PGM_SPACE_SWITCH;
/*
* PT, PTI, PR, PC instruction operate on primary AS only. Check
* if the primary-space-switch-event control was or got set.
*/
if (new_as == PSW_BITS_AS_PRIMARY && !old_as_is_home(vcpu) &&
(pssec(vcpu) || old_ssec(vcpu)))
vcpu->arch.sie_block->iprcc = PGM_SPACE_SWITCH;
}
return 0;
}
| linux-master | arch/s390/kvm/guestdbg.c |
// SPDX-License-Identifier: GPL-2.0
/*
* handling diagnose instructions
*
* Copyright IBM Corp. 2008, 2020
*
* Author(s): Carsten Otte <[email protected]>
* Christian Borntraeger <[email protected]>
*/
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <asm/gmap.h>
#include <asm/virtio-ccw.h>
#include "kvm-s390.h"
#include "trace.h"
#include "trace-s390.h"
#include "gaccess.h"
static int diag_release_pages(struct kvm_vcpu *vcpu)
{
unsigned long start, end;
unsigned long prefix = kvm_s390_get_prefix(vcpu);
start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + PAGE_SIZE;
vcpu->stat.instruction_diagnose_10++;
if (start & ~PAGE_MASK || end & ~PAGE_MASK || start >= end
|| start < 2 * PAGE_SIZE)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
VCPU_EVENT(vcpu, 5, "diag release pages %lX %lX", start, end);
/*
	 * We checked for start >= end above, so let's check for the
* fast path (no prefix swap page involved)
*/
if (end <= prefix || start >= prefix + 2 * PAGE_SIZE) {
gmap_discard(vcpu->arch.gmap, start, end);
} else {
/*
		 * This is the slow path. gmap_discard copes with empty
		 * ranges, so let's split this into before-prefix, prefix
		 * and after-prefix parts and let gmap_discard turn some
		 * of these calls into NOPs.
*/
gmap_discard(vcpu->arch.gmap, start, prefix);
if (start <= prefix)
gmap_discard(vcpu->arch.gmap, 0, PAGE_SIZE);
if (end > prefix + PAGE_SIZE)
gmap_discard(vcpu->arch.gmap, PAGE_SIZE, 2 * PAGE_SIZE);
gmap_discard(vcpu->arch.gmap, prefix + 2 * PAGE_SIZE, end);
}
return 0;
}
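/*
 * Example of the slow-path split (hypothetical addresses, 4K pages):
 * with the prefix at 0x8000 and a release request for 0x6000-0xa000,
 * the calls become gmap_discard(0x6000, 0x8000), gmap_discard(0,
 * 0x1000), gmap_discard(0x1000, 0x2000) and a final no-op
 * gmap_discard(0xa000, 0xa000); the prefix pages are discarded via
 * their absolute addresses 0-0x2000.
 */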
static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
{
struct prs_parm {
u16 code;
u16 subcode;
u16 parm_len;
u16 parm_version;
u64 token_addr;
u64 select_mask;
u64 compare_mask;
u64 zarch;
};
struct prs_parm parm;
int rc;
u16 rx = (vcpu->arch.sie_block->ipa & 0xf0) >> 4;
u16 ry = (vcpu->arch.sie_block->ipa & 0x0f);
VCPU_EVENT(vcpu, 3, "diag page reference parameter block at 0x%llx",
vcpu->run->s.regs.gprs[rx]);
vcpu->stat.instruction_diagnose_258++;
if (vcpu->run->s.regs.gprs[rx] & 7)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], rx, &parm, sizeof(parm));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
if (parm.parm_version != 2 || parm.parm_len < 5 || parm.code != 0x258)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
switch (parm.subcode) {
case 0: /* TOKEN */
VCPU_EVENT(vcpu, 3, "pageref token addr 0x%llx "
"select mask 0x%llx compare mask 0x%llx",
parm.token_addr, parm.select_mask, parm.compare_mask);
if (vcpu->arch.pfault_token != KVM_S390_PFAULT_TOKEN_INVALID) {
/*
* If the pagefault handshake is already activated,
* the token must not be changed. We have to return
* decimal 8 instead, as mandated in SC24-6084.
*/
vcpu->run->s.regs.gprs[ry] = 8;
return 0;
}
if ((parm.compare_mask & parm.select_mask) != parm.compare_mask ||
parm.token_addr & 7 || parm.zarch != 0x8000000000000000ULL)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
if (kvm_is_error_gpa(vcpu->kvm, parm.token_addr))
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
vcpu->arch.pfault_token = parm.token_addr;
vcpu->arch.pfault_select = parm.select_mask;
vcpu->arch.pfault_compare = parm.compare_mask;
vcpu->run->s.regs.gprs[ry] = 0;
rc = 0;
break;
case 1: /*
* CANCEL
		 * The specification allows already pending tokens to survive
		 * the cancel, so to reduce code complexity we assume all
		 * outstanding tokens are already pending.
*/
VCPU_EVENT(vcpu, 3, "pageref cancel addr 0x%llx", parm.token_addr);
if (parm.token_addr || parm.select_mask ||
parm.compare_mask || parm.zarch)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
vcpu->run->s.regs.gprs[ry] = 0;
/*
* If the pfault handling was not established or is already
* canceled SC24-6084 requests to return decimal 4.
*/
if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
vcpu->run->s.regs.gprs[ry] = 4;
else
vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
rc = 0;
break;
default:
rc = -EOPNOTSUPP;
break;
}
return rc;
}
static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
{
VCPU_EVENT(vcpu, 5, "%s", "diag time slice end");
vcpu->stat.instruction_diagnose_44++;
kvm_vcpu_on_spin(vcpu, true);
return 0;
}
static int forward_cnt;
static unsigned long cur_slice;
static int diag9c_forwarding_overrun(void)
{
/* Reset the count on a new slice */
if (time_after(jiffies, cur_slice)) {
cur_slice = jiffies;
forward_cnt = diag9c_forwarding_hz / HZ;
}
return forward_cnt-- <= 0 ? 1 : 0;
}
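/*
 * E.g. with diag9c_forwarding_hz = 400 and HZ = 100, at most four
 * yields per jiffy are forwarded to the target host CPU; any further
 * diag 9c in the same slice takes the no_yield path below.
 */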
static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
{
struct kvm_vcpu *tcpu;
int tcpu_cpu;
int tid;
tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
vcpu->stat.instruction_diagnose_9c++;
/* yield to self */
if (tid == vcpu->vcpu_id)
goto no_yield;
/* yield to invalid */
tcpu = kvm_get_vcpu_by_id(vcpu->kvm, tid);
if (!tcpu)
goto no_yield;
/* target guest VCPU already running */
tcpu_cpu = READ_ONCE(tcpu->cpu);
if (tcpu_cpu >= 0) {
if (!diag9c_forwarding_hz || diag9c_forwarding_overrun())
goto no_yield;
/* target host CPU already running */
if (!vcpu_is_preempted(tcpu_cpu))
goto no_yield;
smp_yield_cpu(tcpu_cpu);
VCPU_EVENT(vcpu, 5,
"diag time slice end directed to %d: yield forwarded",
tid);
vcpu->stat.diag_9c_forward++;
return 0;
}
if (kvm_vcpu_yield_to(tcpu) <= 0)
goto no_yield;
VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d: done", tid);
return 0;
no_yield:
VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d: ignored", tid);
vcpu->stat.diag_9c_ignored++;
return 0;
}
static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
{
unsigned int reg = vcpu->arch.sie_block->ipa & 0xf;
unsigned long subcode = vcpu->run->s.regs.gprs[reg] & 0xffff;
VCPU_EVENT(vcpu, 3, "diag ipl functions, subcode %lx", subcode);
vcpu->stat.instruction_diagnose_308++;
switch (subcode) {
case 3:
vcpu->run->s390_reset_flags = KVM_S390_RESET_CLEAR;
break;
case 4:
vcpu->run->s390_reset_flags = 0;
break;
default:
return -EOPNOTSUPP;
}
/*
* no need to check the return value of vcpu_stop as it can only have
* an error for protvirt, but protvirt means user cpu state
*/
if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
kvm_s390_vcpu_stop(vcpu);
vcpu->run->s390_reset_flags |= KVM_S390_RESET_SUBSYSTEM;
vcpu->run->s390_reset_flags |= KVM_S390_RESET_IPL;
vcpu->run->s390_reset_flags |= KVM_S390_RESET_CPU_INIT;
vcpu->run->exit_reason = KVM_EXIT_S390_RESET;
VCPU_EVENT(vcpu, 3, "requesting userspace resets %llx",
vcpu->run->s390_reset_flags);
trace_kvm_s390_request_resets(vcpu->run->s390_reset_flags);
return -EREMOTE;
}
static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu)
{
int ret;
vcpu->stat.instruction_diagnose_500++;
/* No virtio-ccw notification? Get out quickly. */
if (!vcpu->kvm->arch.css_support ||
(vcpu->run->s.regs.gprs[1] != KVM_S390_VIRTIO_CCW_NOTIFY))
return -EOPNOTSUPP;
VCPU_EVENT(vcpu, 4, "diag 0x500 schid 0x%8.8x queue 0x%x cookie 0x%llx",
(u32) vcpu->run->s.regs.gprs[2],
(u32) vcpu->run->s.regs.gprs[3],
vcpu->run->s.regs.gprs[4]);
/*
* The layout is as follows:
* - gpr 2 contains the subchannel id (passed as addr)
* - gpr 3 contains the virtqueue index (passed as datamatch)
* - gpr 4 contains the index on the bus (optionally)
*/
ret = kvm_io_bus_write_cookie(vcpu, KVM_VIRTIO_CCW_NOTIFY_BUS,
vcpu->run->s.regs.gprs[2] & 0xffffffff,
8, &vcpu->run->s.regs.gprs[3],
vcpu->run->s.regs.gprs[4]);
/*
* Return cookie in gpr 2, but don't overwrite the register if the
* diagnose will be handled by userspace.
*/
if (ret != -EOPNOTSUPP)
vcpu->run->s.regs.gprs[2] = ret;
/* kvm_io_bus_write_cookie returns -EOPNOTSUPP if it found no match. */
return ret < 0 ? ret : 0;
}
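/*
 * Guest-side sketch (hypothetical values) of the hypercall handled
 * above, roughly as a virtio-ccw driver would issue it:
 *
 *	register unsigned long r1 asm("1") = KVM_S390_VIRTIO_CCW_NOTIFY;
 *	register unsigned long r2 asm("2") = schid;
 *	register unsigned long r3 asm("3") = queue;
 *	register unsigned long r4 asm("4") = cookie;
 *	asm volatile("diag 2,4,0x500"
 *		     : "+d" (r2)
 *		     : "d" (r1), "d" (r3), "d" (r4) : "memory");
 *	cookie = r2;	(returned cookie, or negative error)
 */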
int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
{
int code = kvm_s390_get_base_disp_rs(vcpu, NULL) & 0xffff;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
trace_kvm_s390_handle_diag(vcpu, code);
switch (code) {
case 0x10:
return diag_release_pages(vcpu);
case 0x44:
return __diag_time_slice_end(vcpu);
case 0x9c:
return __diag_time_slice_end_directed(vcpu);
case 0x258:
return __diag_page_ref_service(vcpu);
case 0x308:
return __diag_ipl_functions(vcpu);
case 0x500:
return __diag_virtio_hypercall(vcpu);
default:
vcpu->stat.instruction_diagnose_other++;
return -EOPNOTSUPP;
}
}
| linux-master | arch/s390/kvm/diag.c |
// SPDX-License-Identifier: GPL-2.0
/*
* s390 kvm PCI passthrough support
*
* Copyright IBM Corp. 2022
*
* Author(s): Matthew Rosato <[email protected]>
*/
#include <linux/kvm_host.h>
#include <linux/pci.h>
#include <asm/pci.h>
#include <asm/pci_insn.h>
#include <asm/pci_io.h>
#include <asm/sclp.h>
#include "pci.h"
#include "kvm-s390.h"
struct zpci_aift *aift;
static inline int __set_irq_noiib(u16 ctl, u8 isc)
{
union zpci_sic_iib iib = {{0}};
return zpci_set_irq_ctrl(ctl, isc, &iib);
}
void kvm_s390_pci_aen_exit(void)
{
unsigned long flags;
struct kvm_zdev **gait_kzdev;
lockdep_assert_held(&aift->aift_lock);
/*
* Contents of the aipb remain registered for the life of the host
	 * kernel; the information is preserved in zpci_aipb and zpci_aif_sbv
* in case we insert the KVM module again later. Clear the AIFT
* information and free anything not registered with underlying
* firmware.
*/
spin_lock_irqsave(&aift->gait_lock, flags);
gait_kzdev = aift->kzdev;
aift->gait = NULL;
aift->sbv = NULL;
aift->kzdev = NULL;
spin_unlock_irqrestore(&aift->gait_lock, flags);
kfree(gait_kzdev);
}
static int zpci_setup_aipb(u8 nisc)
{
struct page *page;
int size, rc;
zpci_aipb = kzalloc(sizeof(union zpci_sic_iib), GFP_KERNEL);
if (!zpci_aipb)
return -ENOMEM;
aift->sbv = airq_iv_create(ZPCI_NR_DEVICES, AIRQ_IV_ALLOC, NULL);
if (!aift->sbv) {
rc = -ENOMEM;
goto free_aipb;
}
zpci_aif_sbv = aift->sbv;
size = get_order(PAGE_ALIGN(ZPCI_NR_DEVICES *
sizeof(struct zpci_gaite)));
page = alloc_pages(GFP_KERNEL | __GFP_ZERO, size);
if (!page) {
rc = -ENOMEM;
goto free_sbv;
}
aift->gait = (struct zpci_gaite *)page_to_virt(page);
zpci_aipb->aipb.faisb = virt_to_phys(aift->sbv->vector);
zpci_aipb->aipb.gait = virt_to_phys(aift->gait);
zpci_aipb->aipb.afi = nisc;
zpci_aipb->aipb.faal = ZPCI_NR_DEVICES;
/* Setup Adapter Event Notification Interpretation */
if (zpci_set_irq_ctrl(SIC_SET_AENI_CONTROLS, 0, zpci_aipb)) {
rc = -EIO;
goto free_gait;
}
return 0;
free_gait:
free_pages((unsigned long)aift->gait, size);
free_sbv:
airq_iv_release(aift->sbv);
zpci_aif_sbv = NULL;
free_aipb:
kfree(zpci_aipb);
zpci_aipb = NULL;
return rc;
}
static int zpci_reset_aipb(u8 nisc)
{
/*
* AEN registration can only happen once per system boot. If
* an aipb already exists then AEN was already registered and
* we can re-use the aipb contents. This can only happen if
* the KVM module was removed and re-inserted. However, we must
* ensure that the same forwarding ISC is used as this is assigned
* during KVM module load.
*/
if (zpci_aipb->aipb.afi != nisc)
return -EINVAL;
aift->sbv = zpci_aif_sbv;
aift->gait = phys_to_virt(zpci_aipb->aipb.gait);
return 0;
}
int kvm_s390_pci_aen_init(u8 nisc)
{
int rc = 0;
/* If already enabled for AEN, bail out now */
if (aift->gait || aift->sbv)
return -EPERM;
mutex_lock(&aift->aift_lock);
aift->kzdev = kcalloc(ZPCI_NR_DEVICES, sizeof(struct kvm_zdev *),
GFP_KERNEL);
if (!aift->kzdev) {
rc = -ENOMEM;
goto unlock;
}
if (!zpci_aipb)
rc = zpci_setup_aipb(nisc);
else
rc = zpci_reset_aipb(nisc);
if (rc)
goto free_zdev;
/* Enable floating IRQs */
if (__set_irq_noiib(SIC_IRQ_MODE_SINGLE, nisc)) {
rc = -EIO;
kvm_s390_pci_aen_exit();
}
goto unlock;
free_zdev:
kfree(aift->kzdev);
unlock:
mutex_unlock(&aift->aift_lock);
return rc;
}
/* Modify PCI: Register floating adapter interruption forwarding */
static int kvm_zpci_set_airq(struct zpci_dev *zdev)
{
u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT);
struct zpci_fib fib = {};
u8 status;
fib.fmt0.isc = zdev->kzdev->fib.fmt0.isc;
fib.fmt0.sum = 1; /* enable summary notifications */
fib.fmt0.noi = airq_iv_end(zdev->aibv);
fib.fmt0.aibv = virt_to_phys(zdev->aibv->vector);
fib.fmt0.aibvo = 0;
fib.fmt0.aisb = virt_to_phys(aift->sbv->vector + (zdev->aisb / 64) * 8);
fib.fmt0.aisbo = zdev->aisb & 63;
fib.gd = zdev->gisa;
return zpci_mod_fc(req, &fib, &status) ? -EIO : 0;
}
/* Modify PCI: Unregister floating adapter interruption forwarding */
static int kvm_zpci_clear_airq(struct zpci_dev *zdev)
{
u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_DEREG_INT);
struct zpci_fib fib = {};
u8 cc, status;
fib.gd = zdev->gisa;
cc = zpci_mod_fc(req, &fib, &status);
if (cc == 3 || (cc == 1 && status == 24))
/* Function already gone or IRQs already deregistered. */
cc = 0;
return cc ? -EIO : 0;
}
static inline void unaccount_mem(unsigned long nr_pages)
{
struct user_struct *user = get_uid(current_user());
if (user)
atomic_long_sub(nr_pages, &user->locked_vm);
if (current->mm)
		atomic64_sub(nr_pages, &current->mm->pinned_vm);
}
static inline int account_mem(unsigned long nr_pages)
{
struct user_struct *user = get_uid(current_user());
unsigned long page_limit, cur_pages, new_pages;
page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
do {
cur_pages = atomic_long_read(&user->locked_vm);
new_pages = cur_pages + nr_pages;
if (new_pages > page_limit)
return -ENOMEM;
} while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
new_pages) != cur_pages);
	atomic64_add(nr_pages, &current->mm->pinned_vm);
return 0;
}
static int kvm_s390_pci_aif_enable(struct zpci_dev *zdev, struct zpci_fib *fib,
bool assist)
{
struct page *pages[1], *aibv_page, *aisb_page = NULL;
unsigned int msi_vecs, idx;
struct zpci_gaite *gaite;
unsigned long hva, bit;
struct kvm *kvm;
phys_addr_t gaddr;
int rc = 0, gisc, npages, pcount = 0;
/*
* Interrupt forwarding is only applicable if the device is already
* enabled for interpretation
*/
if (zdev->gisa == 0)
return -EINVAL;
kvm = zdev->kzdev->kvm;
msi_vecs = min_t(unsigned int, fib->fmt0.noi, zdev->max_msi);
/* Get the associated forwarding ISC - if invalid, return the error */
gisc = kvm_s390_gisc_register(kvm, fib->fmt0.isc);
if (gisc < 0)
return gisc;
/* Replace AIBV address */
idx = srcu_read_lock(&kvm->srcu);
hva = gfn_to_hva(kvm, gpa_to_gfn((gpa_t)fib->fmt0.aibv));
npages = pin_user_pages_fast(hva, 1, FOLL_WRITE | FOLL_LONGTERM, pages);
srcu_read_unlock(&kvm->srcu, idx);
if (npages < 1) {
rc = -EIO;
goto out;
}
aibv_page = pages[0];
pcount++;
gaddr = page_to_phys(aibv_page) + (fib->fmt0.aibv & ~PAGE_MASK);
fib->fmt0.aibv = gaddr;
/* Pin the guest AISB if one was specified */
if (fib->fmt0.sum == 1) {
idx = srcu_read_lock(&kvm->srcu);
hva = gfn_to_hva(kvm, gpa_to_gfn((gpa_t)fib->fmt0.aisb));
npages = pin_user_pages_fast(hva, 1, FOLL_WRITE | FOLL_LONGTERM,
pages);
srcu_read_unlock(&kvm->srcu, idx);
if (npages < 1) {
rc = -EIO;
goto unpin1;
}
aisb_page = pages[0];
pcount++;
}
/* Account for pinned pages, roll back on failure */
	if (account_mem(pcount)) {
		rc = -ENOMEM;
		goto unpin2;
	}
/* AISB must be allocated before we can fill in GAITE */
mutex_lock(&aift->aift_lock);
bit = airq_iv_alloc_bit(aift->sbv);
	if (bit == -1UL) {
		/* no summary bit available */
		rc = -EIO;
		goto unlock;
	}
zdev->aisb = bit; /* store the summary bit number */
zdev->aibv = airq_iv_create(msi_vecs, AIRQ_IV_DATA |
AIRQ_IV_BITLOCK |
AIRQ_IV_GUESTVEC,
				    phys_to_virt(fib->fmt0.aibv));
	if (!zdev->aibv) {
		airq_iv_free_bit(aift->sbv, zdev->aisb);
		rc = -ENOMEM;
		goto unlock;
	}
spin_lock_irq(&aift->gait_lock);
gaite = (struct zpci_gaite *)aift->gait + (zdev->aisb *
sizeof(struct zpci_gaite));
/* If assist not requested, host will get all alerts */
if (assist)
gaite->gisa = (u32)virt_to_phys(&kvm->arch.sie_page2->gisa);
else
gaite->gisa = 0;
gaite->gisc = fib->fmt0.isc;
gaite->count++;
gaite->aisbo = fib->fmt0.aisbo;
gaite->aisb = virt_to_phys(page_address(aisb_page) + (fib->fmt0.aisb &
~PAGE_MASK));
aift->kzdev[zdev->aisb] = zdev->kzdev;
spin_unlock_irq(&aift->gait_lock);
/* Update guest FIB for re-issue */
fib->fmt0.aisbo = zdev->aisb & 63;
fib->fmt0.aisb = virt_to_phys(aift->sbv->vector + (zdev->aisb / 64) * 8);
fib->fmt0.isc = gisc;
/* Save some guest fib values in the host for later use */
zdev->kzdev->fib.fmt0.isc = fib->fmt0.isc;
zdev->kzdev->fib.fmt0.aibv = fib->fmt0.aibv;
mutex_unlock(&aift->aift_lock);
/* Issue the clp to setup the irq now */
rc = kvm_zpci_set_airq(zdev);
return rc;
unlock:
mutex_unlock(&aift->aift_lock);
unpin2:
if (fib->fmt0.sum == 1)
unpin_user_page(aisb_page);
unpin1:
unpin_user_page(aibv_page);
out:
return rc;
}
static int kvm_s390_pci_aif_disable(struct zpci_dev *zdev, bool force)
{
struct kvm_zdev *kzdev = zdev->kzdev;
struct zpci_gaite *gaite;
struct page *vpage = NULL, *spage = NULL;
int rc, pcount = 0;
u8 isc;
if (zdev->gisa == 0)
return -EINVAL;
mutex_lock(&aift->aift_lock);
/*
* If the clear fails due to an error, leave now unless we know this
* device is about to go away (force) -- In that case clear the GAITE
* regardless.
*/
rc = kvm_zpci_clear_airq(zdev);
if (rc && !force)
goto out;
if (zdev->kzdev->fib.fmt0.aibv == 0)
goto out;
spin_lock_irq(&aift->gait_lock);
gaite = (struct zpci_gaite *)aift->gait + (zdev->aisb *
sizeof(struct zpci_gaite));
isc = gaite->gisc;
gaite->count--;
if (gaite->count == 0) {
/* Release guest AIBV and AISB */
vpage = phys_to_page(kzdev->fib.fmt0.aibv);
if (gaite->aisb != 0)
spage = phys_to_page(gaite->aisb);
/* Clear the GAIT entry */
gaite->aisb = 0;
gaite->gisc = 0;
gaite->aisbo = 0;
gaite->gisa = 0;
aift->kzdev[zdev->aisb] = NULL;
/* Clear zdev info */
airq_iv_free_bit(aift->sbv, zdev->aisb);
airq_iv_release(zdev->aibv);
zdev->aisb = 0;
zdev->aibv = NULL;
}
spin_unlock_irq(&aift->gait_lock);
kvm_s390_gisc_unregister(kzdev->kvm, isc);
kzdev->fib.fmt0.isc = 0;
kzdev->fib.fmt0.aibv = 0;
if (vpage) {
unpin_user_page(vpage);
pcount++;
}
if (spage) {
unpin_user_page(spage);
pcount++;
}
if (pcount > 0)
unaccount_mem(pcount);
out:
mutex_unlock(&aift->aift_lock);
return rc;
}
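/*
 * kvm_zdev mirrors a zPCI device for KVM use; it is allocated when the
 * device is first registered with a KVM and freed again on release.
 */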
static int kvm_s390_pci_dev_open(struct zpci_dev *zdev)
{
struct kvm_zdev *kzdev;
kzdev = kzalloc(sizeof(struct kvm_zdev), GFP_KERNEL);
if (!kzdev)
return -ENOMEM;
kzdev->zdev = zdev;
zdev->kzdev = kzdev;
return 0;
}
static void kvm_s390_pci_dev_release(struct zpci_dev *zdev)
{
struct kvm_zdev *kzdev;
kzdev = zdev->kzdev;
WARN_ON(kzdev->zdev != zdev);
zdev->kzdev = NULL;
kfree(kzdev);
}
/*
* Register device with the specified KVM. If interpretation facilities are
* available, enable them and let userspace indicate whether or not they will
* be used (specify SHM bit to disable).
*/
static int kvm_s390_pci_register_kvm(void *opaque, struct kvm *kvm)
{
struct zpci_dev *zdev = opaque;
u8 status;
int rc;
if (!zdev)
return -EINVAL;
mutex_lock(&zdev->kzdev_lock);
if (zdev->kzdev || zdev->gisa != 0 || !kvm) {
mutex_unlock(&zdev->kzdev_lock);
return -EINVAL;
}
kvm_get_kvm(kvm);
mutex_lock(&kvm->lock);
rc = kvm_s390_pci_dev_open(zdev);
if (rc)
goto err;
/*
* If interpretation facilities aren't available, add the device to
* the kzdev list but don't enable for interpretation.
*/
if (!kvm_s390_pci_interp_allowed())
goto out;
/*
* If this is the first request to use an interpreted device, make the
* necessary vcpu changes
*/
if (!kvm->arch.use_zpci_interp)
kvm_s390_vcpu_pci_enable_interp(kvm);
if (zdev_enabled(zdev)) {
rc = zpci_disable_device(zdev);
if (rc)
goto err;
}
/*
* Store information about the identity of the kvm guest allowed to
* access this device via interpretation, for later use by the host CLP.
*/
zdev->gisa = (u32)virt_to_phys(&kvm->arch.sie_page2->gisa);
rc = zpci_enable_device(zdev);
if (rc)
goto clear_gisa;
/* Re-register the IOMMU that was already created */
rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
virt_to_phys(zdev->dma_table), &status);
if (rc)
goto clear_gisa;
out:
zdev->kzdev->kvm = kvm;
spin_lock(&kvm->arch.kzdev_list_lock);
list_add_tail(&zdev->kzdev->entry, &kvm->arch.kzdev_list);
spin_unlock(&kvm->arch.kzdev_list_lock);
mutex_unlock(&kvm->lock);
mutex_unlock(&zdev->kzdev_lock);
return 0;
clear_gisa:
zdev->gisa = 0;
err:
if (zdev->kzdev)
kvm_s390_pci_dev_release(zdev);
mutex_unlock(&kvm->lock);
mutex_unlock(&zdev->kzdev_lock);
kvm_put_kvm(kvm);
return rc;
}
static void kvm_s390_pci_unregister_kvm(void *opaque)
{
struct zpci_dev *zdev = opaque;
struct kvm *kvm;
u8 status;
if (!zdev)
return;
mutex_lock(&zdev->kzdev_lock);
if (WARN_ON(!zdev->kzdev)) {
mutex_unlock(&zdev->kzdev_lock);
return;
}
kvm = zdev->kzdev->kvm;
mutex_lock(&kvm->lock);
/*
* A gisa of 0 means interpretation was never enabled; just remove the
* device from the list.
*/
if (zdev->gisa == 0)
goto out;
/* Forwarding must be turned off before interpretation */
if (zdev->kzdev->fib.fmt0.aibv != 0)
kvm_s390_pci_aif_disable(zdev, true);
/* Remove the host CLP guest designation */
zdev->gisa = 0;
if (zdev_enabled(zdev)) {
if (zpci_disable_device(zdev))
goto out;
}
if (zpci_enable_device(zdev))
goto out;
/* Re-register the IOMMU that was already created */
zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
virt_to_phys(zdev->dma_table), &status);
out:
spin_lock(&kvm->arch.kzdev_list_lock);
list_del(&zdev->kzdev->entry);
spin_unlock(&kvm->arch.kzdev_list_lock);
kvm_s390_pci_dev_release(zdev);
mutex_unlock(&kvm->lock);
mutex_unlock(&zdev->kzdev_lock);
kvm_put_kvm(kvm);
}
void kvm_s390_pci_init_list(struct kvm *kvm)
{
spin_lock_init(&kvm->arch.kzdev_list_lock);
INIT_LIST_HEAD(&kvm->arch.kzdev_list);
}
void kvm_s390_pci_clear_list(struct kvm *kvm)
{
/*
* This list should already be empty, either via vfio device closures
* or kvm fd cleanup.
*/
spin_lock(&kvm->arch.kzdev_list_lock);
WARN_ON_ONCE(!list_empty(&kvm->arch.kzdev_list));
spin_unlock(&kvm->arch.kzdev_list_lock);
}
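/* Look up a device on this KVM's zPCI list by its function handle. */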
static struct zpci_dev *get_zdev_from_kvm_by_fh(struct kvm *kvm, u32 fh)
{
struct zpci_dev *zdev = NULL;
struct kvm_zdev *kzdev;
spin_lock(&kvm->arch.kzdev_list_lock);
list_for_each_entry(kzdev, &kvm->arch.kzdev_list, entry) {
if (kzdev->zdev->fh == fh) {
zdev = kzdev->zdev;
break;
}
}
spin_unlock(&kvm->arch.kzdev_list_lock);
return zdev;
}
static int kvm_s390_pci_zpci_reg_aen(struct zpci_dev *zdev,
struct kvm_s390_zpci_op *args)
{
struct zpci_fib fib = {};
bool hostflag;
fib.fmt0.aibv = args->u.reg_aen.ibv;
fib.fmt0.isc = args->u.reg_aen.isc;
fib.fmt0.noi = args->u.reg_aen.noi;
if (args->u.reg_aen.sb != 0) {
fib.fmt0.aisb = args->u.reg_aen.sb;
fib.fmt0.aisbo = args->u.reg_aen.sbo;
fib.fmt0.sum = 1;
} else {
fib.fmt0.aisb = 0;
fib.fmt0.aisbo = 0;
fib.fmt0.sum = 0;
}
hostflag = !(args->u.reg_aen.flags & KVM_S390_ZPCIOP_REGAEN_HOST);
return kvm_s390_pci_aif_enable(zdev, &fib, hostflag);
}
int kvm_s390_pci_zpci_op(struct kvm *kvm, struct kvm_s390_zpci_op *args)
{
struct kvm_zdev *kzdev;
struct zpci_dev *zdev;
int r;
zdev = get_zdev_from_kvm_by_fh(kvm, args->fh);
if (!zdev)
return -ENODEV;
mutex_lock(&zdev->kzdev_lock);
mutex_lock(&kvm->lock);
kzdev = zdev->kzdev;
if (!kzdev) {
r = -ENODEV;
goto out;
}
if (kzdev->kvm != kvm) {
r = -EPERM;
goto out;
}
switch (args->op) {
case KVM_S390_ZPCIOP_REG_AEN:
/* Fail on unknown flags */
if (args->u.reg_aen.flags & ~KVM_S390_ZPCIOP_REGAEN_HOST) {
r = -EINVAL;
break;
}
r = kvm_s390_pci_zpci_reg_aen(zdev, args);
break;
case KVM_S390_ZPCIOP_DEREG_AEN:
r = kvm_s390_pci_aif_disable(zdev, false);
break;
default:
r = -EINVAL;
}
out:
mutex_unlock(&kvm->lock);
mutex_unlock(&zdev->kzdev_lock);
return r;
}
int __init kvm_s390_pci_init(void)
{
zpci_kvm_hook.kvm_register = kvm_s390_pci_register_kvm;
zpci_kvm_hook.kvm_unregister = kvm_s390_pci_unregister_kvm;
if (!kvm_s390_pci_interp_allowed())
return 0;
aift = kzalloc(sizeof(struct zpci_aift), GFP_KERNEL);
if (!aift)
return -ENOMEM;
spin_lock_init(&aift->gait_lock);
mutex_init(&aift->aift_lock);
return 0;
}
void kvm_s390_pci_exit(void)
{
zpci_kvm_hook.kvm_register = NULL;
zpci_kvm_hook.kvm_unregister = NULL;
if (!kvm_s390_pci_interp_allowed())
return;
mutex_destroy(&aift->aift_lock);
kfree(aift);
}
| linux-master | arch/s390/kvm/pci.c |
// SPDX-License-Identifier: GPL-2.0
/*
* guest access functions
*
* Copyright IBM Corp. 2014
*
*/
#include <linux/vmalloc.h>
#include <linux/mm_types.h>
#include <linux/err.h>
#include <linux/pgtable.h>
#include <linux/bitfield.h>
#include <asm/gmap.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include <asm/switch_to.h>
union asce {
unsigned long val;
struct {
unsigned long origin : 52; /* Region- or Segment-Table Origin */
unsigned long : 2;
unsigned long g : 1; /* Subspace Group Control */
unsigned long p : 1; /* Private Space Control */
unsigned long s : 1; /* Storage-Alteration-Event Control */
unsigned long x : 1; /* Space-Switch-Event Control */
unsigned long r : 1; /* Real-Space Control */
unsigned long : 1;
unsigned long dt : 2; /* Designation-Type Control */
unsigned long tl : 2; /* Region- or Segment-Table Length */
};
};
enum {
ASCE_TYPE_SEGMENT = 0,
ASCE_TYPE_REGION3 = 1,
ASCE_TYPE_REGION2 = 2,
ASCE_TYPE_REGION1 = 3
};
union region1_table_entry {
unsigned long val;
struct {
unsigned long rto: 52;/* Region-Table Origin */
unsigned long : 2;
unsigned long p : 1; /* DAT-Protection Bit */
unsigned long : 1;
unsigned long tf : 2; /* Region-Second-Table Offset */
unsigned long i : 1; /* Region-Invalid Bit */
unsigned long : 1;
unsigned long tt : 2; /* Table-Type Bits */
unsigned long tl : 2; /* Region-Second-Table Length */
};
};
union region2_table_entry {
unsigned long val;
struct {
unsigned long rto: 52;/* Region-Table Origin */
unsigned long : 2;
unsigned long p : 1; /* DAT-Protection Bit */
unsigned long : 1;
unsigned long tf : 2; /* Region-Third-Table Offset */
unsigned long i : 1; /* Region-Invalid Bit */
unsigned long : 1;
unsigned long tt : 2; /* Table-Type Bits */
unsigned long tl : 2; /* Region-Third-Table Length */
};
};
struct region3_table_entry_fc0 {
unsigned long sto: 52;/* Segment-Table Origin */
unsigned long : 1;
unsigned long fc : 1; /* Format-Control */
unsigned long p : 1; /* DAT-Protection Bit */
unsigned long : 1;
unsigned long tf : 2; /* Segment-Table Offset */
unsigned long i : 1; /* Region-Invalid Bit */
unsigned long cr : 1; /* Common-Region Bit */
unsigned long tt : 2; /* Table-Type Bits */
unsigned long tl : 2; /* Segment-Table Length */
};
struct region3_table_entry_fc1 {
unsigned long rfaa : 33; /* Region-Frame Absolute Address */
unsigned long : 14;
unsigned long av : 1; /* ACCF-Validity Control */
unsigned long acc: 4; /* Access-Control Bits */
unsigned long f : 1; /* Fetch-Protection Bit */
unsigned long fc : 1; /* Format-Control */
unsigned long p : 1; /* DAT-Protection Bit */
unsigned long iep: 1; /* Instruction-Execution-Protection */
unsigned long : 2;
unsigned long i : 1; /* Region-Invalid Bit */
unsigned long cr : 1; /* Common-Region Bit */
unsigned long tt : 2; /* Table-Type Bits */
unsigned long : 2;
};
union region3_table_entry {
unsigned long val;
struct region3_table_entry_fc0 fc0;
struct region3_table_entry_fc1 fc1;
struct {
unsigned long : 53;
unsigned long fc : 1; /* Format-Control */
unsigned long : 4;
unsigned long i : 1; /* Region-Invalid Bit */
unsigned long cr : 1; /* Common-Region Bit */
unsigned long tt : 2; /* Table-Type Bits */
unsigned long : 2;
};
};
struct segment_entry_fc0 {
unsigned long pto: 53;/* Page-Table Origin */
unsigned long fc : 1; /* Format-Control */
unsigned long p : 1; /* DAT-Protection Bit */
unsigned long : 3;
unsigned long i : 1; /* Segment-Invalid Bit */
unsigned long cs : 1; /* Common-Segment Bit */
unsigned long tt : 2; /* Table-Type Bits */
unsigned long : 2;
};
struct segment_entry_fc1 {
unsigned long sfaa : 44; /* Segment-Frame Absolute Address */
unsigned long : 3;
unsigned long av : 1; /* ACCF-Validity Control */
unsigned long acc: 4; /* Access-Control Bits */
unsigned long f : 1; /* Fetch-Protection Bit */
unsigned long fc : 1; /* Format-Control */
unsigned long p : 1; /* DAT-Protection Bit */
unsigned long iep: 1; /* Instruction-Execution-Protection */
unsigned long : 2;
unsigned long i : 1; /* Segment-Invalid Bit */
unsigned long cs : 1; /* Common-Segment Bit */
unsigned long tt : 2; /* Table-Type Bits */
unsigned long : 2;
};
union segment_table_entry {
unsigned long val;
struct segment_entry_fc0 fc0;
struct segment_entry_fc1 fc1;
struct {
unsigned long : 53;
unsigned long fc : 1; /* Format-Control */
unsigned long : 4;
unsigned long i : 1; /* Segment-Invalid Bit */
unsigned long cs : 1; /* Common-Segment Bit */
unsigned long tt : 2; /* Table-Type Bits */
unsigned long : 2;
};
};
enum {
TABLE_TYPE_SEGMENT = 0,
TABLE_TYPE_REGION3 = 1,
TABLE_TYPE_REGION2 = 2,
TABLE_TYPE_REGION1 = 3
};
union page_table_entry {
unsigned long val;
struct {
unsigned long pfra : 52; /* Page-Frame Real Address */
unsigned long z : 1; /* Zero Bit */
unsigned long i : 1; /* Page-Invalid Bit */
unsigned long p : 1; /* DAT-Protection Bit */
unsigned long iep: 1; /* Instruction-Execution-Protection */
unsigned long : 8;
};
};
/*
* vaddress union, to easily decode a virtual address into its
* region-first index, region-second index, etc. parts.
*/
union vaddress {
unsigned long addr;
struct {
unsigned long rfx : 11;
unsigned long rsx : 11;
unsigned long rtx : 11;
unsigned long sx : 11;
unsigned long px : 8;
unsigned long bx : 12;
};
struct {
unsigned long rfx01 : 2;
unsigned long : 9;
unsigned long rsx01 : 2;
unsigned long : 9;
unsigned long rtx01 : 2;
unsigned long : 9;
unsigned long sx01 : 2;
unsigned long : 29;
};
};
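/*
 * For example, with 4K pages the address 0x12345678 decodes to
 * rfx = rsx = rtx = 0, sx = 0x123, px = 0x45 and bx = 0x678, i.e.
 * the segment, page and byte-offset parts are taken from consecutive
 * bit ranges of the address.
 */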
/*
* raddress union, which will contain the result (real or absolute address)
* of a page table walk. The rfaa, sfaa and pfra members allow the value of
* a region, segment or page table entry to be assigned directly.
*/
union raddress {
unsigned long addr;
unsigned long rfaa : 33; /* Region-Frame Absolute Address */
unsigned long sfaa : 44; /* Segment-Frame Absolute Address */
unsigned long pfra : 52; /* Page-Frame Real Address */
};
union alet {
u32 val;
struct {
u32 reserved : 7;
u32 p : 1;
u32 alesn : 8;
u32 alen : 16;
};
};
union ald {
u32 val;
struct {
u32 : 1;
u32 alo : 24;
u32 all : 7;
};
};
struct ale {
unsigned long i : 1; /* ALEN-Invalid Bit */
unsigned long : 5;
unsigned long fo : 1; /* Fetch-Only Bit */
unsigned long p : 1; /* Private Bit */
unsigned long alesn : 8; /* Access-List-Entry Sequence Number */
unsigned long aleax : 16; /* Access-List-Entry Authorization Index */
unsigned long : 32;
unsigned long : 1;
unsigned long asteo : 25; /* ASN-Second-Table-Entry Origin */
unsigned long : 6;
unsigned long astesn : 32; /* ASTE Sequence Number */
};
struct aste {
unsigned long i : 1; /* ASX-Invalid Bit */
unsigned long ato : 29; /* Authority-Table Origin */
unsigned long : 1;
unsigned long b : 1; /* Base-Space Bit */
unsigned long ax : 16; /* Authorization Index */
unsigned long atl : 12; /* Authority-Table Length */
unsigned long : 2;
unsigned long ca : 1; /* Controlled-ASN Bit */
unsigned long ra : 1; /* Reusable-ASN Bit */
unsigned long asce : 64; /* Address-Space-Control Element */
unsigned long ald : 32;
unsigned long astesn : 32;
/* ... more fields follow */
};
int ipte_lock_held(struct kvm *kvm)
{
if (sclp.has_siif) {
int rc;
read_lock(&kvm->arch.sca_lock);
rc = kvm_s390_get_ipte_control(kvm)->kh != 0;
read_unlock(&kvm->arch.sca_lock);
return rc;
}
return kvm->arch.ipte_lock_count != 0;
}
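/*
 * Two implementations of the IPTE lock: without the SIIF facility the
 * lock is emulated on the host side with a counter (ipte_lock_count)
 * plus the k bit in the ipte control, while with SIIF the kh counter
 * in the ipte control is shared with the hardware.
 */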
static void ipte_lock_simple(struct kvm *kvm)
{
union ipte_control old, new, *ic;
mutex_lock(&kvm->arch.ipte_mutex);
kvm->arch.ipte_lock_count++;
if (kvm->arch.ipte_lock_count > 1)
goto out;
retry:
read_lock(&kvm->arch.sca_lock);
ic = kvm_s390_get_ipte_control(kvm);
do {
old = READ_ONCE(*ic);
if (old.k) {
read_unlock(&kvm->arch.sca_lock);
cond_resched();
goto retry;
}
new = old;
new.k = 1;
} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
read_unlock(&kvm->arch.sca_lock);
out:
mutex_unlock(&kvm->arch.ipte_mutex);
}
static void ipte_unlock_simple(struct kvm *kvm)
{
union ipte_control old, new, *ic;
mutex_lock(&kvm->arch.ipte_mutex);
kvm->arch.ipte_lock_count--;
if (kvm->arch.ipte_lock_count)
goto out;
read_lock(&kvm->arch.sca_lock);
ic = kvm_s390_get_ipte_control(kvm);
do {
old = READ_ONCE(*ic);
new = old;
new.k = 0;
} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
read_unlock(&kvm->arch.sca_lock);
wake_up(&kvm->arch.ipte_wq);
out:
mutex_unlock(&kvm->arch.ipte_mutex);
}
static void ipte_lock_siif(struct kvm *kvm)
{
union ipte_control old, new, *ic;
retry:
read_lock(&kvm->arch.sca_lock);
ic = kvm_s390_get_ipte_control(kvm);
do {
old = READ_ONCE(*ic);
if (old.kg) {
read_unlock(&kvm->arch.sca_lock);
cond_resched();
goto retry;
}
new = old;
new.k = 1;
new.kh++;
} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
read_unlock(&kvm->arch.sca_lock);
}
static void ipte_unlock_siif(struct kvm *kvm)
{
union ipte_control old, new, *ic;
read_lock(&kvm->arch.sca_lock);
ic = kvm_s390_get_ipte_control(kvm);
do {
old = READ_ONCE(*ic);
new = old;
new.kh--;
if (!new.kh)
new.k = 0;
} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
read_unlock(&kvm->arch.sca_lock);
if (!new.kh)
wake_up(&kvm->arch.ipte_wq);
}
void ipte_lock(struct kvm *kvm)
{
if (sclp.has_siif)
ipte_lock_siif(kvm);
else
ipte_lock_simple(kvm);
}
void ipte_unlock(struct kvm *kvm)
{
if (sclp.has_siif)
ipte_unlock_siif(kvm);
else
ipte_unlock_simple(kvm);
}
static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, u8 ar,
enum gacc_mode mode)
{
union alet alet;
struct ale ale;
struct aste aste;
unsigned long ald_addr, authority_table_addr;
union ald ald;
int eax, rc;
u8 authority_table;
if (ar >= NUM_ACRS)
return -EINVAL;
save_access_regs(vcpu->run->s.regs.acrs);
alet.val = vcpu->run->s.regs.acrs[ar];
if (ar == 0 || alet.val == 0) {
asce->val = vcpu->arch.sie_block->gcr[1];
return 0;
} else if (alet.val == 1) {
asce->val = vcpu->arch.sie_block->gcr[7];
return 0;
}
if (alet.reserved)
return PGM_ALET_SPECIFICATION;
if (alet.p)
ald_addr = vcpu->arch.sie_block->gcr[5];
else
ald_addr = vcpu->arch.sie_block->gcr[2];
ald_addr &= 0x7fffffc0;
rc = read_guest_real(vcpu, ald_addr + 16, &ald.val, sizeof(union ald));
if (rc)
return rc;
if (alet.alen / 8 > ald.all)
return PGM_ALEN_TRANSLATION;
if (0x7fffffff - ald.alo * 128 < alet.alen * 16)
return PGM_ADDRESSING;
rc = read_guest_real(vcpu, ald.alo * 128 + alet.alen * 16, &ale,
sizeof(struct ale));
if (rc)
return rc;
if (ale.i == 1)
return PGM_ALEN_TRANSLATION;
if (ale.alesn != alet.alesn)
return PGM_ALE_SEQUENCE;
rc = read_guest_real(vcpu, ale.asteo * 64, &aste, sizeof(struct aste));
if (rc)
return rc;
if (aste.i)
return PGM_ASTE_VALIDITY;
if (aste.astesn != ale.astesn)
return PGM_ASTE_SEQUENCE;
if (ale.p == 1) {
eax = (vcpu->arch.sie_block->gcr[8] >> 16) & 0xffff;
if (ale.aleax != eax) {
if (eax / 16 > aste.atl)
return PGM_EXTENDED_AUTHORITY;
authority_table_addr = aste.ato * 4 + eax / 4;
rc = read_guest_real(vcpu, authority_table_addr,
&authority_table,
sizeof(u8));
if (rc)
return rc;
if ((authority_table & (0x40 >> ((eax & 3) * 2))) == 0)
return PGM_EXTENDED_AUTHORITY;
}
}
if (ale.fo == 1 && mode == GACC_STORE)
return PGM_PROTECTION;
asce->val = aste.asce;
return 0;
}
struct trans_exc_code_bits {
unsigned long addr : 52; /* Translation-exception Address */
unsigned long fsi : 2; /* Access Exception Fetch/Store Indication */
unsigned long : 2;
unsigned long b56 : 1;
unsigned long : 3;
unsigned long b60 : 1;
unsigned long b61 : 1;
unsigned long as : 2; /* ASCE Identifier */
};
enum {
FSI_UNKNOWN = 0, /* Unknown whether fetch or store */
FSI_STORE = 1, /* Exception was due to store operation */
FSI_FETCH = 2 /* Exception was due to fetch operation */
};
enum prot_type {
PROT_TYPE_LA = 0,
PROT_TYPE_KEYC = 1,
PROT_TYPE_ALC = 2,
PROT_TYPE_DAT = 3,
PROT_TYPE_IEP = 4,
/* Dummy value for passing an initialized value when code != PGM_PROTECTION */
PROT_NONE,
};
static int trans_exc_ending(struct kvm_vcpu *vcpu, int code, unsigned long gva, u8 ar,
enum gacc_mode mode, enum prot_type prot, bool terminate)
{
struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
struct trans_exc_code_bits *tec;
memset(pgm, 0, sizeof(*pgm));
pgm->code = code;
tec = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
switch (code) {
case PGM_PROTECTION:
switch (prot) {
case PROT_NONE:
/* We should never get here, acts like termination */
WARN_ON_ONCE(1);
break;
case PROT_TYPE_IEP:
tec->b61 = 1;
fallthrough;
case PROT_TYPE_LA:
tec->b56 = 1;
break;
case PROT_TYPE_KEYC:
tec->b60 = 1;
break;
case PROT_TYPE_ALC:
tec->b60 = 1;
fallthrough;
case PROT_TYPE_DAT:
tec->b61 = 1;
break;
}
if (terminate) {
tec->b56 = 0;
tec->b60 = 0;
tec->b61 = 0;
}
fallthrough;
case PGM_ASCE_TYPE:
case PGM_PAGE_TRANSLATION:
case PGM_REGION_FIRST_TRANS:
case PGM_REGION_SECOND_TRANS:
case PGM_REGION_THIRD_TRANS:
case PGM_SEGMENT_TRANSLATION:
/*
* op_access_id only applies to MOVE_PAGE -> set bit 61.
* exc_access_id has to be set to 0 for some instructions. Both
* cases have to be handled by the caller.
*/
tec->addr = gva >> PAGE_SHIFT;
tec->fsi = mode == GACC_STORE ? FSI_STORE : FSI_FETCH;
tec->as = psw_bits(vcpu->arch.sie_block->gpsw).as;
fallthrough;
case PGM_ALEN_TRANSLATION:
case PGM_ALE_SEQUENCE:
case PGM_ASTE_VALIDITY:
case PGM_ASTE_SEQUENCE:
case PGM_EXTENDED_AUTHORITY:
/*
* We can always store exc_access_id, as it is
* undefined for non-ar cases. It is undefined for
* most DAT protection exceptions.
*/
pgm->exc_access_id = ar;
break;
}
return code;
}
static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva, u8 ar,
enum gacc_mode mode, enum prot_type prot)
{
return trans_exc_ending(vcpu, code, gva, ar, mode, prot, false);
}
static int get_vcpu_asce(struct kvm_vcpu *vcpu, union asce *asce,
unsigned long ga, u8 ar, enum gacc_mode mode)
{
int rc;
struct psw_bits psw = psw_bits(vcpu->arch.sie_block->gpsw);
if (!psw.dat) {
asce->val = 0;
asce->r = 1;
return 0;
}
if ((mode == GACC_IFETCH) && (psw.as != PSW_BITS_AS_HOME))
psw.as = PSW_BITS_AS_PRIMARY;
switch (psw.as) {
case PSW_BITS_AS_PRIMARY:
asce->val = vcpu->arch.sie_block->gcr[1];
return 0;
case PSW_BITS_AS_SECONDARY:
asce->val = vcpu->arch.sie_block->gcr[7];
return 0;
case PSW_BITS_AS_HOME:
asce->val = vcpu->arch.sie_block->gcr[13];
return 0;
case PSW_BITS_AS_ACCREG:
rc = ar_translation(vcpu, asce, ar, mode);
if (rc > 0)
return trans_exc(vcpu, rc, ga, ar, mode, PROT_TYPE_ALC);
return rc;
}
return 0;
}
static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val)
{
return kvm_read_guest(kvm, gpa, val, sizeof(*val));
}
/**
* guest_translate - translate a guest virtual address into a guest absolute address
* @vcpu: virtual cpu
* @gva: guest virtual address
* @gpa: points to where guest physical (absolute) address should be stored
* @asce: effective asce
* @mode: indicates the access mode to be used
* @prot: returns the type for protection exceptions
*
* Translate a guest virtual address into a guest absolute address by means
* of dynamic address translation as specified by the architecture.
* If the resulting absolute address is not available in the configuration
* an addressing exception is indicated and @gpa will not be changed.
*
* Returns: - zero on success; @gpa contains the resulting absolute address
* - a negative value if guest access failed due to e.g. broken
* guest mapping
* - a positive value if an access exception happened. In this case
* the returned value is the program interruption code as defined
* by the architecture
*/
static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
unsigned long *gpa, const union asce asce,
enum gacc_mode mode, enum prot_type *prot)
{
union vaddress vaddr = {.addr = gva};
union raddress raddr = {.addr = gva};
union page_table_entry pte;
int dat_protection = 0;
int iep_protection = 0;
union ctlreg0 ctlreg0;
unsigned long ptr;
int edat1, edat2, iep;
ctlreg0.val = vcpu->arch.sie_block->gcr[0];
edat1 = ctlreg0.edat && test_kvm_facility(vcpu->kvm, 8);
edat2 = edat1 && test_kvm_facility(vcpu->kvm, 78);
iep = ctlreg0.iep && test_kvm_facility(vcpu->kvm, 130);
if (asce.r)
goto real_address;
ptr = asce.origin * PAGE_SIZE;
switch (asce.dt) {
case ASCE_TYPE_REGION1:
if (vaddr.rfx01 > asce.tl)
return PGM_REGION_FIRST_TRANS;
ptr += vaddr.rfx * 8;
break;
case ASCE_TYPE_REGION2:
if (vaddr.rfx)
return PGM_ASCE_TYPE;
if (vaddr.rsx01 > asce.tl)
return PGM_REGION_SECOND_TRANS;
ptr += vaddr.rsx * 8;
break;
case ASCE_TYPE_REGION3:
if (vaddr.rfx || vaddr.rsx)
return PGM_ASCE_TYPE;
if (vaddr.rtx01 > asce.tl)
return PGM_REGION_THIRD_TRANS;
ptr += vaddr.rtx * 8;
break;
case ASCE_TYPE_SEGMENT:
if (vaddr.rfx || vaddr.rsx || vaddr.rtx)
return PGM_ASCE_TYPE;
if (vaddr.sx01 > asce.tl)
return PGM_SEGMENT_TRANSLATION;
ptr += vaddr.sx * 8;
break;
}
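/*
 * Now walk the tables top-down; each case falls through to the next
 * lower level, with ptr pointing at the table entry to fetch.
 */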
switch (asce.dt) {
case ASCE_TYPE_REGION1: {
union region1_table_entry rfte;
if (kvm_is_error_gpa(vcpu->kvm, ptr))
return PGM_ADDRESSING;
if (deref_table(vcpu->kvm, ptr, &rfte.val))
return -EFAULT;
if (rfte.i)
return PGM_REGION_FIRST_TRANS;
if (rfte.tt != TABLE_TYPE_REGION1)
return PGM_TRANSLATION_SPEC;
if (vaddr.rsx01 < rfte.tf || vaddr.rsx01 > rfte.tl)
return PGM_REGION_SECOND_TRANS;
if (edat1)
dat_protection |= rfte.p;
ptr = rfte.rto * PAGE_SIZE + vaddr.rsx * 8;
}
fallthrough;
case ASCE_TYPE_REGION2: {
union region2_table_entry rste;
if (kvm_is_error_gpa(vcpu->kvm, ptr))
return PGM_ADDRESSING;
if (deref_table(vcpu->kvm, ptr, &rste.val))
return -EFAULT;
if (rste.i)
return PGM_REGION_SECOND_TRANS;
if (rste.tt != TABLE_TYPE_REGION2)
return PGM_TRANSLATION_SPEC;
if (vaddr.rtx01 < rste.tf || vaddr.rtx01 > rste.tl)
return PGM_REGION_THIRD_TRANS;
if (edat1)
dat_protection |= rste.p;
ptr = rste.rto * PAGE_SIZE + vaddr.rtx * 8;
}
fallthrough;
case ASCE_TYPE_REGION3: {
union region3_table_entry rtte;
if (kvm_is_error_gpa(vcpu->kvm, ptr))
return PGM_ADDRESSING;
if (deref_table(vcpu->kvm, ptr, &rtte.val))
return -EFAULT;
if (rtte.i)
return PGM_REGION_THIRD_TRANS;
if (rtte.tt != TABLE_TYPE_REGION3)
return PGM_TRANSLATION_SPEC;
if (rtte.cr && asce.p && edat2)
return PGM_TRANSLATION_SPEC;
if (rtte.fc && edat2) {
dat_protection |= rtte.fc1.p;
iep_protection = rtte.fc1.iep;
raddr.rfaa = rtte.fc1.rfaa;
goto absolute_address;
}
if (vaddr.sx01 < rtte.fc0.tf)
return PGM_SEGMENT_TRANSLATION;
if (vaddr.sx01 > rtte.fc0.tl)
return PGM_SEGMENT_TRANSLATION;
if (edat1)
dat_protection |= rtte.fc0.p;
ptr = rtte.fc0.sto * PAGE_SIZE + vaddr.sx * 8;
}
fallthrough;
case ASCE_TYPE_SEGMENT: {
union segment_table_entry ste;
if (kvm_is_error_gpa(vcpu->kvm, ptr))
return PGM_ADDRESSING;
if (deref_table(vcpu->kvm, ptr, &ste.val))
return -EFAULT;
if (ste.i)
return PGM_SEGMENT_TRANSLATION;
if (ste.tt != TABLE_TYPE_SEGMENT)
return PGM_TRANSLATION_SPEC;
if (ste.cs && asce.p)
return PGM_TRANSLATION_SPEC;
if (ste.fc && edat1) {
dat_protection |= ste.fc1.p;
iep_protection = ste.fc1.iep;
raddr.sfaa = ste.fc1.sfaa;
goto absolute_address;
}
dat_protection |= ste.fc0.p;
ptr = ste.fc0.pto * (PAGE_SIZE / 2) + vaddr.px * 8;
}
}
if (kvm_is_error_gpa(vcpu->kvm, ptr))
return PGM_ADDRESSING;
if (deref_table(vcpu->kvm, ptr, &pte.val))
return -EFAULT;
if (pte.i)
return PGM_PAGE_TRANSLATION;
if (pte.z)
return PGM_TRANSLATION_SPEC;
dat_protection |= pte.p;
iep_protection = pte.iep;
raddr.pfra = pte.pfra;
real_address:
raddr.addr = kvm_s390_real_to_abs(vcpu, raddr.addr);
absolute_address:
if (mode == GACC_STORE && dat_protection) {
*prot = PROT_TYPE_DAT;
return PGM_PROTECTION;
}
if (mode == GACC_IFETCH && iep_protection && iep) {
*prot = PROT_TYPE_IEP;
return PGM_PROTECTION;
}
if (kvm_is_error_gpa(vcpu->kvm, raddr.addr))
return PGM_ADDRESSING;
*gpa = raddr.addr;
return 0;
}
static inline int is_low_address(unsigned long ga)
{
/* Check for address ranges 0..511 and 4096..4607 */
return (ga & ~0x11fful) == 0;
}
static int low_address_protection_enabled(struct kvm_vcpu *vcpu,
const union asce asce)
{
union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};
psw_t *psw = &vcpu->arch.sie_block->gpsw;
if (!ctlreg0.lap)
return 0;
if (psw_bits(*psw).dat && asce.p)
return 0;
return 1;
}
static int vm_check_access_key(struct kvm *kvm, u8 access_key,
enum gacc_mode mode, gpa_t gpa)
{
u8 storage_key, access_control;
bool fetch_protected;
unsigned long hva;
int r;
if (access_key == 0)
return 0;
hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
if (kvm_is_error_hva(hva))
return PGM_ADDRESSING;
mmap_read_lock(current->mm);
r = get_guest_storage_key(current->mm, hva, &storage_key);
mmap_read_unlock(current->mm);
if (r)
return r;
access_control = FIELD_GET(_PAGE_ACC_BITS, storage_key);
if (access_control == access_key)
return 0;
fetch_protected = storage_key & _PAGE_FP_BIT;
if ((mode == GACC_FETCH || mode == GACC_IFETCH) && !fetch_protected)
return 0;
return PGM_PROTECTION;
}
static bool fetch_prot_override_applicable(struct kvm_vcpu *vcpu, enum gacc_mode mode,
union asce asce)
{
psw_t *psw = &vcpu->arch.sie_block->gpsw;
unsigned long override;
if (mode == GACC_FETCH || mode == GACC_IFETCH) {
/* check if fetch protection override enabled */
override = vcpu->arch.sie_block->gcr[0];
override &= CR0_FETCH_PROTECTION_OVERRIDE;
/* not applicable if subject to DAT && private space */
override = override && !(psw_bits(*psw).dat && asce.p);
return override;
}
return false;
}
static bool fetch_prot_override_applies(unsigned long ga, unsigned int len)
{
return ga < 2048 && ga + len <= 2048;
}
static bool storage_prot_override_applicable(struct kvm_vcpu *vcpu)
{
/* check if storage protection override enabled */
return vcpu->arch.sie_block->gcr[0] & CR0_STORAGE_PROTECTION_OVERRIDE;
}
static bool storage_prot_override_applies(u8 access_control)
{
/* matches special storage protection override key (9) -> allow */
return access_control == PAGE_SPO_ACC;
}
static int vcpu_check_access_key(struct kvm_vcpu *vcpu, u8 access_key,
enum gacc_mode mode, union asce asce, gpa_t gpa,
unsigned long ga, unsigned int len)
{
u8 storage_key, access_control;
unsigned long hva;
int r;
/* access key 0 matches any storage key -> allow */
if (access_key == 0)
return 0;
/*
* caller needs to ensure that gfn is accessible, so we can
* assume that this cannot fail
*/
hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(gpa));
mmap_read_lock(current->mm);
r = get_guest_storage_key(current->mm, hva, &storage_key);
mmap_read_unlock(current->mm);
if (r)
return r;
access_control = FIELD_GET(_PAGE_ACC_BITS, storage_key);
/* access key matches storage key -> allow */
if (access_control == access_key)
return 0;
if (mode == GACC_FETCH || mode == GACC_IFETCH) {
/* it is a fetch and fetch protection is off -> allow */
if (!(storage_key & _PAGE_FP_BIT))
return 0;
if (fetch_prot_override_applicable(vcpu, mode, asce) &&
fetch_prot_override_applies(ga, len))
return 0;
}
if (storage_prot_override_applicable(vcpu) &&
storage_prot_override_applies(access_control))
return 0;
return PGM_PROTECTION;
}
/**
* guest_range_to_gpas() - Calculate guest physical addresses of page fragments
* covering a logical range
* @vcpu: virtual cpu
* @ga: guest address, start of range
* @ar: access register
* @gpas: output argument, may be NULL
* @len: length of range in bytes
* @asce: address-space-control element to use for translation
* @mode: access mode
* @access_key: access key to match the range's storage keys against
*
* Translate a logical range to a series of guest absolute addresses,
* such that the concatenation of page fragments starting at each gpa make up
* the whole range.
* The translation is performed as if done by the cpu for the given @asce, @ar,
* @mode and state of the @vcpu.
* If the translation causes an exception, its program interruption code is
* returned and the &struct kvm_s390_pgm_info pgm member of @vcpu is modified
* such that a subsequent call to kvm_s390_inject_prog_vcpu() will inject
* a correct exception into the guest.
* The resulting gpas are stored into @gpas, unless it is NULL.
*
* Note: All fragments except the first one start at the beginning of a page.
* When deriving the boundaries of a fragment from a gpa, all but the last
* fragment end at the end of the page.
*
* Return:
* * 0 - success
* * <0 - translation could not be performed, for example if guest
* memory could not be accessed
* * >0 - an access exception occurred. In this case the returned value
* is the program interruption code and the contents of pgm may
* be used to inject an exception into the guest.
*/
static int guest_range_to_gpas(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
unsigned long *gpas, unsigned long len,
const union asce asce, enum gacc_mode mode,
u8 access_key)
{
psw_t *psw = &vcpu->arch.sie_block->gpsw;
unsigned int offset = offset_in_page(ga);
unsigned int fragment_len;
int lap_enabled, rc = 0;
enum prot_type prot;
unsigned long gpa;
lap_enabled = low_address_protection_enabled(vcpu, asce);
while (min(PAGE_SIZE - offset, len) > 0) {
fragment_len = min(PAGE_SIZE - offset, len);
ga = kvm_s390_logical_to_effective(vcpu, ga);
if (mode == GACC_STORE && lap_enabled && is_low_address(ga))
return trans_exc(vcpu, PGM_PROTECTION, ga, ar, mode,
PROT_TYPE_LA);
if (psw_bits(*psw).dat) {
rc = guest_translate(vcpu, ga, &gpa, asce, mode, &prot);
if (rc < 0)
return rc;
} else {
gpa = kvm_s390_real_to_abs(vcpu, ga);
if (kvm_is_error_gpa(vcpu->kvm, gpa)) {
rc = PGM_ADDRESSING;
prot = PROT_NONE;
}
}
if (rc)
return trans_exc(vcpu, rc, ga, ar, mode, prot);
rc = vcpu_check_access_key(vcpu, access_key, mode, asce, gpa, ga,
fragment_len);
if (rc)
return trans_exc(vcpu, rc, ga, ar, mode, PROT_TYPE_KEYC);
if (gpas)
*gpas++ = gpa;
offset = 0;
ga += fragment_len;
len -= fragment_len;
}
return 0;
}
static int access_guest_page(struct kvm *kvm, enum gacc_mode mode, gpa_t gpa,
void *data, unsigned int len)
{
const unsigned int offset = offset_in_page(gpa);
const gfn_t gfn = gpa_to_gfn(gpa);
int rc;
if (mode == GACC_STORE)
rc = kvm_write_guest_page(kvm, gfn, data, offset, len);
else
rc = kvm_read_guest_page(kvm, gfn, data, offset, len);
return rc;
}
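/*
 * Like access_guest_page(), but the user copy is done with the given
 * access key so that storage-key-controlled protection is checked by
 * the hardware during the copy itself.
 */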
static int
access_guest_page_with_key(struct kvm *kvm, enum gacc_mode mode, gpa_t gpa,
void *data, unsigned int len, u8 access_key)
{
struct kvm_memory_slot *slot;
bool writable;
gfn_t gfn;
hva_t hva;
int rc;
gfn = gpa >> PAGE_SHIFT;
slot = gfn_to_memslot(kvm, gfn);
hva = gfn_to_hva_memslot_prot(slot, gfn, &writable);
if (kvm_is_error_hva(hva))
return PGM_ADDRESSING;
/*
* Check if it's a read-only memslot, even though that can't occur
* (they're unsupported). Don't try to actually handle that case.
*/
if (!writable && mode == GACC_STORE)
return -EOPNOTSUPP;
hva += offset_in_page(gpa);
if (mode == GACC_STORE)
rc = copy_to_user_key((void __user *)hva, data, len, access_key);
else
rc = copy_from_user_key(data, (void __user *)hva, len, access_key);
if (rc)
return PGM_PROTECTION;
if (mode == GACC_STORE)
mark_page_dirty_in_slot(kvm, slot, gfn);
return 0;
}
int access_guest_abs_with_key(struct kvm *kvm, gpa_t gpa, void *data,
unsigned long len, enum gacc_mode mode, u8 access_key)
{
int offset = offset_in_page(gpa);
int fragment_len;
int rc;
while (min(PAGE_SIZE - offset, len) > 0) {
fragment_len = min(PAGE_SIZE - offset, len);
rc = access_guest_page_with_key(kvm, mode, gpa, data, fragment_len, access_key);
if (rc)
return rc;
offset = 0;
len -= fragment_len;
data += fragment_len;
gpa += fragment_len;
}
return 0;
}
int access_guest_with_key(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
void *data, unsigned long len, enum gacc_mode mode,
u8 access_key)
{
psw_t *psw = &vcpu->arch.sie_block->gpsw;
unsigned long nr_pages, idx;
unsigned long gpa_array[2];
unsigned int fragment_len;
unsigned long *gpas;
enum prot_type prot;
int need_ipte_lock;
union asce asce;
bool try_storage_prot_override;
bool try_fetch_prot_override;
int rc;
if (!len)
return 0;
ga = kvm_s390_logical_to_effective(vcpu, ga);
rc = get_vcpu_asce(vcpu, &asce, ga, ar, mode);
if (rc)
return rc;
nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1;
gpas = gpa_array;
if (nr_pages > ARRAY_SIZE(gpa_array))
gpas = vmalloc(array_size(nr_pages, sizeof(unsigned long)));
if (!gpas)
return -ENOMEM;
try_fetch_prot_override = fetch_prot_override_applicable(vcpu, mode, asce);
try_storage_prot_override = storage_prot_override_applicable(vcpu);
need_ipte_lock = psw_bits(*psw).dat && !asce.r;
if (need_ipte_lock)
ipte_lock(vcpu->kvm);
/*
* Since we do the access further down ultimately via a move instruction
* that does key checking and returns an error in case of a protection
* violation, we don't need to do the check during address translation.
* Skip it by passing access key 0, which matches any storage key,
* obviating the need for any further checks. As a result the check is
* handled entirely in hardware on access, we only need to take care to
* forego key protection checking if fetch protection override applies or
* retry with the special key 9 in case of storage protection override.
*/
rc = guest_range_to_gpas(vcpu, ga, ar, gpas, len, asce, mode, 0);
if (rc)
goto out_unlock;
for (idx = 0; idx < nr_pages; idx++) {
fragment_len = min(PAGE_SIZE - offset_in_page(gpas[idx]), len);
if (try_fetch_prot_override && fetch_prot_override_applies(ga, fragment_len)) {
rc = access_guest_page(vcpu->kvm, mode, gpas[idx],
data, fragment_len);
} else {
rc = access_guest_page_with_key(vcpu->kvm, mode, gpas[idx],
data, fragment_len, access_key);
}
if (rc == PGM_PROTECTION && try_storage_prot_override)
rc = access_guest_page_with_key(vcpu->kvm, mode, gpas[idx],
data, fragment_len, PAGE_SPO_ACC);
if (rc)
break;
len -= fragment_len;
data += fragment_len;
ga = kvm_s390_logical_to_effective(vcpu, ga + fragment_len);
}
if (rc > 0) {
bool terminate = (mode == GACC_STORE) && (idx > 0);
if (rc == PGM_PROTECTION)
prot = PROT_TYPE_KEYC;
else
prot = PROT_NONE;
rc = trans_exc_ending(vcpu, rc, ga, ar, mode, prot, terminate);
}
out_unlock:
if (need_ipte_lock)
ipte_unlock(vcpu->kvm);
if (nr_pages > ARRAY_SIZE(gpa_array))
vfree(gpas);
return rc;
}
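/*
 * Access guest memory via real addresses: no DAT and no key checking,
 * only the real-to-absolute (prefix) conversion is applied for each
 * page fragment.
 */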
int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
void *data, unsigned long len, enum gacc_mode mode)
{
unsigned int fragment_len;
unsigned long gpa;
int rc = 0;
while (len && !rc) {
gpa = kvm_s390_real_to_abs(vcpu, gra);
fragment_len = min(PAGE_SIZE - offset_in_page(gpa), len);
rc = access_guest_page(vcpu->kvm, mode, gpa, data, fragment_len);
len -= fragment_len;
gra += fragment_len;
data += fragment_len;
}
return rc;
}
/**
* cmpxchg_guest_abs_with_key() - Perform cmpxchg on guest absolute address.
* @kvm: Virtual machine instance.
* @gpa: Absolute guest address of the location to be changed.
* @len: Operand length of the cmpxchg, required: 1 <= len <= 16. Providing a
* length that is not a power of two will result in failure.
* @old_addr: Pointer to old value. If the location at @gpa contains this value,
* the exchange will succeed. After calling cmpxchg_guest_abs_with_key()
* *@old_addr contains the value at @gpa before the attempt to
* exchange the value.
* @new: The value to place at @gpa.
* @access_key: The access key to use for the guest access.
* @success: output value indicating if an exchange occurred.
*
* Atomically exchange the value at @gpa with @new, if it contains *@old_addr.
* Honors storage keys.
*
* Return: * 0: successful exchange
* * >0: a program interruption code indicating the reason cmpxchg could
* not be attempted
* * -EINVAL: address misaligned or len not power of two
* * -EAGAIN: transient failure (len 1 or 2)
* * -EOPNOTSUPP: read-only memslot (should never occur)
*/
int cmpxchg_guest_abs_with_key(struct kvm *kvm, gpa_t gpa, int len,
__uint128_t *old_addr, __uint128_t new,
u8 access_key, bool *success)
{
gfn_t gfn = gpa_to_gfn(gpa);
struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
bool writable;
hva_t hva;
int ret;
if (!IS_ALIGNED(gpa, len))
return -EINVAL;
hva = gfn_to_hva_memslot_prot(slot, gfn, &writable);
if (kvm_is_error_hva(hva))
return PGM_ADDRESSING;
/*
* Check if it's a read-only memslot, even though that cannot occur
* since those are unsupported.
* Don't try to actually handle that case.
*/
if (!writable)
return -EOPNOTSUPP;
hva += offset_in_page(gpa);
/*
* The cmpxchg_user_key macro depends on the type of "old", so we need
* a case for each valid length and get some code duplication as long
* as we don't introduce a new macro.
*/
switch (len) {
case 1: {
u8 old;
ret = cmpxchg_user_key((u8 __user *)hva, &old, *old_addr, new, access_key);
*success = !ret && old == *old_addr;
*old_addr = old;
break;
}
case 2: {
u16 old;
ret = cmpxchg_user_key((u16 __user *)hva, &old, *old_addr, new, access_key);
*success = !ret && old == *old_addr;
*old_addr = old;
break;
}
case 4: {
u32 old;
ret = cmpxchg_user_key((u32 __user *)hva, &old, *old_addr, new, access_key);
*success = !ret && old == *old_addr;
*old_addr = old;
break;
}
case 8: {
u64 old;
ret = cmpxchg_user_key((u64 __user *)hva, &old, *old_addr, new, access_key);
*success = !ret && old == *old_addr;
*old_addr = old;
break;
}
case 16: {
__uint128_t old;
ret = cmpxchg_user_key((__uint128_t __user *)hva, &old, *old_addr, new, access_key);
*success = !ret && old == *old_addr;
*old_addr = old;
break;
}
default:
return -EINVAL;
}
if (*success)
mark_page_dirty_in_slot(kvm, slot, gfn);
/*
* Assume that the fault is caused by protection, either key protection
* or user page write protection.
*/
if (ret == -EFAULT)
ret = PGM_PROTECTION;
return ret;
}
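/*
 * Typical usage (illustrative): a 4-byte guest compare-and-swap could be
 * done as
 *
 *   __uint128_t old = old_val;
 *   bool success;
 *   int rc = cmpxchg_guest_abs_with_key(kvm, gpa, 4, &old, new_val,
 *                                       access_key, &success);
 *
 * where a 0 return with success == false means the compare failed and
 * old now holds the current value at @gpa.
 */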
/**
* guest_translate_address_with_key - translate a guest logical address into a guest absolute address
* @vcpu: virtual cpu
* @gva: Guest virtual address
* @ar: Access register
* @gpa: Guest physical address
* @mode: Translation access mode
* @access_key: access key to match the storage key with
*
* Parameter semantics are the same as the ones from guest_translate.
* The memory contents at the guest address are not changed.
*
* Note: The IPTE lock is not taken during this function, so the caller
* has to take care of this.
*/
int guest_translate_address_with_key(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
unsigned long *gpa, enum gacc_mode mode,
u8 access_key)
{
union asce asce;
int rc;
gva = kvm_s390_logical_to_effective(vcpu, gva);
rc = get_vcpu_asce(vcpu, &asce, gva, ar, mode);
if (rc)
return rc;
return guest_range_to_gpas(vcpu, gva, ar, gpa, 1, asce, mode,
access_key);
}
/**
* check_gva_range - test a range of guest virtual addresses for accessibility
* @vcpu: virtual cpu
* @gva: Guest virtual address
* @ar: Access register
* @length: Length of test range
* @mode: Translation access mode
* @access_key: access key to match the storage keys with
*/
int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
unsigned long length, enum gacc_mode mode, u8 access_key)
{
union asce asce;
int rc = 0;
rc = get_vcpu_asce(vcpu, &asce, gva, ar, mode);
if (rc)
return rc;
ipte_lock(vcpu->kvm);
rc = guest_range_to_gpas(vcpu, gva, ar, NULL, length, asce, mode,
access_key);
ipte_unlock(vcpu->kvm);
return rc;
}
/**
* check_gpa_range - test a range of guest physical addresses for accessibility
* @kvm: virtual machine instance
* @gpa: guest physical address
* @length: length of test range
* @mode: access mode to test, relevant for storage keys
* @access_key: access key to match the storage keys with
*/
int check_gpa_range(struct kvm *kvm, unsigned long gpa, unsigned long length,
enum gacc_mode mode, u8 access_key)
{
unsigned int fragment_len;
int rc = 0;
while (length && !rc) {
fragment_len = min(PAGE_SIZE - offset_in_page(gpa), length);
rc = vm_check_access_key(kvm, access_key, mode, gpa);
length -= fragment_len;
gpa += fragment_len;
}
return rc;
}
/**
* kvm_s390_check_low_addr_prot_real - check for low-address protection
* @vcpu: virtual cpu
* @gra: Guest real address
*
* Checks whether an address is subject to low-address protection and sets
* up vcpu->arch.pgm accordingly if necessary.
*
* Return: 0 if no protection exception, or PGM_PROTECTION if protected.
*/
int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra)
{
union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};
if (!ctlreg0.lap || !is_low_address(gra))
return 0;
return trans_exc(vcpu, PGM_PROTECTION, gra, 0, GACC_STORE, PROT_TYPE_LA);
}
/**
* kvm_s390_shadow_tables - walk the guest page table and create shadow tables
* @sg: pointer to the shadow guest address space structure
* @saddr: faulting address in the shadow gmap
* @pgt: pointer to the beginning of the page table for the given address if
* successful (return value 0), or to the first invalid DAT entry in
* case of exceptions (return value > 0)
* @dat_protection: referenced memory is write protected
* @fake: pgt references contiguous guest memory block, not a pgtable
*/
static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
unsigned long *pgt, int *dat_protection,
int *fake)
{
struct gmap *parent;
union asce asce;
union vaddress vaddr;
unsigned long ptr;
int rc;
*fake = 0;
*dat_protection = 0;
parent = sg->parent;
vaddr.addr = saddr;
asce.val = sg->orig_asce;
ptr = asce.origin * PAGE_SIZE;
if (asce.r) {
*fake = 1;
ptr = 0;
asce.dt = ASCE_TYPE_REGION1;
}
switch (asce.dt) {
case ASCE_TYPE_REGION1:
if (vaddr.rfx01 > asce.tl && !*fake)
return PGM_REGION_FIRST_TRANS;
break;
case ASCE_TYPE_REGION2:
if (vaddr.rfx)
return PGM_ASCE_TYPE;
if (vaddr.rsx01 > asce.tl)
return PGM_REGION_SECOND_TRANS;
break;
case ASCE_TYPE_REGION3:
if (vaddr.rfx || vaddr.rsx)
return PGM_ASCE_TYPE;
if (vaddr.rtx01 > asce.tl)
return PGM_REGION_THIRD_TRANS;
break;
case ASCE_TYPE_SEGMENT:
if (vaddr.rfx || vaddr.rsx || vaddr.rtx)
return PGM_ASCE_TYPE;
if (vaddr.sx01 > asce.tl)
return PGM_SEGMENT_TRANSLATION;
break;
}
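/*
 * The walk below mirrors guest_translate(), but additionally creates a
 * shadow table at each level via the gmap_shadow_*() helpers; for fake
 * tables (huge frames or real-space ASCEs) the entries are synthesized
 * instead of being read from guest memory.
 */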
switch (asce.dt) {
case ASCE_TYPE_REGION1: {
union region1_table_entry rfte;
if (*fake) {
ptr += vaddr.rfx * _REGION1_SIZE;
rfte.val = ptr;
goto shadow_r2t;
}
*pgt = ptr + vaddr.rfx * 8;
rc = gmap_read_table(parent, ptr + vaddr.rfx * 8, &rfte.val);
if (rc)
return rc;
if (rfte.i)
return PGM_REGION_FIRST_TRANS;
if (rfte.tt != TABLE_TYPE_REGION1)
return PGM_TRANSLATION_SPEC;
if (vaddr.rsx01 < rfte.tf || vaddr.rsx01 > rfte.tl)
return PGM_REGION_SECOND_TRANS;
if (sg->edat_level >= 1)
*dat_protection |= rfte.p;
ptr = rfte.rto * PAGE_SIZE;
shadow_r2t:
rc = gmap_shadow_r2t(sg, saddr, rfte.val, *fake);
if (rc)
return rc;
}
fallthrough;
case ASCE_TYPE_REGION2: {
union region2_table_entry rste;
if (*fake) {
ptr += vaddr.rsx * _REGION2_SIZE;
rste.val = ptr;
goto shadow_r3t;
}
*pgt = ptr + vaddr.rsx * 8;
rc = gmap_read_table(parent, ptr + vaddr.rsx * 8, &rste.val);
if (rc)
return rc;
if (rste.i)
return PGM_REGION_SECOND_TRANS;
if (rste.tt != TABLE_TYPE_REGION2)
return PGM_TRANSLATION_SPEC;
if (vaddr.rtx01 < rste.tf || vaddr.rtx01 > rste.tl)
return PGM_REGION_THIRD_TRANS;
if (sg->edat_level >= 1)
*dat_protection |= rste.p;
ptr = rste.rto * PAGE_SIZE;
shadow_r3t:
rste.p |= *dat_protection;
rc = gmap_shadow_r3t(sg, saddr, rste.val, *fake);
if (rc)
return rc;
}
fallthrough;
case ASCE_TYPE_REGION3: {
union region3_table_entry rtte;
if (*fake) {
ptr += vaddr.rtx * _REGION3_SIZE;
rtte.val = ptr;
goto shadow_sgt;
}
*pgt = ptr + vaddr.rtx * 8;
rc = gmap_read_table(parent, ptr + vaddr.rtx * 8, &rtte.val);
if (rc)
return rc;
if (rtte.i)
return PGM_REGION_THIRD_TRANS;
if (rtte.tt != TABLE_TYPE_REGION3)
return PGM_TRANSLATION_SPEC;
if (rtte.cr && asce.p && sg->edat_level >= 2)
return PGM_TRANSLATION_SPEC;
if (rtte.fc && sg->edat_level >= 2) {
*dat_protection |= rtte.fc0.p;
*fake = 1;
ptr = rtte.fc1.rfaa * _REGION3_SIZE;
rtte.val = ptr;
goto shadow_sgt;
}
if (vaddr.sx01 < rtte.fc0.tf || vaddr.sx01 > rtte.fc0.tl)
return PGM_SEGMENT_TRANSLATION;
if (sg->edat_level >= 1)
*dat_protection |= rtte.fc0.p;
ptr = rtte.fc0.sto * PAGE_SIZE;
shadow_sgt:
rtte.fc0.p |= *dat_protection;
rc = gmap_shadow_sgt(sg, saddr, rtte.val, *fake);
if (rc)
return rc;
}
fallthrough;
case ASCE_TYPE_SEGMENT: {
union segment_table_entry ste;
if (*fake) {
ptr += vaddr.sx * _SEGMENT_SIZE;
ste.val = ptr;
goto shadow_pgt;
}
*pgt = ptr + vaddr.sx * 8;
rc = gmap_read_table(parent, ptr + vaddr.sx * 8, &ste.val);
if (rc)
return rc;
if (ste.i)
return PGM_SEGMENT_TRANSLATION;
if (ste.tt != TABLE_TYPE_SEGMENT)
return PGM_TRANSLATION_SPEC;
if (ste.cs && asce.p)
return PGM_TRANSLATION_SPEC;
*dat_protection |= ste.fc0.p;
if (ste.fc && sg->edat_level >= 1) {
*fake = 1;
ptr = ste.fc1.sfaa * _SEGMENT_SIZE;
ste.val = ptr;
goto shadow_pgt;
}
ptr = ste.fc0.pto * (PAGE_SIZE / 2);
shadow_pgt:
ste.fc0.p |= *dat_protection;
rc = gmap_shadow_pgt(sg, saddr, ste.val, *fake);
if (rc)
return rc;
}
}
/* Return the parent address of the page table */
*pgt = ptr;
return 0;
}
/**
* kvm_s390_shadow_fault - handle fault on a shadow page table
* @vcpu: virtual cpu
* @sg: pointer to the shadow guest address space structure
* @saddr: faulting address in the shadow gmap
* @datptr: will contain the address of the faulting DAT table entry, or of
* the valid leaf, plus some flags
*
* Returns: - 0 if the shadow fault was successfully resolved
* - > 0 (pgm exception code) on exceptions while faulting
* - -EAGAIN if the caller can retry immediately
* - -EFAULT when accessing invalid guest addresses
* - -ENOMEM if out of memory
*/
int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
unsigned long saddr, unsigned long *datptr)
{
union vaddress vaddr;
union page_table_entry pte;
unsigned long pgt = 0;
int dat_protection, fake;
int rc;
mmap_read_lock(sg->mm);
/*
* We don't want any guest-2 tables to change, so the parent
* tables/pointers we read stay valid. Unshadowing is, however,
* always possible; only guest_table_lock protects us.
*/
ipte_lock(vcpu->kvm);
rc = gmap_shadow_pgt_lookup(sg, saddr, &pgt, &dat_protection, &fake);
if (rc)
rc = kvm_s390_shadow_tables(sg, saddr, &pgt, &dat_protection,
&fake);
vaddr.addr = saddr;
if (fake) {
pte.val = pgt + vaddr.px * PAGE_SIZE;
goto shadow_page;
}
switch (rc) {
case PGM_SEGMENT_TRANSLATION:
case PGM_REGION_THIRD_TRANS:
case PGM_REGION_SECOND_TRANS:
case PGM_REGION_FIRST_TRANS:
pgt |= PEI_NOT_PTE;
break;
case 0:
pgt += vaddr.px * 8;
rc = gmap_read_table(sg->parent, pgt, &pte.val);
}
if (datptr)
*datptr = pgt | dat_protection * PEI_DAT_PROT;
if (!rc && pte.i)
rc = PGM_PAGE_TRANSLATION;
if (!rc && pte.z)
rc = PGM_TRANSLATION_SPEC;
shadow_page:
pte.p |= dat_protection;
if (!rc)
rc = gmap_shadow_page(sg, saddr, __pte(pte.val));
ipte_unlock(vcpu->kvm);
mmap_read_unlock(sg->mm);
return rc;
}
| linux-master | arch/s390/kvm/gaccess.c |
// SPDX-License-Identifier: GPL-2.0
/*
* in-kernel handling for sie intercepts
*
* Copyright IBM Corp. 2008, 2020
*
* Author(s): Carsten Otte <[email protected]>
* Christian Borntraeger <[email protected]>
*/
#include <linux/kvm_host.h>
#include <linux/errno.h>
#include <linux/pagemap.h>
#include <asm/asm-offsets.h>
#include <asm/irq.h>
#include <asm/sysinfo.h>
#include <asm/uv.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace.h"
#include "trace-s390.h"
u8 kvm_s390_get_ilen(struct kvm_vcpu *vcpu)
{
struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;
u8 ilen = 0;
switch (vcpu->arch.sie_block->icptcode) {
case ICPT_INST:
case ICPT_INSTPROGI:
case ICPT_OPEREXC:
case ICPT_PARTEXEC:
case ICPT_IOINST:
/* instruction only stored for these icptcodes */
ilen = insn_length(vcpu->arch.sie_block->ipa >> 8);
/* Use the length of the EXECUTE instruction if necessary */
if (sie_block->icptstatus & 1) {
ilen = (sie_block->icptstatus >> 4) & 0x6;
if (!ilen)
ilen = 4;
}
break;
case ICPT_PROGI:
/* bit 1+2 of pgmilc are the ilc, so we directly get ilen */
ilen = vcpu->arch.sie_block->pgmilc & 0x6;
break;
}
return ilen;
}
static int handle_stop(struct kvm_vcpu *vcpu)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
int rc = 0;
uint8_t flags, stop_pending;
vcpu->stat.exit_stop_request++;
/* delay the stop if any non-stop irq is pending */
if (kvm_s390_vcpu_has_irq(vcpu, 1))
return 0;
/* avoid races with the injection/SIGP STOP code */
spin_lock(&li->lock);
flags = li->irq.stop.flags;
stop_pending = kvm_s390_is_stop_irq_pending(vcpu);
spin_unlock(&li->lock);
trace_kvm_s390_stop_request(stop_pending, flags);
if (!stop_pending)
return 0;
if (flags & KVM_S390_STOP_FLAG_STORE_STATUS) {
rc = kvm_s390_vcpu_store_status(vcpu,
KVM_S390_STORE_STATUS_NOADDR);
if (rc)
return rc;
}
/*
* no need to check the return value of vcpu_stop: it can only fail for
* protvirt, and protvirt implies user-controlled cpu state
*/
if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
kvm_s390_vcpu_stop(vcpu);
return -EOPNOTSUPP;
}
static int handle_validity(struct kvm_vcpu *vcpu)
{
int viwhy = vcpu->arch.sie_block->ipb >> 16;
vcpu->stat.exit_validity++;
trace_kvm_s390_intercept_validity(vcpu, viwhy);
KVM_EVENT(3, "validity intercept 0x%x for pid %u (kvm 0x%pK)", viwhy,
current->pid, vcpu->kvm);
/* do not warn on invalid runtime instrumentation mode */
WARN_ONCE(viwhy != 0x44, "kvm: unhandled validity intercept 0x%x\n",
viwhy);
return -EINVAL;
}
static int handle_instruction(struct kvm_vcpu *vcpu)
{
vcpu->stat.exit_instruction++;
trace_kvm_s390_intercept_instruction(vcpu,
vcpu->arch.sie_block->ipa,
vcpu->arch.sie_block->ipb);
switch (vcpu->arch.sie_block->ipa >> 8) {
case 0x01:
return kvm_s390_handle_01(vcpu);
case 0x82:
return kvm_s390_handle_lpsw(vcpu);
case 0x83:
return kvm_s390_handle_diag(vcpu);
case 0xaa:
return kvm_s390_handle_aa(vcpu);
case 0xae:
return kvm_s390_handle_sigp(vcpu);
case 0xb2:
return kvm_s390_handle_b2(vcpu);
case 0xb6:
return kvm_s390_handle_stctl(vcpu);
case 0xb7:
return kvm_s390_handle_lctl(vcpu);
case 0xb9:
return kvm_s390_handle_b9(vcpu);
case 0xe3:
return kvm_s390_handle_e3(vcpu);
case 0xe5:
return kvm_s390_handle_e5(vcpu);
case 0xeb:
return kvm_s390_handle_eb(vcpu);
default:
return -EOPNOTSUPP;
}
}
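/*
 * Re-inject an intercepted program interruption: gather the exception
 * details the hardware stored in the SIE block (translation-exception
 * code, access IDs, PER data, ...) into a kvm_s390_pgm_info and queue
 * it for delivery to the guest.
 */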
static int inject_prog_on_prog_intercept(struct kvm_vcpu *vcpu)
{
struct kvm_s390_pgm_info pgm_info = {
.code = vcpu->arch.sie_block->iprcc,
/* the PSW has already been rewound */
.flags = KVM_S390_PGM_FLAGS_NO_REWIND,
};
switch (vcpu->arch.sie_block->iprcc & ~PGM_PER) {
case PGM_AFX_TRANSLATION:
case PGM_ASX_TRANSLATION:
case PGM_EX_TRANSLATION:
case PGM_LFX_TRANSLATION:
case PGM_LSTE_SEQUENCE:
case PGM_LSX_TRANSLATION:
case PGM_LX_TRANSLATION:
case PGM_PRIMARY_AUTHORITY:
case PGM_SECONDARY_AUTHORITY:
case PGM_SPACE_SWITCH:
pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc;
break;
case PGM_ALEN_TRANSLATION:
case PGM_ALE_SEQUENCE:
case PGM_ASTE_INSTANCE:
case PGM_ASTE_SEQUENCE:
case PGM_ASTE_VALIDITY:
case PGM_EXTENDED_AUTHORITY:
pgm_info.exc_access_id = vcpu->arch.sie_block->eai;
break;
case PGM_ASCE_TYPE:
case PGM_PAGE_TRANSLATION:
case PGM_REGION_FIRST_TRANS:
case PGM_REGION_SECOND_TRANS:
case PGM_REGION_THIRD_TRANS:
case PGM_SEGMENT_TRANSLATION:
pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc;
pgm_info.exc_access_id = vcpu->arch.sie_block->eai;
pgm_info.op_access_id = vcpu->arch.sie_block->oai;
break;
case PGM_MONITOR:
pgm_info.mon_class_nr = vcpu->arch.sie_block->mcn;
pgm_info.mon_code = vcpu->arch.sie_block->tecmc;
break;
case PGM_VECTOR_PROCESSING:
case PGM_DATA:
pgm_info.data_exc_code = vcpu->arch.sie_block->dxc;
break;
case PGM_PROTECTION:
pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc;
pgm_info.exc_access_id = vcpu->arch.sie_block->eai;
break;
default:
break;
}
if (vcpu->arch.sie_block->iprcc & PGM_PER) {
pgm_info.per_code = vcpu->arch.sie_block->perc;
pgm_info.per_atmid = vcpu->arch.sie_block->peratmid;
pgm_info.per_address = vcpu->arch.sie_block->peraddr;
pgm_info.per_access_id = vcpu->arch.sie_block->peraid;
}
return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
}
/*
* restore ITDB to program-interruption TDB in guest lowcore
* and set TX abort indication if required
*/
static int handle_itdb(struct kvm_vcpu *vcpu)
{
struct kvm_s390_itdb *itdb;
int rc;
if (!IS_TE_ENABLED(vcpu) || !IS_ITDB_VALID(vcpu))
return 0;
if (current->thread.per_flags & PER_FLAG_NO_TE)
return 0;
itdb = phys_to_virt(vcpu->arch.sie_block->itdba);
rc = write_guest_lc(vcpu, __LC_PGM_TDB, itdb, sizeof(*itdb));
if (rc)
return rc;
memset(itdb, 0, sizeof(*itdb));
return 0;
}
#define per_event(vcpu) (vcpu->arch.sie_block->iprcc & PGM_PER)
static bool should_handle_per_event(const struct kvm_vcpu *vcpu)
{
if (!guestdbg_enabled(vcpu) || !per_event(vcpu))
return false;
if (guestdbg_sstep_enabled(vcpu) &&
vcpu->arch.sie_block->iprcc != PGM_PER) {
/*
* __vcpu_run() will exit after delivering the concurrently
* indicated condition.
*/
return false;
}
return true;
}
static int handle_prog(struct kvm_vcpu *vcpu)
{
psw_t psw;
int rc;
vcpu->stat.exit_program_interruption++;
/*
* Intercept 8 indicates a loop of specification exceptions
* for protected guests.
*/
if (kvm_s390_pv_cpu_is_protected(vcpu))
return -EOPNOTSUPP;
if (should_handle_per_event(vcpu)) {
rc = kvm_s390_handle_per_event(vcpu);
if (rc)
return rc;
/* the interrupt might have been filtered out completely */
if (vcpu->arch.sie_block->iprcc == 0)
return 0;
}
trace_kvm_s390_intercept_prog(vcpu, vcpu->arch.sie_block->iprcc);
if (vcpu->arch.sie_block->iprcc == PGM_SPECIFICATION) {
rc = read_guest_lc(vcpu, __LC_PGM_NEW_PSW, &psw, sizeof(psw_t));
if (rc)
return rc;
/* Avoid endless loops of specification exceptions */
if (!is_valid_psw(&psw))
return -EOPNOTSUPP;
}
rc = handle_itdb(vcpu);
if (rc)
return rc;
return inject_prog_on_prog_intercept(vcpu);
}
/**
* handle_external_interrupt - used for external interruption interceptions
* @vcpu: virtual cpu
*
* This interception occurs if:
* - the CPUSTAT_EXT_INT bit was already set when the external interrupt
* occurred. In this case, the interrupt needs to be injected manually to
* preserve interrupt priority.
* - the external new PSW has external interrupts enabled, which will cause an
* interruption loop. We drop to userspace in this case.
*
* The latter case can be detected by inspecting the external mask bit in the
* external new psw.
*
* Under PV, only the latter case can occur, since interrupt priorities are
* handled in the ultravisor.
*/
static int handle_external_interrupt(struct kvm_vcpu *vcpu)
{
u16 eic = vcpu->arch.sie_block->eic;
struct kvm_s390_irq irq;
psw_t newpsw;
int rc;
vcpu->stat.exit_external_interrupt++;
if (kvm_s390_pv_cpu_is_protected(vcpu)) {
newpsw = vcpu->arch.sie_block->gpsw;
} else {
rc = read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &newpsw, sizeof(psw_t));
if (rc)
return rc;
}
/*
* Clock comparator or timer interrupt with external interrupt enabled
* will cause an interrupt loop. Drop to userspace.
*/
if ((eic == EXT_IRQ_CLK_COMP || eic == EXT_IRQ_CPU_TIMER) &&
(newpsw.mask & PSW_MASK_EXT))
return -EOPNOTSUPP;
switch (eic) {
case EXT_IRQ_CLK_COMP:
irq.type = KVM_S390_INT_CLOCK_COMP;
break;
case EXT_IRQ_CPU_TIMER:
irq.type = KVM_S390_INT_CPU_TIMER;
break;
case EXT_IRQ_EXTERNAL_CALL:
irq.type = KVM_S390_INT_EXTERNAL_CALL;
irq.u.extcall.code = vcpu->arch.sie_block->extcpuaddr;
rc = kvm_s390_inject_vcpu(vcpu, &irq);
/* ignore if another external call is already pending */
if (rc == -EBUSY)
return 0;
return rc;
default:
return -EOPNOTSUPP;
}
return kvm_s390_inject_vcpu(vcpu, &irq);
}
/**
* handle_mvpg_pei - Handle MOVE PAGE partial execution interception.
* @vcpu: virtual cpu
*
* This interception can only happen for guests with DAT disabled and
* addresses that are currently not mapped in the host. Thus we try to
* set up the mappings for the corresponding user pages here (or throw
* addressing exceptions in case of illegal guest addresses).
*/
static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
{
unsigned long srcaddr, dstaddr;
int reg1, reg2, rc;
kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
/* Ensure that the source is paged-in, no actual access -> no key checking */
rc = guest_translate_address_with_key(vcpu, vcpu->run->s.regs.gprs[reg2],
reg2, &srcaddr, GACC_FETCH, 0);
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
rc = kvm_arch_fault_in_page(vcpu, srcaddr, 0);
if (rc != 0)
return rc;
/* Ensure that the destination is paged-in, no actual access -> no key checking */
rc = guest_translate_address_with_key(vcpu, vcpu->run->s.regs.gprs[reg1],
reg1, &dstaddr, GACC_STORE, 0);
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
rc = kvm_arch_fault_in_page(vcpu, dstaddr, 1);
if (rc != 0)
return rc;
kvm_s390_retry_instr(vcpu);
return 0;
}
static int handle_partial_execution(struct kvm_vcpu *vcpu)
{
vcpu->stat.exit_pei++;
if (vcpu->arch.sie_block->ipa == 0xb254) /* MVPG */
return handle_mvpg_pei(vcpu);
if (vcpu->arch.sie_block->ipa >> 8 == 0xae) /* SIGP */
return kvm_s390_handle_sigp_pei(vcpu);
return -EOPNOTSUPP;
}
/*
* Handle the sthyi instruction that provides the guest with system
* information, like current CPU resources available at each level of
* the machine.
*/
int handle_sthyi(struct kvm_vcpu *vcpu)
{
int reg1, reg2, cc = 0, r = 0;
u64 code, addr, rc = 0;
struct sthyi_sctns *sctns = NULL;
if (!test_kvm_facility(vcpu->kvm, 74))
return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
code = vcpu->run->s.regs.gprs[reg1];
addr = vcpu->run->s.regs.gprs[reg2];
vcpu->stat.instruction_sthyi++;
VCPU_EVENT(vcpu, 3, "STHYI: fc: %llu addr: 0x%016llx", code, addr);
trace_kvm_s390_handle_sthyi(vcpu, code, addr);
if (reg1 == reg2 || reg1 & 1 || reg2 & 1)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
if (code & 0xffff) {
cc = 3;
rc = 4;
goto out;
}
if (!kvm_s390_pv_cpu_is_protected(vcpu) && (addr & ~PAGE_MASK))
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
sctns = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
if (!sctns)
return -ENOMEM;
cc = sthyi_fill(sctns, &rc);
if (cc < 0) {
free_page((unsigned long)sctns);
return cc;
}
out:
if (!cc) {
if (kvm_s390_pv_cpu_is_protected(vcpu)) {
memcpy(sida_addr(vcpu->arch.sie_block), sctns, PAGE_SIZE);
} else {
r = write_guest(vcpu, addr, reg2, sctns, PAGE_SIZE);
if (r) {
free_page((unsigned long)sctns);
return kvm_s390_inject_prog_cond(vcpu, r);
}
}
}
free_page((unsigned long)sctns);
vcpu->run->s.regs.gprs[reg2 + 1] = rc;
kvm_s390_set_psw_cc(vcpu, cc);
return r;
}
static int handle_operexc(struct kvm_vcpu *vcpu)
{
psw_t oldpsw, newpsw;
int rc;
vcpu->stat.exit_operation_exception++;
trace_kvm_s390_handle_operexc(vcpu, vcpu->arch.sie_block->ipa,
vcpu->arch.sie_block->ipb);
if (vcpu->arch.sie_block->ipa == 0xb256)
return handle_sthyi(vcpu);
if (vcpu->arch.sie_block->ipa == 0 && vcpu->kvm->arch.user_instr0)
return -EOPNOTSUPP;
rc = read_guest_lc(vcpu, __LC_PGM_NEW_PSW, &newpsw, sizeof(psw_t));
if (rc)
return rc;
/*
 * Avoid endless loops of operation exceptions if the pgm new
 * PSW would cause a new operation exception.
 * The heuristic checks that the pgm new psw lies within 6 bytes
 * before the faulting psw address (with the same DAT and AS
 * settings), that the new psw is not a wait psw, and that the
 * fault was not triggered in problem state.
 */
oldpsw = vcpu->arch.sie_block->gpsw;
if (oldpsw.addr - newpsw.addr <= 6 &&
!(newpsw.mask & PSW_MASK_WAIT) &&
!(oldpsw.mask & PSW_MASK_PSTATE) &&
(newpsw.mask & PSW_MASK_ASC) == (oldpsw.mask & PSW_MASK_ASC) &&
(newpsw.mask & PSW_MASK_DAT) == (oldpsw.mask & PSW_MASK_DAT))
return -EOPNOTSUPP;
return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
}
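/*
 * Illustrative aside (not part of this file): the range check above relies
 * on unsigned wraparound, so "oldpsw.addr - newpsw.addr <= 6" holds exactly
 * when the new address lies in [old - 6, old]. A minimal standalone
 * userspace sketch of that predicate, with hypothetical names:
 */
#include <stdint.h>
#include <stdio.h>

static int within_6_before(uint64_t old, uint64_t new)
{
	/* unsigned subtraction wraps to a huge value when new > old */
	return old - new <= 6;
}

int main(void)
{
	printf("%d\n", within_6_before(0x1000, 0x0ffa)); /* 1: exactly 6 before */
	printf("%d\n", within_6_before(0x1000, 0x0ff9)); /* 0: 7 bytes before */
	printf("%d\n", within_6_before(0x1000, 0x1001)); /* 0: after old */
	return 0;
}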
static int handle_pv_spx(struct kvm_vcpu *vcpu)
{
u32 pref = *(u32 *)sida_addr(vcpu->arch.sie_block);
kvm_s390_set_prefix(vcpu, pref);
trace_kvm_s390_handle_prefix(vcpu, 1, pref);
return 0;
}
static int handle_pv_sclp(struct kvm_vcpu *vcpu)
{
struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
spin_lock(&fi->lock);
/*
 * Two cases:
 * a: an sccb answering interrupt was already pending or in flight.
 *    As the sccb value is not known, we can simply set some value to
 *    trigger delivery of a saved SCCB. UV will then use its saved
 *    copy of the SCCB value.
 * b: an error SCCB interrupt needs to be injected, so we also inject
 *    a fake SCCB address. Firmware will use the proper one.
 * This makes sure that both errors and real sccb returns will only
 * be delivered after a notification intercept (instruction has
 * finished) but not after others.
 */
fi->srv_signal.ext_params |= 0x43000;
set_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
clear_bit(IRQ_PEND_EXT_SERVICE, &fi->masked_irqs);
spin_unlock(&fi->lock);
return 0;
}
static int handle_pv_uvc(struct kvm_vcpu *vcpu)
{
struct uv_cb_share *guest_uvcb = sida_addr(vcpu->arch.sie_block);
struct uv_cb_cts uvcb = {
.header.cmd = UVC_CMD_UNPIN_PAGE_SHARED,
.header.len = sizeof(uvcb),
.guest_handle = kvm_s390_pv_get_handle(vcpu->kvm),
.gaddr = guest_uvcb->paddr,
};
int rc;
if (guest_uvcb->header.cmd != UVC_CMD_REMOVE_SHARED_ACCESS) {
WARN_ONCE(1, "Unexpected notification intercept for UVC 0x%x\n",
guest_uvcb->header.cmd);
return 0;
}
rc = gmap_make_secure(vcpu->arch.gmap, uvcb.gaddr, &uvcb);
/*
* If the unpin did not succeed, the guest will exit again for the UVC
* and we will retry the unpin.
*/
if (rc == -EINVAL)
return 0;
/*
* If we got -EAGAIN here, we simply return it. It will eventually
* get propagated all the way to userspace, which should then try
* again.
*/
return rc;
}
static int handle_pv_notification(struct kvm_vcpu *vcpu)
{
int ret;
if (vcpu->arch.sie_block->ipa == 0xb210)
return handle_pv_spx(vcpu);
if (vcpu->arch.sie_block->ipa == 0xb220)
return handle_pv_sclp(vcpu);
if (vcpu->arch.sie_block->ipa == 0xb9a4)
return handle_pv_uvc(vcpu);
if (vcpu->arch.sie_block->ipa >> 8 == 0xae) {
/*
* Besides external call, other SIGP orders also cause a
* 108 (pv notify) intercept. In contrast to external call,
* these orders need to be emulated and hence the appropriate
* place to handle them is in handle_instruction().
* So first try kvm_s390_handle_sigp_pei() and if that isn't
* successful, go on with handle_instruction().
*/
ret = kvm_s390_handle_sigp_pei(vcpu);
if (!ret)
return ret;
}
return handle_instruction(vcpu);
}
static bool should_handle_per_ifetch(const struct kvm_vcpu *vcpu, int rc)
{
/* Process PER, also if the instruction is processed in user space. */
if (!(vcpu->arch.sie_block->icptstatus & 0x02))
return false;
if (rc != 0 && rc != -EOPNOTSUPP)
return false;
if (guestdbg_sstep_enabled(vcpu) && vcpu->arch.local_int.pending_irqs)
/* __vcpu_run() will exit after delivering the interrupt. */
return false;
return true;
}
int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
{
int rc, per_rc = 0;
if (kvm_is_ucontrol(vcpu->kvm))
return -EOPNOTSUPP;
switch (vcpu->arch.sie_block->icptcode) {
case ICPT_EXTREQ:
vcpu->stat.exit_external_request++;
return 0;
case ICPT_IOREQ:
vcpu->stat.exit_io_request++;
return 0;
case ICPT_INST:
rc = handle_instruction(vcpu);
break;
case ICPT_PROGI:
return handle_prog(vcpu);
case ICPT_EXTINT:
return handle_external_interrupt(vcpu);
case ICPT_WAIT:
return kvm_s390_handle_wait(vcpu);
case ICPT_VALIDITY:
return handle_validity(vcpu);
case ICPT_STOP:
return handle_stop(vcpu);
case ICPT_OPEREXC:
rc = handle_operexc(vcpu);
break;
case ICPT_PARTEXEC:
rc = handle_partial_execution(vcpu);
break;
case ICPT_KSS:
/* Instruction will be redriven, skip the PER check. */
return kvm_s390_skey_check_enable(vcpu);
case ICPT_MCHKREQ:
case ICPT_INT_ENABLE:
/*
* PSW bit 13 or a CR (0, 6, 14) changed and we might
* now be able to deliver interrupts. The pre-run code
* will take care of this.
*/
rc = 0;
break;
case ICPT_PV_INSTR:
rc = handle_instruction(vcpu);
break;
case ICPT_PV_NOTIFY:
rc = handle_pv_notification(vcpu);
break;
case ICPT_PV_PREF:
rc = 0;
gmap_convert_to_secure(vcpu->arch.gmap,
kvm_s390_get_prefix(vcpu));
gmap_convert_to_secure(vcpu->arch.gmap,
kvm_s390_get_prefix(vcpu) + PAGE_SIZE);
break;
default:
return -EOPNOTSUPP;
}
if (should_handle_per_ifetch(vcpu, rc))
per_rc = kvm_s390_handle_per_ifetch_icpt(vcpu);
return per_rc ? per_rc : rc;
}
| linux-master | arch/s390/kvm/intercept.c |
// SPDX-License-Identifier: GPL-2.0
/*
* hosting IBM Z kernel virtual machines (s390x)
*
* Copyright IBM Corp. 2008, 2020
*
* Author(s): Carsten Otte <[email protected]>
* Christian Borntraeger <[email protected]>
* Christian Ehrhardt <[email protected]>
* Jason J. Herne <[email protected]>
*/
#define KMSG_COMPONENT "kvm-s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/string.h>
#include <linux/pgtable.h>
#include <linux/mmu_notifier.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/stp.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/ap.h>
#include <asm/uv.h>
#include <asm/fpu/api.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "pci.h"
#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"
#define MEM_OP_MAX_SIZE 65536 /* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
(KVM_MAX_VCPUS + LOCAL_IRQS))
const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
KVM_GENERIC_VM_STATS(),
STATS_DESC_COUNTER(VM, inject_io),
STATS_DESC_COUNTER(VM, inject_float_mchk),
STATS_DESC_COUNTER(VM, inject_pfault_done),
STATS_DESC_COUNTER(VM, inject_service_signal),
STATS_DESC_COUNTER(VM, inject_virtio),
STATS_DESC_COUNTER(VM, aen_forward)
};
const struct kvm_stats_header kvm_vm_stats_header = {
.name_size = KVM_STATS_NAME_SIZE,
.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
.id_offset = sizeof(struct kvm_stats_header),
.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
sizeof(kvm_vm_stats_desc),
};
const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
KVM_GENERIC_VCPU_STATS(),
STATS_DESC_COUNTER(VCPU, exit_userspace),
STATS_DESC_COUNTER(VCPU, exit_null),
STATS_DESC_COUNTER(VCPU, exit_external_request),
STATS_DESC_COUNTER(VCPU, exit_io_request),
STATS_DESC_COUNTER(VCPU, exit_external_interrupt),
STATS_DESC_COUNTER(VCPU, exit_stop_request),
STATS_DESC_COUNTER(VCPU, exit_validity),
STATS_DESC_COUNTER(VCPU, exit_instruction),
STATS_DESC_COUNTER(VCPU, exit_pei),
STATS_DESC_COUNTER(VCPU, halt_no_poll_steal),
STATS_DESC_COUNTER(VCPU, instruction_lctl),
STATS_DESC_COUNTER(VCPU, instruction_lctlg),
STATS_DESC_COUNTER(VCPU, instruction_stctl),
STATS_DESC_COUNTER(VCPU, instruction_stctg),
STATS_DESC_COUNTER(VCPU, exit_program_interruption),
STATS_DESC_COUNTER(VCPU, exit_instr_and_program),
STATS_DESC_COUNTER(VCPU, exit_operation_exception),
STATS_DESC_COUNTER(VCPU, deliver_ckc),
STATS_DESC_COUNTER(VCPU, deliver_cputm),
STATS_DESC_COUNTER(VCPU, deliver_external_call),
STATS_DESC_COUNTER(VCPU, deliver_emergency_signal),
STATS_DESC_COUNTER(VCPU, deliver_service_signal),
STATS_DESC_COUNTER(VCPU, deliver_virtio),
STATS_DESC_COUNTER(VCPU, deliver_stop_signal),
STATS_DESC_COUNTER(VCPU, deliver_prefix_signal),
STATS_DESC_COUNTER(VCPU, deliver_restart_signal),
STATS_DESC_COUNTER(VCPU, deliver_program),
STATS_DESC_COUNTER(VCPU, deliver_io),
STATS_DESC_COUNTER(VCPU, deliver_machine_check),
STATS_DESC_COUNTER(VCPU, exit_wait_state),
STATS_DESC_COUNTER(VCPU, inject_ckc),
STATS_DESC_COUNTER(VCPU, inject_cputm),
STATS_DESC_COUNTER(VCPU, inject_external_call),
STATS_DESC_COUNTER(VCPU, inject_emergency_signal),
STATS_DESC_COUNTER(VCPU, inject_mchk),
STATS_DESC_COUNTER(VCPU, inject_pfault_init),
STATS_DESC_COUNTER(VCPU, inject_program),
STATS_DESC_COUNTER(VCPU, inject_restart),
STATS_DESC_COUNTER(VCPU, inject_set_prefix),
STATS_DESC_COUNTER(VCPU, inject_stop_signal),
STATS_DESC_COUNTER(VCPU, instruction_epsw),
STATS_DESC_COUNTER(VCPU, instruction_gs),
STATS_DESC_COUNTER(VCPU, instruction_io_other),
STATS_DESC_COUNTER(VCPU, instruction_lpsw),
STATS_DESC_COUNTER(VCPU, instruction_lpswe),
STATS_DESC_COUNTER(VCPU, instruction_pfmf),
STATS_DESC_COUNTER(VCPU, instruction_ptff),
STATS_DESC_COUNTER(VCPU, instruction_sck),
STATS_DESC_COUNTER(VCPU, instruction_sckpf),
STATS_DESC_COUNTER(VCPU, instruction_stidp),
STATS_DESC_COUNTER(VCPU, instruction_spx),
STATS_DESC_COUNTER(VCPU, instruction_stpx),
STATS_DESC_COUNTER(VCPU, instruction_stap),
STATS_DESC_COUNTER(VCPU, instruction_iske),
STATS_DESC_COUNTER(VCPU, instruction_ri),
STATS_DESC_COUNTER(VCPU, instruction_rrbe),
STATS_DESC_COUNTER(VCPU, instruction_sske),
STATS_DESC_COUNTER(VCPU, instruction_ipte_interlock),
STATS_DESC_COUNTER(VCPU, instruction_stsi),
STATS_DESC_COUNTER(VCPU, instruction_stfl),
STATS_DESC_COUNTER(VCPU, instruction_tb),
STATS_DESC_COUNTER(VCPU, instruction_tpi),
STATS_DESC_COUNTER(VCPU, instruction_tprot),
STATS_DESC_COUNTER(VCPU, instruction_tsch),
STATS_DESC_COUNTER(VCPU, instruction_sie),
STATS_DESC_COUNTER(VCPU, instruction_essa),
STATS_DESC_COUNTER(VCPU, instruction_sthyi),
STATS_DESC_COUNTER(VCPU, instruction_sigp_sense),
STATS_DESC_COUNTER(VCPU, instruction_sigp_sense_running),
STATS_DESC_COUNTER(VCPU, instruction_sigp_external_call),
STATS_DESC_COUNTER(VCPU, instruction_sigp_emergency),
STATS_DESC_COUNTER(VCPU, instruction_sigp_cond_emergency),
STATS_DESC_COUNTER(VCPU, instruction_sigp_start),
STATS_DESC_COUNTER(VCPU, instruction_sigp_stop),
STATS_DESC_COUNTER(VCPU, instruction_sigp_stop_store_status),
STATS_DESC_COUNTER(VCPU, instruction_sigp_store_status),
STATS_DESC_COUNTER(VCPU, instruction_sigp_store_adtl_status),
STATS_DESC_COUNTER(VCPU, instruction_sigp_arch),
STATS_DESC_COUNTER(VCPU, instruction_sigp_prefix),
STATS_DESC_COUNTER(VCPU, instruction_sigp_restart),
STATS_DESC_COUNTER(VCPU, instruction_sigp_init_cpu_reset),
STATS_DESC_COUNTER(VCPU, instruction_sigp_cpu_reset),
STATS_DESC_COUNTER(VCPU, instruction_sigp_unknown),
STATS_DESC_COUNTER(VCPU, instruction_diagnose_10),
STATS_DESC_COUNTER(VCPU, instruction_diagnose_44),
STATS_DESC_COUNTER(VCPU, instruction_diagnose_9c),
STATS_DESC_COUNTER(VCPU, diag_9c_ignored),
STATS_DESC_COUNTER(VCPU, diag_9c_forward),
STATS_DESC_COUNTER(VCPU, instruction_diagnose_258),
STATS_DESC_COUNTER(VCPU, instruction_diagnose_308),
STATS_DESC_COUNTER(VCPU, instruction_diagnose_500),
STATS_DESC_COUNTER(VCPU, instruction_diagnose_other),
STATS_DESC_COUNTER(VCPU, pfault_sync)
};
const struct kvm_stats_header kvm_vcpu_stats_header = {
.name_size = KVM_STATS_NAME_SIZE,
.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
.id_offset = sizeof(struct kvm_stats_header),
.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
sizeof(kvm_vcpu_stats_desc),
};
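/*
 * Illustrative aside (not part of this file): the two headers above describe
 * a flat binary layout of [header][id string][descriptor array][counter data],
 * with each offset computed as the cumulative size of what precedes it. A
 * standalone userspace sketch with a hypothetical stand-in type and sizes:
 */
#include <stdio.h>

/* hypothetical stand-in mirroring the header fields used above */
struct demo_stats_header {
	unsigned int flags;
	unsigned int name_size;
	unsigned int num_desc;
	unsigned int id_offset;
	unsigned int desc_offset;
	unsigned int data_offset;
};

int main(void)
{
	unsigned int name_size = 48, num_desc = 3, desc_size = 64;
	struct demo_stats_header h = {
		.name_size = name_size,
		.num_desc = num_desc,
		.id_offset = sizeof(h),
		.desc_offset = sizeof(h) + name_size,
		.data_offset = sizeof(h) + name_size + num_desc * desc_size,
	};

	/* layout: [header][id string][descriptors][counter data] */
	printf("id@%u desc@%u data@%u\n",
	       h.id_offset, h.desc_offset, h.data_offset);
	return 0;
}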
/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");
/* allow 1m huge page guest backing, if !nested */
static int hpage;
module_param(hpage, int, 0444);
MODULE_PARM_DESC(hpage, "1m huge page backing support");
/* maximum percentage of steal time for polling. >100 is treated like 100 */
static u8 halt_poll_max_steal = 10;
module_param(halt_poll_max_steal, byte, 0644);
MODULE_PARM_DESC(halt_poll_max_steal, "Maximum percentage of steal time to allow polling");
/* if set to true, the GISA will be initialized and used if available */
static bool use_gisa = true;
module_param(use_gisa, bool, 0644);
MODULE_PARM_DESC(use_gisa, "Use the GISA if the host supports it.");
/* maximum diag9c forwarding per second */
unsigned int diag9c_forwarding_hz;
module_param(diag9c_forwarding_hz, uint, 0644);
MODULE_PARM_DESC(diag9c_forwarding_hz, "Maximum diag9c forwarding per second, 0 to turn off");
/*
* allow asynchronous deinit for protected guests; enable by default since
* the feature is opt-in anyway
*/
static int async_destroy = 1;
module_param(async_destroy, int, 0444);
MODULE_PARM_DESC(async_destroy, "Asynchronous destroy for protected guests");
/*
 * For now we handle at most 16 double words as this is what the s390 base
 * kernel handles and stores in the prefix page. If we ever need to go
 * beyond this, it will require code changes, but the external uapi can stay.
 */
#define SIZE_INTERNAL 16
/*
* Base feature mask that defines default mask for facilities. Consists of the
* defines in FACILITIES_KVM and the non-hypervisor managed bits.
*/
static unsigned long kvm_s390_fac_base[SIZE_INTERNAL] = { FACILITIES_KVM };
/*
* Extended feature mask. Consists of the defines in FACILITIES_KVM_CPUMODEL
* and defines the facilities that can be enabled via a cpu model.
*/
static unsigned long kvm_s390_fac_ext[SIZE_INTERNAL] = { FACILITIES_KVM_CPUMODEL };
static unsigned long kvm_s390_fac_size(void)
{
BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_MASK_SIZE_U64);
BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_LIST_SIZE_U64);
BUILD_BUG_ON(SIZE_INTERNAL * sizeof(unsigned long) >
sizeof(stfle_fac_list));
return SIZE_INTERNAL;
}
/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;
static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;
debug_info_t *kvm_s390_dbf_uv;
/* Section: not file related */
/* forward declarations */
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
unsigned long end);
static int sca_switch_to_extended(struct kvm *kvm);
static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
{
u8 delta_idx = 0;
/*
 * The TOD jumps by delta; we compensate by adding -delta to
 * the epoch.
 */
delta = -delta;
/* sign-extension - we're adding to signed values below */
if ((s64)delta < 0)
delta_idx = -1;
scb->epoch += delta;
if (scb->ecd & ECD_MEF) {
scb->epdx += delta_idx;
if (scb->epoch < delta)
scb->epdx += 1;
}
}
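/*
 * Illustrative aside (not part of this file): (epdx:epoch) above form a
 * 128-bit epoch, so adding a 64-bit delta must sign-extend the delta into
 * the high part and propagate an unsigned carry out of the low word. A
 * minimal standalone userspace sketch of that arithmetic, with
 * hypothetical names:
 */
#include <stdint.h>
#include <stdio.h>

static void epoch_add(uint8_t *epdx, uint64_t *epoch, uint64_t delta)
{
	uint8_t delta_idx = (int64_t)delta < 0 ? 0xff : 0; /* sign extension */

	*epoch += delta;
	*epdx += delta_idx;
	if (*epoch < delta)	/* unsigned wrap => carry into the high part */
		*epdx += 1;
}

int main(void)
{
	uint64_t epoch = ~0ULL;
	uint8_t epdx = 0;

	epoch_add(&epdx, &epoch, 1);	/* low word wraps, carry sets epdx */
	printf("epoch=%#llx epdx=%#x\n",
	       (unsigned long long)epoch, (unsigned int)epdx);
	return 0;
}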
/*
* This callback is executed during stop_machine(). All CPUs are therefore
* temporarily stopped. In order not to change guest behavior, we have to
* disable preemption whenever we touch the epoch of kvm and the VCPUs,
* so a CPU won't be stopped while calculating with the epoch.
*/
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
void *v)
{
struct kvm *kvm;
struct kvm_vcpu *vcpu;
unsigned long i;
unsigned long long *delta = v;
list_for_each_entry(kvm, &vm_list, vm_list) {
kvm_for_each_vcpu(i, vcpu, kvm) {
kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
if (i == 0) {
kvm->arch.epoch = vcpu->arch.sie_block->epoch;
kvm->arch.epdx = vcpu->arch.sie_block->epdx;
}
if (vcpu->arch.cputm_enabled)
vcpu->arch.cputm_start += *delta;
if (vcpu->arch.vsie_block)
kvm_clock_sync_scb(vcpu->arch.vsie_block,
*delta);
}
}
return NOTIFY_OK;
}
static struct notifier_block kvm_clock_notifier = {
.notifier_call = kvm_clock_sync,
};
static void allow_cpu_feat(unsigned long nr)
{
set_bit_inv(nr, kvm_s390_available_cpu_feat);
}
static inline int plo_test_bit(unsigned char nr)
{
unsigned long function = (unsigned long)nr | 0x100;
int cc;
asm volatile(
" lgr 0,%[function]\n"
/* Parameter registers are ignored for "test bit" */
" plo 0,0,0,0(0)\n"
" ipm %0\n"
" srl %0,28\n"
: "=d" (cc)
: [function] "d" (function)
: "cc", "0");
return cc == 0;
}
static __always_inline void __insn32_query(unsigned int opcode, u8 *query)
{
asm volatile(
" lghi 0,0\n"
" lgr 1,%[query]\n"
/* Parameter registers are ignored */
" .insn rrf,%[opc] << 16,2,4,6,0\n"
:
: [query] "d" ((unsigned long)query), [opc] "i" (opcode)
: "cc", "memory", "0", "1");
}
#define INSN_SORTL 0xb938
#define INSN_DFLTCC 0xb939
static void __init kvm_s390_cpu_feat_init(void)
{
int i;
for (i = 0; i < 256; ++i) {
if (plo_test_bit(i))
kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
}
if (test_facility(28)) /* TOD-clock steering */
ptff(kvm_s390_available_subfunc.ptff,
sizeof(kvm_s390_available_subfunc.ptff),
PTFF_QAF);
if (test_facility(17)) { /* MSA */
__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
kvm_s390_available_subfunc.kmac);
__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
kvm_s390_available_subfunc.kmc);
__cpacf_query(CPACF_KM, (cpacf_mask_t *)
kvm_s390_available_subfunc.km);
__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
kvm_s390_available_subfunc.kimd);
__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
kvm_s390_available_subfunc.klmd);
}
if (test_facility(76)) /* MSA3 */
__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
kvm_s390_available_subfunc.pckmo);
if (test_facility(77)) { /* MSA4 */
__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
kvm_s390_available_subfunc.kmctr);
__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
kvm_s390_available_subfunc.kmf);
__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
kvm_s390_available_subfunc.kmo);
__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
kvm_s390_available_subfunc.pcc);
}
if (test_facility(57)) /* MSA5 */
__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
kvm_s390_available_subfunc.ppno);
if (test_facility(146)) /* MSA8 */
__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
kvm_s390_available_subfunc.kma);
if (test_facility(155)) /* MSA9 */
__cpacf_query(CPACF_KDSA, (cpacf_mask_t *)
kvm_s390_available_subfunc.kdsa);
if (test_facility(150)) /* SORTL */
__insn32_query(INSN_SORTL, kvm_s390_available_subfunc.sortl);
if (test_facility(151)) /* DFLTCC */
__insn32_query(INSN_DFLTCC, kvm_s390_available_subfunc.dfltcc);
if (MACHINE_HAS_ESOP)
allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
/*
* We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
* 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
*/
if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
!test_facility(3) || !nested)
return;
allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
if (sclp.has_64bscao)
allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
if (sclp.has_siif)
allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
if (sclp.has_gpere)
allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
if (sclp.has_gsls)
allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
if (sclp.has_ib)
allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
if (sclp.has_cei)
allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
if (sclp.has_ibs)
allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
if (sclp.has_kss)
allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
/*
* KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
* all skey handling functions read/set the skey from the PGSTE
* instead of the real storage key.
*
* KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
* pages being detected as preserved although they are resident.
*
* KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
* have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
*
* For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
* KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
* correctly shadowed. We can do that for the PGSTE but not for PTE.I.
*
* KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
* cannot easily shadow the SCA because of the ipte lock.
*/
}
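/*
 * Illustrative aside (not part of this file): the subfunction maps filled in
 * above use the s390 MSB-0 convention, i.e. "map[i >> 3] |= 0x80 >> (i & 7)"
 * sets bit i counting from the most significant bit of byte 0. A standalone
 * userspace sketch with hypothetical names:
 */
#include <stdint.h>
#include <stdio.h>

static void set_msb0_bit(uint8_t *map, unsigned int nr)
{
	map[nr >> 3] |= 0x80 >> (nr & 7);
}

int main(void)
{
	uint8_t mask[4] = { 0 };

	set_msb0_bit(mask, 0);	/* byte 0 becomes 0x80 */
	set_msb0_bit(mask, 9);	/* byte 1 becomes 0x40 */
	printf("%02x %02x\n", mask[0], mask[1]);
	return 0;
}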
static int __init __kvm_s390_init(void)
{
int rc = -ENOMEM;
kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
if (!kvm_s390_dbf)
return -ENOMEM;
kvm_s390_dbf_uv = debug_register("kvm-uv", 32, 1, 7 * sizeof(long));
if (!kvm_s390_dbf_uv)
goto err_kvm_uv;
if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view) ||
debug_register_view(kvm_s390_dbf_uv, &debug_sprintf_view))
goto err_debug_view;
kvm_s390_cpu_feat_init();
/* Register floating interrupt controller interface. */
rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
if (rc) {
pr_err("A FLIC registration call failed with rc=%d\n", rc);
goto err_flic;
}
if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM)) {
rc = kvm_s390_pci_init();
if (rc) {
pr_err("Unable to allocate AIFT for PCI\n");
goto err_pci;
}
}
rc = kvm_s390_gib_init(GAL_ISC);
if (rc)
goto err_gib;
gmap_notifier.notifier_call = kvm_gmap_notifier;
gmap_register_pte_notifier(&gmap_notifier);
vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
gmap_register_pte_notifier(&vsie_gmap_notifier);
atomic_notifier_chain_register(&s390_epoch_delta_notifier,
&kvm_clock_notifier);
return 0;
err_gib:
if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
kvm_s390_pci_exit();
err_pci:
err_flic:
err_debug_view:
debug_unregister(kvm_s390_dbf_uv);
err_kvm_uv:
debug_unregister(kvm_s390_dbf);
return rc;
}
static void __kvm_s390_exit(void)
{
gmap_unregister_pte_notifier(&gmap_notifier);
gmap_unregister_pte_notifier(&vsie_gmap_notifier);
atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
&kvm_clock_notifier);
kvm_s390_gib_destroy();
if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
kvm_s390_pci_exit();
debug_unregister(kvm_s390_dbf);
debug_unregister(kvm_s390_dbf_uv);
}
/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
if (ioctl == KVM_S390_ENABLE_SIE)
return s390_enable_sie();
return -EINVAL;
}
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
int r;
switch (ext) {
case KVM_CAP_S390_PSW:
case KVM_CAP_S390_GMAP:
case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
case KVM_CAP_S390_UCONTROL:
#endif
case KVM_CAP_ASYNC_PF:
case KVM_CAP_SYNC_REGS:
case KVM_CAP_ONE_REG:
case KVM_CAP_ENABLE_CAP:
case KVM_CAP_S390_CSS_SUPPORT:
case KVM_CAP_IOEVENTFD:
case KVM_CAP_DEVICE_CTRL:
case KVM_CAP_S390_IRQCHIP:
case KVM_CAP_VM_ATTRIBUTES:
case KVM_CAP_MP_STATE:
case KVM_CAP_IMMEDIATE_EXIT:
case KVM_CAP_S390_INJECT_IRQ:
case KVM_CAP_S390_USER_SIGP:
case KVM_CAP_S390_USER_STSI:
case KVM_CAP_S390_SKEYS:
case KVM_CAP_S390_IRQ_STATE:
case KVM_CAP_S390_USER_INSTR0:
case KVM_CAP_S390_CMMA_MIGRATION:
case KVM_CAP_S390_AIS:
case KVM_CAP_S390_AIS_MIGRATION:
case KVM_CAP_S390_VCPU_RESETS:
case KVM_CAP_SET_GUEST_DEBUG:
case KVM_CAP_S390_DIAG318:
case KVM_CAP_IRQFD_RESAMPLE:
r = 1;
break;
case KVM_CAP_SET_GUEST_DEBUG2:
r = KVM_GUESTDBG_VALID_MASK;
break;
case KVM_CAP_S390_HPAGE_1M:
r = 0;
if (hpage && !kvm_is_ucontrol(kvm))
r = 1;
break;
case KVM_CAP_S390_MEM_OP:
r = MEM_OP_MAX_SIZE;
break;
case KVM_CAP_S390_MEM_OP_EXTENSION:
/*
* Flag bits indicating which extensions are supported.
* If r > 0, the base extension must also be supported/indicated,
* in order to maintain backwards compatibility.
*/
r = KVM_S390_MEMOP_EXTENSION_CAP_BASE |
KVM_S390_MEMOP_EXTENSION_CAP_CMPXCHG;
break;
case KVM_CAP_NR_VCPUS:
case KVM_CAP_MAX_VCPUS:
case KVM_CAP_MAX_VCPU_ID:
r = KVM_S390_BSCA_CPU_SLOTS;
if (!kvm_s390_use_sca_entries())
r = KVM_MAX_VCPUS;
else if (sclp.has_esca && sclp.has_64bscao)
r = KVM_S390_ESCA_CPU_SLOTS;
if (ext == KVM_CAP_NR_VCPUS)
r = min_t(unsigned int, num_online_cpus(), r);
break;
case KVM_CAP_S390_COW:
r = MACHINE_HAS_ESOP;
break;
case KVM_CAP_S390_VECTOR_REGISTERS:
r = MACHINE_HAS_VX;
break;
case KVM_CAP_S390_RI:
r = test_facility(64);
break;
case KVM_CAP_S390_GS:
r = test_facility(133);
break;
case KVM_CAP_S390_BPB:
r = test_facility(82);
break;
case KVM_CAP_S390_PROTECTED_ASYNC_DISABLE:
r = async_destroy && is_prot_virt_host();
break;
case KVM_CAP_S390_PROTECTED:
r = is_prot_virt_host();
break;
case KVM_CAP_S390_PROTECTED_DUMP: {
u64 pv_cmds_dump[] = {
BIT_UVC_CMD_DUMP_INIT,
BIT_UVC_CMD_DUMP_CONFIG_STOR_STATE,
BIT_UVC_CMD_DUMP_CPU,
BIT_UVC_CMD_DUMP_COMPLETE,
};
int i;
r = is_prot_virt_host();
for (i = 0; i < ARRAY_SIZE(pv_cmds_dump); i++) {
if (!test_bit_inv(pv_cmds_dump[i],
(unsigned long *)&uv_info.inst_calls_list)) {
r = 0;
break;
}
}
break;
}
case KVM_CAP_S390_ZPCI_OP:
r = kvm_s390_pci_interp_allowed();
break;
case KVM_CAP_S390_CPU_TOPOLOGY:
r = test_facility(11);
break;
default:
r = 0;
}
return r;
}
void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
int i;
gfn_t cur_gfn, last_gfn;
unsigned long gaddr, vmaddr;
struct gmap *gmap = kvm->arch.gmap;
DECLARE_BITMAP(bitmap, _PAGE_ENTRIES);
/* Loop over all guest segments */
cur_gfn = memslot->base_gfn;
last_gfn = memslot->base_gfn + memslot->npages;
for (; cur_gfn <= last_gfn; cur_gfn += _PAGE_ENTRIES) {
gaddr = gfn_to_gpa(cur_gfn);
vmaddr = gfn_to_hva_memslot(memslot, cur_gfn);
if (kvm_is_error_hva(vmaddr))
continue;
bitmap_zero(bitmap, _PAGE_ENTRIES);
gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
for (i = 0; i < _PAGE_ENTRIES; i++) {
if (test_bit(i, bitmap))
mark_page_dirty(kvm, cur_gfn + i);
}
if (fatal_signal_pending(current))
return;
cond_resched();
}
}
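/*
 * Illustrative aside (not part of this file): the loop above walks the slot
 * one segment (_PAGE_ENTRIES pages) at a time and fans the per-segment
 * bitmap out into per-page dirty marks. A standalone userspace sketch of
 * that fan-out, with hypothetical names and a 16-page segment:
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_ENTRIES 16

static void mark_dirty_pages(uint64_t base_gfn, uint32_t bitmap)
{
	for (int i = 0; i < DEMO_ENTRIES; i++)
		if (bitmap & (1u << i))
			printf("gfn %#llx dirty\n",
			       (unsigned long long)(base_gfn + i));
}

int main(void)
{
	mark_dirty_pages(0x1000, 0x5);	/* pages 0 and 2 of the segment */
	return 0;
}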
/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);
/*
* Get (and clear) the dirty memory log for a memory slot.
*/
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
struct kvm_dirty_log *log)
{
int r;
unsigned long n;
struct kvm_memory_slot *memslot;
int is_dirty;
if (kvm_is_ucontrol(kvm))
return -EINVAL;
mutex_lock(&kvm->slots_lock);
r = -EINVAL;
if (log->slot >= KVM_USER_MEM_SLOTS)
goto out;
r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot);
if (r)
goto out;
/* Clear the dirty log */
if (is_dirty) {
n = kvm_dirty_bitmap_bytes(memslot);
memset(memslot->dirty_bitmap, 0, n);
}
r = 0;
out:
mutex_unlock(&kvm->slots_lock);
return r;
}
static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
{
unsigned long i;
struct kvm_vcpu *vcpu;
kvm_for_each_vcpu(i, vcpu, kvm) {
kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
}
}
int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
int r;
if (cap->flags)
return -EINVAL;
switch (cap->cap) {
case KVM_CAP_S390_IRQCHIP:
VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
kvm->arch.use_irqchip = 1;
r = 0;
break;
case KVM_CAP_S390_USER_SIGP:
VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
kvm->arch.user_sigp = 1;
r = 0;
break;
case KVM_CAP_S390_VECTOR_REGISTERS:
mutex_lock(&kvm->lock);
if (kvm->created_vcpus) {
r = -EBUSY;
} else if (MACHINE_HAS_VX) {
set_kvm_facility(kvm->arch.model.fac_mask, 129);
set_kvm_facility(kvm->arch.model.fac_list, 129);
if (test_facility(134)) {
set_kvm_facility(kvm->arch.model.fac_mask, 134);
set_kvm_facility(kvm->arch.model.fac_list, 134);
}
if (test_facility(135)) {
set_kvm_facility(kvm->arch.model.fac_mask, 135);
set_kvm_facility(kvm->arch.model.fac_list, 135);
}
if (test_facility(148)) {
set_kvm_facility(kvm->arch.model.fac_mask, 148);
set_kvm_facility(kvm->arch.model.fac_list, 148);
}
if (test_facility(152)) {
set_kvm_facility(kvm->arch.model.fac_mask, 152);
set_kvm_facility(kvm->arch.model.fac_list, 152);
}
if (test_facility(192)) {
set_kvm_facility(kvm->arch.model.fac_mask, 192);
set_kvm_facility(kvm->arch.model.fac_list, 192);
}
r = 0;
} else
r = -EINVAL;
mutex_unlock(&kvm->lock);
VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
r ? "(not available)" : "(success)");
break;
case KVM_CAP_S390_RI:
r = -EINVAL;
mutex_lock(&kvm->lock);
if (kvm->created_vcpus) {
r = -EBUSY;
} else if (test_facility(64)) {
set_kvm_facility(kvm->arch.model.fac_mask, 64);
set_kvm_facility(kvm->arch.model.fac_list, 64);
r = 0;
}
mutex_unlock(&kvm->lock);
VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
r ? "(not available)" : "(success)");
break;
case KVM_CAP_S390_AIS:
mutex_lock(&kvm->lock);
if (kvm->created_vcpus) {
r = -EBUSY;
} else {
set_kvm_facility(kvm->arch.model.fac_mask, 72);
set_kvm_facility(kvm->arch.model.fac_list, 72);
r = 0;
}
mutex_unlock(&kvm->lock);
VM_EVENT(kvm, 3, "ENABLE: AIS %s",
r ? "(not available)" : "(success)");
break;
case KVM_CAP_S390_GS:
r = -EINVAL;
mutex_lock(&kvm->lock);
if (kvm->created_vcpus) {
r = -EBUSY;
} else if (test_facility(133)) {
set_kvm_facility(kvm->arch.model.fac_mask, 133);
set_kvm_facility(kvm->arch.model.fac_list, 133);
r = 0;
}
mutex_unlock(&kvm->lock);
VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
r ? "(not available)" : "(success)");
break;
case KVM_CAP_S390_HPAGE_1M:
mutex_lock(&kvm->lock);
if (kvm->created_vcpus)
r = -EBUSY;
else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
r = -EINVAL;
else {
r = 0;
mmap_write_lock(kvm->mm);
kvm->mm->context.allow_gmap_hpage_1m = 1;
mmap_write_unlock(kvm->mm);
/*
 * We might have to create fake 4k page
 * tables. To prevent the hardware from working
 * on stale PGSTEs, we emulate these instructions.
 */
kvm->arch.use_skf = 0;
kvm->arch.use_pfmfi = 0;
}
mutex_unlock(&kvm->lock);
VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s",
r ? "(not available)" : "(success)");
break;
case KVM_CAP_S390_USER_STSI:
VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
kvm->arch.user_stsi = 1;
r = 0;
break;
case KVM_CAP_S390_USER_INSTR0:
VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
kvm->arch.user_instr0 = 1;
icpt_operexc_on_all_vcpus(kvm);
r = 0;
break;
case KVM_CAP_S390_CPU_TOPOLOGY:
r = -EINVAL;
mutex_lock(&kvm->lock);
if (kvm->created_vcpus) {
r = -EBUSY;
} else if (test_facility(11)) {
set_kvm_facility(kvm->arch.model.fac_mask, 11);
set_kvm_facility(kvm->arch.model.fac_list, 11);
r = 0;
}
mutex_unlock(&kvm->lock);
VM_EVENT(kvm, 3, "ENABLE: CAP_S390_CPU_TOPOLOGY %s",
r ? "(not available)" : "(success)");
break;
default:
r = -EINVAL;
break;
}
return r;
}
static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
int ret;
switch (attr->attr) {
case KVM_S390_VM_MEM_LIMIT_SIZE:
ret = 0;
VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
kvm->arch.mem_limit);
if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
ret = -EFAULT;
break;
default:
ret = -ENXIO;
break;
}
return ret;
}
static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
int ret;
unsigned int idx;
switch (attr->attr) {
case KVM_S390_VM_MEM_ENABLE_CMMA:
ret = -ENXIO;
if (!sclp.has_cmma)
break;
VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
mutex_lock(&kvm->lock);
if (kvm->created_vcpus)
ret = -EBUSY;
else if (kvm->mm->context.allow_gmap_hpage_1m)
ret = -EINVAL;
else {
kvm->arch.use_cmma = 1;
/* Not compatible with cmma. */
kvm->arch.use_pfmfi = 0;
ret = 0;
}
mutex_unlock(&kvm->lock);
break;
case KVM_S390_VM_MEM_CLR_CMMA:
ret = -ENXIO;
if (!sclp.has_cmma)
break;
ret = -EINVAL;
if (!kvm->arch.use_cmma)
break;
VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
mutex_lock(&kvm->lock);
idx = srcu_read_lock(&kvm->srcu);
s390_reset_cmma(kvm->arch.gmap->mm);
srcu_read_unlock(&kvm->srcu, idx);
mutex_unlock(&kvm->lock);
ret = 0;
break;
case KVM_S390_VM_MEM_LIMIT_SIZE: {
unsigned long new_limit;
if (kvm_is_ucontrol(kvm))
return -EINVAL;
if (get_user(new_limit, (u64 __user *)attr->addr))
return -EFAULT;
if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
new_limit > kvm->arch.mem_limit)
return -E2BIG;
if (!new_limit)
return -EINVAL;
/* gmap_create takes last usable address */
if (new_limit != KVM_S390_NO_MEM_LIMIT)
new_limit -= 1;
ret = -EBUSY;
mutex_lock(&kvm->lock);
if (!kvm->created_vcpus) {
/* gmap_create will round the limit up */
struct gmap *new = gmap_create(current->mm, new_limit);
if (!new) {
ret = -ENOMEM;
} else {
gmap_remove(kvm->arch.gmap);
new->private = kvm;
kvm->arch.gmap = new;
ret = 0;
}
}
mutex_unlock(&kvm->lock);
VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
(void *) kvm->arch.gmap->asce);
break;
}
default:
ret = -ENXIO;
break;
}
return ret;
}
static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
{
struct kvm_vcpu *vcpu;
unsigned long i;
kvm_s390_vcpu_block_all(kvm);
kvm_for_each_vcpu(i, vcpu, kvm) {
kvm_s390_vcpu_crypto_setup(vcpu);
/* recreate the shadow crycb by leaving the VSIE handler */
kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
}
kvm_s390_vcpu_unblock_all(kvm);
}
static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
mutex_lock(&kvm->lock);
switch (attr->attr) {
case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
if (!test_kvm_facility(kvm, 76)) {
mutex_unlock(&kvm->lock);
return -EINVAL;
}
get_random_bytes(
kvm->arch.crypto.crycb->aes_wrapping_key_mask,
sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
kvm->arch.crypto.aes_kw = 1;
VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
break;
case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
if (!test_kvm_facility(kvm, 76)) {
mutex_unlock(&kvm->lock);
return -EINVAL;
}
get_random_bytes(
kvm->arch.crypto.crycb->dea_wrapping_key_mask,
sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
kvm->arch.crypto.dea_kw = 1;
VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
break;
case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
if (!test_kvm_facility(kvm, 76)) {
mutex_unlock(&kvm->lock);
return -EINVAL;
}
kvm->arch.crypto.aes_kw = 0;
memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
break;
case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
if (!test_kvm_facility(kvm, 76)) {
mutex_unlock(&kvm->lock);
return -EINVAL;
}
kvm->arch.crypto.dea_kw = 0;
memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
break;
case KVM_S390_VM_CRYPTO_ENABLE_APIE:
if (!ap_instructions_available()) {
mutex_unlock(&kvm->lock);
return -EOPNOTSUPP;
}
kvm->arch.crypto.apie = 1;
break;
case KVM_S390_VM_CRYPTO_DISABLE_APIE:
if (!ap_instructions_available()) {
mutex_unlock(&kvm->lock);
return -EOPNOTSUPP;
}
kvm->arch.crypto.apie = 0;
break;
default:
mutex_unlock(&kvm->lock);
return -ENXIO;
}
kvm_s390_vcpu_crypto_reset_all(kvm);
mutex_unlock(&kvm->lock);
return 0;
}
static void kvm_s390_vcpu_pci_setup(struct kvm_vcpu *vcpu)
{
/* Only set the ECB bits after guest requests zPCI interpretation */
if (!vcpu->kvm->arch.use_zpci_interp)
return;
vcpu->arch.sie_block->ecb2 |= ECB2_ZPCI_LSI;
vcpu->arch.sie_block->ecb3 |= ECB3_AISII + ECB3_AISI;
}
void kvm_s390_vcpu_pci_enable_interp(struct kvm *kvm)
{
struct kvm_vcpu *vcpu;
unsigned long i;
lockdep_assert_held(&kvm->lock);
if (!kvm_s390_pci_interp_allowed())
return;
/*
 * If the host is configured for PCI and the necessary facilities
 * are available, turn on interpretation for the life of this guest.
 */
kvm->arch.use_zpci_interp = 1;
kvm_s390_vcpu_block_all(kvm);
kvm_for_each_vcpu(i, vcpu, kvm) {
kvm_s390_vcpu_pci_setup(vcpu);
kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
}
kvm_s390_vcpu_unblock_all(kvm);
}
static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
{
unsigned long cx;
struct kvm_vcpu *vcpu;
kvm_for_each_vcpu(cx, vcpu, kvm)
kvm_s390_sync_request(req, vcpu);
}
/*
* Must be called with kvm->srcu held to avoid races on memslots, and with
* kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
*/
static int kvm_s390_vm_start_migration(struct kvm *kvm)
{
struct kvm_memory_slot *ms;
struct kvm_memslots *slots;
unsigned long ram_pages = 0;
int bkt;
/* migration mode already enabled */
if (kvm->arch.migration_mode)
return 0;
slots = kvm_memslots(kvm);
if (!slots || kvm_memslots_empty(slots))
return -EINVAL;
if (!kvm->arch.use_cmma) {
kvm->arch.migration_mode = 1;
return 0;
}
/* mark all the pages in active slots as dirty */
kvm_for_each_memslot(ms, bkt, slots) {
if (!ms->dirty_bitmap)
return -EINVAL;
/*
* The second half of the bitmap is only used on x86,
* and would be wasted otherwise, so we put it to good
* use here to keep track of the state of the storage
* attributes.
*/
memset(kvm_second_dirty_bitmap(ms), 0xff, kvm_dirty_bitmap_bytes(ms));
ram_pages += ms->npages;
}
atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages);
kvm->arch.migration_mode = 1;
kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
return 0;
}
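/*
 * Illustrative aside (not part of this file): kvm_second_dirty_bitmap()
 * above assumes one allocation holding two equally sized bitmaps back to
 * back, so the second one starts at an offset of npages bits. A standalone
 * userspace sketch of that layout, with hypothetical names:
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	size_t npages = 512;
	size_t half = npages / 8;		/* bytes per bitmap */
	uint8_t *buf = calloc(2, half);		/* dirty log + second half */
	uint8_t *second;

	if (!buf)
		return 1;
	second = buf + half;
	memset(second, 0xff, half);		/* "all pages dirty" marker */
	printf("first[0]=%#x second[0]=%#x\n", buf[0], second[0]);
	free(buf);
	return 0;
}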
/*
* Must be called with kvm->slots_lock to avoid races with ourselves and
* kvm_s390_vm_start_migration.
*/
static int kvm_s390_vm_stop_migration(struct kvm *kvm)
{
/* migration mode already disabled */
if (!kvm->arch.migration_mode)
return 0;
kvm->arch.migration_mode = 0;
if (kvm->arch.use_cmma)
kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
return 0;
}
static int kvm_s390_vm_set_migration(struct kvm *kvm,
struct kvm_device_attr *attr)
{
int res = -ENXIO;
mutex_lock(&kvm->slots_lock);
switch (attr->attr) {
case KVM_S390_VM_MIGRATION_START:
res = kvm_s390_vm_start_migration(kvm);
break;
case KVM_S390_VM_MIGRATION_STOP:
res = kvm_s390_vm_stop_migration(kvm);
break;
default:
break;
}
mutex_unlock(&kvm->slots_lock);
return res;
}
static int kvm_s390_vm_get_migration(struct kvm *kvm,
struct kvm_device_attr *attr)
{
u64 mig = kvm->arch.migration_mode;
if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
return -ENXIO;
if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
return -EFAULT;
return 0;
}
static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod);
static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
struct kvm_s390_vm_tod_clock gtod;
if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
return -EFAULT;
if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
return -EINVAL;
__kvm_s390_set_tod_clock(kvm, &gtod);
VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
gtod.epoch_idx, gtod.tod);
return 0;
}
static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
u8 gtod_high;
if (copy_from_user(&gtod_high, (void __user *)attr->addr,
sizeof(gtod_high)))
return -EFAULT;
if (gtod_high != 0)
return -EINVAL;
VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);
return 0;
}
static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
struct kvm_s390_vm_tod_clock gtod = { 0 };
if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
sizeof(gtod.tod)))
return -EFAULT;
__kvm_s390_set_tod_clock(kvm, &gtod);
VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
return 0;
}
static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
int ret;
if (attr->flags)
return -EINVAL;
mutex_lock(&kvm->lock);
/*
* For protected guests, the TOD is managed by the ultravisor, so trying
* to change it will never bring the expected results.
*/
if (kvm_s390_pv_is_protected(kvm)) {
ret = -EOPNOTSUPP;
goto out_unlock;
}
switch (attr->attr) {
case KVM_S390_VM_TOD_EXT:
ret = kvm_s390_set_tod_ext(kvm, attr);
break;
case KVM_S390_VM_TOD_HIGH:
ret = kvm_s390_set_tod_high(kvm, attr);
break;
case KVM_S390_VM_TOD_LOW:
ret = kvm_s390_set_tod_low(kvm, attr);
break;
default:
ret = -ENXIO;
break;
}
out_unlock:
mutex_unlock(&kvm->lock);
return ret;
}
static void kvm_s390_get_tod_clock(struct kvm *kvm,
struct kvm_s390_vm_tod_clock *gtod)
{
union tod_clock clk;
preempt_disable();
store_tod_clock_ext(&clk);
gtod->tod = clk.tod + kvm->arch.epoch;
gtod->epoch_idx = 0;
if (test_kvm_facility(kvm, 139)) {
gtod->epoch_idx = clk.ei + kvm->arch.epdx;
if (gtod->tod < clk.tod)
gtod->epoch_idx += 1;
}
preempt_enable();
}
static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
struct kvm_s390_vm_tod_clock gtod;
memset(&gtod, 0, sizeof(gtod));
kvm_s390_get_tod_clock(kvm, &gtod);
if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
return -EFAULT;
VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx",
gtod.epoch_idx, gtod.tod);
return 0;
}
static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
u8 gtod_high = 0;
if (copy_to_user((void __user *)attr->addr, &gtod_high,
sizeof(gtod_high)))
return -EFAULT;
VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);
return 0;
}
static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
u64 gtod;
gtod = kvm_s390_get_tod_clock_fast(kvm);
if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
return -EFAULT;
VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);
return 0;
}
static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
int ret;
if (attr->flags)
return -EINVAL;
switch (attr->attr) {
case KVM_S390_VM_TOD_EXT:
ret = kvm_s390_get_tod_ext(kvm, attr);
break;
case KVM_S390_VM_TOD_HIGH:
ret = kvm_s390_get_tod_high(kvm, attr);
break;
case KVM_S390_VM_TOD_LOW:
ret = kvm_s390_get_tod_low(kvm, attr);
break;
default:
ret = -ENXIO;
break;
}
return ret;
}
static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
struct kvm_s390_vm_cpu_processor *proc;
u16 lowest_ibc, unblocked_ibc;
int ret = 0;
mutex_lock(&kvm->lock);
if (kvm->created_vcpus) {
ret = -EBUSY;
goto out;
}
proc = kzalloc(sizeof(*proc), GFP_KERNEL_ACCOUNT);
if (!proc) {
ret = -ENOMEM;
goto out;
}
if (!copy_from_user(proc, (void __user *)attr->addr,
sizeof(*proc))) {
kvm->arch.model.cpuid = proc->cpuid;
lowest_ibc = sclp.ibc >> 16 & 0xfff;
unblocked_ibc = sclp.ibc & 0xfff;
if (lowest_ibc && proc->ibc) {
if (proc->ibc > unblocked_ibc)
kvm->arch.model.ibc = unblocked_ibc;
else if (proc->ibc < lowest_ibc)
kvm->arch.model.ibc = lowest_ibc;
else
kvm->arch.model.ibc = proc->ibc;
}
memcpy(kvm->arch.model.fac_list, proc->fac_list,
S390_ARCH_FAC_LIST_SIZE_BYTE);
VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
kvm->arch.model.ibc,
kvm->arch.model.cpuid);
VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
kvm->arch.model.fac_list[0],
kvm->arch.model.fac_list[1],
kvm->arch.model.fac_list[2]);
} else
ret = -EFAULT;
kfree(proc);
out:
mutex_unlock(&kvm->lock);
return ret;
}
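/*
 * Illustrative aside (not part of this file): the IBC handling above is a
 * clamp of the requested value into [lowest_ibc, unblocked_ibc] whenever
 * both the machine and the request provide a value. A standalone userspace
 * sketch with hypothetical names:
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t clamp_ibc(uint16_t req, uint16_t lowest, uint16_t unblocked)
{
	if (req > unblocked)
		return unblocked;
	if (req < lowest)
		return lowest;
	return req;
}

int main(void)
{
	printf("%#x\n", clamp_ibc(0x300, 0x100, 0x200)); /* capped at 0x200 */
	printf("%#x\n", clamp_ibc(0x050, 0x100, 0x200)); /* raised to 0x100 */
	printf("%#x\n", clamp_ibc(0x150, 0x100, 0x200)); /* kept as is */
	return 0;
}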
static int kvm_s390_set_processor_feat(struct kvm *kvm,
struct kvm_device_attr *attr)
{
struct kvm_s390_vm_cpu_feat data;
if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
return -EFAULT;
if (!bitmap_subset((unsigned long *) data.feat,
kvm_s390_available_cpu_feat,
KVM_S390_VM_CPU_FEAT_NR_BITS))
return -EINVAL;
mutex_lock(&kvm->lock);
if (kvm->created_vcpus) {
mutex_unlock(&kvm->lock);
return -EBUSY;
}
bitmap_from_arr64(kvm->arch.cpu_feat, data.feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
mutex_unlock(&kvm->lock);
VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
data.feat[0],
data.feat[1],
data.feat[2]);
return 0;
}
static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
struct kvm_device_attr *attr)
{
mutex_lock(&kvm->lock);
if (kvm->created_vcpus) {
mutex_unlock(&kvm->lock);
return -EBUSY;
}
if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr,
sizeof(struct kvm_s390_vm_cpu_subfunc))) {
mutex_unlock(&kvm->lock);
return -EFAULT;
}
mutex_unlock(&kvm->lock);
VM_EVENT(kvm, 3, "SET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
VM_EVENT(kvm, 3, "SET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
VM_EVENT(kvm, 3, "SET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
VM_EVENT(kvm, 3, "SET: guest KMC subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
VM_EVENT(kvm, 3, "SET: guest KM subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
VM_EVENT(kvm, 3, "SET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
VM_EVENT(kvm, 3, "SET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
VM_EVENT(kvm, 3, "SET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
VM_EVENT(kvm, 3, "SET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
VM_EVENT(kvm, 3, "SET: guest KMF subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
VM_EVENT(kvm, 3, "SET: guest KMO subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
VM_EVENT(kvm, 3, "SET: guest PCC subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
VM_EVENT(kvm, 3, "SET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
VM_EVENT(kvm, 3, "SET: guest KMA subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
VM_EVENT(kvm, 3, "SET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
VM_EVENT(kvm, 3, "SET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
VM_EVENT(kvm, 3, "SET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
return 0;
}
#define KVM_S390_VM_CPU_UV_FEAT_GUEST_MASK \
( \
((struct kvm_s390_vm_cpu_uv_feat){ \
.ap = 1, \
.ap_intr = 1, \
}) \
.feat \
)
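/*
 * Illustrative aside (not part of this file): the macro above evaluates a
 * designated-initializer compound literal and reads back its raw .feat
 * word, yielding the numeric mask of the permitted bits. A standalone C11
 * userspace sketch with a hypothetical stand-in type (the exact bit
 * positions depend on the bit-field layout of the target ABI):
 */
#include <stdint.h>
#include <stdio.h>

struct demo_uv_feat {
	union {
		struct {
			uint64_t misc : 1;
			uint64_t ap : 1;
			uint64_t ap_intr : 1;
		};
		uint64_t feat;
	};
};

#define DEMO_UV_FEAT_MASK \
	(((struct demo_uv_feat){ .ap = 1, .ap_intr = 1 }).feat)

int main(void)
{
	printf("mask=%#llx\n", (unsigned long long)DEMO_UV_FEAT_MASK);
	return 0;
}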
static int kvm_s390_set_uv_feat(struct kvm *kvm, struct kvm_device_attr *attr)
{
struct kvm_s390_vm_cpu_uv_feat __user *ptr = (void __user *)attr->addr;
unsigned long data, filter;
filter = uv_info.uv_feature_indications & KVM_S390_VM_CPU_UV_FEAT_GUEST_MASK;
if (get_user(data, &ptr->feat))
return -EFAULT;
if (!bitmap_subset(&data, &filter, KVM_S390_VM_CPU_UV_FEAT_NR_BITS))
return -EINVAL;
mutex_lock(&kvm->lock);
if (kvm->created_vcpus) {
mutex_unlock(&kvm->lock);
return -EBUSY;
}
kvm->arch.model.uv_feat_guest.feat = data;
mutex_unlock(&kvm->lock);
VM_EVENT(kvm, 3, "SET: guest UV-feat: 0x%16.16lx", data);
return 0;
}
static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
int ret = -ENXIO;
switch (attr->attr) {
case KVM_S390_VM_CPU_PROCESSOR:
ret = kvm_s390_set_processor(kvm, attr);
break;
case KVM_S390_VM_CPU_PROCESSOR_FEAT:
ret = kvm_s390_set_processor_feat(kvm, attr);
break;
case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
ret = kvm_s390_set_processor_subfunc(kvm, attr);
break;
case KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST:
ret = kvm_s390_set_uv_feat(kvm, attr);
break;
}
return ret;
}
static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
struct kvm_s390_vm_cpu_processor *proc;
int ret = 0;
proc = kzalloc(sizeof(*proc), GFP_KERNEL_ACCOUNT);
if (!proc) {
ret = -ENOMEM;
goto out;
}
proc->cpuid = kvm->arch.model.cpuid;
proc->ibc = kvm->arch.model.ibc;
memcpy(&proc->fac_list, kvm->arch.model.fac_list,
S390_ARCH_FAC_LIST_SIZE_BYTE);
VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
kvm->arch.model.ibc,
kvm->arch.model.cpuid);
VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
kvm->arch.model.fac_list[0],
kvm->arch.model.fac_list[1],
kvm->arch.model.fac_list[2]);
if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
ret = -EFAULT;
kfree(proc);
out:
return ret;
}
static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
struct kvm_s390_vm_cpu_machine *mach;
int ret = 0;
mach = kzalloc(sizeof(*mach), GFP_KERNEL_ACCOUNT);
if (!mach) {
ret = -ENOMEM;
goto out;
}
get_cpu_id((struct cpuid *) &mach->cpuid);
mach->ibc = sclp.ibc;
memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
S390_ARCH_FAC_LIST_SIZE_BYTE);
memcpy((unsigned long *)&mach->fac_list, stfle_fac_list,
sizeof(stfle_fac_list));
VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx",
kvm->arch.model.ibc,
kvm->arch.model.cpuid);
VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx",
mach->fac_mask[0],
mach->fac_mask[1],
mach->fac_mask[2]);
VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx",
mach->fac_list[0],
mach->fac_list[1],
mach->fac_list[2]);
if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
ret = -EFAULT;
kfree(mach);
out:
return ret;
}
static int kvm_s390_get_processor_feat(struct kvm *kvm,
struct kvm_device_attr *attr)
{
struct kvm_s390_vm_cpu_feat data;
bitmap_to_arr64(data.feat, kvm->arch.cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
return -EFAULT;
VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
data.feat[0],
data.feat[1],
data.feat[2]);
return 0;
}
static int kvm_s390_get_machine_feat(struct kvm *kvm,
struct kvm_device_attr *attr)
{
struct kvm_s390_vm_cpu_feat data;
bitmap_to_arr64(data.feat, kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
return -EFAULT;
VM_EVENT(kvm, 3, "GET: host feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
data.feat[0],
data.feat[1],
data.feat[2]);
return 0;
}
static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
struct kvm_device_attr *attr)
{
if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs,
sizeof(struct kvm_s390_vm_cpu_subfunc)))
return -EFAULT;
VM_EVENT(kvm, 3, "GET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
VM_EVENT(kvm, 3, "GET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
VM_EVENT(kvm, 3, "GET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
VM_EVENT(kvm, 3, "GET: guest KMC subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
VM_EVENT(kvm, 3, "GET: guest KM subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
VM_EVENT(kvm, 3, "GET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
VM_EVENT(kvm, 3, "GET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
VM_EVENT(kvm, 3, "GET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
VM_EVENT(kvm, 3, "GET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
VM_EVENT(kvm, 3, "GET: guest KMF subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
VM_EVENT(kvm, 3, "GET: guest KMO subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
VM_EVENT(kvm, 3, "GET: guest PCC subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
VM_EVENT(kvm, 3, "GET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
VM_EVENT(kvm, 3, "GET: guest KMA subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
VM_EVENT(kvm, 3, "GET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
VM_EVENT(kvm, 3, "GET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
VM_EVENT(kvm, 3, "GET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
return 0;
}
static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
struct kvm_device_attr *attr)
{
if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
sizeof(struct kvm_s390_vm_cpu_subfunc)))
return -EFAULT;
VM_EVENT(kvm, 3, "GET: host PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
((unsigned long *) &kvm_s390_available_subfunc.plo)[0],
((unsigned long *) &kvm_s390_available_subfunc.plo)[1],
((unsigned long *) &kvm_s390_available_subfunc.plo)[2],
((unsigned long *) &kvm_s390_available_subfunc.plo)[3]);
VM_EVENT(kvm, 3, "GET: host PTFF subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm_s390_available_subfunc.ptff)[0],
((unsigned long *) &kvm_s390_available_subfunc.ptff)[1]);
VM_EVENT(kvm, 3, "GET: host KMAC subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm_s390_available_subfunc.kmac)[0],
((unsigned long *) &kvm_s390_available_subfunc.kmac)[1]);
VM_EVENT(kvm, 3, "GET: host KMC subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm_s390_available_subfunc.kmc)[0],
((unsigned long *) &kvm_s390_available_subfunc.kmc)[1]);
VM_EVENT(kvm, 3, "GET: host KM subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm_s390_available_subfunc.km)[0],
((unsigned long *) &kvm_s390_available_subfunc.km)[1]);
VM_EVENT(kvm, 3, "GET: host KIMD subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm_s390_available_subfunc.kimd)[0],
((unsigned long *) &kvm_s390_available_subfunc.kimd)[1]);
VM_EVENT(kvm, 3, "GET: host KLMD subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm_s390_available_subfunc.klmd)[0],
((unsigned long *) &kvm_s390_available_subfunc.klmd)[1]);
VM_EVENT(kvm, 3, "GET: host PCKMO subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm_s390_available_subfunc.pckmo)[0],
((unsigned long *) &kvm_s390_available_subfunc.pckmo)[1]);
VM_EVENT(kvm, 3, "GET: host KMCTR subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm_s390_available_subfunc.kmctr)[0],
((unsigned long *) &kvm_s390_available_subfunc.kmctr)[1]);
VM_EVENT(kvm, 3, "GET: host KMF subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm_s390_available_subfunc.kmf)[0],
((unsigned long *) &kvm_s390_available_subfunc.kmf)[1]);
VM_EVENT(kvm, 3, "GET: host KMO subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm_s390_available_subfunc.kmo)[0],
((unsigned long *) &kvm_s390_available_subfunc.kmo)[1]);
VM_EVENT(kvm, 3, "GET: host PCC subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm_s390_available_subfunc.pcc)[0],
((unsigned long *) &kvm_s390_available_subfunc.pcc)[1]);
VM_EVENT(kvm, 3, "GET: host PPNO subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm_s390_available_subfunc.ppno)[0],
((unsigned long *) &kvm_s390_available_subfunc.ppno)[1]);
VM_EVENT(kvm, 3, "GET: host KMA subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm_s390_available_subfunc.kma)[0],
((unsigned long *) &kvm_s390_available_subfunc.kma)[1]);
VM_EVENT(kvm, 3, "GET: host KDSA subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm_s390_available_subfunc.kdsa)[0],
((unsigned long *) &kvm_s390_available_subfunc.kdsa)[1]);
VM_EVENT(kvm, 3, "GET: host SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
((unsigned long *) &kvm_s390_available_subfunc.sortl)[0],
((unsigned long *) &kvm_s390_available_subfunc.sortl)[1],
((unsigned long *) &kvm_s390_available_subfunc.sortl)[2],
((unsigned long *) &kvm_s390_available_subfunc.sortl)[3]);
VM_EVENT(kvm, 3, "GET: host DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[0],
((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[1],
((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[2],
((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[3]);
return 0;
}
static int kvm_s390_get_processor_uv_feat(struct kvm *kvm, struct kvm_device_attr *attr)
{
struct kvm_s390_vm_cpu_uv_feat __user *dst = (void __user *)attr->addr;
unsigned long feat = kvm->arch.model.uv_feat_guest.feat;
if (put_user(feat, &dst->feat))
return -EFAULT;
VM_EVENT(kvm, 3, "GET: guest UV-feat: 0x%16.16lx", feat);
return 0;
}
static int kvm_s390_get_machine_uv_feat(struct kvm *kvm, struct kvm_device_attr *attr)
{
struct kvm_s390_vm_cpu_uv_feat __user *dst = (void __user *)attr->addr;
unsigned long feat;
BUILD_BUG_ON(sizeof(*dst) != sizeof(uv_info.uv_feature_indications));
feat = uv_info.uv_feature_indications & KVM_S390_VM_CPU_UV_FEAT_GUEST_MASK;
if (put_user(feat, &dst->feat))
return -EFAULT;
VM_EVENT(kvm, 3, "GET: guest UV-feat: 0x%16.16lx", feat);
return 0;
}
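/* Dispatch the KVM_GET_DEVICE_ATTR subcommands of the CPU model group. */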
static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
int ret = -ENXIO;
switch (attr->attr) {
case KVM_S390_VM_CPU_PROCESSOR:
ret = kvm_s390_get_processor(kvm, attr);
break;
case KVM_S390_VM_CPU_MACHINE:
ret = kvm_s390_get_machine(kvm, attr);
break;
case KVM_S390_VM_CPU_PROCESSOR_FEAT:
ret = kvm_s390_get_processor_feat(kvm, attr);
break;
case KVM_S390_VM_CPU_MACHINE_FEAT:
ret = kvm_s390_get_machine_feat(kvm, attr);
break;
case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
ret = kvm_s390_get_processor_subfunc(kvm, attr);
break;
case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
ret = kvm_s390_get_machine_subfunc(kvm, attr);
break;
case KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST:
ret = kvm_s390_get_processor_uv_feat(kvm, attr);
break;
case KVM_S390_VM_CPU_MACHINE_UV_FEAT_GUEST:
ret = kvm_s390_get_machine_uv_feat(kvm, attr);
break;
}
return ret;
}
/**
* kvm_s390_update_topology_change_report - update CPU topology change report
* @kvm: guest KVM description
* @val: set or clear the MTCR bit
*
 * Updates the Multiprocessor Topology-Change-Report bit to signal
 * a topology change to the guest.
 * This is only relevant if the topology facility is present.
 *
 * The SCA version, bsca or esca, doesn't matter as the offset is the same.
*/
static void kvm_s390_update_topology_change_report(struct kvm *kvm, bool val)
{
union sca_utility new, old;
struct bsca_block *sca;
read_lock(&kvm->arch.sca_lock);
sca = kvm->arch.sca;
do {
old = READ_ONCE(sca->utility);
new = old;
new.mtcr = val;
} while (cmpxchg(&sca->utility.val, old.val, new.val) != old.val);
read_unlock(&kvm->arch.sca_lock);
}
static int kvm_s390_set_topo_change_indication(struct kvm *kvm,
struct kvm_device_attr *attr)
{
if (!test_kvm_facility(kvm, 11))
return -ENXIO;
kvm_s390_update_topology_change_report(kvm, !!attr->attr);
return 0;
}
static int kvm_s390_get_topo_change_indication(struct kvm *kvm,
struct kvm_device_attr *attr)
{
u8 topo;
if (!test_kvm_facility(kvm, 11))
return -ENXIO;
read_lock(&kvm->arch.sca_lock);
topo = ((struct bsca_block *)kvm->arch.sca)->utility.mtcr;
read_unlock(&kvm->arch.sca_lock);
return put_user(topo, (u8 __user *)attr->addr);
}
static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
int ret;
switch (attr->group) {
case KVM_S390_VM_MEM_CTRL:
ret = kvm_s390_set_mem_control(kvm, attr);
break;
case KVM_S390_VM_TOD:
ret = kvm_s390_set_tod(kvm, attr);
break;
case KVM_S390_VM_CPU_MODEL:
ret = kvm_s390_set_cpu_model(kvm, attr);
break;
case KVM_S390_VM_CRYPTO:
ret = kvm_s390_vm_set_crypto(kvm, attr);
break;
case KVM_S390_VM_MIGRATION:
ret = kvm_s390_vm_set_migration(kvm, attr);
break;
case KVM_S390_VM_CPU_TOPOLOGY:
ret = kvm_s390_set_topo_change_indication(kvm, attr);
break;
default:
ret = -ENXIO;
break;
}
return ret;
}
static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
int ret;
switch (attr->group) {
case KVM_S390_VM_MEM_CTRL:
ret = kvm_s390_get_mem_control(kvm, attr);
break;
case KVM_S390_VM_TOD:
ret = kvm_s390_get_tod(kvm, attr);
break;
case KVM_S390_VM_CPU_MODEL:
ret = kvm_s390_get_cpu_model(kvm, attr);
break;
case KVM_S390_VM_MIGRATION:
ret = kvm_s390_vm_get_migration(kvm, attr);
break;
case KVM_S390_VM_CPU_TOPOLOGY:
ret = kvm_s390_get_topo_change_indication(kvm, attr);
break;
default:
ret = -ENXIO;
break;
}
return ret;
}
static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
int ret;
switch (attr->group) {
case KVM_S390_VM_MEM_CTRL:
switch (attr->attr) {
case KVM_S390_VM_MEM_ENABLE_CMMA:
case KVM_S390_VM_MEM_CLR_CMMA:
ret = sclp.has_cmma ? 0 : -ENXIO;
break;
case KVM_S390_VM_MEM_LIMIT_SIZE:
ret = 0;
break;
default:
ret = -ENXIO;
break;
}
break;
case KVM_S390_VM_TOD:
switch (attr->attr) {
case KVM_S390_VM_TOD_LOW:
case KVM_S390_VM_TOD_HIGH:
ret = 0;
break;
default:
ret = -ENXIO;
break;
}
break;
case KVM_S390_VM_CPU_MODEL:
switch (attr->attr) {
case KVM_S390_VM_CPU_PROCESSOR:
case KVM_S390_VM_CPU_MACHINE:
case KVM_S390_VM_CPU_PROCESSOR_FEAT:
case KVM_S390_VM_CPU_MACHINE_FEAT:
case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
case KVM_S390_VM_CPU_MACHINE_UV_FEAT_GUEST:
case KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST:
ret = 0;
break;
default:
ret = -ENXIO;
break;
}
break;
case KVM_S390_VM_CRYPTO:
switch (attr->attr) {
case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
ret = 0;
break;
case KVM_S390_VM_CRYPTO_ENABLE_APIE:
case KVM_S390_VM_CRYPTO_DISABLE_APIE:
ret = ap_instructions_available() ? 0 : -ENXIO;
break;
default:
ret = -ENXIO;
break;
}
break;
case KVM_S390_VM_MIGRATION:
ret = 0;
break;
case KVM_S390_VM_CPU_TOPOLOGY:
ret = test_kvm_facility(kvm, 11) ? 0 : -ENXIO;
break;
default:
ret = -ENXIO;
break;
}
return ret;
}
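/*
 * Read the storage keys of args->count guest frames starting at
 * args->start_gfn and copy them to user space. Returns
 * KVM_S390_GET_SKEYS_NONE if the guest does not use storage keys at all.
 */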
static int kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
uint8_t *keys;
uint64_t hva;
int srcu_idx, i, r = 0;
if (args->flags != 0)
return -EINVAL;
/* Is this guest using storage keys? */
if (!mm_uses_skeys(current->mm))
return KVM_S390_GET_SKEYS_NONE;
/* Enforce sane limit on memory allocation */
if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
return -EINVAL;
keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT);
if (!keys)
return -ENOMEM;
mmap_read_lock(current->mm);
srcu_idx = srcu_read_lock(&kvm->srcu);
for (i = 0; i < args->count; i++) {
hva = gfn_to_hva(kvm, args->start_gfn + i);
if (kvm_is_error_hva(hva)) {
r = -EFAULT;
break;
}
r = get_guest_storage_key(current->mm, hva, &keys[i]);
if (r)
break;
}
srcu_read_unlock(&kvm->srcu, srcu_idx);
mmap_read_unlock(current->mm);
if (!r) {
r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
sizeof(uint8_t) * args->count);
if (r)
r = -EFAULT;
}
kvfree(keys);
return r;
}
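/*
 * Write user-provided storage keys to the guest frames starting at
 * args->start_gfn. Storage key handling is enabled for the whole guest
 * first; pages are faulted in writable as needed.
 */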
static int kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
uint8_t *keys;
uint64_t hva;
int srcu_idx, i, r = 0;
bool unlocked;
if (args->flags != 0)
return -EINVAL;
/* Enforce sane limit on memory allocation */
if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
return -EINVAL;
keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT);
if (!keys)
return -ENOMEM;
r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
sizeof(uint8_t) * args->count);
if (r) {
r = -EFAULT;
goto out;
}
/* Enable storage key handling for the guest */
r = s390_enable_skey();
if (r)
goto out;
i = 0;
mmap_read_lock(current->mm);
srcu_idx = srcu_read_lock(&kvm->srcu);
while (i < args->count) {
unlocked = false;
hva = gfn_to_hva(kvm, args->start_gfn + i);
if (kvm_is_error_hva(hva)) {
r = -EFAULT;
break;
}
/* Lowest order bit is reserved */
if (keys[i] & 0x01) {
r = -EINVAL;
break;
}
r = set_guest_storage_key(current->mm, hva, keys[i], 0);
if (r) {
r = fixup_user_fault(current->mm, hva,
FAULT_FLAG_WRITE, &unlocked);
if (r)
break;
}
if (!r)
i++;
}
srcu_read_unlock(&kvm->srcu, srcu_idx);
mmap_read_unlock(current->mm);
out:
kvfree(keys);
return r;
}
/*
* Base address and length must be sent at the start of each block, therefore
* it's cheaper to send some clean data, as long as it's less than the size of
* two longs.
*/
#define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *))
/* for consistency */
#define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)
static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
u8 *res, unsigned long bufsize)
{
unsigned long pgstev, hva, cur_gfn = args->start_gfn;
args->count = 0;
while (args->count < bufsize) {
hva = gfn_to_hva(kvm, cur_gfn);
/*
* We return an error if the first value was invalid, but we
* return successfully if at least one value was copied.
*/
if (kvm_is_error_hva(hva))
return args->count ? 0 : -EFAULT;
if (get_pgste(kvm->mm, hva, &pgstev) < 0)
pgstev = 0;
res[args->count++] = (pgstev >> 24) & 0x43;
cur_gfn++;
}
return 0;
}
static struct kvm_memory_slot *gfn_to_memslot_approx(struct kvm_memslots *slots,
gfn_t gfn)
{
return ____gfn_to_memslot(slots, gfn, true);
}
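/*
 * Find the guest frame number of the next page with a set CMMA dirty
 * bit, starting the search at cur_gfn and wrapping around at the end of
 * the memslot gfn tree.
 */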
static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots,
unsigned long cur_gfn)
{
struct kvm_memory_slot *ms = gfn_to_memslot_approx(slots, cur_gfn);
unsigned long ofs = cur_gfn - ms->base_gfn;
struct rb_node *mnode = &ms->gfn_node[slots->node_idx];
if (ms->base_gfn + ms->npages <= cur_gfn) {
mnode = rb_next(mnode);
/* If we are above the highest slot, wrap around */
if (!mnode)
mnode = rb_first(&slots->gfn_tree);
ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]);
ofs = 0;
}
if (cur_gfn < ms->base_gfn)
ofs = 0;
ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs);
while (ofs >= ms->npages && (mnode = rb_next(mnode))) {
ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]);
ofs = find_first_bit(kvm_second_dirty_bitmap(ms), ms->npages);
}
return ms->base_gfn + ofs;
}
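/*
 * Harvest dirty CMMA attributes starting at args->start_gfn, clearing
 * each dirty bit as the corresponding value is saved into the buffer.
 */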
static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
u8 *res, unsigned long bufsize)
{
unsigned long mem_end, cur_gfn, next_gfn, hva, pgstev;
struct kvm_memslots *slots = kvm_memslots(kvm);
struct kvm_memory_slot *ms;
if (unlikely(kvm_memslots_empty(slots)))
return 0;
cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn);
ms = gfn_to_memslot(kvm, cur_gfn);
args->count = 0;
args->start_gfn = cur_gfn;
if (!ms)
return 0;
next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
mem_end = kvm_s390_get_gfn_end(slots);
while (args->count < bufsize) {
hva = gfn_to_hva(kvm, cur_gfn);
if (kvm_is_error_hva(hva))
return 0;
/* Decrement only if we actually flipped the bit to 0 */
if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
atomic64_dec(&kvm->arch.cmma_dirty_pages);
if (get_pgste(kvm->mm, hva, &pgstev) < 0)
pgstev = 0;
/* Save the value */
res[args->count++] = (pgstev >> 24) & 0x43;
/* If the next bit is too far away, stop. */
if (next_gfn > cur_gfn + KVM_S390_MAX_BIT_DISTANCE)
return 0;
/* If we reached the previous "next", find the next one */
if (cur_gfn == next_gfn)
next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
/* Reached the end of memory or of the buffer, stop */
if ((next_gfn >= mem_end) ||
(next_gfn - args->start_gfn >= bufsize))
return 0;
cur_gfn++;
/* Reached the end of the current memslot, take the next one. */
if (cur_gfn - ms->base_gfn >= ms->npages) {
ms = gfn_to_memslot(kvm, cur_gfn);
if (!ms)
return 0;
}
}
return 0;
}
/*
* This function searches for the next page with dirty CMMA attributes, and
* saves the attributes in the buffer up to either the end of the buffer or
* until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
* no trailing clean bytes are saved.
* In case no dirty bits were found, or if CMMA was not enabled or used, the
* output buffer will indicate 0 as length.
*/
static int kvm_s390_get_cmma_bits(struct kvm *kvm,
struct kvm_s390_cmma_log *args)
{
unsigned long bufsize;
int srcu_idx, peek, ret;
u8 *values;
if (!kvm->arch.use_cmma)
return -ENXIO;
/* Invalid/unsupported flags were specified */
if (args->flags & ~KVM_S390_CMMA_PEEK)
return -EINVAL;
/* Migration mode query, and we are not doing a migration */
peek = !!(args->flags & KVM_S390_CMMA_PEEK);
if (!peek && !kvm->arch.migration_mode)
return -EINVAL;
/* CMMA is disabled or was not used, or the buffer has length zero */
bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
if (!bufsize || !kvm->mm->context.uses_cmm) {
memset(args, 0, sizeof(*args));
return 0;
}
/* We are not peeking, and there are no dirty pages */
if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) {
memset(args, 0, sizeof(*args));
return 0;
}
values = vmalloc(bufsize);
if (!values)
return -ENOMEM;
mmap_read_lock(kvm->mm);
srcu_idx = srcu_read_lock(&kvm->srcu);
if (peek)
ret = kvm_s390_peek_cmma(kvm, args, values, bufsize);
else
ret = kvm_s390_get_cmma(kvm, args, values, bufsize);
srcu_read_unlock(&kvm->srcu, srcu_idx);
mmap_read_unlock(kvm->mm);
if (kvm->arch.migration_mode)
args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
else
args->remaining = 0;
if (copy_to_user((void __user *)args->values, values, args->count))
ret = -EFAULT;
vfree(values);
return ret;
}
/*
* This function sets the CMMA attributes for the given pages. If the input
* buffer has zero length, no action is taken, otherwise the attributes are
* set and the mm->context.uses_cmm flag is set.
*/
static int kvm_s390_set_cmma_bits(struct kvm *kvm,
const struct kvm_s390_cmma_log *args)
{
unsigned long hva, mask, pgstev, i;
uint8_t *bits;
int srcu_idx, r = 0;
mask = args->mask;
if (!kvm->arch.use_cmma)
return -ENXIO;
/* invalid/unsupported flags */
if (args->flags != 0)
return -EINVAL;
/* Enforce sane limit on memory allocation */
if (args->count > KVM_S390_CMMA_SIZE_MAX)
return -EINVAL;
/* Nothing to do */
if (args->count == 0)
return 0;
bits = vmalloc(array_size(sizeof(*bits), args->count));
if (!bits)
return -ENOMEM;
r = copy_from_user(bits, (void __user *)args->values, args->count);
if (r) {
r = -EFAULT;
goto out;
}
mmap_read_lock(kvm->mm);
srcu_idx = srcu_read_lock(&kvm->srcu);
for (i = 0; i < args->count; i++) {
hva = gfn_to_hva(kvm, args->start_gfn + i);
if (kvm_is_error_hva(hva)) {
r = -EFAULT;
break;
}
pgstev = bits[i];
pgstev = pgstev << 24;
mask &= _PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT;
set_pgste_bits(kvm->mm, hva, mask, pgstev);
}
srcu_read_unlock(&kvm->srcu, srcu_idx);
mmap_read_unlock(kvm->mm);
if (!kvm->mm->context.uses_cmm) {
mmap_write_lock(kvm->mm);
kvm->mm->context.uses_cmm = 1;
mmap_write_unlock(kvm->mm);
}
out:
vfree(bits);
return r;
}
/**
 * kvm_s390_cpus_from_pv - Convert all protected vCPUs in a protected VM to
 * non-protected.
* @kvm: the VM whose protected vCPUs are to be converted
* @rc: return value for the RC field of the UVC (in case of error)
* @rrc: return value for the RRC field of the UVC (in case of error)
*
 * Does not stop in case of error; it tries to convert as many
 * CPUs as possible. In case of error, the RC and RRC of the first error are
 * returned.
*
* Return: 0 in case of success, otherwise -EIO
*/
int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rc, u16 *rrc)
{
struct kvm_vcpu *vcpu;
unsigned long i;
u16 _rc, _rrc;
int ret = 0;
/*
* We ignore failures and try to destroy as many CPUs as possible.
 * At the same time we must not free the assigned resources when
 * this fails, as the ultravisor still has access to that memory.
* So kvm_s390_pv_destroy_cpu can leave a "wanted" memory leak
* behind.
* We want to return the first failure rc and rrc, though.
*/
kvm_for_each_vcpu(i, vcpu, kvm) {
mutex_lock(&vcpu->mutex);
if (kvm_s390_pv_destroy_cpu(vcpu, &_rc, &_rrc) && !ret) {
*rc = _rc;
*rrc = _rrc;
ret = -EIO;
}
mutex_unlock(&vcpu->mutex);
}
/* Ensure that we re-enable gisa if the non-PV guest used it but the PV guest did not. */
if (use_gisa)
kvm_s390_gisa_enable(kvm);
return ret;
}
/**
* kvm_s390_cpus_to_pv - Convert all non-protected vCPUs in a protected VM
* to protected.
* @kvm: the VM whose protected vCPUs are to be converted
* @rc: return value for the RC field of the UVC (in case of error)
* @rrc: return value for the RRC field of the UVC (in case of error)
*
* Tries to undo the conversion in case of error.
*
* Return: 0 in case of success, otherwise -EIO
*/
static int kvm_s390_cpus_to_pv(struct kvm *kvm, u16 *rc, u16 *rrc)
{
unsigned long i;
int r = 0;
u16 dummy;
struct kvm_vcpu *vcpu;
/* Disable the GISA if the ultravisor does not support AIV. */
if (!uv_has_feature(BIT_UV_FEAT_AIV))
kvm_s390_gisa_disable(kvm);
kvm_for_each_vcpu(i, vcpu, kvm) {
mutex_lock(&vcpu->mutex);
r = kvm_s390_pv_create_cpu(vcpu, rc, rrc);
mutex_unlock(&vcpu->mutex);
if (r)
break;
}
if (r)
kvm_s390_cpus_from_pv(kvm, &dummy, &dummy);
return r;
}
/*
* Here we provide user space with a direct interface to query UV
* related data like UV maxima and available features as well as
* feature specific data.
*
* To facilitate future extension of the data structures we'll try to
* write data up to the maximum requested length.
*/
static ssize_t kvm_s390_handle_pv_info(struct kvm_s390_pv_info *info)
{
ssize_t len_min;
switch (info->header.id) {
case KVM_PV_INFO_VM: {
len_min = sizeof(info->header) + sizeof(info->vm);
if (info->header.len_max < len_min)
return -EINVAL;
memcpy(info->vm.inst_calls_list,
uv_info.inst_calls_list,
sizeof(uv_info.inst_calls_list));
/* It's max cpuid not max cpus, so it's off by one */
info->vm.max_cpus = uv_info.max_guest_cpu_id + 1;
info->vm.max_guests = uv_info.max_num_sec_conf;
info->vm.max_guest_addr = uv_info.max_sec_stor_addr;
info->vm.feature_indication = uv_info.uv_feature_indications;
return len_min;
}
case KVM_PV_INFO_DUMP: {
len_min = sizeof(info->header) + sizeof(info->dump);
if (info->header.len_max < len_min)
return -EINVAL;
info->dump.dump_cpu_buffer_len = uv_info.guest_cpu_stor_len;
info->dump.dump_config_mem_buffer_per_1m = uv_info.conf_dump_storage_state_len;
info->dump.dump_config_finalize_len = uv_info.conf_dump_finalize_len;
return len_min;
}
default:
return -EINVAL;
}
}
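/* Handle the KVM_PV_DUMP subcommands for dumping a protected guest. */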
static int kvm_s390_pv_dmp(struct kvm *kvm, struct kvm_pv_cmd *cmd,
struct kvm_s390_pv_dmp dmp)
{
int r = -EINVAL;
void __user *result_buff = (void __user *)dmp.buff_addr;
switch (dmp.subcmd) {
case KVM_PV_DUMP_INIT: {
if (kvm->arch.pv.dumping)
break;
/*
* Block SIE entry as concurrent dump UVCs could lead
* to validities.
*/
kvm_s390_vcpu_block_all(kvm);
r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
UVC_CMD_DUMP_INIT, &cmd->rc, &cmd->rrc);
KVM_UV_EVENT(kvm, 3, "PROTVIRT DUMP INIT: rc %x rrc %x",
cmd->rc, cmd->rrc);
if (!r) {
kvm->arch.pv.dumping = true;
} else {
kvm_s390_vcpu_unblock_all(kvm);
r = -EINVAL;
}
break;
}
case KVM_PV_DUMP_CONFIG_STOR_STATE: {
if (!kvm->arch.pv.dumping)
break;
/*
* gaddr is an output parameter since we might stop
* early. As dmp will be copied back in our caller, we
* don't need to do it ourselves.
*/
r = kvm_s390_pv_dump_stor_state(kvm, result_buff, &dmp.gaddr, dmp.buff_len,
&cmd->rc, &cmd->rrc);
break;
}
case KVM_PV_DUMP_COMPLETE: {
if (!kvm->arch.pv.dumping)
break;
r = -EINVAL;
if (dmp.buff_len < uv_info.conf_dump_finalize_len)
break;
r = kvm_s390_pv_dump_complete(kvm, result_buff,
&cmd->rc, &cmd->rrc);
break;
}
default:
r = -ENOTTY;
break;
}
return r;
}
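/*
 * Handle the KVM_S390_PV_COMMAND subcommands. All of them except
 * KVM_PV_ASYNC_CLEANUP_PERFORM are executed under kvm->lock.
 */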
static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
{
const bool need_lock = (cmd->cmd != KVM_PV_ASYNC_CLEANUP_PERFORM);
void __user *argp = (void __user *)cmd->data;
int r = 0;
u16 dummy;
if (need_lock)
mutex_lock(&kvm->lock);
switch (cmd->cmd) {
case KVM_PV_ENABLE: {
r = -EINVAL;
if (kvm_s390_pv_is_protected(kvm))
break;
/*
* FMT 4 SIE needs esca. As we never switch back to bsca from
* esca, we need no cleanup in the error cases below
*/
r = sca_switch_to_extended(kvm);
if (r)
break;
mmap_write_lock(current->mm);
r = gmap_mark_unmergeable();
mmap_write_unlock(current->mm);
if (r)
break;
r = kvm_s390_pv_init_vm(kvm, &cmd->rc, &cmd->rrc);
if (r)
break;
r = kvm_s390_cpus_to_pv(kvm, &cmd->rc, &cmd->rrc);
if (r)
kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy);
/* we need to block service interrupts from now on */
set_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
break;
}
case KVM_PV_ASYNC_CLEANUP_PREPARE:
r = -EINVAL;
if (!kvm_s390_pv_is_protected(kvm) || !async_destroy)
break;
r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc);
/*
* If a CPU could not be destroyed, destroy VM will also fail.
* There is no point in trying to destroy it. Instead return
* the rc and rrc from the first CPU that failed destroying.
*/
if (r)
break;
r = kvm_s390_pv_set_aside(kvm, &cmd->rc, &cmd->rrc);
/* no need to block service interrupts any more */
clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
break;
case KVM_PV_ASYNC_CLEANUP_PERFORM:
r = -EINVAL;
if (!async_destroy)
break;
/* kvm->lock must not be held; this is asserted inside the function. */
r = kvm_s390_pv_deinit_aside_vm(kvm, &cmd->rc, &cmd->rrc);
break;
case KVM_PV_DISABLE: {
r = -EINVAL;
if (!kvm_s390_pv_is_protected(kvm))
break;
r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc);
/*
* If a CPU could not be destroyed, destroy VM will also fail.
* There is no point in trying to destroy it. Instead return
* the rc and rrc from the first CPU that failed destroying.
*/
if (r)
break;
r = kvm_s390_pv_deinit_cleanup_all(kvm, &cmd->rc, &cmd->rrc);
/* no need to block service interrupts any more */
clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
break;
}
case KVM_PV_SET_SEC_PARMS: {
struct kvm_s390_pv_sec_parm parms = {};
void *hdr;
r = -EINVAL;
if (!kvm_s390_pv_is_protected(kvm))
break;
r = -EFAULT;
if (copy_from_user(&parms, argp, sizeof(parms)))
break;
/* Currently restricted to 8KB */
r = -EINVAL;
if (parms.length > PAGE_SIZE * 2)
break;
r = -ENOMEM;
hdr = vmalloc(parms.length);
if (!hdr)
break;
r = -EFAULT;
if (!copy_from_user(hdr, (void __user *)parms.origin,
parms.length))
r = kvm_s390_pv_set_sec_parms(kvm, hdr, parms.length,
&cmd->rc, &cmd->rrc);
vfree(hdr);
break;
}
case KVM_PV_UNPACK: {
struct kvm_s390_pv_unp unp = {};
r = -EINVAL;
if (!kvm_s390_pv_is_protected(kvm) || !mm_is_protected(kvm->mm))
break;
r = -EFAULT;
if (copy_from_user(&unp, argp, sizeof(unp)))
break;
r = kvm_s390_pv_unpack(kvm, unp.addr, unp.size, unp.tweak,
&cmd->rc, &cmd->rrc);
break;
}
case KVM_PV_VERIFY: {
r = -EINVAL;
if (!kvm_s390_pv_is_protected(kvm))
break;
r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
UVC_CMD_VERIFY_IMG, &cmd->rc, &cmd->rrc);
KVM_UV_EVENT(kvm, 3, "PROTVIRT VERIFY: rc %x rrc %x", cmd->rc,
cmd->rrc);
break;
}
case KVM_PV_PREP_RESET: {
r = -EINVAL;
if (!kvm_s390_pv_is_protected(kvm))
break;
r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
UVC_CMD_PREPARE_RESET, &cmd->rc, &cmd->rrc);
KVM_UV_EVENT(kvm, 3, "PROTVIRT PREP RESET: rc %x rrc %x",
cmd->rc, cmd->rrc);
break;
}
case KVM_PV_UNSHARE_ALL: {
r = -EINVAL;
if (!kvm_s390_pv_is_protected(kvm))
break;
r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
UVC_CMD_SET_UNSHARE_ALL, &cmd->rc, &cmd->rrc);
KVM_UV_EVENT(kvm, 3, "PROTVIRT UNSHARE: rc %x rrc %x",
cmd->rc, cmd->rrc);
break;
}
case KVM_PV_INFO: {
struct kvm_s390_pv_info info = {};
ssize_t data_len;
/*
* No need to check the VM protection here.
*
* Maybe user space wants to query some of the data
* when the VM is still unprotected. If we see the
* need to fence a new data command we can still
* return an error in the info handler.
*/
r = -EFAULT;
if (copy_from_user(&info, argp, sizeof(info.header)))
break;
r = -EINVAL;
if (info.header.len_max < sizeof(info.header))
break;
data_len = kvm_s390_handle_pv_info(&info);
if (data_len < 0) {
r = data_len;
break;
}
/*
* If a data command struct is extended (multiple
* times) this can be used to determine how much of it
* is valid.
*/
info.header.len_written = data_len;
r = -EFAULT;
if (copy_to_user(argp, &info, data_len))
break;
r = 0;
break;
}
case KVM_PV_DUMP: {
struct kvm_s390_pv_dmp dmp;
r = -EINVAL;
if (!kvm_s390_pv_is_protected(kvm))
break;
r = -EFAULT;
if (copy_from_user(&dmp, argp, sizeof(dmp)))
break;
r = kvm_s390_pv_dmp(kvm, cmd, dmp);
if (r)
break;
if (copy_to_user(argp, &dmp, sizeof(dmp))) {
r = -EFAULT;
break;
}
break;
}
default:
r = -ENOTTY;
}
if (need_lock)
mutex_unlock(&kvm->lock);
return r;
}
static int mem_op_validate_common(struct kvm_s390_mem_op *mop, u64 supported_flags)
{
if (mop->flags & ~supported_flags || !mop->size)
return -EINVAL;
if (mop->size > MEM_OP_MAX_SIZE)
return -E2BIG;
if (mop->flags & KVM_S390_MEMOP_F_SKEY_PROTECTION) {
if (mop->key > 0xf)
return -EINVAL;
} else {
mop->key = 0;
}
return 0;
}
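/*
 * Read or write guest absolute memory on behalf of user space, honoring
 * storage key protection if requested. With KVM_S390_MEMOP_F_CHECK_ONLY
 * only the access check is done and no data is transferred.
 */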
static int kvm_s390_vm_mem_op_abs(struct kvm *kvm, struct kvm_s390_mem_op *mop)
{
void __user *uaddr = (void __user *)mop->buf;
enum gacc_mode acc_mode;
void *tmpbuf = NULL;
int r, srcu_idx;
r = mem_op_validate_common(mop, KVM_S390_MEMOP_F_SKEY_PROTECTION |
KVM_S390_MEMOP_F_CHECK_ONLY);
if (r)
return r;
if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
tmpbuf = vmalloc(mop->size);
if (!tmpbuf)
return -ENOMEM;
}
srcu_idx = srcu_read_lock(&kvm->srcu);
if (kvm_is_error_gpa(kvm, mop->gaddr)) {
r = PGM_ADDRESSING;
goto out_unlock;
}
acc_mode = mop->op == KVM_S390_MEMOP_ABSOLUTE_READ ? GACC_FETCH : GACC_STORE;
if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
r = check_gpa_range(kvm, mop->gaddr, mop->size, acc_mode, mop->key);
goto out_unlock;
}
if (acc_mode == GACC_FETCH) {
r = access_guest_abs_with_key(kvm, mop->gaddr, tmpbuf,
mop->size, GACC_FETCH, mop->key);
if (r)
goto out_unlock;
if (copy_to_user(uaddr, tmpbuf, mop->size))
r = -EFAULT;
} else {
if (copy_from_user(tmpbuf, uaddr, mop->size)) {
r = -EFAULT;
goto out_unlock;
}
r = access_guest_abs_with_key(kvm, mop->gaddr, tmpbuf,
mop->size, GACC_STORE, mop->key);
}
out_unlock:
srcu_read_unlock(&kvm->srcu, srcu_idx);
vfree(tmpbuf);
return r;
}
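/*
 * Atomically compare-and-exchange up to 16 bytes of guest absolute
 * memory. If the compare fails, the actual old value is copied back to
 * user space so the caller can inspect it and retry.
 */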
static int kvm_s390_vm_mem_op_cmpxchg(struct kvm *kvm, struct kvm_s390_mem_op *mop)
{
void __user *uaddr = (void __user *)mop->buf;
void __user *old_addr = (void __user *)mop->old_addr;
union {
__uint128_t quad;
char raw[sizeof(__uint128_t)];
} old = { .quad = 0 }, new = { .quad = 0 };
unsigned int off_in_quad = sizeof(new) - mop->size;
int r, srcu_idx;
bool success;
r = mem_op_validate_common(mop, KVM_S390_MEMOP_F_SKEY_PROTECTION);
if (r)
return r;
/*
* This validates off_in_quad. Checking that size is a power
* of two is not necessary, as cmpxchg_guest_abs_with_key
* takes care of that
*/
if (mop->size > sizeof(new))
return -EINVAL;
if (copy_from_user(&new.raw[off_in_quad], uaddr, mop->size))
return -EFAULT;
if (copy_from_user(&old.raw[off_in_quad], old_addr, mop->size))
return -EFAULT;
srcu_idx = srcu_read_lock(&kvm->srcu);
if (kvm_is_error_gpa(kvm, mop->gaddr)) {
r = PGM_ADDRESSING;
goto out_unlock;
}
r = cmpxchg_guest_abs_with_key(kvm, mop->gaddr, mop->size, &old.quad,
new.quad, mop->key, &success);
if (!success && copy_to_user(old_addr, &old.raw[off_in_quad], mop->size))
r = -EFAULT;
out_unlock:
srcu_read_unlock(&kvm->srcu, srcu_idx);
return r;
}
static int kvm_s390_vm_mem_op(struct kvm *kvm, struct kvm_s390_mem_op *mop)
{
/*
 * This check is technically only a heuristic: if kvm->lock is not
 * taken, it is not guaranteed that the vm is/remains non-protected.
 * This is ok from a kernel perspective; wrongdoing is detected on the
 * access, -EFAULT is returned, and the vm may crash the next time it
 * accesses the memory in question.
 * There is no sane use case for doing a mode switch and a memop on two
 * different CPUs at the same time.
*/
if (kvm_s390_pv_get_handle(kvm))
return -EINVAL;
switch (mop->op) {
case KVM_S390_MEMOP_ABSOLUTE_READ:
case KVM_S390_MEMOP_ABSOLUTE_WRITE:
return kvm_s390_vm_mem_op_abs(kvm, mop);
case KVM_S390_MEMOP_ABSOLUTE_CMPXCHG:
return kvm_s390_vm_mem_op_cmpxchg(kvm, mop);
default:
return -EINVAL;
}
}
int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{
struct kvm *kvm = filp->private_data;
void __user *argp = (void __user *)arg;
struct kvm_device_attr attr;
int r;
switch (ioctl) {
case KVM_S390_INTERRUPT: {
struct kvm_s390_interrupt s390int;
r = -EFAULT;
if (copy_from_user(&s390int, argp, sizeof(s390int)))
break;
r = kvm_s390_inject_vm(kvm, &s390int);
break;
}
case KVM_CREATE_IRQCHIP: {
struct kvm_irq_routing_entry routing;
r = -EINVAL;
if (kvm->arch.use_irqchip) {
/* Set up dummy routing. */
memset(&routing, 0, sizeof(routing));
r = kvm_set_irq_routing(kvm, &routing, 0, 0);
}
break;
}
case KVM_SET_DEVICE_ATTR: {
r = -EFAULT;
if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
break;
r = kvm_s390_vm_set_attr(kvm, &attr);
break;
}
case KVM_GET_DEVICE_ATTR: {
r = -EFAULT;
if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
break;
r = kvm_s390_vm_get_attr(kvm, &attr);
break;
}
case KVM_HAS_DEVICE_ATTR: {
r = -EFAULT;
if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
break;
r = kvm_s390_vm_has_attr(kvm, &attr);
break;
}
case KVM_S390_GET_SKEYS: {
struct kvm_s390_skeys args;
r = -EFAULT;
if (copy_from_user(&args, argp,
sizeof(struct kvm_s390_skeys)))
break;
r = kvm_s390_get_skeys(kvm, &args);
break;
}
case KVM_S390_SET_SKEYS: {
struct kvm_s390_skeys args;
r = -EFAULT;
if (copy_from_user(&args, argp,
sizeof(struct kvm_s390_skeys)))
break;
r = kvm_s390_set_skeys(kvm, &args);
break;
}
case KVM_S390_GET_CMMA_BITS: {
struct kvm_s390_cmma_log args;
r = -EFAULT;
if (copy_from_user(&args, argp, sizeof(args)))
break;
mutex_lock(&kvm->slots_lock);
r = kvm_s390_get_cmma_bits(kvm, &args);
mutex_unlock(&kvm->slots_lock);
if (!r) {
r = copy_to_user(argp, &args, sizeof(args));
if (r)
r = -EFAULT;
}
break;
}
case KVM_S390_SET_CMMA_BITS: {
struct kvm_s390_cmma_log args;
r = -EFAULT;
if (copy_from_user(&args, argp, sizeof(args)))
break;
mutex_lock(&kvm->slots_lock);
r = kvm_s390_set_cmma_bits(kvm, &args);
mutex_unlock(&kvm->slots_lock);
break;
}
case KVM_S390_PV_COMMAND: {
struct kvm_pv_cmd args;
/* protvirt means user cpu state */
kvm_s390_set_user_cpu_state_ctrl(kvm);
r = 0;
if (!is_prot_virt_host()) {
r = -EINVAL;
break;
}
if (copy_from_user(&args, argp, sizeof(args))) {
r = -EFAULT;
break;
}
if (args.flags) {
r = -EINVAL;
break;
}
/* must be called without kvm->lock */
r = kvm_s390_handle_pv(kvm, &args);
if (copy_to_user(argp, &args, sizeof(args))) {
r = -EFAULT;
break;
}
break;
}
case KVM_S390_MEM_OP: {
struct kvm_s390_mem_op mem_op;
if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
r = kvm_s390_vm_mem_op(kvm, &mem_op);
else
r = -EFAULT;
break;
}
case KVM_S390_ZPCI_OP: {
struct kvm_s390_zpci_op args;
r = -EINVAL;
if (!IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
break;
if (copy_from_user(&args, argp, sizeof(args))) {
r = -EFAULT;
break;
}
r = kvm_s390_pci_zpci_op(kvm, &args);
break;
}
default:
r = -ENOTTY;
}
return r;
}
static int kvm_s390_apxa_installed(void)
{
struct ap_config_info info;
if (ap_instructions_available()) {
if (ap_qci(&info) == 0)
return info.apxa;
}
return 0;
}
/*
* The format of the crypto control block (CRYCB) is specified in the 3 low
* order bits of the CRYCB designation (CRYCBD) field as follows:
 * Format 0: Neither the message security assist extension 3 (MSAX3) nor the
 *           AP extended addressing (APXA) facility is installed.
 * Format 1: The APXA facility is not installed but the MSAX3 facility is.
 * Format 2: Both the APXA and MSAX3 facilities are installed.
*/
static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
/* Clear the CRYCB format bits - i.e., set format 0 by default */
kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);
/* Check whether MSAX3 is installed */
if (!test_kvm_facility(kvm, 76))
return;
if (kvm_s390_apxa_installed())
kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
else
kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}
/*
* kvm_arch_crypto_set_masks
*
* @kvm: pointer to the target guest's KVM struct containing the crypto masks
* to be set.
* @apm: the mask identifying the accessible AP adapters
* @aqm: the mask identifying the accessible AP domains
* @adm: the mask identifying the accessible AP control domains
*
* Set the masks that identify the adapters, domains and control domains to
* which the KVM guest is granted access.
*
* Note: The kvm->lock mutex must be locked by the caller before invoking this
* function.
*/
void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
unsigned long *aqm, unsigned long *adm)
{
struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;
kvm_s390_vcpu_block_all(kvm);
switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
case CRYCB_FORMAT2: /* APCB1 uses 256 bits */
memcpy(crycb->apcb1.apm, apm, 32);
VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx",
apm[0], apm[1], apm[2], apm[3]);
memcpy(crycb->apcb1.aqm, aqm, 32);
VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx",
aqm[0], aqm[1], aqm[2], aqm[3]);
memcpy(crycb->apcb1.adm, adm, 32);
VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx",
adm[0], adm[1], adm[2], adm[3]);
break;
case CRYCB_FORMAT1:
case CRYCB_FORMAT0: /* Fall through - both use APCB0 */
memcpy(crycb->apcb0.apm, apm, 8);
memcpy(crycb->apcb0.aqm, aqm, 2);
memcpy(crycb->apcb0.adm, adm, 2);
VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x",
apm[0], *((unsigned short *)aqm),
*((unsigned short *)adm));
break;
default: /* Cannot happen */
break;
}
/* recreate the shadow crycb for each vcpu */
kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
kvm_s390_vcpu_unblock_all(kvm);
}
EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks);
/*
* kvm_arch_crypto_clear_masks
*
* @kvm: pointer to the target guest's KVM struct containing the crypto masks
* to be cleared.
*
* Clear the masks that identify the adapters, domains and control domains to
* which the KVM guest is granted access.
*
* Note: The kvm->lock mutex must be locked by the caller before invoking this
* function.
*/
void kvm_arch_crypto_clear_masks(struct kvm *kvm)
{
kvm_s390_vcpu_block_all(kvm);
memset(&kvm->arch.crypto.crycb->apcb0, 0,
sizeof(kvm->arch.crypto.crycb->apcb0));
memset(&kvm->arch.crypto.crycb->apcb1, 0,
sizeof(kvm->arch.crypto.crycb->apcb1));
VM_EVENT(kvm, 3, "%s", "CLR CRYCB:");
/* recreate the shadow crycb for each vcpu */
kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
kvm_s390_vcpu_unblock_all(kvm);
}
EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks);
static u64 kvm_s390_get_initial_cpuid(void)
{
struct cpuid cpuid;
get_cpu_id(&cpuid);
cpuid.version = 0xff;
return *((u64 *) &cpuid);
}
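/*
 * Set up the guest crypto control block and, if the MSAX3 facility is
 * available, enable the protected key functions with freshly generated
 * random wrapping key masks.
 */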
static void kvm_s390_crypto_init(struct kvm *kvm)
{
kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
kvm_s390_set_crycb_format(kvm);
init_rwsem(&kvm->arch.crypto.pqap_hook_rwsem);
if (!test_kvm_facility(kvm, 76))
return;
/* Enable AES/DEA protected key functions by default */
kvm->arch.crypto.aes_kw = 1;
kvm->arch.crypto.dea_kw = 1;
get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
}
static void sca_dispose(struct kvm *kvm)
{
if (kvm->arch.use_esca)
free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
else
free_page((unsigned long)(kvm->arch.sca));
kvm->arch.sca = NULL;
}
void kvm_arch_free_vm(struct kvm *kvm)
{
if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
kvm_s390_pci_clear_list(kvm);
__kvm_arch_free_vm(kvm);
}
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
gfp_t alloc_flags = GFP_KERNEL_ACCOUNT;
int i, rc;
char debug_name[16];
static unsigned long sca_offset;
rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
if (type & ~KVM_VM_S390_UCONTROL)
goto out_err;
if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
goto out_err;
#else
if (type)
goto out_err;
#endif
rc = s390_enable_sie();
if (rc)
goto out_err;
rc = -ENOMEM;
if (!sclp.has_64bscao)
alloc_flags |= GFP_DMA;
rwlock_init(&kvm->arch.sca_lock);
/* start with basic SCA */
kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
if (!kvm->arch.sca)
goto out_err;
mutex_lock(&kvm_lock);
sca_offset += 16;
if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
sca_offset = 0;
kvm->arch.sca = (struct bsca_block *)
((char *) kvm->arch.sca + sca_offset);
mutex_unlock(&kvm_lock);
sprintf(debug_name, "kvm-%u", current->pid);
kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
if (!kvm->arch.dbf)
goto out_err;
BUILD_BUG_ON(sizeof(struct sie_page2) != 4096);
kvm->arch.sie_page2 =
(struct sie_page2 *) get_zeroed_page(GFP_KERNEL_ACCOUNT | GFP_DMA);
if (!kvm->arch.sie_page2)
goto out_err;
kvm->arch.sie_page2->kvm = kvm;
kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
for (i = 0; i < kvm_s390_fac_size(); i++) {
kvm->arch.model.fac_mask[i] = stfle_fac_list[i] &
(kvm_s390_fac_base[i] |
kvm_s390_fac_ext[i]);
kvm->arch.model.fac_list[i] = stfle_fac_list[i] &
kvm_s390_fac_base[i];
}
kvm->arch.model.subfuncs = kvm_s390_available_subfunc;
/* we are always in czam mode - even on pre z14 machines */
set_kvm_facility(kvm->arch.model.fac_mask, 138);
set_kvm_facility(kvm->arch.model.fac_list, 138);
/* we emulate STHYI in kvm */
set_kvm_facility(kvm->arch.model.fac_mask, 74);
set_kvm_facility(kvm->arch.model.fac_list, 74);
if (MACHINE_HAS_TLB_GUEST) {
set_kvm_facility(kvm->arch.model.fac_mask, 147);
set_kvm_facility(kvm->arch.model.fac_list, 147);
}
if (css_general_characteristics.aiv && test_facility(65))
set_kvm_facility(kvm->arch.model.fac_mask, 65);
kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
kvm->arch.model.ibc = sclp.ibc & 0x0fff;
kvm->arch.model.uv_feat_guest.feat = 0;
kvm_s390_crypto_init(kvm);
if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM)) {
mutex_lock(&kvm->lock);
kvm_s390_pci_init_list(kvm);
kvm_s390_vcpu_pci_enable_interp(kvm);
mutex_unlock(&kvm->lock);
}
mutex_init(&kvm->arch.float_int.ais_lock);
spin_lock_init(&kvm->arch.float_int.lock);
for (i = 0; i < FIRQ_LIST_COUNT; i++)
INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
init_waitqueue_head(&kvm->arch.ipte_wq);
mutex_init(&kvm->arch.ipte_mutex);
debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
VM_EVENT(kvm, 3, "vm created with type %lu", type);
if (type & KVM_VM_S390_UCONTROL) {
kvm->arch.gmap = NULL;
kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
} else {
if (sclp.hamax == U64_MAX)
kvm->arch.mem_limit = TASK_SIZE_MAX;
else
kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
sclp.hamax + 1);
kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
if (!kvm->arch.gmap)
goto out_err;
kvm->arch.gmap->private = kvm;
kvm->arch.gmap->pfault_enabled = 0;
}
kvm->arch.use_pfmfi = sclp.has_pfmfi;
kvm->arch.use_skf = sclp.has_skey;
spin_lock_init(&kvm->arch.start_stop_lock);
kvm_s390_vsie_init(kvm);
if (use_gisa)
kvm_s390_gisa_init(kvm);
INIT_LIST_HEAD(&kvm->arch.pv.need_cleanup);
kvm->arch.pv.set_aside = NULL;
KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
return 0;
out_err:
free_page((unsigned long)kvm->arch.sie_page2);
debug_unregister(kvm->arch.dbf);
sca_dispose(kvm);
KVM_EVENT(3, "creation of vm failed: %d", rc);
return rc;
}
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
u16 rc, rrc;
VCPU_EVENT(vcpu, 3, "%s", "free cpu");
trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
kvm_s390_clear_local_irqs(vcpu);
kvm_clear_async_pf_completion_queue(vcpu);
if (!kvm_is_ucontrol(vcpu->kvm))
sca_del_vcpu(vcpu);
kvm_s390_update_topology_change_report(vcpu->kvm, 1);
if (kvm_is_ucontrol(vcpu->kvm))
gmap_remove(vcpu->arch.gmap);
if (vcpu->kvm->arch.use_cmma)
kvm_s390_vcpu_unsetup_cmma(vcpu);
/* We cannot hold the vcpu mutex here; we are already dying */
if (kvm_s390_pv_cpu_get_handle(vcpu))
kvm_s390_pv_destroy_cpu(vcpu, &rc, &rrc);
free_page((unsigned long)(vcpu->arch.sie_block));
}
void kvm_arch_destroy_vm(struct kvm *kvm)
{
u16 rc, rrc;
kvm_destroy_vcpus(kvm);
sca_dispose(kvm);
kvm_s390_gisa_destroy(kvm);
/*
* We are already at the end of life and kvm->lock is not taken.
* This is ok as the file descriptor is closed by now and nobody
* can mess with the pv state.
*/
kvm_s390_pv_deinit_cleanup_all(kvm, &rc, &rrc);
/*
* Remove the mmu notifier only when the whole KVM VM is torn down,
* and only if one was registered to begin with. If the VM is
 * currently not protected, but was previously protected,
* then it's possible that the notifier is still registered.
*/
if (kvm->arch.pv.mmu_notifier.ops)
mmu_notifier_unregister(&kvm->arch.pv.mmu_notifier, kvm->mm);
debug_unregister(kvm->arch.dbf);
free_page((unsigned long)kvm->arch.sie_page2);
if (!kvm_is_ucontrol(kvm))
gmap_remove(kvm->arch.gmap);
kvm_s390_destroy_adapters(kvm);
kvm_s390_clear_float_irqs(kvm);
kvm_s390_vsie_destroy(kvm);
KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
}
/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
vcpu->arch.gmap = gmap_create(current->mm, -1UL);
if (!vcpu->arch.gmap)
return -ENOMEM;
vcpu->arch.gmap->private = vcpu->kvm;
return 0;
}
static void sca_del_vcpu(struct kvm_vcpu *vcpu)
{
if (!kvm_s390_use_sca_entries())
return;
read_lock(&vcpu->kvm->arch.sca_lock);
if (vcpu->kvm->arch.use_esca) {
struct esca_block *sca = vcpu->kvm->arch.sca;
clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
sca->cpu[vcpu->vcpu_id].sda = 0;
} else {
struct bsca_block *sca = vcpu->kvm->arch.sca;
clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
sca->cpu[vcpu->vcpu_id].sda = 0;
}
read_unlock(&vcpu->kvm->arch.sca_lock);
}
static void sca_add_vcpu(struct kvm_vcpu *vcpu)
{
if (!kvm_s390_use_sca_entries()) {
phys_addr_t sca_phys = virt_to_phys(vcpu->kvm->arch.sca);
/* we still need the basic sca for the ipte control */
vcpu->arch.sie_block->scaoh = sca_phys >> 32;
vcpu->arch.sie_block->scaol = sca_phys;
return;
}
read_lock(&vcpu->kvm->arch.sca_lock);
if (vcpu->kvm->arch.use_esca) {
struct esca_block *sca = vcpu->kvm->arch.sca;
phys_addr_t sca_phys = virt_to_phys(sca);
sca->cpu[vcpu->vcpu_id].sda = virt_to_phys(vcpu->arch.sie_block);
vcpu->arch.sie_block->scaoh = sca_phys >> 32;
vcpu->arch.sie_block->scaol = sca_phys & ESCA_SCAOL_MASK;
vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
} else {
struct bsca_block *sca = vcpu->kvm->arch.sca;
phys_addr_t sca_phys = virt_to_phys(sca);
sca->cpu[vcpu->vcpu_id].sda = virt_to_phys(vcpu->arch.sie_block);
vcpu->arch.sie_block->scaoh = sca_phys >> 32;
vcpu->arch.sie_block->scaol = sca_phys;
set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
}
read_unlock(&vcpu->kvm->arch.sca_lock);
}
/* Basic SCA to Extended SCA data copy routines */
static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
{
d->sda = s->sda;
d->sigp_ctrl.c = s->sigp_ctrl.c;
d->sigp_ctrl.scn = s->sigp_ctrl.scn;
}
static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
{
int i;
d->ipte_control = s->ipte_control;
d->mcn[0] = s->mcn;
for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
sca_copy_entry(&d->cpu[i], &s->cpu[i]);
}
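/*
 * Replace the basic SCA by an extended SCA so that more than
 * KVM_S390_BSCA_CPU_SLOTS vCPUs can be added. All vCPUs are blocked
 * while their SIE blocks are switched over to the new SCA.
 */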
static int sca_switch_to_extended(struct kvm *kvm)
{
struct bsca_block *old_sca = kvm->arch.sca;
struct esca_block *new_sca;
struct kvm_vcpu *vcpu;
unsigned long vcpu_idx;
u32 scaol, scaoh;
phys_addr_t new_sca_phys;
if (kvm->arch.use_esca)
return 0;
new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL_ACCOUNT | __GFP_ZERO);
if (!new_sca)
return -ENOMEM;
new_sca_phys = virt_to_phys(new_sca);
scaoh = new_sca_phys >> 32;
scaol = new_sca_phys & ESCA_SCAOL_MASK;
kvm_s390_vcpu_block_all(kvm);
write_lock(&kvm->arch.sca_lock);
sca_copy_b_to_e(new_sca, old_sca);
kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
vcpu->arch.sie_block->scaoh = scaoh;
vcpu->arch.sie_block->scaol = scaol;
vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
}
kvm->arch.sca = new_sca;
kvm->arch.use_esca = 1;
write_unlock(&kvm->arch.sca_lock);
kvm_s390_vcpu_unblock_all(kvm);
free_page((unsigned long)old_sca);
VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
old_sca, kvm->arch.sca);
return 0;
}
static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
{
int rc;
if (!kvm_s390_use_sca_entries()) {
if (id < KVM_MAX_VCPUS)
return true;
return false;
}
if (id < KVM_S390_BSCA_CPU_SLOTS)
return true;
if (!sclp.has_esca || !sclp.has_64bscao)
return false;
rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
}
/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
vcpu->arch.cputm_start = get_tod_clock_fast();
raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}
/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
vcpu->arch.cputm_start = 0;
raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}
/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
WARN_ON_ONCE(vcpu->arch.cputm_enabled);
vcpu->arch.cputm_enabled = true;
__start_cpu_timer_accounting(vcpu);
}
/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
__stop_cpu_timer_accounting(vcpu);
vcpu->arch.cputm_enabled = false;
}
static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
preempt_disable(); /* protect from TOD sync and vcpu_load/put */
__enable_cpu_timer_accounting(vcpu);
preempt_enable();
}
static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
preempt_disable(); /* protect from TOD sync and vcpu_load/put */
__disable_cpu_timer_accounting(vcpu);
preempt_enable();
}
/* set the cpu timer - may only be called from the VCPU thread itself */
void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
{
preempt_disable(); /* protect from TOD sync and vcpu_load/put */
raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
if (vcpu->arch.cputm_enabled)
vcpu->arch.cputm_start = get_tod_clock_fast();
vcpu->arch.sie_block->cputm = cputm;
raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
preempt_enable();
}
/* update and get the cpu timer - can also be called from other VCPU threads */
__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
{
unsigned int seq;
__u64 value;
if (unlikely(!vcpu->arch.cputm_enabled))
return vcpu->arch.sie_block->cputm;
preempt_disable(); /* protect from TOD sync and vcpu_load/put */
do {
seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
/*
* If the writer would ever execute a read in the critical
* section, e.g. in irq context, we have a deadlock.
*/
WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
value = vcpu->arch.sie_block->cputm;
/* if cputm_start is 0, accounting is being started/stopped */
if (likely(vcpu->arch.cputm_start))
value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
} while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
preempt_enable();
return value;
}
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
gmap_enable(vcpu->arch.enabled_gmap);
kvm_s390_set_cpuflags(vcpu, CPUSTAT_RUNNING);
if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
__start_cpu_timer_accounting(vcpu);
vcpu->cpu = cpu;
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
vcpu->cpu = -1;
if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
__stop_cpu_timer_accounting(vcpu);
kvm_s390_clear_cpuflags(vcpu, CPUSTAT_RUNNING);
vcpu->arch.enabled_gmap = gmap_get_enabled();
gmap_disable(vcpu->arch.enabled_gmap);
}
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
mutex_lock(&vcpu->kvm->lock);
preempt_disable();
vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
preempt_enable();
mutex_unlock(&vcpu->kvm->lock);
if (!kvm_is_ucontrol(vcpu->kvm)) {
vcpu->arch.gmap = vcpu->kvm->arch.gmap;
sca_add_vcpu(vcpu);
}
if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
/* make vcpu_load load the right gmap on the first trigger */
vcpu->arch.enabled_gmap = vcpu->arch.gmap;
}
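/*
 * Check whether PCKMO subfunction @nr is present in both the guest CPU
 * model and the host subfunction list.
 */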
static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr)
{
if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) &&
test_bit_inv(nr, (unsigned long *)&kvm_s390_available_subfunc.pckmo))
return true;
return false;
}
static bool kvm_has_pckmo_ecc(struct kvm *kvm)
{
/* At least one ECC subfunction must be present */
return kvm_has_pckmo_subfunc(kvm, 32) ||
kvm_has_pckmo_subfunc(kvm, 33) ||
kvm_has_pckmo_subfunc(kvm, 34) ||
kvm_has_pckmo_subfunc(kvm, 40) ||
kvm_has_pckmo_subfunc(kvm, 41);
}
static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
/*
* If the AP instructions are not being interpreted and the MSAX3
* facility is not configured for the guest, there is nothing to set up.
*/
if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
return;
vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
vcpu->arch.sie_block->eca &= ~ECA_APIE;
vcpu->arch.sie_block->ecd &= ~ECD_ECC;
if (vcpu->kvm->arch.crypto.apie)
vcpu->arch.sie_block->eca |= ECA_APIE;
/* Set up protected key support */
if (vcpu->kvm->arch.crypto.aes_kw) {
vcpu->arch.sie_block->ecb3 |= ECB3_AES;
/* ecc is also wrapped with AES key */
if (kvm_has_pckmo_ecc(vcpu->kvm))
vcpu->arch.sie_block->ecd |= ECD_ECC;
}
if (vcpu->kvm->arch.crypto.dea_kw)
vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
}
void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
free_page((unsigned long)phys_to_virt(vcpu->arch.sie_block->cbrlo));
vcpu->arch.sie_block->cbrlo = 0;
}
int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
void *cbrlo_page = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
if (!cbrlo_page)
return -ENOMEM;
vcpu->arch.sie_block->cbrlo = virt_to_phys(cbrlo_page);
return 0;
}
static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
{
struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
vcpu->arch.sie_block->ibc = model->ibc;
if (test_kvm_facility(vcpu->kvm, 7))
vcpu->arch.sie_block->fac = virt_to_phys(model->fac_list);
}
static int kvm_s390_vcpu_setup(struct kvm_vcpu *vcpu)
{
int rc = 0;
u16 uvrc, uvrrc;
atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
CPUSTAT_SM |
CPUSTAT_STOPPED);
if (test_kvm_facility(vcpu->kvm, 78))
kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED2);
else if (test_kvm_facility(vcpu->kvm, 8))
kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED);
kvm_s390_vcpu_setup_model(vcpu);
/* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
if (MACHINE_HAS_ESOP)
vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
if (test_kvm_facility(vcpu->kvm, 9))
vcpu->arch.sie_block->ecb |= ECB_SRSI;
if (test_kvm_facility(vcpu->kvm, 11))
vcpu->arch.sie_block->ecb |= ECB_PTF;
if (test_kvm_facility(vcpu->kvm, 73))
vcpu->arch.sie_block->ecb |= ECB_TE;
if (!kvm_is_ucontrol(vcpu->kvm))
vcpu->arch.sie_block->ecb |= ECB_SPECI;
if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi)
vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
if (test_kvm_facility(vcpu->kvm, 130))
vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
if (sclp.has_cei)
vcpu->arch.sie_block->eca |= ECA_CEI;
if (sclp.has_ib)
vcpu->arch.sie_block->eca |= ECA_IB;
if (sclp.has_siif)
vcpu->arch.sie_block->eca |= ECA_SII;
if (sclp.has_sigpif)
vcpu->arch.sie_block->eca |= ECA_SIGPI;
if (test_kvm_facility(vcpu->kvm, 129)) {
vcpu->arch.sie_block->eca |= ECA_VX;
vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
}
if (test_kvm_facility(vcpu->kvm, 139))
vcpu->arch.sie_block->ecd |= ECD_MEF;
if (test_kvm_facility(vcpu->kvm, 156))
vcpu->arch.sie_block->ecd |= ECD_ETOKENF;
if (vcpu->arch.sie_block->gd) {
vcpu->arch.sie_block->eca |= ECA_AIV;
VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
}
vcpu->arch.sie_block->sdnxo = virt_to_phys(&vcpu->run->s.regs.sdnx) | SDNXC;
vcpu->arch.sie_block->riccbd = virt_to_phys(&vcpu->run->s.regs.riccb);
if (sclp.has_kss)
kvm_s390_set_cpuflags(vcpu, CPUSTAT_KSS);
else
vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
if (vcpu->kvm->arch.use_cmma) {
rc = kvm_s390_vcpu_setup_cmma(vcpu);
if (rc)
return rc;
}
hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
vcpu->arch.sie_block->hpid = HPID_KVM;
kvm_s390_vcpu_crypto_setup(vcpu);
kvm_s390_vcpu_pci_setup(vcpu);
mutex_lock(&vcpu->kvm->lock);
if (kvm_s390_pv_is_protected(vcpu->kvm)) {
rc = kvm_s390_pv_create_cpu(vcpu, &uvrc, &uvrrc);
if (rc)
kvm_s390_vcpu_unsetup_cmma(vcpu);
}
mutex_unlock(&vcpu->kvm->lock);
return rc;
}
int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
return -EINVAL;
return 0;
}
int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
struct sie_page *sie_page;
int rc;
BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL_ACCOUNT);
if (!sie_page)
return -ENOMEM;
vcpu->arch.sie_block = &sie_page->sie_block;
vcpu->arch.sie_block->itdba = virt_to_phys(&sie_page->itdb);
/* the real guest size will always be smaller than msl */
vcpu->arch.sie_block->mso = 0;
vcpu->arch.sie_block->msl = sclp.hamax;
vcpu->arch.sie_block->icpua = vcpu->vcpu_id;
spin_lock_init(&vcpu->arch.local_int.lock);
vcpu->arch.sie_block->gd = kvm_s390_get_gisa_desc(vcpu->kvm);
seqcount_init(&vcpu->arch.cputm_seqcount);
vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
kvm_clear_async_pf_completion_queue(vcpu);
vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
KVM_SYNC_GPRS |
KVM_SYNC_ACRS |
KVM_SYNC_CRS |
KVM_SYNC_ARCH0 |
KVM_SYNC_PFAULT |
KVM_SYNC_DIAG318;
kvm_s390_set_prefix(vcpu, 0);
if (test_kvm_facility(vcpu->kvm, 64))
vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
if (test_kvm_facility(vcpu->kvm, 82))
vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
if (test_kvm_facility(vcpu->kvm, 133))
vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
if (test_kvm_facility(vcpu->kvm, 156))
vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN;
/* fprs can be synchronized via vrs, even if the guest has no vx. With
* MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
*/
if (MACHINE_HAS_VX)
vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
else
vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
if (kvm_is_ucontrol(vcpu->kvm)) {
rc = __kvm_ucontrol_vcpu_init(vcpu);
if (rc)
goto out_free_sie_block;
}
VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK",
vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
rc = kvm_s390_vcpu_setup(vcpu);
if (rc)
goto out_ucontrol_uninit;
kvm_s390_update_topology_change_report(vcpu->kvm, 1);
return 0;
out_ucontrol_uninit:
if (kvm_is_ucontrol(vcpu->kvm))
gmap_remove(vcpu->arch.gmap);
out_free_sie_block:
free_page((unsigned long)(vcpu->arch.sie_block));
return rc;
}
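/*
 * A vcpu is runnable if an interrupt is deliverable to it; also clear
 * the GISA "kicked" mark, so the vcpu can be kicked again later on.
 */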
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);
return kvm_s390_vcpu_has_irq(vcpu, 0);
}
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
}
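/* Disallow SIE entry for this vcpu and kick it out of (v)SIE. */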
void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
{
atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
exit_sie(vcpu);
}
void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}
static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
{
atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
exit_sie(vcpu);
}
bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu)
{
return atomic_read(&vcpu->arch.sie_block->prog20) &
(PROG_BLOCK_SIE | PROG_REQUEST);
}
static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
{
atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
}
/*
 * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
kvm_s390_vsie_kick(vcpu);
while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
cpu_relax();
}
/* Kick a guest cpu out of SIE to process a request synchronously */
void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
{
__kvm_make_request(req, vcpu);
kvm_s390_vcpu_request(vcpu);
}
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
unsigned long end)
{
struct kvm *kvm = gmap->private;
struct kvm_vcpu *vcpu;
unsigned long prefix;
unsigned long i;
if (gmap_is_shadow(gmap))
return;
if (start >= 1UL << 31)
/* We are only interested in prefix pages */
return;
kvm_for_each_vcpu(i, vcpu, kvm) {
/* match against both prefix pages */
prefix = kvm_s390_get_prefix(vcpu);
if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
start, end);
kvm_s390_sync_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu);
}
}
}
bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
{
/* do not poll with more than halt_poll_max_steal percent of steal time */
if (S390_lowcore.avg_steal_timer * 100 / (TICK_USEC << 12) >=
READ_ONCE(halt_poll_max_steal)) {
vcpu->stat.halt_no_poll_steal++;
return true;
}
return false;
}
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
/* kvm common code refers to this, but never calls it */
BUG();
return 0;
}
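/* Copy the value of a single vcpu register to user space. */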
static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
struct kvm_one_reg *reg)
{
int r = -EINVAL;
switch (reg->id) {
case KVM_REG_S390_TODPR:
r = put_user(vcpu->arch.sie_block->todpr,
(u32 __user *)reg->addr);
break;
case KVM_REG_S390_EPOCHDIFF:
r = put_user(vcpu->arch.sie_block->epoch,
(u64 __user *)reg->addr);
break;
case KVM_REG_S390_CPU_TIMER:
r = put_user(kvm_s390_get_cpu_timer(vcpu),
(u64 __user *)reg->addr);
break;
case KVM_REG_S390_CLOCK_COMP:
r = put_user(vcpu->arch.sie_block->ckc,
(u64 __user *)reg->addr);
break;
case KVM_REG_S390_PFTOKEN:
r = put_user(vcpu->arch.pfault_token,
(u64 __user *)reg->addr);
break;
case KVM_REG_S390_PFCOMPARE:
r = put_user(vcpu->arch.pfault_compare,
(u64 __user *)reg->addr);
break;
case KVM_REG_S390_PFSELECT:
r = put_user(vcpu->arch.pfault_select,
(u64 __user *)reg->addr);
break;
case KVM_REG_S390_PP:
r = put_user(vcpu->arch.sie_block->pp,
(u64 __user *)reg->addr);
break;
case KVM_REG_S390_GBEA:
r = put_user(vcpu->arch.sie_block->gbea,
(u64 __user *)reg->addr);
break;
default:
break;
}
return r;
}
static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
struct kvm_one_reg *reg)
{
int r = -EINVAL;
__u64 val;
switch (reg->id) {
case KVM_REG_S390_TODPR:
r = get_user(vcpu->arch.sie_block->todpr,
(u32 __user *)reg->addr);
break;
case KVM_REG_S390_EPOCHDIFF:
r = get_user(vcpu->arch.sie_block->epoch,
(u64 __user *)reg->addr);
break;
case KVM_REG_S390_CPU_TIMER:
r = get_user(val, (u64 __user *)reg->addr);
if (!r)
kvm_s390_set_cpu_timer(vcpu, val);
break;
case KVM_REG_S390_CLOCK_COMP:
r = get_user(vcpu->arch.sie_block->ckc,
(u64 __user *)reg->addr);
break;
case KVM_REG_S390_PFTOKEN:
r = get_user(vcpu->arch.pfault_token,
(u64 __user *)reg->addr);
if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
kvm_clear_async_pf_completion_queue(vcpu);
break;
case KVM_REG_S390_PFCOMPARE:
r = get_user(vcpu->arch.pfault_compare,
(u64 __user *)reg->addr);
break;
case KVM_REG_S390_PFSELECT:
r = get_user(vcpu->arch.pfault_select,
(u64 __user *)reg->addr);
break;
case KVM_REG_S390_PP:
r = get_user(vcpu->arch.sie_block->pp,
(u64 __user *)reg->addr);
break;
case KVM_REG_S390_GBEA:
r = get_user(vcpu->arch.sie_block->gbea,
(u64 __user *)reg->addr);
break;
default:
break;
}
return r;
}
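/*
 * The subset of a cpu reset that is common to all reset flavors:
 * clear the RI bit, forget all pfault state and drop pending local
 * interrupts.
 */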
static void kvm_arch_vcpu_ioctl_normal_reset(struct kvm_vcpu *vcpu)
{
vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_RI;
vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
memset(vcpu->run->s.regs.riccb, 0, sizeof(vcpu->run->s.regs.riccb));
kvm_clear_async_pf_completion_queue(vcpu);
if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
kvm_s390_vcpu_stop(vcpu);
kvm_s390_clear_local_irqs(vcpu);
}
static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
/* Initial reset is a superset of the normal reset */
kvm_arch_vcpu_ioctl_normal_reset(vcpu);
	/*
	 * This equals initial cpu reset in pop, but we don't switch to ESA.
	 * We reset not only the internal data, but also ...
	 */
vcpu->arch.sie_block->gpsw.mask = 0;
vcpu->arch.sie_block->gpsw.addr = 0;
kvm_s390_set_prefix(vcpu, 0);
kvm_s390_set_cpu_timer(vcpu, 0);
vcpu->arch.sie_block->ckc = 0;
memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr));
vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK;
vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK;
/* ... the data in sync regs */
memset(vcpu->run->s.regs.crs, 0, sizeof(vcpu->run->s.regs.crs));
vcpu->run->s.regs.ckc = 0;
vcpu->run->s.regs.crs[0] = CR0_INITIAL_MASK;
vcpu->run->s.regs.crs[14] = CR14_INITIAL_MASK;
vcpu->run->psw_addr = 0;
vcpu->run->psw_mask = 0;
vcpu->run->s.regs.todpr = 0;
vcpu->run->s.regs.cputm = 0;
vcpu->run->s.regs.ckc = 0;
vcpu->run->s.regs.pp = 0;
vcpu->run->s.regs.gbea = 1;
vcpu->run->s.regs.fpc = 0;
/*
* Do not reset these registers in the protected case, as some of
* them are overlaid and they are not accessible in this case
* anyway.
*/
if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
vcpu->arch.sie_block->gbea = 1;
vcpu->arch.sie_block->pp = 0;
vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
vcpu->arch.sie_block->todpr = 0;
}
}
static void kvm_arch_vcpu_ioctl_clear_reset(struct kvm_vcpu *vcpu)
{
struct kvm_sync_regs *regs = &vcpu->run->s.regs;
/* Clear reset is a superset of the initial reset */
kvm_arch_vcpu_ioctl_initial_reset(vcpu);
	memset(&regs->gprs, 0, sizeof(regs->gprs));
	memset(&regs->vrs, 0, sizeof(regs->vrs));
	memset(&regs->acrs, 0, sizeof(regs->acrs));
	memset(&regs->gscb, 0, sizeof(regs->gscb));
regs->etoken = 0;
regs->etoken_extension = 0;
}
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
vcpu_load(vcpu);
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
vcpu_put(vcpu);
return 0;
}
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
vcpu_load(vcpu);
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
vcpu_put(vcpu);
return 0;
}
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs)
{
vcpu_load(vcpu);
memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
vcpu_put(vcpu);
return 0;
}
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs)
{
vcpu_load(vcpu);
memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
vcpu_put(vcpu);
return 0;
}
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
int ret = 0;
vcpu_load(vcpu);
if (test_fp_ctl(fpu->fpc)) {
ret = -EINVAL;
goto out;
}
vcpu->run->s.regs.fpc = fpu->fpc;
if (MACHINE_HAS_VX)
convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
(freg_t *) fpu->fprs);
else
memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
out:
vcpu_put(vcpu);
return ret;
}
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
vcpu_load(vcpu);
/* make sure we have the latest values */
save_fpu_regs();
if (MACHINE_HAS_VX)
convert_vx_to_fp((freg_t *) fpu->fprs,
(__vector128 *) vcpu->run->s.regs.vrs);
else
memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
fpu->fpc = vcpu->run->s.regs.fpc;
vcpu_put(vcpu);
return 0;
}
static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
int rc = 0;
if (!is_vcpu_stopped(vcpu))
rc = -EBUSY;
else {
vcpu->run->psw_mask = psw.mask;
vcpu->run->psw_addr = psw.addr;
}
return rc;
}
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
struct kvm_translation *tr)
{
return -EINVAL; /* not implemented yet */
}
#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
KVM_GUESTDBG_USE_HW_BP | \
KVM_GUESTDBG_ENABLE)
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
struct kvm_guest_debug *dbg)
{
int rc = 0;
vcpu_load(vcpu);
vcpu->guest_debug = 0;
kvm_s390_clear_bp_data(vcpu);
if (dbg->control & ~VALID_GUESTDBG_FLAGS) {
rc = -EINVAL;
goto out;
}
if (!sclp.has_gpere) {
rc = -EINVAL;
goto out;
}
if (dbg->control & KVM_GUESTDBG_ENABLE) {
vcpu->guest_debug = dbg->control;
/* enforce guest PER */
kvm_s390_set_cpuflags(vcpu, CPUSTAT_P);
if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
rc = kvm_s390_import_bp_data(vcpu, dbg);
} else {
kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
vcpu->arch.guestdbg.last_bp = 0;
}
if (rc) {
vcpu->guest_debug = 0;
kvm_s390_clear_bp_data(vcpu);
kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
}
out:
vcpu_put(vcpu);
return rc;
}
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{
int ret;
vcpu_load(vcpu);
/* CHECK_STOP and LOAD are not supported yet */
ret = is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
KVM_MP_STATE_OPERATING;
vcpu_put(vcpu);
return ret;
}
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{
int rc = 0;
vcpu_load(vcpu);
/* user space knows about this interface - let it control the state */
kvm_s390_set_user_cpu_state_ctrl(vcpu->kvm);
switch (mp_state->mp_state) {
case KVM_MP_STATE_STOPPED:
rc = kvm_s390_vcpu_stop(vcpu);
break;
case KVM_MP_STATE_OPERATING:
rc = kvm_s390_vcpu_start(vcpu);
break;
case KVM_MP_STATE_LOAD:
if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
rc = -ENXIO;
break;
}
rc = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR_LOAD);
break;
case KVM_MP_STATE_CHECK_STOP:
fallthrough; /* CHECK_STOP and LOAD are not supported yet */
default:
rc = -ENXIO;
}
vcpu_put(vcpu);
return rc;
}
static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS);
}
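/*
 * Handle all requests pending for this vcpu before entering SIE.
 * Handling one request may raise another one, so restart the scan
 * until no request is left.
 */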
static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
kvm_s390_vcpu_request_handled(vcpu);
if (!kvm_request_pending(vcpu))
return 0;
/*
* If the guest prefix changed, re-arm the ipte notifier for the
* guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
* This ensures that the ipte instruction for this request has
* already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Let's just retry the request loop.
*/
if (kvm_check_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu)) {
int rc;
rc = gmap_mprotect_notify(vcpu->arch.gmap,
kvm_s390_get_prefix(vcpu),
PAGE_SIZE * 2, PROT_WRITE);
if (rc) {
kvm_make_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu);
return rc;
}
goto retry;
}
if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
vcpu->arch.sie_block->ihcpu = 0xffff;
goto retry;
}
if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
if (!ibs_enabled(vcpu)) {
trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
kvm_s390_set_cpuflags(vcpu, CPUSTAT_IBS);
}
goto retry;
}
if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
if (ibs_enabled(vcpu)) {
trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IBS);
}
goto retry;
}
if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
goto retry;
}
if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
/*
* Disable CMM virtualization; we will emulate the ESSA
* instruction manually, in order to provide additional
* functionalities needed for live migration.
*/
vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
goto retry;
}
if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
/*
* Re-enable CMM virtualization if CMMA is available and
* CMM has been used.
*/
if ((vcpu->kvm->arch.use_cmma) &&
(vcpu->kvm->mm->context.uses_cmm))
vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
goto retry;
}
/* we left the vsie handler, nothing to do, just clear the request */
kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);
return 0;
}
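/*
 * Set the guest TOD epoch (and the epoch index if the multiple-epoch
 * facility is available) from the given TOD value relative to the
 * current host TOD clock. The caller must hold kvm->lock.
 */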
static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod)
{
struct kvm_vcpu *vcpu;
union tod_clock clk;
unsigned long i;
preempt_disable();
store_tod_clock_ext(&clk);
kvm->arch.epoch = gtod->tod - clk.tod;
kvm->arch.epdx = 0;
if (test_kvm_facility(kvm, 139)) {
kvm->arch.epdx = gtod->epoch_idx - clk.ei;
if (kvm->arch.epoch > gtod->tod)
kvm->arch.epdx -= 1;
}
kvm_s390_vcpu_block_all(kvm);
kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu->arch.sie_block->epoch = kvm->arch.epoch;
vcpu->arch.sie_block->epdx = kvm->arch.epdx;
}
kvm_s390_vcpu_unblock_all(kvm);
preempt_enable();
}
int kvm_s390_try_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod)
{
if (!mutex_trylock(&kvm->lock))
return 0;
__kvm_s390_set_tod_clock(kvm, gtod);
mutex_unlock(&kvm->lock);
return 1;
}
/**
* kvm_arch_fault_in_page - fault-in guest page if necessary
* @vcpu: The corresponding virtual cpu
* @gpa: Guest physical address
* @writable: Whether the page should be writable or not
*
* Make sure that a guest page has been faulted-in on the host.
*
* Return: Zero on success, negative error code otherwise.
*/
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
return gmap_fault(vcpu->arch.gmap, gpa,
writable ? FAULT_FLAG_WRITE : 0);
}
static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
unsigned long token)
{
struct kvm_s390_interrupt inti;
struct kvm_s390_irq irq;
if (start_token) {
irq.u.ext.ext_params2 = token;
irq.type = KVM_S390_INT_PFAULT_INIT;
WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
} else {
inti.type = KVM_S390_INT_PFAULT_DONE;
inti.parm64 = token;
WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
}
}
bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
struct kvm_async_pf *work)
{
trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
return true;
}
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
struct kvm_async_pf *work)
{
trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
struct kvm_async_pf *work)
{
/* s390 will always inject the page directly */
}
bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
{
/*
* s390 will always inject the page directly,
	 * but we still want check_async_completion to clean up
*/
return true;
}
static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
hva_t hva;
struct kvm_arch_async_pf arch;
if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
return false;
if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
vcpu->arch.pfault_compare)
return false;
if (psw_extint_disabled(vcpu))
return false;
if (kvm_s390_vcpu_has_irq(vcpu, 0))
return false;
if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
return false;
if (!vcpu->arch.gmap->pfault_enabled)
return false;
hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
hva += current->thread.gmap_addr & ~PAGE_MASK;
if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
return false;
return kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
}
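/*
 * Prepare a vcpu for the next SIE entry: deliver pending interrupts,
 * process vcpu requests and arm guest debugging if it is enabled.
 */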
static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
int rc, cpuflags;
/*
* On s390 notifications for arriving pages will be delivered directly
	 * to the guest, but the housekeeping for completed pfaults is
* handled outside the worker.
*/
kvm_check_async_pf_completion(vcpu);
vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
if (need_resched())
schedule();
if (!kvm_is_ucontrol(vcpu->kvm)) {
rc = kvm_s390_deliver_pending_interrupts(vcpu);
if (rc || guestdbg_exit_pending(vcpu))
return rc;
}
rc = kvm_s390_handle_requests(vcpu);
if (rc)
return rc;
if (guestdbg_enabled(vcpu)) {
kvm_s390_backup_guest_per_regs(vcpu);
kvm_s390_patch_guest_per_regs(vcpu);
}
clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);
vcpu->arch.sie_block->icptcode = 0;
cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
trace_kvm_s390_sie_enter(vcpu, cpuflags);
return 0;
}
static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
{
struct kvm_s390_pgm_info pgm_info = {
.code = PGM_ADDRESSING,
};
u8 opcode, ilen;
int rc;
VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
trace_kvm_s390_sie_fault(vcpu);
/*
* We want to inject an addressing exception, which is defined as a
* suppressing or terminating exception. However, since we came here
* by a DAT access exception, the PSW still points to the faulting
* instruction since DAT exceptions are nullifying. So we've got
* to look up the current opcode to get the length of the instruction
* to be able to forward the PSW.
*/
rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
ilen = insn_length(opcode);
if (rc < 0) {
return rc;
} else if (rc) {
/* Instruction-Fetching Exceptions - we can't detect the ilen.
* Forward by arbitrary ilc, injection will take care of
* nullification if necessary.
*/
pgm_info = vcpu->arch.pgm;
ilen = 4;
}
pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
kvm_s390_forward_psw(vcpu, ilen);
return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
}
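/*
 * Post-process a SIE exit: reinject machine checks, run the intercept
 * handlers, or prepare an exit to userspace (e.g. KVM_EXIT_S390_SIEIC).
 */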
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
struct mcck_volatile_info *mcck_info;
struct sie_page *sie_page;
VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
vcpu->arch.sie_block->icptcode);
trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
if (guestdbg_enabled(vcpu))
kvm_s390_restore_guest_per_regs(vcpu);
vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
if (exit_reason == -EINTR) {
VCPU_EVENT(vcpu, 3, "%s", "machine check");
sie_page = container_of(vcpu->arch.sie_block,
struct sie_page, sie_block);
mcck_info = &sie_page->mcck_info;
kvm_s390_reinject_machine_check(vcpu, mcck_info);
return 0;
}
if (vcpu->arch.sie_block->icptcode > 0) {
int rc = kvm_handle_sie_intercept(vcpu);
if (rc != -EOPNOTSUPP)
return rc;
vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
return -EREMOTE;
} else if (exit_reason != -EFAULT) {
vcpu->stat.exit_null++;
return 0;
} else if (kvm_is_ucontrol(vcpu->kvm)) {
vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
vcpu->run->s390_ucontrol.trans_exc_code =
current->thread.gmap_addr;
vcpu->run->s390_ucontrol.pgm_code = 0x10;
return -EREMOTE;
} else if (current->thread.gmap_pfault) {
trace_kvm_s390_major_guest_pfault(vcpu);
current->thread.gmap_pfault = 0;
if (kvm_arch_setup_async_pf(vcpu))
return 0;
vcpu->stat.pfault_sync++;
return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
}
return vcpu_post_run_fault_in_sie(vcpu);
}
#define PSW_INT_MASK (PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_MCHECK)
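/*
 * The inner vcpu run loop: enter SIE repeatedly until a signal is
 * pending, a guestdbg exit is requested or a pre/post-run step fails.
 */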
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
int rc, exit_reason;
struct sie_page *sie_page = (struct sie_page *)vcpu->arch.sie_block;
/*
* We try to hold kvm->srcu during most of vcpu_run (except when run-
* ning the guest), so that memslots (and other stuff) are protected
*/
kvm_vcpu_srcu_read_lock(vcpu);
do {
rc = vcpu_pre_run(vcpu);
if (rc || guestdbg_exit_pending(vcpu))
break;
kvm_vcpu_srcu_read_unlock(vcpu);
/*
		 * As PF_VCPU will be used in the fault handler, there must be
		 * no uaccess between guest_enter and guest_exit.
*/
local_irq_disable();
guest_enter_irqoff();
__disable_cpu_timer_accounting(vcpu);
local_irq_enable();
if (kvm_s390_pv_cpu_is_protected(vcpu)) {
memcpy(sie_page->pv_grregs,
vcpu->run->s.regs.gprs,
sizeof(sie_page->pv_grregs));
}
if (test_cpu_flag(CIF_FPU))
load_fpu_regs();
exit_reason = sie64a(vcpu->arch.sie_block,
vcpu->run->s.regs.gprs);
if (kvm_s390_pv_cpu_is_protected(vcpu)) {
memcpy(vcpu->run->s.regs.gprs,
sie_page->pv_grregs,
sizeof(sie_page->pv_grregs));
/*
* We're not allowed to inject interrupts on intercepts
* that leave the guest state in an "in-between" state
* where the next SIE entry will do a continuation.
* Fence interrupts in our "internal" PSW.
*/
if (vcpu->arch.sie_block->icptcode == ICPT_PV_INSTR ||
vcpu->arch.sie_block->icptcode == ICPT_PV_PREF) {
vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
}
}
local_irq_disable();
__enable_cpu_timer_accounting(vcpu);
guest_exit_irqoff();
local_irq_enable();
kvm_vcpu_srcu_read_lock(vcpu);
rc = vcpu_post_run(vcpu, exit_reason);
} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
kvm_vcpu_srcu_read_unlock(vcpu);
return rc;
}
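/*
 * Sync the format-2 register state (only valid for non-protected
 * guests) from kvm_run into the SIE block before entering SIE.
 */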
static void sync_regs_fmt2(struct kvm_vcpu *vcpu)
{
struct kvm_run *kvm_run = vcpu->run;
struct runtime_instr_cb *riccb;
struct gs_cb *gscb;
riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
}
if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
vcpu->arch.pfault_token = kvm_run->s.regs.pft;
vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
kvm_clear_async_pf_completion_queue(vcpu);
}
if (kvm_run->kvm_dirty_regs & KVM_SYNC_DIAG318) {
vcpu->arch.diag318_info.val = kvm_run->s.regs.diag318;
vcpu->arch.sie_block->cpnc = vcpu->arch.diag318_info.cpnc;
VCPU_EVENT(vcpu, 3, "setting cpnc to %d", vcpu->arch.diag318_info.cpnc);
}
/*
* If userspace sets the riccb (e.g. after migration) to a valid state,
* we should enable RI here instead of doing the lazy enablement.
*/
if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
test_kvm_facility(vcpu->kvm, 64) &&
riccb->v &&
!(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
vcpu->arch.sie_block->ecb3 |= ECB3_RI;
}
/*
* If userspace sets the gscb (e.g. after migration) to non-zero,
* we should enable GS here instead of doing the lazy enablement.
*/
if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
test_kvm_facility(vcpu->kvm, 133) &&
gscb->gssm &&
!vcpu->arch.gs_enabled) {
VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
vcpu->arch.sie_block->ecb |= ECB_GS;
vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
vcpu->arch.gs_enabled = 1;
}
if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
test_kvm_facility(vcpu->kvm, 82)) {
vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
}
if (MACHINE_HAS_GS) {
preempt_disable();
__ctl_set_bit(2, 4);
if (current->thread.gs_cb) {
vcpu->arch.host_gscb = current->thread.gs_cb;
save_gs_cb(vcpu->arch.host_gscb);
}
if (vcpu->arch.gs_enabled) {
current->thread.gs_cb = (struct gs_cb *)
&vcpu->run->s.regs.gscb;
restore_gs_cb(current->thread.gs_cb);
}
preempt_enable();
}
/* SIE will load etoken directly from SDNX and therefore kvm_run */
}
static void sync_regs(struct kvm_vcpu *vcpu)
{
struct kvm_run *kvm_run = vcpu->run;
if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
/* some control register changes require a tlb flush */
kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
}
if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
}
save_access_regs(vcpu->arch.host_acrs);
restore_access_regs(vcpu->run->s.regs.acrs);
/* save host (userspace) fprs/vrs */
save_fpu_regs();
vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
if (MACHINE_HAS_VX)
current->thread.fpu.regs = vcpu->run->s.regs.vrs;
else
current->thread.fpu.regs = vcpu->run->s.regs.fprs;
current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
if (test_fp_ctl(current->thread.fpu.fpc))
/* User space provided an invalid FPC, let's clear it */
current->thread.fpu.fpc = 0;
/* Sync fmt2 only data */
if (likely(!kvm_s390_pv_cpu_is_protected(vcpu))) {
sync_regs_fmt2(vcpu);
} else {
/*
* In several places we have to modify our internal view to
* not do things that are disallowed by the ultravisor. For
* example we must not inject interrupts after specific exits
* (e.g. 112 prefix page not secure). We do this by turning
* off the machine check, external and I/O interrupt bits
		 * of our PSW copy. To avoid getting validity intercepts, we
		 * only accept the condition code from userspace.
*/
vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_CC;
vcpu->arch.sie_block->gpsw.mask |= kvm_run->psw_mask &
PSW_MASK_CC;
}
kvm_run->kvm_dirty_regs = 0;
}
static void store_regs_fmt2(struct kvm_vcpu *vcpu)
{
struct kvm_run *kvm_run = vcpu->run;
kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val;
if (MACHINE_HAS_GS) {
preempt_disable();
__ctl_set_bit(2, 4);
if (vcpu->arch.gs_enabled)
save_gs_cb(current->thread.gs_cb);
current->thread.gs_cb = vcpu->arch.host_gscb;
restore_gs_cb(vcpu->arch.host_gscb);
if (!vcpu->arch.host_gscb)
__ctl_clear_bit(2, 4);
vcpu->arch.host_gscb = NULL;
preempt_enable();
}
/* SIE will save etoken directly into SDNX and therefore kvm_run */
}
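/* Store the guest register state into kvm_run before returning to userspace. */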
static void store_regs(struct kvm_vcpu *vcpu)
{
struct kvm_run *kvm_run = vcpu->run;
kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
kvm_run->s.regs.pft = vcpu->arch.pfault_token;
kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
save_access_regs(vcpu->run->s.regs.acrs);
restore_access_regs(vcpu->arch.host_acrs);
/* Save guest register state */
save_fpu_regs();
vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
/* Restore will be done lazily at return */
current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
if (likely(!kvm_s390_pv_cpu_is_protected(vcpu)))
store_regs_fmt2(vcpu);
}
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
struct kvm_run *kvm_run = vcpu->run;
int rc;
/*
* Running a VM while dumping always has the potential to
* produce inconsistent dump data. But for PV vcpus a SIE
* entry while dumping could also lead to a fatal validity
* intercept which we absolutely want to avoid.
*/
if (vcpu->kvm->arch.pv.dumping)
return -EINVAL;
if (kvm_run->immediate_exit)
return -EINTR;
if (kvm_run->kvm_valid_regs & ~KVM_SYNC_S390_VALID_FIELDS ||
kvm_run->kvm_dirty_regs & ~KVM_SYNC_S390_VALID_FIELDS)
return -EINVAL;
vcpu_load(vcpu);
if (guestdbg_exit_pending(vcpu)) {
kvm_s390_prepare_debug_exit(vcpu);
rc = 0;
goto out;
}
kvm_sigset_activate(vcpu);
/*
* no need to check the return value of vcpu_start as it can only have
* an error for protvirt, but protvirt means user cpu state
*/
if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
kvm_s390_vcpu_start(vcpu);
} else if (is_vcpu_stopped(vcpu)) {
pr_err_ratelimited("can't run stopped vcpu %d\n",
vcpu->vcpu_id);
rc = -EINVAL;
goto out;
}
sync_regs(vcpu);
enable_cpu_timer_accounting(vcpu);
might_fault();
rc = __vcpu_run(vcpu);
if (signal_pending(current) && !rc) {
kvm_run->exit_reason = KVM_EXIT_INTR;
rc = -EINTR;
}
if (guestdbg_exit_pending(vcpu) && !rc) {
kvm_s390_prepare_debug_exit(vcpu);
rc = 0;
}
if (rc == -EREMOTE) {
/* userspace support is needed, kvm_run has been prepared */
rc = 0;
}
disable_cpu_timer_accounting(vcpu);
store_regs(vcpu);
kvm_sigset_deactivate(vcpu);
vcpu->stat.exit_userspace++;
out:
vcpu_put(vcpu);
return rc;
}
/*
* store status at address
 * we have two special cases:
* KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
* KVM_S390_STORE_STATUS_PREFIXED: -> prefix
*/
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
unsigned char archmode = 1;
freg_t fprs[NUM_FPRS];
unsigned int px;
u64 clkcomp, cputm;
int rc;
px = kvm_s390_get_prefix(vcpu);
if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
if (write_guest_abs(vcpu, 163, &archmode, 1))
return -EFAULT;
gpa = 0;
} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
if (write_guest_real(vcpu, 163, &archmode, 1))
return -EFAULT;
gpa = px;
} else
gpa -= __LC_FPREGS_SAVE_AREA;
/* manually convert vector registers if necessary */
if (MACHINE_HAS_VX) {
convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
fprs, 128);
} else {
rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
vcpu->run->s.regs.fprs, 128);
}
rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
vcpu->run->s.regs.gprs, 128);
rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
&vcpu->arch.sie_block->gpsw, 16);
rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
&px, 4);
rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
&vcpu->run->s.regs.fpc, 4);
rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
&vcpu->arch.sie_block->todpr, 4);
cputm = kvm_s390_get_cpu_timer(vcpu);
rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
&cputm, 8);
clkcomp = vcpu->arch.sie_block->ckc >> 8;
rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
&clkcomp, 8);
rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
&vcpu->run->s.regs.acrs, 64);
rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
&vcpu->arch.sie_block->gcr, 128);
return rc ? -EFAULT : 0;
}
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
/*
* The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
* switch in the run ioctl. Let's update our copies before we save
* it into the save area
*/
save_fpu_regs();
vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
save_access_regs(vcpu->run->s.regs.acrs);
return kvm_s390_store_status_unloaded(vcpu, addr);
}
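/*
 * IBS speeds up a guest with a single running vcpu; it has to be
 * disabled again as soon as a second vcpu starts running. Both
 * operations are requested synchronously via vcpu requests.
 */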
static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
}
static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
{
unsigned long i;
struct kvm_vcpu *vcpu;
kvm_for_each_vcpu(i, vcpu, kvm) {
__disable_ibs_on_vcpu(vcpu);
}
}
static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
if (!sclp.has_ibs)
return;
kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
}
int kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
int i, online_vcpus, r = 0, started_vcpus = 0;
if (!is_vcpu_stopped(vcpu))
return 0;
trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
/* Only one cpu at a time may enter/leave the STOPPED state. */
spin_lock(&vcpu->kvm->arch.start_stop_lock);
online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
/* Let's tell the UV that we want to change into the operating state */
if (kvm_s390_pv_cpu_is_protected(vcpu)) {
r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR);
if (r) {
spin_unlock(&vcpu->kvm->arch.start_stop_lock);
return r;
}
}
for (i = 0; i < online_vcpus; i++) {
if (!is_vcpu_stopped(kvm_get_vcpu(vcpu->kvm, i)))
started_vcpus++;
}
if (started_vcpus == 0) {
/* we're the only active VCPU -> speed it up */
__enable_ibs_on_vcpu(vcpu);
} else if (started_vcpus == 1) {
/*
* As we are starting a second VCPU, we have to disable
* the IBS facility on all VCPUs to remove potentially
* outstanding ENABLE requests.
*/
__disable_ibs_on_all_vcpus(vcpu->kvm);
}
kvm_s390_clear_cpuflags(vcpu, CPUSTAT_STOPPED);
/*
* The real PSW might have changed due to a RESTART interpreted by the
* ultravisor. We block all interrupts and let the next sie exit
* refresh our view.
*/
if (kvm_s390_pv_cpu_is_protected(vcpu))
vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
/*
* Another VCPU might have used IBS while we were offline.
* Let's play safe and flush the VCPU at startup.
*/
kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
spin_unlock(&vcpu->kvm->arch.start_stop_lock);
return 0;
}
int kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
int i, online_vcpus, r = 0, started_vcpus = 0;
struct kvm_vcpu *started_vcpu = NULL;
if (is_vcpu_stopped(vcpu))
return 0;
trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
/* Only one cpu at a time may enter/leave the STOPPED state. */
spin_lock(&vcpu->kvm->arch.start_stop_lock);
online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
/* Let's tell the UV that we want to change into the stopped state */
if (kvm_s390_pv_cpu_is_protected(vcpu)) {
r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_STP);
if (r) {
spin_unlock(&vcpu->kvm->arch.start_stop_lock);
return r;
}
}
/*
* Set the VCPU to STOPPED and THEN clear the interrupt flag,
* now that the SIGP STOP and SIGP STOP AND STORE STATUS orders
* have been fully processed. This will ensure that the VCPU
* is kept BUSY if another VCPU is inquiring with SIGP SENSE.
*/
kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
kvm_s390_clear_stop_irq(vcpu);
__disable_ibs_on_vcpu(vcpu);
for (i = 0; i < online_vcpus; i++) {
struct kvm_vcpu *tmp = kvm_get_vcpu(vcpu->kvm, i);
if (!is_vcpu_stopped(tmp)) {
started_vcpus++;
started_vcpu = tmp;
}
}
if (started_vcpus == 1) {
/*
* As we only have one VCPU left, we want to enable the
* IBS facility for that VCPU to speed it up.
*/
__enable_ibs_on_vcpu(started_vcpu);
}
spin_unlock(&vcpu->kvm->arch.start_stop_lock);
return 0;
}
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
struct kvm_enable_cap *cap)
{
int r;
if (cap->flags)
return -EINVAL;
switch (cap->cap) {
case KVM_CAP_S390_CSS_SUPPORT:
if (!vcpu->kvm->arch.css_support) {
vcpu->kvm->arch.css_support = 1;
VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
trace_kvm_s390_enable_css(vcpu->kvm);
}
r = 0;
break;
default:
r = -EINVAL;
break;
}
return r;
}
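/* Access the secure instruction data area (SIDA) of a protected vcpu. */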
static long kvm_s390_vcpu_sida_op(struct kvm_vcpu *vcpu,
struct kvm_s390_mem_op *mop)
{
void __user *uaddr = (void __user *)mop->buf;
void *sida_addr;
int r = 0;
if (mop->flags || !mop->size)
return -EINVAL;
if (mop->size + mop->sida_offset < mop->size)
return -EINVAL;
if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block))
return -E2BIG;
if (!kvm_s390_pv_cpu_is_protected(vcpu))
return -EINVAL;
sida_addr = (char *)sida_addr(vcpu->arch.sie_block) + mop->sida_offset;
switch (mop->op) {
case KVM_S390_MEMOP_SIDA_READ:
if (copy_to_user(uaddr, sida_addr, mop->size))
r = -EFAULT;
break;
case KVM_S390_MEMOP_SIDA_WRITE:
if (copy_from_user(sida_addr, uaddr, mop->size))
r = -EFAULT;
break;
}
return r;
}
static long kvm_s390_vcpu_mem_op(struct kvm_vcpu *vcpu,
struct kvm_s390_mem_op *mop)
{
void __user *uaddr = (void __user *)mop->buf;
enum gacc_mode acc_mode;
void *tmpbuf = NULL;
int r;
r = mem_op_validate_common(mop, KVM_S390_MEMOP_F_INJECT_EXCEPTION |
KVM_S390_MEMOP_F_CHECK_ONLY |
KVM_S390_MEMOP_F_SKEY_PROTECTION);
if (r)
return r;
if (mop->ar >= NUM_ACRS)
return -EINVAL;
if (kvm_s390_pv_cpu_is_protected(vcpu))
return -EINVAL;
if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
tmpbuf = vmalloc(mop->size);
if (!tmpbuf)
return -ENOMEM;
}
acc_mode = mop->op == KVM_S390_MEMOP_LOGICAL_READ ? GACC_FETCH : GACC_STORE;
if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size,
acc_mode, mop->key);
goto out_inject;
}
if (acc_mode == GACC_FETCH) {
r = read_guest_with_key(vcpu, mop->gaddr, mop->ar, tmpbuf,
mop->size, mop->key);
if (r)
goto out_inject;
if (copy_to_user(uaddr, tmpbuf, mop->size)) {
r = -EFAULT;
goto out_free;
}
} else {
if (copy_from_user(tmpbuf, uaddr, mop->size)) {
r = -EFAULT;
goto out_free;
}
r = write_guest_with_key(vcpu, mop->gaddr, mop->ar, tmpbuf,
mop->size, mop->key);
}
out_inject:
if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
out_free:
vfree(tmpbuf);
return r;
}
static long kvm_s390_vcpu_memsida_op(struct kvm_vcpu *vcpu,
struct kvm_s390_mem_op *mop)
{
int r, srcu_idx;
srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
switch (mop->op) {
case KVM_S390_MEMOP_LOGICAL_READ:
case KVM_S390_MEMOP_LOGICAL_WRITE:
r = kvm_s390_vcpu_mem_op(vcpu, mop);
break;
case KVM_S390_MEMOP_SIDA_READ:
case KVM_S390_MEMOP_SIDA_WRITE:
/* we are locked against sida going away by the vcpu->mutex */
r = kvm_s390_vcpu_sida_op(vcpu, mop);
break;
default:
r = -EINVAL;
}
srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
return r;
}
long kvm_arch_vcpu_async_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
struct kvm_vcpu *vcpu = filp->private_data;
void __user *argp = (void __user *)arg;
int rc;
switch (ioctl) {
case KVM_S390_IRQ: {
struct kvm_s390_irq s390irq;
if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
return -EFAULT;
rc = kvm_s390_inject_vcpu(vcpu, &s390irq);
break;
}
case KVM_S390_INTERRUPT: {
struct kvm_s390_interrupt s390int;
struct kvm_s390_irq s390irq = {};
if (copy_from_user(&s390int, argp, sizeof(s390int)))
return -EFAULT;
if (s390int_to_s390irq(&s390int, &s390irq))
return -EINVAL;
rc = kvm_s390_inject_vcpu(vcpu, &s390irq);
break;
}
default:
rc = -ENOIOCTLCMD;
break;
}
/*
* To simplify single stepping of userspace-emulated instructions,
* KVM_EXIT_S390_SIEIC exit sets KVM_GUESTDBG_EXIT_PENDING (see
* should_handle_per_ifetch()). However, if userspace emulation injects
* an interrupt, it needs to be cleared, so that KVM_EXIT_DEBUG happens
* after (and not before) the interrupt delivery.
*/
if (!rc)
vcpu->guest_debug &= ~KVM_GUESTDBG_EXIT_PENDING;
return rc;
}
static int kvm_s390_handle_pv_vcpu_dump(struct kvm_vcpu *vcpu,
struct kvm_pv_cmd *cmd)
{
struct kvm_s390_pv_dmp dmp;
void *data;
int ret;
/* Dump initialization is a prerequisite */
if (!vcpu->kvm->arch.pv.dumping)
return -EINVAL;
if (copy_from_user(&dmp, (__u8 __user *)cmd->data, sizeof(dmp)))
return -EFAULT;
/* We only handle this subcmd right now */
if (dmp.subcmd != KVM_PV_DUMP_CPU)
return -EINVAL;
/* CPU dump length is the same as create cpu storage donation. */
if (dmp.buff_len != uv_info.guest_cpu_stor_len)
return -EINVAL;
data = kvzalloc(uv_info.guest_cpu_stor_len, GFP_KERNEL);
if (!data)
return -ENOMEM;
ret = kvm_s390_pv_dump_cpu(vcpu, data, &cmd->rc, &cmd->rrc);
VCPU_EVENT(vcpu, 3, "PROTVIRT DUMP CPU %d rc %x rrc %x",
vcpu->vcpu_id, cmd->rc, cmd->rrc);
if (ret)
ret = -EINVAL;
/* On success copy over the dump data */
if (!ret && copy_to_user((__u8 __user *)dmp.buff_addr, data, uv_info.guest_cpu_stor_len))
ret = -EFAULT;
kvfree(data);
return ret;
}
long kvm_arch_vcpu_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
struct kvm_vcpu *vcpu = filp->private_data;
void __user *argp = (void __user *)arg;
int idx;
long r;
u16 rc, rrc;
vcpu_load(vcpu);
switch (ioctl) {
case KVM_S390_STORE_STATUS:
idx = srcu_read_lock(&vcpu->kvm->srcu);
r = kvm_s390_store_status_unloaded(vcpu, arg);
srcu_read_unlock(&vcpu->kvm->srcu, idx);
break;
case KVM_S390_SET_INITIAL_PSW: {
psw_t psw;
r = -EFAULT;
if (copy_from_user(&psw, argp, sizeof(psw)))
break;
r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
break;
}
case KVM_S390_CLEAR_RESET:
r = 0;
kvm_arch_vcpu_ioctl_clear_reset(vcpu);
if (kvm_s390_pv_cpu_is_protected(vcpu)) {
r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
UVC_CMD_CPU_RESET_CLEAR, &rc, &rrc);
VCPU_EVENT(vcpu, 3, "PROTVIRT RESET CLEAR VCPU: rc %x rrc %x",
rc, rrc);
}
break;
case KVM_S390_INITIAL_RESET:
r = 0;
kvm_arch_vcpu_ioctl_initial_reset(vcpu);
if (kvm_s390_pv_cpu_is_protected(vcpu)) {
r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
UVC_CMD_CPU_RESET_INITIAL,
&rc, &rrc);
VCPU_EVENT(vcpu, 3, "PROTVIRT RESET INITIAL VCPU: rc %x rrc %x",
rc, rrc);
}
break;
case KVM_S390_NORMAL_RESET:
r = 0;
kvm_arch_vcpu_ioctl_normal_reset(vcpu);
if (kvm_s390_pv_cpu_is_protected(vcpu)) {
r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
UVC_CMD_CPU_RESET, &rc, &rrc);
VCPU_EVENT(vcpu, 3, "PROTVIRT RESET NORMAL VCPU: rc %x rrc %x",
rc, rrc);
}
break;
case KVM_SET_ONE_REG:
case KVM_GET_ONE_REG: {
struct kvm_one_reg reg;
r = -EINVAL;
if (kvm_s390_pv_cpu_is_protected(vcpu))
break;
r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
break;
}
#ifdef CONFIG_KVM_S390_UCONTROL
case KVM_S390_UCAS_MAP: {
struct kvm_s390_ucas_mapping ucasmap;
if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
r = -EFAULT;
break;
}
if (!kvm_is_ucontrol(vcpu->kvm)) {
r = -EINVAL;
break;
}
r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
ucasmap.vcpu_addr, ucasmap.length);
break;
}
case KVM_S390_UCAS_UNMAP: {
struct kvm_s390_ucas_mapping ucasmap;
if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
r = -EFAULT;
break;
}
if (!kvm_is_ucontrol(vcpu->kvm)) {
r = -EINVAL;
break;
}
r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
ucasmap.length);
break;
}
#endif
case KVM_S390_VCPU_FAULT: {
r = gmap_fault(vcpu->arch.gmap, arg, 0);
break;
}
case KVM_ENABLE_CAP:
{
struct kvm_enable_cap cap;
r = -EFAULT;
if (copy_from_user(&cap, argp, sizeof(cap)))
break;
r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
break;
}
case KVM_S390_MEM_OP: {
struct kvm_s390_mem_op mem_op;
if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
r = kvm_s390_vcpu_memsida_op(vcpu, &mem_op);
else
r = -EFAULT;
break;
}
case KVM_S390_SET_IRQ_STATE: {
struct kvm_s390_irq_state irq_state;
r = -EFAULT;
if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
break;
if (irq_state.len > VCPU_IRQS_MAX_BUF ||
irq_state.len == 0 ||
irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
r = -EINVAL;
break;
}
/* do not use irq_state.flags, it will break old QEMUs */
r = kvm_s390_set_irq_state(vcpu,
(void __user *) irq_state.buf,
irq_state.len);
break;
}
case KVM_S390_GET_IRQ_STATE: {
struct kvm_s390_irq_state irq_state;
r = -EFAULT;
if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
break;
if (irq_state.len == 0) {
r = -EINVAL;
break;
}
/* do not use irq_state.flags, it will break old QEMUs */
r = kvm_s390_get_irq_state(vcpu,
(__u8 __user *) irq_state.buf,
irq_state.len);
break;
}
case KVM_S390_PV_CPU_COMMAND: {
struct kvm_pv_cmd cmd;
r = -EINVAL;
if (!is_prot_virt_host())
break;
r = -EFAULT;
if (copy_from_user(&cmd, argp, sizeof(cmd)))
break;
r = -EINVAL;
if (cmd.flags)
break;
/* We only handle this cmd right now */
if (cmd.cmd != KVM_PV_DUMP)
break;
r = kvm_s390_handle_pv_vcpu_dump(vcpu, &cmd);
/* Always copy over UV rc / rrc data */
if (copy_to_user((__u8 __user *)argp, &cmd.rc,
sizeof(cmd.rc) + sizeof(cmd.rrc)))
r = -EFAULT;
break;
}
default:
r = -ENOTTY;
}
vcpu_put(vcpu);
return r;
}
vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
&& (kvm_is_ucontrol(vcpu->kvm))) {
vmf->page = virt_to_page(vcpu->arch.sie_block);
get_page(vmf->page);
return 0;
}
#endif
return VM_FAULT_SIGBUS;
}
bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
{
return true;
}
/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
const struct kvm_memory_slot *old,
struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
gpa_t size;
/* When we are protected, we should not change the memory slots */
if (kvm_s390_pv_get_handle(kvm))
return -EINVAL;
if (change != KVM_MR_DELETE && change != KVM_MR_FLAGS_ONLY) {
		/*
		 * A few sanity checks. Memory slots have to start and end at
		 * a segment boundary (1MB). The memory in userland may be
		 * fragmented into various different vmas. It is okay to
		 * mmap() and munmap() stuff in this slot after doing this
		 * call at any time.
		 */
if (new->userspace_addr & 0xffffful)
return -EINVAL;
size = new->npages * PAGE_SIZE;
if (size & 0xffffful)
return -EINVAL;
if ((new->base_gfn * PAGE_SIZE) + size > kvm->arch.mem_limit)
return -EINVAL;
}
if (!kvm->arch.migration_mode)
return 0;
/*
* Turn off migration mode when:
* - userspace creates a new memslot with dirty logging off,
* - userspace modifies an existing memslot (MOVE or FLAGS_ONLY) and
* dirty logging is turned off.
* Migration mode expects dirty page logging being enabled to store
* its dirty bitmap.
*/
if (change != KVM_MR_DELETE &&
!(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
WARN(kvm_s390_vm_stop_migration(kvm),
"Failed to stop migration mode");
return 0;
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
struct kvm_memory_slot *old,
const struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
int rc = 0;
switch (change) {
case KVM_MR_DELETE:
rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
old->npages * PAGE_SIZE);
break;
case KVM_MR_MOVE:
rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
old->npages * PAGE_SIZE);
if (rc)
break;
fallthrough;
case KVM_MR_CREATE:
rc = gmap_map_segment(kvm->arch.gmap, new->userspace_addr,
new->base_gfn * PAGE_SIZE,
new->npages * PAGE_SIZE);
break;
case KVM_MR_FLAGS_ONLY:
break;
default:
WARN(1, "Unknown KVM MR CHANGE: %d\n", change);
}
if (rc)
pr_warn("failed to commit memory region\n");
return;
}
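/*
 * Compute which facility bits of facility doubleword @i may be passed
 * to a guest: the 2-bit indication taken from sclp.hmfai shortens the
 * 48-bit base mask by 16 bits per step (0 -> all 48 bits, 3 -> none).
 */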
static inline unsigned long nonhyp_mask(int i)
{
unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;
return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
}
static int __init kvm_s390_init(void)
{
int i, r;
if (!sclp.has_sief2) {
pr_info("SIE is not available\n");
return -ENODEV;
}
if (nested && hpage) {
pr_info("A KVM host that supports nesting cannot back its KVM guests with huge pages\n");
return -EINVAL;
}
for (i = 0; i < 16; i++)
kvm_s390_fac_base[i] |=
stfle_fac_list[i] & nonhyp_mask(i);
r = __kvm_s390_init();
if (r)
return r;
r = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
if (r) {
__kvm_s390_exit();
return r;
}
return 0;
}
static void __exit kvm_s390_exit(void)
{
kvm_exit();
__kvm_s390_exit();
}
module_init(kvm_s390_init);
module_exit(kvm_s390_exit);
/*
* Enable autoloading of the kvm module.
* Note that we add the module alias here instead of virt/kvm/kvm_main.c
* since x86 takes a different approach.
*/
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
| linux-master | arch/s390/kvm/kvm-s390.c |
// SPDX-License-Identifier: GPL-2.0
/*
* kvm nested virtualization support for s390x
*
* Copyright IBM Corp. 2016, 2018
*
* Author(s): David Hildenbrand <[email protected]>
*/
#include <linux/vmalloc.h>
#include <linux/kvm_host.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <asm/gmap.h>
#include <asm/mmu_context.h>
#include <asm/sclp.h>
#include <asm/nmi.h>
#include <asm/dis.h>
#include <asm/fpu/api.h>
#include "kvm-s390.h"
#include "gaccess.h"
struct vsie_page {
struct kvm_s390_sie_block scb_s; /* 0x0000 */
/*
* the backup info for machine check. ensure it's at
* the same offset as that in struct sie_page!
*/
struct mcck_volatile_info mcck_info; /* 0x0200 */
/*
* The pinned original scb. Be aware that other VCPUs can modify
* it while we read from it. Values that are used for conditions or
* are reused conditionally, should be accessed via READ_ONCE.
*/
struct kvm_s390_sie_block *scb_o; /* 0x0218 */
/* the shadow gmap in use by the vsie_page */
struct gmap *gmap; /* 0x0220 */
/* address of the last reported fault to guest2 */
unsigned long fault_addr; /* 0x0228 */
/* calculated guest addresses of satellite control blocks */
gpa_t sca_gpa; /* 0x0230 */
gpa_t itdba_gpa; /* 0x0238 */
gpa_t gvrd_gpa; /* 0x0240 */
gpa_t riccbd_gpa; /* 0x0248 */
gpa_t sdnx_gpa; /* 0x0250 */
__u8 reserved[0x0700 - 0x0258]; /* 0x0258 */
struct kvm_s390_crypto_cb crycb; /* 0x0700 */
__u8 fac[S390_ARCH_FAC_LIST_SIZE_BYTE]; /* 0x0800 */
};
/* trigger a validity icpt for the given scb */
static int set_validity_icpt(struct kvm_s390_sie_block *scb,
__u16 reason_code)
{
scb->ipa = 0x1000;
scb->ipb = ((__u32) reason_code) << 16;
scb->icptcode = ICPT_VALIDITY;
return 1;
}
/* mark the prefix as unmapped, this will block the VSIE */
static void prefix_unmapped(struct vsie_page *vsie_page)
{
atomic_or(PROG_REQUEST, &vsie_page->scb_s.prog20);
}
/* mark the prefix as unmapped and wait until the VSIE has been left */
static void prefix_unmapped_sync(struct vsie_page *vsie_page)
{
prefix_unmapped(vsie_page);
if (vsie_page->scb_s.prog0c & PROG_IN_SIE)
atomic_or(CPUSTAT_STOP_INT, &vsie_page->scb_s.cpuflags);
while (vsie_page->scb_s.prog0c & PROG_IN_SIE)
cpu_relax();
}
/* mark the prefix as mapped, this will allow the VSIE to run */
static void prefix_mapped(struct vsie_page *vsie_page)
{
atomic_andnot(PROG_REQUEST, &vsie_page->scb_s.prog20);
}
/* test if the prefix is mapped into the gmap shadow */
static int prefix_is_mapped(struct vsie_page *vsie_page)
{
return !(atomic_read(&vsie_page->scb_s.prog20) & PROG_REQUEST);
}
/* copy the updated intervention request bits into the shadow scb */
static void update_intervention_requests(struct vsie_page *vsie_page)
{
const int bits = CPUSTAT_STOP_INT | CPUSTAT_IO_INT | CPUSTAT_EXT_INT;
int cpuflags;
cpuflags = atomic_read(&vsie_page->scb_o->cpuflags);
atomic_andnot(bits, &vsie_page->scb_s.cpuflags);
atomic_or(cpuflags & bits, &vsie_page->scb_s.cpuflags);
}
/* shadow (filter and validate) the cpuflags */
static int prepare_cpuflags(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
int newflags, cpuflags = atomic_read(&scb_o->cpuflags);
/* we don't allow ESA/390 guests */
if (!(cpuflags & CPUSTAT_ZARCH))
return set_validity_icpt(scb_s, 0x0001U);
if (cpuflags & (CPUSTAT_RRF | CPUSTAT_MCDS))
return set_validity_icpt(scb_s, 0x0001U);
else if (cpuflags & (CPUSTAT_SLSV | CPUSTAT_SLSR))
return set_validity_icpt(scb_s, 0x0007U);
/* intervention requests will be set later */
newflags = CPUSTAT_ZARCH;
if (cpuflags & CPUSTAT_GED && test_kvm_facility(vcpu->kvm, 8))
newflags |= CPUSTAT_GED;
if (cpuflags & CPUSTAT_GED2 && test_kvm_facility(vcpu->kvm, 78)) {
if (cpuflags & CPUSTAT_GED)
return set_validity_icpt(scb_s, 0x0001U);
newflags |= CPUSTAT_GED2;
}
if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_GPERE))
newflags |= cpuflags & CPUSTAT_P;
if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_GSLS))
newflags |= cpuflags & CPUSTAT_SM;
if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IBS))
newflags |= cpuflags & CPUSTAT_IBS;
if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_KSS))
newflags |= cpuflags & CPUSTAT_KSS;
atomic_set(&scb_s->cpuflags, newflags);
return 0;
}
/* Copy to APCB FORMAT1 from APCB FORMAT0 */
static int setup_apcb10(struct kvm_vcpu *vcpu, struct kvm_s390_apcb1 *apcb_s,
unsigned long crycb_gpa, struct kvm_s390_apcb1 *apcb_h)
{
struct kvm_s390_apcb0 tmp;
unsigned long apcb_gpa;
apcb_gpa = crycb_gpa + offsetof(struct kvm_s390_crypto_cb, apcb0);
if (read_guest_real(vcpu, apcb_gpa, &tmp,
sizeof(struct kvm_s390_apcb0)))
return -EFAULT;
apcb_s->apm[0] = apcb_h->apm[0] & tmp.apm[0];
apcb_s->aqm[0] = apcb_h->aqm[0] & tmp.aqm[0] & 0xffff000000000000UL;
apcb_s->adm[0] = apcb_h->adm[0] & tmp.adm[0] & 0xffff000000000000UL;
return 0;
}
/**
* setup_apcb00 - Copy to APCB FORMAT0 from APCB FORMAT0
* @vcpu: pointer to the virtual CPU
* @apcb_s: pointer to start of apcb in the shadow crycb
* @crycb_gpa: guest physical address to start of original guest crycb
* @apcb_h: pointer to start of apcb in the guest1
*
 * Returns 0 on success and -EFAULT on error reading the guest apcb
*/
static int setup_apcb00(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
unsigned long crycb_gpa, unsigned long *apcb_h)
{
unsigned long apcb_gpa;
apcb_gpa = crycb_gpa + offsetof(struct kvm_s390_crypto_cb, apcb0);
if (read_guest_real(vcpu, apcb_gpa, apcb_s,
sizeof(struct kvm_s390_apcb0)))
return -EFAULT;
bitmap_and(apcb_s, apcb_s, apcb_h,
BITS_PER_BYTE * sizeof(struct kvm_s390_apcb0));
return 0;
}
/**
* setup_apcb11 - Copy the FORMAT1 APCB from the guest to the shadow CRYCB
* @vcpu: pointer to the virtual CPU
* @apcb_s: pointer to start of apcb in the shadow crycb
* @crycb_gpa: guest physical address to start of original guest crycb
* @apcb_h: pointer to start of apcb in the host
*
 * Returns 0 on success and -EFAULT on error reading the guest apcb
*/
static int setup_apcb11(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
unsigned long crycb_gpa,
unsigned long *apcb_h)
{
unsigned long apcb_gpa;
apcb_gpa = crycb_gpa + offsetof(struct kvm_s390_crypto_cb, apcb1);
if (read_guest_real(vcpu, apcb_gpa, apcb_s,
sizeof(struct kvm_s390_apcb1)))
return -EFAULT;
bitmap_and(apcb_s, apcb_s, apcb_h,
BITS_PER_BYTE * sizeof(struct kvm_s390_apcb1));
return 0;
}
/**
* setup_apcb - Create a shadow copy of the apcb.
* @vcpu: pointer to the virtual CPU
* @crycb_s: pointer to shadow crycb
* @crycb_gpa: guest physical address of original guest crycb
* @crycb_h: pointer to the host crycb
* @fmt_o: format of the original guest crycb.
* @fmt_h: format of the host crycb.
*
* Checks the compatibility between the guest and host crycb and calls the
* appropriate copy function.
*
 * Returns 0 on success, or an error number if the guest and host crycb
 * are incompatible.
*/
static int setup_apcb(struct kvm_vcpu *vcpu, struct kvm_s390_crypto_cb *crycb_s,
const u32 crycb_gpa,
struct kvm_s390_crypto_cb *crycb_h,
int fmt_o, int fmt_h)
{
switch (fmt_o) {
case CRYCB_FORMAT2:
if ((crycb_gpa & PAGE_MASK) != ((crycb_gpa + 256) & PAGE_MASK))
return -EACCES;
if (fmt_h != CRYCB_FORMAT2)
return -EINVAL;
return setup_apcb11(vcpu, (unsigned long *)&crycb_s->apcb1,
crycb_gpa,
(unsigned long *)&crycb_h->apcb1);
case CRYCB_FORMAT1:
switch (fmt_h) {
case CRYCB_FORMAT2:
return setup_apcb10(vcpu, &crycb_s->apcb1,
crycb_gpa,
&crycb_h->apcb1);
case CRYCB_FORMAT1:
return setup_apcb00(vcpu,
(unsigned long *) &crycb_s->apcb0,
crycb_gpa,
(unsigned long *) &crycb_h->apcb0);
}
break;
case CRYCB_FORMAT0:
if ((crycb_gpa & PAGE_MASK) != ((crycb_gpa + 32) & PAGE_MASK))
return -EACCES;
switch (fmt_h) {
case CRYCB_FORMAT2:
return setup_apcb10(vcpu, &crycb_s->apcb1,
crycb_gpa,
&crycb_h->apcb1);
case CRYCB_FORMAT1:
case CRYCB_FORMAT0:
return setup_apcb00(vcpu,
(unsigned long *) &crycb_s->apcb0,
crycb_gpa,
(unsigned long *) &crycb_h->apcb0);
}
}
return -EINVAL;
}
/**
* shadow_crycb - Create a shadow copy of the crycb block
* @vcpu: a pointer to the virtual CPU
 * @vsie_page: a pointer to internal data used for the vSIE
*
* Create a shadow copy of the crycb block and setup key wrapping, if
* requested for guest 3 and enabled for guest 2.
*
* We accept format-1 or format-2, but we convert format-1 into format-2
* in the shadow CRYCB.
* Using format-2 enables the firmware to choose the right format when
* scheduling the SIE.
* There is nothing to do for format-0.
*
 * This function centralizes the issuing of set_validity_icpt() for all
* the subfunctions working on the crycb.
*
* Returns: - 0 if shadowed or nothing to do
* - > 0 if control has to be given to guest 2
*/
static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
const uint32_t crycbd_o = READ_ONCE(scb_o->crycbd);
const u32 crycb_addr = crycbd_o & 0x7ffffff8U;
unsigned long *b1, *b2;
u8 ecb3_flags;
u32 ecd_flags;
int apie_h;
int apie_s;
int key_msk = test_kvm_facility(vcpu->kvm, 76);
int fmt_o = crycbd_o & CRYCB_FORMAT_MASK;
int fmt_h = vcpu->arch.sie_block->crycbd & CRYCB_FORMAT_MASK;
int ret = 0;
scb_s->crycbd = 0;
apie_h = vcpu->arch.sie_block->eca & ECA_APIE;
apie_s = apie_h & scb_o->eca;
if (!apie_s && (!key_msk || (fmt_o == CRYCB_FORMAT0)))
return 0;
if (!crycb_addr)
return set_validity_icpt(scb_s, 0x0039U);
if (fmt_o == CRYCB_FORMAT1)
if ((crycb_addr & PAGE_MASK) !=
((crycb_addr + 128) & PAGE_MASK))
return set_validity_icpt(scb_s, 0x003CU);
if (apie_s) {
ret = setup_apcb(vcpu, &vsie_page->crycb, crycb_addr,
vcpu->kvm->arch.crypto.crycb,
fmt_o, fmt_h);
if (ret)
goto end;
scb_s->eca |= scb_o->eca & ECA_APIE;
}
/* we may only allow it if enabled for guest 2 */
ecb3_flags = scb_o->ecb3 & vcpu->arch.sie_block->ecb3 &
(ECB3_AES | ECB3_DEA);
ecd_flags = scb_o->ecd & vcpu->arch.sie_block->ecd & ECD_ECC;
if (!ecb3_flags && !ecd_flags)
goto end;
/* copy only the wrapping keys */
if (read_guest_real(vcpu, crycb_addr + 72,
vsie_page->crycb.dea_wrapping_key_mask, 56))
return set_validity_icpt(scb_s, 0x0035U);
scb_s->ecb3 |= ecb3_flags;
scb_s->ecd |= ecd_flags;
/* xor both blocks in one run */
b1 = (unsigned long *) vsie_page->crycb.dea_wrapping_key_mask;
b2 = (unsigned long *)
vcpu->kvm->arch.crypto.crycb->dea_wrapping_key_mask;
/* as 56%8 == 0, bitmap_xor won't overwrite any data */
bitmap_xor(b1, b1, b2, BITS_PER_BYTE * 56);
end:
switch (ret) {
case -EINVAL:
return set_validity_icpt(scb_s, 0x0022U);
case -EFAULT:
return set_validity_icpt(scb_s, 0x0035U);
case -EACCES:
return set_validity_icpt(scb_s, 0x003CU);
}
scb_s->crycbd = ((__u32)(__u64) &vsie_page->crycb) | CRYCB_FORMAT2;
return 0;
}
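/*
 * Editor's note: crycbd packs a 31-bit crycb origin together with the
 * format in its low bits, which is why shadow_crycb() masks with
 * 0x7ffffff8U to recover the address and ORs CRYCB_FORMAT2 back in when
 * publishing the shadow crycb. Hypothetical decode helpers (assumption,
 * mirroring the masks used above; not part of the original source):
 */
static inline u32 crycbd_to_addr(u32 crycbd)
{
	/* drop the format bits, keep the 8-byte aligned origin */
	return crycbd & 0x7ffffff8U;
}

static inline int crycbd_to_fmt(u32 crycbd)
{
	/* low bits select CRYCB_FORMAT0/1/2 */
	return crycbd & CRYCB_FORMAT_MASK;
}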
/* shadow (round up/down) the ibc to avoid validity icpt */
static void prepare_ibc(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
/* READ_ONCE does not work on bitfields - use a temporary variable */
const uint32_t __new_ibc = scb_o->ibc;
const uint32_t new_ibc = READ_ONCE(__new_ibc) & 0x0fffU;
__u64 min_ibc = (sclp.ibc >> 16) & 0x0fffU;
scb_s->ibc = 0;
/* ibc installed in g2 and requested for g3 */
if (vcpu->kvm->arch.model.ibc && new_ibc) {
scb_s->ibc = new_ibc;
		/* take care of the minimum ibc level of the machine */
if (scb_s->ibc < min_ibc)
scb_s->ibc = min_ibc;
/* take care of the maximum ibc level set for the guest */
if (scb_s->ibc > vcpu->kvm->arch.model.ibc)
scb_s->ibc = vcpu->kvm->arch.model.ibc;
}
}
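/*
 * Editor's note: the two if statements above are an open-coded clamp of
 * the guest 3 ibc into [machine minimum, guest 2 maximum]. An equivalent
 * one-liner using the kernel's clamp_t() from <linux/minmax.h> would be
 * (sketch only, assuming the same types as above):
 *
 *	scb_s->ibc = clamp_t(__u64, new_ibc, min_ibc,
 *			     vcpu->kvm->arch.model.ibc);
 */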
/* unshadow the scb, copying parameters back to the real scb */
static void unshadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
/* interception */
scb_o->icptcode = scb_s->icptcode;
scb_o->icptstatus = scb_s->icptstatus;
scb_o->ipa = scb_s->ipa;
scb_o->ipb = scb_s->ipb;
scb_o->gbea = scb_s->gbea;
/* timer */
scb_o->cputm = scb_s->cputm;
scb_o->ckc = scb_s->ckc;
scb_o->todpr = scb_s->todpr;
/* guest state */
scb_o->gpsw = scb_s->gpsw;
scb_o->gg14 = scb_s->gg14;
scb_o->gg15 = scb_s->gg15;
memcpy(scb_o->gcr, scb_s->gcr, 128);
scb_o->pp = scb_s->pp;
/* branch prediction */
if (test_kvm_facility(vcpu->kvm, 82)) {
scb_o->fpf &= ~FPF_BPBC;
scb_o->fpf |= scb_s->fpf & FPF_BPBC;
}
/* interrupt intercept */
switch (scb_s->icptcode) {
case ICPT_PROGI:
case ICPT_INSTPROGI:
case ICPT_EXTINT:
memcpy((void *)((u64)scb_o + 0xc0),
(void *)((u64)scb_s + 0xc0), 0xf0 - 0xc0);
break;
}
if (scb_s->ihcpu != 0xffffU)
scb_o->ihcpu = scb_s->ihcpu;
}
/*
* Setup the shadow scb by copying and checking the relevant parts of the g2
* provided scb.
*
* Returns: - 0 if the scb has been shadowed
* - > 0 if control has to be given to guest 2
*/
static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
/* READ_ONCE does not work on bitfields - use a temporary variable */
const uint32_t __new_prefix = scb_o->prefix;
const uint32_t new_prefix = READ_ONCE(__new_prefix);
const bool wants_tx = READ_ONCE(scb_o->ecb) & ECB_TE;
bool had_tx = scb_s->ecb & ECB_TE;
unsigned long new_mso = 0;
int rc;
/* make sure we don't have any leftovers when reusing the scb */
scb_s->icptcode = 0;
scb_s->eca = 0;
scb_s->ecb = 0;
scb_s->ecb2 = 0;
scb_s->ecb3 = 0;
scb_s->ecd = 0;
scb_s->fac = 0;
scb_s->fpf = 0;
rc = prepare_cpuflags(vcpu, vsie_page);
if (rc)
goto out;
/* timer */
scb_s->cputm = scb_o->cputm;
scb_s->ckc = scb_o->ckc;
scb_s->todpr = scb_o->todpr;
scb_s->epoch = scb_o->epoch;
/* guest state */
scb_s->gpsw = scb_o->gpsw;
scb_s->gg14 = scb_o->gg14;
scb_s->gg15 = scb_o->gg15;
memcpy(scb_s->gcr, scb_o->gcr, 128);
scb_s->pp = scb_o->pp;
/* interception / execution handling */
scb_s->gbea = scb_o->gbea;
scb_s->lctl = scb_o->lctl;
scb_s->svcc = scb_o->svcc;
scb_s->ictl = scb_o->ictl;
/*
* SKEY handling functions can't deal with false setting of PTE invalid
* bits. Therefore we cannot provide interpretation and would later
* have to provide own emulation handlers.
*/
if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_KSS))
scb_s->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
scb_s->icpua = scb_o->icpua;
if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_SM))
new_mso = READ_ONCE(scb_o->mso) & 0xfffffffffff00000UL;
/* if the hva of the prefix changes, we have to remap the prefix */
if (scb_s->mso != new_mso || scb_s->prefix != new_prefix)
prefix_unmapped(vsie_page);
/* SIE will do mso/msl validity and exception checks for us */
scb_s->msl = scb_o->msl & 0xfffffffffff00000UL;
scb_s->mso = new_mso;
scb_s->prefix = new_prefix;
	/* We definitely have to flush the tlb if this scb never ran */
if (scb_s->ihcpu != 0xffffU)
scb_s->ihcpu = scb_o->ihcpu;
/* MVPG and Protection Exception Interpretation are always available */
scb_s->eca |= scb_o->eca & (ECA_MVPGI | ECA_PROTEXCI);
/* Host-protection-interruption introduced with ESOP */
if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_ESOP))
scb_s->ecb |= scb_o->ecb & ECB_HOSTPROTINT;
/*
* CPU Topology
* This facility only uses the utility field of the SCA and none of
* the cpu entries that are problematic with the other interpretation
	 * facilities, so we can pass it through.
*/
if (test_kvm_facility(vcpu->kvm, 11))
scb_s->ecb |= scb_o->ecb & ECB_PTF;
/* transactional execution */
if (test_kvm_facility(vcpu->kvm, 73) && wants_tx) {
		/* remap the prefix if tx is toggled on */
if (!had_tx)
prefix_unmapped(vsie_page);
scb_s->ecb |= ECB_TE;
}
/* specification exception interpretation */
scb_s->ecb |= scb_o->ecb & ECB_SPECI;
/* branch prediction */
if (test_kvm_facility(vcpu->kvm, 82))
scb_s->fpf |= scb_o->fpf & FPF_BPBC;
/* SIMD */
if (test_kvm_facility(vcpu->kvm, 129)) {
scb_s->eca |= scb_o->eca & ECA_VX;
scb_s->ecd |= scb_o->ecd & ECD_HOSTREGMGMT;
}
/* Run-time-Instrumentation */
if (test_kvm_facility(vcpu->kvm, 64))
scb_s->ecb3 |= scb_o->ecb3 & ECB3_RI;
/* Instruction Execution Prevention */
if (test_kvm_facility(vcpu->kvm, 130))
scb_s->ecb2 |= scb_o->ecb2 & ECB2_IEP;
/* Guarded Storage */
if (test_kvm_facility(vcpu->kvm, 133)) {
scb_s->ecb |= scb_o->ecb & ECB_GS;
scb_s->ecd |= scb_o->ecd & ECD_HOSTREGMGMT;
}
if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_SIIF))
scb_s->eca |= scb_o->eca & ECA_SII;
if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IB))
scb_s->eca |= scb_o->eca & ECA_IB;
if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_CEI))
scb_s->eca |= scb_o->eca & ECA_CEI;
/* Epoch Extension */
if (test_kvm_facility(vcpu->kvm, 139)) {
scb_s->ecd |= scb_o->ecd & ECD_MEF;
scb_s->epdx = scb_o->epdx;
}
/* etoken */
if (test_kvm_facility(vcpu->kvm, 156))
scb_s->ecd |= scb_o->ecd & ECD_ETOKENF;
scb_s->hpid = HPID_VSIE;
scb_s->cpnc = scb_o->cpnc;
prepare_ibc(vcpu, vsie_page);
rc = shadow_crycb(vcpu, vsie_page);
out:
if (rc)
unshadow_scb(vcpu, vsie_page);
return rc;
}
void kvm_s390_vsie_gmap_notifier(struct gmap *gmap, unsigned long start,
unsigned long end)
{
struct kvm *kvm = gmap->private;
struct vsie_page *cur;
unsigned long prefix;
struct page *page;
int i;
if (!gmap_is_shadow(gmap))
return;
if (start >= 1UL << 31)
/* We are only interested in prefix pages */
return;
/*
* Only new shadow blocks are added to the list during runtime,
* therefore we can safely reference them all the time.
*/
for (i = 0; i < kvm->arch.vsie.page_count; i++) {
page = READ_ONCE(kvm->arch.vsie.pages[i]);
if (!page)
continue;
cur = page_to_virt(page);
if (READ_ONCE(cur->gmap) != gmap)
continue;
prefix = cur->scb_s.prefix << GUEST_PREFIX_SHIFT;
/* with mso/msl, the prefix lies at an offset */
prefix += cur->scb_s.mso;
if (prefix <= end && start <= prefix + 2 * PAGE_SIZE - 1)
prefix_unmapped_sync(cur);
}
}
/*
* Map the first prefix page and if tx is enabled also the second prefix page.
*
* The prefix will be protected, a gmap notifier will inform about unmaps.
* The shadow scb must not be executed until the prefix is remapped, this is
* guaranteed by properly handling PROG_REQUEST.
*
 * Returns: - 0 if successfully mapped or already mapped
* - > 0 if control has to be given to guest 2
* - -EAGAIN if the caller can retry immediately
* - -ENOMEM if out of memory
*/
static int map_prefix(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
u64 prefix = scb_s->prefix << GUEST_PREFIX_SHIFT;
int rc;
if (prefix_is_mapped(vsie_page))
return 0;
/* mark it as mapped so we can catch any concurrent unmappers */
prefix_mapped(vsie_page);
/* with mso/msl, the prefix lies at offset *mso* */
prefix += scb_s->mso;
rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, prefix, NULL);
if (!rc && (scb_s->ecb & ECB_TE))
rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
prefix + PAGE_SIZE, NULL);
/*
* We don't have to mprotect, we will be called for all unshadows.
* SIE will detect if protection applies and trigger a validity.
*/
if (rc)
prefix_unmapped(vsie_page);
if (rc > 0 || rc == -EFAULT)
rc = set_validity_icpt(scb_s, 0x0037U);
return rc;
}
/*
* Pin the guest page given by gpa and set hpa to the pinned host address.
* Will always be pinned writable.
*
* Returns: - 0 on success
* - -EINVAL if the gpa is not valid guest storage
*/
static int pin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t *hpa)
{
struct page *page;
page = gfn_to_page(kvm, gpa_to_gfn(gpa));
if (is_error_page(page))
return -EINVAL;
*hpa = (hpa_t)page_to_phys(page) + (gpa & ~PAGE_MASK);
return 0;
}
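/*
 * Editor's note: pin_guest_page() pins a whole page but preserves the
 * sub-page offset, i.e. hpa = page_to_phys(page) + offset_in_page(gpa).
 * Minimal sketch of the address split (hypothetical helper, mirroring
 * the computation above; not part of the original source):
 */
static inline hpa_t gpa_offset_to_hpa(struct page *page, gpa_t gpa)
{
	/* page-aligned host physical base plus the offset within the page */
	return (hpa_t)page_to_phys(page) + (gpa & ~PAGE_MASK);
}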
/* Unpins a page previously pinned via pin_guest_page, marking it as dirty. */
static void unpin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t hpa)
{
kvm_release_pfn_dirty(hpa >> PAGE_SHIFT);
/* mark the page always as dirty for migration */
mark_page_dirty(kvm, gpa_to_gfn(gpa));
}
/* unpin all blocks previously pinned by pin_blocks(), marking them dirty */
static void unpin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
hpa_t hpa;
hpa = (u64) scb_s->scaoh << 32 | scb_s->scaol;
if (hpa) {
unpin_guest_page(vcpu->kvm, vsie_page->sca_gpa, hpa);
vsie_page->sca_gpa = 0;
scb_s->scaol = 0;
scb_s->scaoh = 0;
}
hpa = scb_s->itdba;
if (hpa) {
unpin_guest_page(vcpu->kvm, vsie_page->itdba_gpa, hpa);
vsie_page->itdba_gpa = 0;
scb_s->itdba = 0;
}
hpa = scb_s->gvrd;
if (hpa) {
unpin_guest_page(vcpu->kvm, vsie_page->gvrd_gpa, hpa);
vsie_page->gvrd_gpa = 0;
scb_s->gvrd = 0;
}
hpa = scb_s->riccbd;
if (hpa) {
unpin_guest_page(vcpu->kvm, vsie_page->riccbd_gpa, hpa);
vsie_page->riccbd_gpa = 0;
scb_s->riccbd = 0;
}
hpa = scb_s->sdnxo;
if (hpa) {
unpin_guest_page(vcpu->kvm, vsie_page->sdnx_gpa, hpa);
vsie_page->sdnx_gpa = 0;
scb_s->sdnxo = 0;
}
}
/*
* Instead of shadowing some blocks, we can simply forward them because the
* addresses in the scb are 64 bit long.
*
* This works as long as the data lies in one page. If blocks ever exceed one
* page, we have to fall back to shadowing.
*
* As we reuse the sca, the vcpu pointers contained in it are invalid. We must
* therefore not enable any facilities that access these pointers (e.g. SIGPIF).
*
* Returns: - 0 if all blocks were pinned.
* - > 0 if control has to be given to guest 2
* - -ENOMEM if out of memory
*/
static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
hpa_t hpa;
gpa_t gpa;
int rc = 0;
gpa = READ_ONCE(scb_o->scaol) & ~0xfUL;
if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_64BSCAO))
gpa |= (u64) READ_ONCE(scb_o->scaoh) << 32;
if (gpa) {
if (gpa < 2 * PAGE_SIZE)
rc = set_validity_icpt(scb_s, 0x0038U);
else if ((gpa & ~0x1fffUL) == kvm_s390_get_prefix(vcpu))
rc = set_validity_icpt(scb_s, 0x0011U);
else if ((gpa & PAGE_MASK) !=
((gpa + sizeof(struct bsca_block) - 1) & PAGE_MASK))
rc = set_validity_icpt(scb_s, 0x003bU);
if (!rc) {
rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
if (rc)
rc = set_validity_icpt(scb_s, 0x0034U);
}
if (rc)
goto unpin;
vsie_page->sca_gpa = gpa;
scb_s->scaoh = (u32)((u64)hpa >> 32);
scb_s->scaol = (u32)(u64)hpa;
}
gpa = READ_ONCE(scb_o->itdba) & ~0xffUL;
if (gpa && (scb_s->ecb & ECB_TE)) {
if (gpa < 2 * PAGE_SIZE) {
rc = set_validity_icpt(scb_s, 0x0080U);
goto unpin;
}
/* 256 bytes cannot cross page boundaries */
rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
if (rc) {
rc = set_validity_icpt(scb_s, 0x0080U);
goto unpin;
}
vsie_page->itdba_gpa = gpa;
scb_s->itdba = hpa;
}
gpa = READ_ONCE(scb_o->gvrd) & ~0x1ffUL;
if (gpa && (scb_s->eca & ECA_VX) && !(scb_s->ecd & ECD_HOSTREGMGMT)) {
if (gpa < 2 * PAGE_SIZE) {
rc = set_validity_icpt(scb_s, 0x1310U);
goto unpin;
}
/*
		 * 512 bytes of vector registers cannot cross page boundaries;
		 * if this block gets bigger, we have to shadow it.
*/
rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
if (rc) {
rc = set_validity_icpt(scb_s, 0x1310U);
goto unpin;
}
vsie_page->gvrd_gpa = gpa;
scb_s->gvrd = hpa;
}
gpa = READ_ONCE(scb_o->riccbd) & ~0x3fUL;
if (gpa && (scb_s->ecb3 & ECB3_RI)) {
if (gpa < 2 * PAGE_SIZE) {
rc = set_validity_icpt(scb_s, 0x0043U);
goto unpin;
}
/* 64 bytes cannot cross page boundaries */
rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
if (rc) {
rc = set_validity_icpt(scb_s, 0x0043U);
goto unpin;
}
/* Validity 0x0044 will be checked by SIE */
vsie_page->riccbd_gpa = gpa;
scb_s->riccbd = hpa;
}
if (((scb_s->ecb & ECB_GS) && !(scb_s->ecd & ECD_HOSTREGMGMT)) ||
(scb_s->ecd & ECD_ETOKENF)) {
unsigned long sdnxc;
gpa = READ_ONCE(scb_o->sdnxo) & ~0xfUL;
sdnxc = READ_ONCE(scb_o->sdnxo) & 0xfUL;
if (!gpa || gpa < 2 * PAGE_SIZE) {
rc = set_validity_icpt(scb_s, 0x10b0U);
goto unpin;
}
if (sdnxc < 6 || sdnxc > 12) {
rc = set_validity_icpt(scb_s, 0x10b1U);
goto unpin;
}
if (gpa & ((1 << sdnxc) - 1)) {
rc = set_validity_icpt(scb_s, 0x10b2U);
goto unpin;
}
/* Due to alignment rules (checked above) this cannot
* cross page boundaries
*/
rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
if (rc) {
rc = set_validity_icpt(scb_s, 0x10b0U);
goto unpin;
}
vsie_page->sdnx_gpa = gpa;
scb_s->sdnxo = hpa | sdnxc;
}
return 0;
unpin:
unpin_blocks(vcpu, vsie_page);
return rc;
}
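/*
 * Editor's note: sdnxc (the low 4 bits of sdnxo) encodes the SDNX size
 * and alignment as a power of two, so "gpa & ((1 << sdnxc) - 1)" is a
 * natural-alignment check. E.g. sdnxc = 9 describes a 512-byte block
 * that must be 512-byte aligned; with 6 <= sdnxc <= 12 a naturally
 * aligned block can never cross a 4 KiB page, hence no extra crossing
 * check is needed (a reading of the checks above, not a statement from
 * the architecture).
 */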
/* unpin the scb provided by guest 2, marking it as dirty */
static void unpin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page,
gpa_t gpa)
{
hpa_t hpa = (hpa_t) vsie_page->scb_o;
if (hpa)
unpin_guest_page(vcpu->kvm, gpa, hpa);
vsie_page->scb_o = NULL;
}
/*
* Pin the scb at gpa provided by guest 2 at vsie_page->scb_o.
*
* Returns: - 0 if the scb was pinned.
* - > 0 if control has to be given to guest 2
*/
static int pin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page,
gpa_t gpa)
{
hpa_t hpa;
int rc;
rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
if (rc) {
rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
WARN_ON_ONCE(rc);
return 1;
}
vsie_page->scb_o = phys_to_virt(hpa);
return 0;
}
/*
* Inject a fault into guest 2.
*
* Returns: - > 0 if control has to be given to guest 2
* < 0 if an error occurred during injection.
*/
static int inject_fault(struct kvm_vcpu *vcpu, __u16 code, __u64 vaddr,
bool write_flag)
{
struct kvm_s390_pgm_info pgm = {
.code = code,
.trans_exc_code =
/* 0-51: virtual address */
(vaddr & 0xfffffffffffff000UL) |
/* 52-53: store / fetch */
(((unsigned int) !write_flag) + 1) << 10,
/* 62-63: asce id (always primary == 0) */
.exc_access_id = 0, /* always primary */
.op_access_id = 0, /* not MVPG */
};
int rc;
if (code == PGM_PROTECTION)
pgm.trans_exc_code |= 0x4UL;
rc = kvm_s390_inject_prog_irq(vcpu, &pgm);
return rc ? rc : 1;
}
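/*
 * Editor's worked example (illustrative values only): for a store fault
 * at virtual address 0x12345678 with code == PGM_PROTECTION, the code
 * above builds
 *
 *	trans_exc_code = (0x12345678 & 0xfffffffffffff000UL)	-> 0x12345000
 *			 | (((unsigned int)!true + 1) << 10)	-> | 0x400 (store)
 *			 | 0x4UL				-> | 0x4 (protection)
 *			 = 0x12345404,
 *
 * i.e. bits 0-51 carry the page-aligned address, bits 52-53 distinguish
 * store (0x400) from fetch (0x800), and 0x4 flags a protection exception.
 */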
/*
* Handle a fault during vsie execution on a gmap shadow.
*
* Returns: - 0 if the fault was resolved
* - > 0 if control has to be given to guest 2
* - < 0 if an error occurred
*/
static int handle_fault(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
int rc;
if (current->thread.gmap_int_code == PGM_PROTECTION)
/* we can directly forward all protection exceptions */
return inject_fault(vcpu, PGM_PROTECTION,
current->thread.gmap_addr, 1);
rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
current->thread.gmap_addr, NULL);
if (rc > 0) {
rc = inject_fault(vcpu, rc,
current->thread.gmap_addr,
current->thread.gmap_write_flag);
if (rc >= 0)
vsie_page->fault_addr = current->thread.gmap_addr;
}
return rc;
}
/*
* Retry the previous fault that required guest 2 intervention. This avoids
* one superfluous SIE re-entry and direct exit.
*
* Will ignore any errors. The next SIE fault will do proper fault handling.
*/
static void handle_last_fault(struct kvm_vcpu *vcpu,
struct vsie_page *vsie_page)
{
if (vsie_page->fault_addr)
kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
vsie_page->fault_addr, NULL);
vsie_page->fault_addr = 0;
}
static inline void clear_vsie_icpt(struct vsie_page *vsie_page)
{
vsie_page->scb_s.icptcode = 0;
}
/* rewind the psw and clear the vsie icpt, so we can retry execution */
static void retry_vsie_icpt(struct vsie_page *vsie_page)
{
struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
int ilen = insn_length(scb_s->ipa >> 8);
/* take care of EXECUTE instructions */
if (scb_s->icptstatus & 1) {
ilen = (scb_s->icptstatus >> 4) & 0x6;
if (!ilen)
ilen = 4;
}
scb_s->gpsw.addr = __rewind_psw(scb_s->gpsw, ilen);
clear_vsie_icpt(vsie_page);
}
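/*
 * Editor's note (a reading of the encoding used above): for an
 * intercepted EXECUTE, icptstatus carries the length of the EXECUTE
 * instruction itself, so "(icptstatus >> 4) & 0x6" yields 0, 2, 4 or 6
 * bytes, with 0 mapped to the 4-byte EXECUTE length. E.g.
 * icptstatus = 0x61 gives ilen = (0x61 >> 4) & 0x6 = 6, and the PSW is
 * rewound by 6 bytes.
 */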
/*
* Try to shadow + enable the guest 2 provided facility list.
* Retry instruction execution if enabled for and provided by guest 2.
*
* Returns: - 0 if handled (retry or guest 2 icpt)
* - > 0 if control has to be given to guest 2
*/
static int handle_stfle(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
__u32 fac = READ_ONCE(vsie_page->scb_o->fac) & 0x7ffffff8U;
if (fac && test_kvm_facility(vcpu->kvm, 7)) {
retry_vsie_icpt(vsie_page);
if (read_guest_real(vcpu, fac, &vsie_page->fac,
sizeof(vsie_page->fac)))
return set_validity_icpt(scb_s, 0x1090U);
scb_s->fac = (__u32)(__u64) &vsie_page->fac;
}
return 0;
}
/*
* Get a register for a nested guest.
 * @vcpu: the vcpu of the guest
 * @vsie_page: the vsie_page for the nested guest
 * @reg: the register number, the upper 4 bits are ignored.
 *
 * Returns: the value of the register.
*/
static u64 vsie_get_register(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page, u8 reg)
{
/* no need to validate the parameter and/or perform error handling */
reg &= 0xf;
switch (reg) {
case 15:
return vsie_page->scb_s.gg15;
case 14:
return vsie_page->scb_s.gg14;
default:
return vcpu->run->s.regs.gprs[reg];
}
}
static int vsie_handle_mvpg(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
unsigned long pei_dest, pei_src, src, dest, mask, prefix;
u64 *pei_block = &vsie_page->scb_o->mcic;
int edat, rc_dest, rc_src;
union ctlreg0 cr0;
cr0.val = vcpu->arch.sie_block->gcr[0];
edat = cr0.edat && test_kvm_facility(vcpu->kvm, 8);
mask = _kvm_s390_logical_to_effective(&scb_s->gpsw, PAGE_MASK);
prefix = scb_s->prefix << GUEST_PREFIX_SHIFT;
dest = vsie_get_register(vcpu, vsie_page, scb_s->ipb >> 20) & mask;
dest = _kvm_s390_real_to_abs(prefix, dest) + scb_s->mso;
src = vsie_get_register(vcpu, vsie_page, scb_s->ipb >> 16) & mask;
src = _kvm_s390_real_to_abs(prefix, src) + scb_s->mso;
rc_dest = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, dest, &pei_dest);
rc_src = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, src, &pei_src);
/*
* Either everything went well, or something non-critical went wrong
* e.g. because of a race. In either case, simply retry.
*/
if (rc_dest == -EAGAIN || rc_src == -EAGAIN || (!rc_dest && !rc_src)) {
retry_vsie_icpt(vsie_page);
return -EAGAIN;
}
/* Something more serious went wrong, propagate the error */
if (rc_dest < 0)
return rc_dest;
if (rc_src < 0)
return rc_src;
/* The only possible suppressing exception: just deliver it */
if (rc_dest == PGM_TRANSLATION_SPEC || rc_src == PGM_TRANSLATION_SPEC) {
clear_vsie_icpt(vsie_page);
rc_dest = kvm_s390_inject_program_int(vcpu, PGM_TRANSLATION_SPEC);
WARN_ON_ONCE(rc_dest);
return 1;
}
/*
* Forward the PEI intercept to the guest if it was a page fault, or
* also for segment and region table faults if EDAT applies.
*/
if (edat) {
rc_dest = rc_dest == PGM_ASCE_TYPE ? rc_dest : 0;
rc_src = rc_src == PGM_ASCE_TYPE ? rc_src : 0;
} else {
rc_dest = rc_dest != PGM_PAGE_TRANSLATION ? rc_dest : 0;
rc_src = rc_src != PGM_PAGE_TRANSLATION ? rc_src : 0;
}
if (!rc_dest && !rc_src) {
pei_block[0] = pei_dest;
pei_block[1] = pei_src;
return 1;
}
retry_vsie_icpt(vsie_page);
/*
* The host has edat, and the guest does not, or it was an ASCE type
* exception. The host needs to inject the appropriate DAT interrupts
* into the guest.
*/
if (rc_dest)
return inject_fault(vcpu, rc_dest, dest, 1);
return inject_fault(vcpu, rc_src, src, 0);
}
/*
* Run the vsie on a shadow scb and a shadow gmap, without any further
* sanity checks, handling SIE faults.
*
* Returns: - 0 everything went fine
* - > 0 if control has to be given to guest 2
* - < 0 if an error occurred
*/
static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
__releases(vcpu->kvm->srcu)
__acquires(vcpu->kvm->srcu)
{
struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
int guest_bp_isolation;
int rc = 0;
handle_last_fault(vcpu, vsie_page);
kvm_vcpu_srcu_read_unlock(vcpu);
/* save current guest state of bp isolation override */
guest_bp_isolation = test_thread_flag(TIF_ISOLATE_BP_GUEST);
/*
* The guest is running with BPBC, so we have to force it on for our
* nested guest. This is done by enabling BPBC globally, so the BPBC
* control in the SCB (which the nested guest can modify) is simply
* ignored.
*/
if (test_kvm_facility(vcpu->kvm, 82) &&
vcpu->arch.sie_block->fpf & FPF_BPBC)
set_thread_flag(TIF_ISOLATE_BP_GUEST);
local_irq_disable();
guest_enter_irqoff();
local_irq_enable();
/*
* Simulate a SIE entry of the VCPU (see sie64a), so VCPU blocking
* and VCPU requests also hinder the vSIE from running and lead
* to an immediate exit. kvm_s390_vsie_kick() has to be used to
* also kick the vSIE.
*/
vcpu->arch.sie_block->prog0c |= PROG_IN_SIE;
barrier();
if (test_cpu_flag(CIF_FPU))
load_fpu_regs();
if (!kvm_s390_vcpu_sie_inhibited(vcpu))
rc = sie64a(scb_s, vcpu->run->s.regs.gprs);
barrier();
vcpu->arch.sie_block->prog0c &= ~PROG_IN_SIE;
local_irq_disable();
guest_exit_irqoff();
local_irq_enable();
/* restore guest state for bp isolation override */
if (!guest_bp_isolation)
clear_thread_flag(TIF_ISOLATE_BP_GUEST);
kvm_vcpu_srcu_read_lock(vcpu);
if (rc == -EINTR) {
VCPU_EVENT(vcpu, 3, "%s", "machine check");
kvm_s390_reinject_machine_check(vcpu, &vsie_page->mcck_info);
return 0;
}
if (rc > 0)
rc = 0; /* we could still have an icpt */
else if (rc == -EFAULT)
return handle_fault(vcpu, vsie_page);
switch (scb_s->icptcode) {
case ICPT_INST:
if (scb_s->ipa == 0xb2b0)
rc = handle_stfle(vcpu, vsie_page);
break;
case ICPT_STOP:
/* stop not requested by g2 - must have been a kick */
if (!(atomic_read(&scb_o->cpuflags) & CPUSTAT_STOP_INT))
clear_vsie_icpt(vsie_page);
break;
case ICPT_VALIDITY:
if ((scb_s->ipa & 0xf000) != 0xf000)
scb_s->ipa += 0x1000;
break;
case ICPT_PARTEXEC:
if (scb_s->ipa == 0xb254)
rc = vsie_handle_mvpg(vcpu, vsie_page);
break;
}
return rc;
}
static void release_gmap_shadow(struct vsie_page *vsie_page)
{
if (vsie_page->gmap)
gmap_put(vsie_page->gmap);
WRITE_ONCE(vsie_page->gmap, NULL);
prefix_unmapped(vsie_page);
}
static int acquire_gmap_shadow(struct kvm_vcpu *vcpu,
struct vsie_page *vsie_page)
{
unsigned long asce;
union ctlreg0 cr0;
struct gmap *gmap;
int edat;
asce = vcpu->arch.sie_block->gcr[1];
cr0.val = vcpu->arch.sie_block->gcr[0];
edat = cr0.edat && test_kvm_facility(vcpu->kvm, 8);
edat += edat && test_kvm_facility(vcpu->kvm, 78);
/*
* ASCE or EDAT could have changed since last icpt, or the gmap
* we're holding has been unshadowed. If the gmap is still valid,
* we can safely reuse it.
*/
if (vsie_page->gmap && gmap_shadow_valid(vsie_page->gmap, asce, edat))
return 0;
/* release the old shadow - if any, and mark the prefix as unmapped */
release_gmap_shadow(vsie_page);
gmap = gmap_shadow(vcpu->arch.gmap, asce, edat);
if (IS_ERR(gmap))
return PTR_ERR(gmap);
gmap->private = vcpu->kvm;
WRITE_ONCE(vsie_page->gmap, gmap);
return 0;
}
/*
* Register the shadow scb at the VCPU, e.g. for kicking out of vsie.
*/
static void register_shadow_scb(struct kvm_vcpu *vcpu,
struct vsie_page *vsie_page)
{
struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
WRITE_ONCE(vcpu->arch.vsie_block, &vsie_page->scb_s);
/*
	 * External calls have to lead to a kick of the vcpu and
	 * therefore of the vsie -> simulate the Wait state.
*/
kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
/*
* We have to adjust the g3 epoch by the g2 epoch. The epoch will
* automatically be adjusted on tod clock changes via kvm_sync_clock.
*/
preempt_disable();
scb_s->epoch += vcpu->kvm->arch.epoch;
if (scb_s->ecd & ECD_MEF) {
scb_s->epdx += vcpu->kvm->arch.epdx;
if (scb_s->epoch < vcpu->kvm->arch.epoch)
scb_s->epdx += 1;
}
preempt_enable();
}
/*
* Unregister a shadow scb from a VCPU.
*/
static void unregister_shadow_scb(struct kvm_vcpu *vcpu)
{
kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT);
WRITE_ONCE(vcpu->arch.vsie_block, NULL);
}
/*
* Run the vsie on a shadowed scb, managing the gmap shadow, handling
* prefix pages and faults.
*
* Returns: - 0 if no errors occurred
* - > 0 if control has to be given to guest 2
* - -ENOMEM if out of memory
*/
static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
int rc = 0;
while (1) {
rc = acquire_gmap_shadow(vcpu, vsie_page);
if (!rc)
rc = map_prefix(vcpu, vsie_page);
if (!rc) {
gmap_enable(vsie_page->gmap);
update_intervention_requests(vsie_page);
rc = do_vsie_run(vcpu, vsie_page);
gmap_enable(vcpu->arch.gmap);
}
atomic_andnot(PROG_BLOCK_SIE, &scb_s->prog20);
if (rc == -EAGAIN)
rc = 0;
if (rc || scb_s->icptcode || signal_pending(current) ||
kvm_s390_vcpu_has_irq(vcpu, 0) ||
kvm_s390_vcpu_sie_inhibited(vcpu))
break;
cond_resched();
}
if (rc == -EFAULT) {
/*
		 * Addressing exceptions are always presented as intercepts.
* As addressing exceptions are suppressing and our guest 3 PSW
* points at the responsible instruction, we have to
* forward the PSW and set the ilc. If we can't read guest 3
* instruction, we can use an arbitrary ilc. Let's always use
* ilen = 4 for now, so we can avoid reading in guest 3 virtual
* memory. (we could also fake the shadow so the hardware
* handles it).
*/
scb_s->icptcode = ICPT_PROGI;
scb_s->iprcc = PGM_ADDRESSING;
scb_s->pgmilc = 4;
		scb_s->gpsw.addr = __rewind_psw(scb_s->gpsw, -4);
rc = 1;
}
return rc;
}
/*
* Get or create a vsie page for a scb address.
*
* Returns: - address of a vsie page (cached or new one)
* - NULL if the same scb address is already used by another VCPU
* - ERR_PTR(-ENOMEM) if out of memory
*/
static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr)
{
struct vsie_page *vsie_page;
struct page *page;
int nr_vcpus;
rcu_read_lock();
page = radix_tree_lookup(&kvm->arch.vsie.addr_to_page, addr >> 9);
rcu_read_unlock();
if (page) {
if (page_ref_inc_return(page) == 2)
return page_to_virt(page);
page_ref_dec(page);
}
/*
* We want at least #online_vcpus shadows, so every VCPU can execute
* the VSIE in parallel.
*/
nr_vcpus = atomic_read(&kvm->online_vcpus);
mutex_lock(&kvm->arch.vsie.mutex);
if (kvm->arch.vsie.page_count < nr_vcpus) {
page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO | GFP_DMA);
if (!page) {
mutex_unlock(&kvm->arch.vsie.mutex);
return ERR_PTR(-ENOMEM);
}
page_ref_inc(page);
kvm->arch.vsie.pages[kvm->arch.vsie.page_count] = page;
kvm->arch.vsie.page_count++;
} else {
/* reuse an existing entry that belongs to nobody */
while (true) {
page = kvm->arch.vsie.pages[kvm->arch.vsie.next];
if (page_ref_inc_return(page) == 2)
break;
page_ref_dec(page);
kvm->arch.vsie.next++;
kvm->arch.vsie.next %= nr_vcpus;
}
radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9);
}
page->index = addr;
/* double use of the same address */
if (radix_tree_insert(&kvm->arch.vsie.addr_to_page, addr >> 9, page)) {
page_ref_dec(page);
mutex_unlock(&kvm->arch.vsie.mutex);
return NULL;
}
mutex_unlock(&kvm->arch.vsie.mutex);
vsie_page = page_to_virt(page);
memset(&vsie_page->scb_s, 0, sizeof(struct kvm_s390_sie_block));
release_gmap_shadow(vsie_page);
vsie_page->fault_addr = 0;
vsie_page->scb_s.ihcpu = 0xffffU;
return vsie_page;
}
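/*
 * Editor's note: pages in the vsie cache idle at refcount 1; a
 * page_ref_inc_return() that lands on exactly 2 therefore means "the
 * entry was free and is now exclusively ours". The radix tree is keyed
 * by addr >> 9 because scb addresses are 512-byte aligned (see the
 * alignment check in kvm_s390_handle_vsie()). Minimal sketch of the
 * try-acquire idiom (hypothetical helper, not part of the original
 * source):
 */
static inline bool vsie_page_try_get(struct page *page)
{
	/* 1 -> 2 transition: nobody else holds the entry, we own it now */
	if (page_ref_inc_return(page) == 2)
		return true;
	/* already in use by another VCPU, undo our reference */
	page_ref_dec(page);
	return false;
}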
/* put a vsie page acquired via get_vsie_page */
static void put_vsie_page(struct kvm *kvm, struct vsie_page *vsie_page)
{
struct page *page = pfn_to_page(__pa(vsie_page) >> PAGE_SHIFT);
page_ref_dec(page);
}
int kvm_s390_handle_vsie(struct kvm_vcpu *vcpu)
{
struct vsie_page *vsie_page;
unsigned long scb_addr;
int rc;
vcpu->stat.instruction_sie++;
if (!test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_SIEF2))
return -EOPNOTSUPP;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
BUILD_BUG_ON(sizeof(struct vsie_page) != PAGE_SIZE);
scb_addr = kvm_s390_get_base_disp_s(vcpu, NULL);
/* 512 byte alignment */
if (unlikely(scb_addr & 0x1ffUL))
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
if (signal_pending(current) || kvm_s390_vcpu_has_irq(vcpu, 0) ||
kvm_s390_vcpu_sie_inhibited(vcpu))
return 0;
vsie_page = get_vsie_page(vcpu->kvm, scb_addr);
if (IS_ERR(vsie_page))
return PTR_ERR(vsie_page);
else if (!vsie_page)
/* double use of sie control block - simply do nothing */
return 0;
rc = pin_scb(vcpu, vsie_page, scb_addr);
if (rc)
goto out_put;
rc = shadow_scb(vcpu, vsie_page);
if (rc)
goto out_unpin_scb;
rc = pin_blocks(vcpu, vsie_page);
if (rc)
goto out_unshadow;
register_shadow_scb(vcpu, vsie_page);
rc = vsie_run(vcpu, vsie_page);
unregister_shadow_scb(vcpu);
unpin_blocks(vcpu, vsie_page);
out_unshadow:
unshadow_scb(vcpu, vsie_page);
out_unpin_scb:
unpin_scb(vcpu, vsie_page, scb_addr);
out_put:
put_vsie_page(vcpu->kvm, vsie_page);
return rc < 0 ? rc : 0;
}
/* Init the vsie data structures. To be called when a vm is initialized. */
void kvm_s390_vsie_init(struct kvm *kvm)
{
mutex_init(&kvm->arch.vsie.mutex);
INIT_RADIX_TREE(&kvm->arch.vsie.addr_to_page, GFP_KERNEL_ACCOUNT);
}
/* Destroy the vsie data structures. To be called when a vm is destroyed. */
void kvm_s390_vsie_destroy(struct kvm *kvm)
{
struct vsie_page *vsie_page;
struct page *page;
int i;
mutex_lock(&kvm->arch.vsie.mutex);
for (i = 0; i < kvm->arch.vsie.page_count; i++) {
page = kvm->arch.vsie.pages[i];
kvm->arch.vsie.pages[i] = NULL;
vsie_page = page_to_virt(page);
release_gmap_shadow(vsie_page);
/* free the radix tree entry */
radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9);
__free_page(page);
}
kvm->arch.vsie.page_count = 0;
mutex_unlock(&kvm->arch.vsie.mutex);
}
void kvm_s390_vsie_kick(struct kvm_vcpu *vcpu)
{
struct kvm_s390_sie_block *scb = READ_ONCE(vcpu->arch.vsie_block);
/*
* Even if the VCPU lets go of the shadow sie block reference, it is
* still valid in the cache. So we can safely kick it.
*/
if (scb) {
atomic_or(PROG_BLOCK_SIE, &scb->prog20);
if (scb->prog0c & PROG_IN_SIE)
atomic_or(CPUSTAT_STOP_INT, &scb->cpuflags);
}
}
| linux-master | arch/s390/kvm/vsie.c |
// SPDX-License-Identifier: GPL-2.0
/*
* handling interprocessor communication
*
* Copyright IBM Corp. 2008, 2013
*
* Author(s): Carsten Otte <[email protected]>
* Christian Borntraeger <[email protected]>
* Christian Ehrhardt <[email protected]>
*/
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <asm/sigp.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"
static int __sigp_sense(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
u64 *reg)
{
const bool stopped = kvm_s390_test_cpuflags(dst_vcpu, CPUSTAT_STOPPED);
int rc;
int ext_call_pending;
ext_call_pending = kvm_s390_ext_call_pending(dst_vcpu);
if (!stopped && !ext_call_pending)
rc = SIGP_CC_ORDER_CODE_ACCEPTED;
else {
*reg &= 0xffffffff00000000UL;
if (ext_call_pending)
*reg |= SIGP_STATUS_EXT_CALL_PENDING;
if (stopped)
*reg |= SIGP_STATUS_STOPPED;
rc = SIGP_CC_STATUS_STORED;
}
VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", dst_vcpu->vcpu_id,
rc);
return rc;
}
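/*
 * Editor's note: whenever a SIGP order stores status (CC 1), only the
 * low 32 bits of the chosen status register are replaced; the handlers
 * in this file all repeat the same mask-and-or pattern. Minimal sketch
 * (hypothetical helper, not part of the original source):
 */
static inline void sigp_store_status_bits(u64 *reg, u32 status)
{
	*reg &= 0xffffffff00000000UL;	/* keep the high half intact */
	*reg |= status;			/* low half carries the status bits */
}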
static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
struct kvm_vcpu *dst_vcpu)
{
struct kvm_s390_irq irq = {
.type = KVM_S390_INT_EMERGENCY,
.u.emerg.code = vcpu->vcpu_id,
};
int rc = 0;
rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
if (!rc)
VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x",
dst_vcpu->vcpu_id);
return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
}
static int __sigp_emergency(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
{
return __inject_sigp_emergency(vcpu, dst_vcpu);
}
static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu,
struct kvm_vcpu *dst_vcpu,
u16 asn, u64 *reg)
{
const u64 psw_int_mask = PSW_MASK_IO | PSW_MASK_EXT;
u16 p_asn, s_asn;
psw_t *psw;
bool idle;
idle = is_vcpu_idle(vcpu);
psw = &dst_vcpu->arch.sie_block->gpsw;
p_asn = dst_vcpu->arch.sie_block->gcr[4] & 0xffff; /* Primary ASN */
s_asn = dst_vcpu->arch.sie_block->gcr[3] & 0xffff; /* Secondary ASN */
/* Inject the emergency signal? */
if (!is_vcpu_stopped(vcpu)
|| (psw->mask & psw_int_mask) != psw_int_mask
|| (idle && psw->addr != 0)
|| (!idle && (asn == p_asn || asn == s_asn))) {
return __inject_sigp_emergency(vcpu, dst_vcpu);
} else {
*reg &= 0xffffffff00000000UL;
*reg |= SIGP_STATUS_INCORRECT_STATE;
return SIGP_CC_STATUS_STORED;
}
}
static int __sigp_external_call(struct kvm_vcpu *vcpu,
struct kvm_vcpu *dst_vcpu, u64 *reg)
{
struct kvm_s390_irq irq = {
.type = KVM_S390_INT_EXTERNAL_CALL,
.u.extcall.code = vcpu->vcpu_id,
};
int rc;
rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
if (rc == -EBUSY) {
*reg &= 0xffffffff00000000UL;
*reg |= SIGP_STATUS_EXT_CALL_PENDING;
return SIGP_CC_STATUS_STORED;
} else if (rc == 0) {
VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x",
dst_vcpu->vcpu_id);
}
return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
}
static int __sigp_stop(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
{
struct kvm_s390_irq irq = {
.type = KVM_S390_SIGP_STOP,
};
int rc;
rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
if (rc == -EBUSY)
rc = SIGP_CC_BUSY;
else if (rc == 0)
VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x",
dst_vcpu->vcpu_id);
return rc;
}
static int __sigp_stop_and_store_status(struct kvm_vcpu *vcpu,
struct kvm_vcpu *dst_vcpu, u64 *reg)
{
struct kvm_s390_irq irq = {
.type = KVM_S390_SIGP_STOP,
.u.stop.flags = KVM_S390_STOP_FLAG_STORE_STATUS,
};
int rc;
rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
if (rc == -EBUSY)
rc = SIGP_CC_BUSY;
else if (rc == 0)
VCPU_EVENT(vcpu, 4, "sent sigp stop and store status to cpu %x",
dst_vcpu->vcpu_id);
return rc;
}
static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter,
u64 *status_reg)
{
*status_reg &= 0xffffffff00000000UL;
/* Reject set arch order, with czam we're always in z/Arch mode. */
*status_reg |= SIGP_STATUS_INVALID_PARAMETER;
return SIGP_CC_STATUS_STORED;
}
static int __sigp_set_prefix(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
u32 address, u64 *reg)
{
struct kvm_s390_irq irq = {
.type = KVM_S390_SIGP_SET_PREFIX,
.u.prefix.address = address & 0x7fffe000u,
};
int rc;
/*
* Make sure the new value is valid memory. We only need to check the
* first page, since address is 8k aligned and memory pieces are always
* at least 1MB aligned and have at least a size of 1MB.
*/
if (kvm_is_error_gpa(vcpu->kvm, irq.u.prefix.address)) {
*reg &= 0xffffffff00000000UL;
*reg |= SIGP_STATUS_INVALID_PARAMETER;
return SIGP_CC_STATUS_STORED;
}
rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
if (rc == -EBUSY) {
*reg &= 0xffffffff00000000UL;
*reg |= SIGP_STATUS_INCORRECT_STATE;
return SIGP_CC_STATUS_STORED;
}
return rc;
}
static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu,
struct kvm_vcpu *dst_vcpu,
u32 addr, u64 *reg)
{
int rc;
if (!kvm_s390_test_cpuflags(dst_vcpu, CPUSTAT_STOPPED)) {
*reg &= 0xffffffff00000000UL;
*reg |= SIGP_STATUS_INCORRECT_STATE;
return SIGP_CC_STATUS_STORED;
}
addr &= 0x7ffffe00;
rc = kvm_s390_store_status_unloaded(dst_vcpu, addr);
if (rc == -EFAULT) {
*reg &= 0xffffffff00000000UL;
*reg |= SIGP_STATUS_INVALID_PARAMETER;
rc = SIGP_CC_STATUS_STORED;
}
return rc;
}
static int __sigp_sense_running(struct kvm_vcpu *vcpu,
struct kvm_vcpu *dst_vcpu, u64 *reg)
{
int rc;
if (!test_kvm_facility(vcpu->kvm, 9)) {
*reg &= 0xffffffff00000000UL;
*reg |= SIGP_STATUS_INVALID_ORDER;
return SIGP_CC_STATUS_STORED;
}
if (kvm_s390_test_cpuflags(dst_vcpu, CPUSTAT_RUNNING)) {
/* running */
rc = SIGP_CC_ORDER_CODE_ACCEPTED;
} else {
/* not running */
*reg &= 0xffffffff00000000UL;
*reg |= SIGP_STATUS_NOT_RUNNING;
rc = SIGP_CC_STATUS_STORED;
}
VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x",
dst_vcpu->vcpu_id, rc);
return rc;
}
static int __prepare_sigp_re_start(struct kvm_vcpu *vcpu,
struct kvm_vcpu *dst_vcpu, u8 order_code)
{
struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int;
/* handle (RE)START in user space */
int rc = -EOPNOTSUPP;
/* make sure we don't race with STOP irq injection */
spin_lock(&li->lock);
if (kvm_s390_is_stop_irq_pending(dst_vcpu))
rc = SIGP_CC_BUSY;
spin_unlock(&li->lock);
return rc;
}
static int __prepare_sigp_cpu_reset(struct kvm_vcpu *vcpu,
struct kvm_vcpu *dst_vcpu, u8 order_code)
{
/* handle (INITIAL) CPU RESET in user space */
return -EOPNOTSUPP;
}
static int __prepare_sigp_unknown(struct kvm_vcpu *vcpu,
struct kvm_vcpu *dst_vcpu)
{
/* handle unknown orders in user space */
return -EOPNOTSUPP;
}
static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code,
u16 cpu_addr, u32 parameter, u64 *status_reg)
{
int rc;
struct kvm_vcpu *dst_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr);
if (!dst_vcpu)
return SIGP_CC_NOT_OPERATIONAL;
/*
* SIGP RESTART, SIGP STOP, and SIGP STOP AND STORE STATUS orders
* are processed asynchronously. Until the affected VCPU finishes
* its work and calls back into KVM to clear the (RESTART or STOP)
* interrupt, we need to return any new non-reset orders "busy".
*
* This is important because a single VCPU could issue:
* 1) SIGP STOP $DESTINATION
* 2) SIGP SENSE $DESTINATION
*
* If the SIGP SENSE would not be rejected as "busy", it could
* return an incorrect answer as to whether the VCPU is STOPPED
* or OPERATING.
*/
if (order_code != SIGP_INITIAL_CPU_RESET &&
order_code != SIGP_CPU_RESET) {
/*
* Lockless check. Both SIGP STOP and SIGP (RE)START
* properly synchronize everything while processing
* their orders, while the guest cannot observe a
* difference when issuing other orders from two
* different VCPUs.
*/
if (kvm_s390_is_stop_irq_pending(dst_vcpu) ||
kvm_s390_is_restart_irq_pending(dst_vcpu))
return SIGP_CC_BUSY;
}
switch (order_code) {
case SIGP_SENSE:
vcpu->stat.instruction_sigp_sense++;
rc = __sigp_sense(vcpu, dst_vcpu, status_reg);
break;
case SIGP_EXTERNAL_CALL:
vcpu->stat.instruction_sigp_external_call++;
rc = __sigp_external_call(vcpu, dst_vcpu, status_reg);
break;
case SIGP_EMERGENCY_SIGNAL:
vcpu->stat.instruction_sigp_emergency++;
rc = __sigp_emergency(vcpu, dst_vcpu);
break;
case SIGP_STOP:
vcpu->stat.instruction_sigp_stop++;
rc = __sigp_stop(vcpu, dst_vcpu);
break;
case SIGP_STOP_AND_STORE_STATUS:
vcpu->stat.instruction_sigp_stop_store_status++;
rc = __sigp_stop_and_store_status(vcpu, dst_vcpu, status_reg);
break;
case SIGP_STORE_STATUS_AT_ADDRESS:
vcpu->stat.instruction_sigp_store_status++;
rc = __sigp_store_status_at_addr(vcpu, dst_vcpu, parameter,
status_reg);
break;
case SIGP_SET_PREFIX:
vcpu->stat.instruction_sigp_prefix++;
rc = __sigp_set_prefix(vcpu, dst_vcpu, parameter, status_reg);
break;
case SIGP_COND_EMERGENCY_SIGNAL:
vcpu->stat.instruction_sigp_cond_emergency++;
rc = __sigp_conditional_emergency(vcpu, dst_vcpu, parameter,
status_reg);
break;
case SIGP_SENSE_RUNNING:
vcpu->stat.instruction_sigp_sense_running++;
rc = __sigp_sense_running(vcpu, dst_vcpu, status_reg);
break;
case SIGP_START:
vcpu->stat.instruction_sigp_start++;
rc = __prepare_sigp_re_start(vcpu, dst_vcpu, order_code);
break;
case SIGP_RESTART:
vcpu->stat.instruction_sigp_restart++;
rc = __prepare_sigp_re_start(vcpu, dst_vcpu, order_code);
break;
case SIGP_INITIAL_CPU_RESET:
vcpu->stat.instruction_sigp_init_cpu_reset++;
rc = __prepare_sigp_cpu_reset(vcpu, dst_vcpu, order_code);
break;
case SIGP_CPU_RESET:
vcpu->stat.instruction_sigp_cpu_reset++;
rc = __prepare_sigp_cpu_reset(vcpu, dst_vcpu, order_code);
break;
default:
vcpu->stat.instruction_sigp_unknown++;
rc = __prepare_sigp_unknown(vcpu, dst_vcpu);
}
if (rc == -EOPNOTSUPP)
VCPU_EVENT(vcpu, 4,
"sigp order %u -> cpu %x: handled in user space",
order_code, dst_vcpu->vcpu_id);
return rc;
}
static int handle_sigp_order_in_user_space(struct kvm_vcpu *vcpu, u8 order_code,
u16 cpu_addr)
{
if (!vcpu->kvm->arch.user_sigp)
return 0;
switch (order_code) {
case SIGP_SENSE:
case SIGP_EXTERNAL_CALL:
case SIGP_EMERGENCY_SIGNAL:
case SIGP_COND_EMERGENCY_SIGNAL:
case SIGP_SENSE_RUNNING:
return 0;
/* update counters as we're directly dropping to user space */
case SIGP_STOP:
vcpu->stat.instruction_sigp_stop++;
break;
case SIGP_STOP_AND_STORE_STATUS:
vcpu->stat.instruction_sigp_stop_store_status++;
break;
case SIGP_STORE_STATUS_AT_ADDRESS:
vcpu->stat.instruction_sigp_store_status++;
break;
case SIGP_STORE_ADDITIONAL_STATUS:
vcpu->stat.instruction_sigp_store_adtl_status++;
break;
case SIGP_SET_PREFIX:
vcpu->stat.instruction_sigp_prefix++;
break;
case SIGP_START:
vcpu->stat.instruction_sigp_start++;
break;
case SIGP_RESTART:
vcpu->stat.instruction_sigp_restart++;
break;
case SIGP_INITIAL_CPU_RESET:
vcpu->stat.instruction_sigp_init_cpu_reset++;
break;
case SIGP_CPU_RESET:
vcpu->stat.instruction_sigp_cpu_reset++;
break;
default:
vcpu->stat.instruction_sigp_unknown++;
}
VCPU_EVENT(vcpu, 3, "SIGP: order %u for CPU %d handled in userspace",
order_code, cpu_addr);
return 1;
}
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
{
int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
int r3 = vcpu->arch.sie_block->ipa & 0x000f;
u32 parameter;
u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
u8 order_code;
int rc;
/* sigp in userspace can exit */
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
order_code = kvm_s390_get_base_disp_rs(vcpu, NULL);
if (handle_sigp_order_in_user_space(vcpu, order_code, cpu_addr))
return -EOPNOTSUPP;
if (r1 % 2)
parameter = vcpu->run->s.regs.gprs[r1];
else
parameter = vcpu->run->s.regs.gprs[r1 + 1];
trace_kvm_s390_handle_sigp(vcpu, order_code, cpu_addr, parameter);
switch (order_code) {
case SIGP_SET_ARCHITECTURE:
vcpu->stat.instruction_sigp_arch++;
rc = __sigp_set_arch(vcpu, parameter,
&vcpu->run->s.regs.gprs[r1]);
break;
default:
rc = handle_sigp_dst(vcpu, order_code, cpu_addr,
parameter,
&vcpu->run->s.regs.gprs[r1]);
}
if (rc < 0)
return rc;
kvm_s390_set_psw_cc(vcpu, rc);
return 0;
}
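/*
 * Editor's note (a reading of the r1 handling above): SIGP designates an
 * even-odd register pair; the status is stored into gpr r1 while the
 * parameter lives in the odd register of the pair. Hence gprs[r1] is
 * used directly when r1 is odd and gprs[r1 + 1] when r1 is even, e.g.
 * r1 = 4 -> parameter in gpr 5, status in gpr 4.
 */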
/*
* Handle SIGP partial execution interception.
*
 * This interception occurs on the source cpu when it sends an external
 * call to a target cpu and the target cpu has the WAIT bit set in
* its cpuflags. Interception will occur after the interrupt indicator bits at
* the target cpu have been set. All error cases will lead to instruction
* interception, therefore nothing is to be checked or prepared.
*/
int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu)
{
int r3 = vcpu->arch.sie_block->ipa & 0x000f;
u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
struct kvm_vcpu *dest_vcpu;
u8 order_code = kvm_s390_get_base_disp_rs(vcpu, NULL);
if (order_code == SIGP_EXTERNAL_CALL) {
trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr);
dest_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr);
BUG_ON(dest_vcpu == NULL);
kvm_s390_vcpu_wakeup(dest_vcpu);
kvm_s390_set_psw_cc(vcpu, SIGP_CC_ORDER_CODE_ACCEPTED);
return 0;
}
return -EOPNOTSUPP;
}
| linux-master | arch/s390/kvm/sigp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Hosting Protected Virtual Machines
*
* Copyright IBM Corp. 2019, 2020
* Author(s): Janosch Frank <[email protected]>
*/
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/minmax.h>
#include <linux/pagemap.h>
#include <linux/sched/signal.h>
#include <asm/gmap.h>
#include <asm/uv.h>
#include <asm/mman.h>
#include <linux/pagewalk.h>
#include <linux/sched/mm.h>
#include <linux/mmu_notifier.h>
#include "kvm-s390.h"
bool kvm_s390_pv_is_protected(struct kvm *kvm)
{
lockdep_assert_held(&kvm->lock);
return !!kvm_s390_pv_get_handle(kvm);
}
EXPORT_SYMBOL_GPL(kvm_s390_pv_is_protected);
bool kvm_s390_pv_cpu_is_protected(struct kvm_vcpu *vcpu)
{
lockdep_assert_held(&vcpu->mutex);
return !!kvm_s390_pv_cpu_get_handle(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_s390_pv_cpu_is_protected);
/**
* struct pv_vm_to_be_destroyed - Represents a protected VM that needs to
* be destroyed
*
* @list: list head for the list of leftover VMs
* @old_gmap_table: the gmap table of the leftover protected VM
* @handle: the handle of the leftover protected VM
* @stor_var: pointer to the variable storage of the leftover protected VM
* @stor_base: address of the base storage of the leftover protected VM
*
* Represents a protected VM that is still registered with the Ultravisor,
* but which does not correspond any longer to an active KVM VM. It should
* be destroyed at some point later, either asynchronously or when the
* process terminates.
*/
struct pv_vm_to_be_destroyed {
struct list_head list;
unsigned long old_gmap_table;
u64 handle;
void *stor_var;
unsigned long stor_base;
};
static void kvm_s390_clear_pv_state(struct kvm *kvm)
{
kvm->arch.pv.handle = 0;
kvm->arch.pv.guest_len = 0;
kvm->arch.pv.stor_base = 0;
kvm->arch.pv.stor_var = NULL;
}
int kvm_s390_pv_destroy_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc)
{
int cc;
if (!kvm_s390_pv_cpu_get_handle(vcpu))
return 0;
cc = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu), UVC_CMD_DESTROY_SEC_CPU, rc, rrc);
KVM_UV_EVENT(vcpu->kvm, 3, "PROTVIRT DESTROY VCPU %d: rc %x rrc %x",
vcpu->vcpu_id, *rc, *rrc);
WARN_ONCE(cc, "protvirt destroy cpu failed rc %x rrc %x", *rc, *rrc);
/* Intended memory leak for something that should never happen. */
if (!cc)
free_pages(vcpu->arch.pv.stor_base,
get_order(uv_info.guest_cpu_stor_len));
free_page((unsigned long)sida_addr(vcpu->arch.sie_block));
vcpu->arch.sie_block->pv_handle_cpu = 0;
vcpu->arch.sie_block->pv_handle_config = 0;
memset(&vcpu->arch.pv, 0, sizeof(vcpu->arch.pv));
vcpu->arch.sie_block->sdf = 0;
/*
* The sidad field (for sdf == 2) is now the gbea field (for sdf == 0).
* Use the reset value of gbea to avoid leaking the kernel pointer of
* the just freed sida.
*/
vcpu->arch.sie_block->gbea = 1;
kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return cc ? -EIO : 0;
}
int kvm_s390_pv_create_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc)
{
struct uv_cb_csc uvcb = {
.header.cmd = UVC_CMD_CREATE_SEC_CPU,
.header.len = sizeof(uvcb),
};
void *sida_addr;
int cc;
if (kvm_s390_pv_cpu_get_handle(vcpu))
return -EINVAL;
vcpu->arch.pv.stor_base = __get_free_pages(GFP_KERNEL_ACCOUNT,
get_order(uv_info.guest_cpu_stor_len));
if (!vcpu->arch.pv.stor_base)
return -ENOMEM;
/* Input */
uvcb.guest_handle = kvm_s390_pv_get_handle(vcpu->kvm);
uvcb.num = vcpu->arch.sie_block->icpua;
uvcb.state_origin = virt_to_phys(vcpu->arch.sie_block);
uvcb.stor_origin = virt_to_phys((void *)vcpu->arch.pv.stor_base);
/* Alloc Secure Instruction Data Area Designation */
sida_addr = (void *)__get_free_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
if (!sida_addr) {
free_pages(vcpu->arch.pv.stor_base,
get_order(uv_info.guest_cpu_stor_len));
return -ENOMEM;
}
vcpu->arch.sie_block->sidad = virt_to_phys(sida_addr);
cc = uv_call(0, (u64)&uvcb);
*rc = uvcb.header.rc;
*rrc = uvcb.header.rrc;
KVM_UV_EVENT(vcpu->kvm, 3,
"PROTVIRT CREATE VCPU: cpu %d handle %llx rc %x rrc %x",
vcpu->vcpu_id, uvcb.cpu_handle, uvcb.header.rc,
uvcb.header.rrc);
if (cc) {
u16 dummy;
kvm_s390_pv_destroy_cpu(vcpu, &dummy, &dummy);
return -EIO;
}
/* Output */
vcpu->arch.pv.handle = uvcb.cpu_handle;
vcpu->arch.sie_block->pv_handle_cpu = uvcb.cpu_handle;
vcpu->arch.sie_block->pv_handle_config = kvm_s390_pv_get_handle(vcpu->kvm);
vcpu->arch.sie_block->sdf = 2;
kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
return 0;
}
/* only free resources when the destroy was successful */
static void kvm_s390_pv_dealloc_vm(struct kvm *kvm)
{
vfree(kvm->arch.pv.stor_var);
free_pages(kvm->arch.pv.stor_base,
get_order(uv_info.guest_base_stor_len));
kvm_s390_clear_pv_state(kvm);
}
static int kvm_s390_pv_alloc_vm(struct kvm *kvm)
{
unsigned long base = uv_info.guest_base_stor_len;
unsigned long virt = uv_info.guest_virt_var_stor_len;
unsigned long npages = 0, vlen = 0;
kvm->arch.pv.stor_var = NULL;
kvm->arch.pv.stor_base = __get_free_pages(GFP_KERNEL_ACCOUNT, get_order(base));
if (!kvm->arch.pv.stor_base)
return -ENOMEM;
/*
* Calculate current guest storage for allocation of the
* variable storage, which is based on the length in MB.
*
* Slots are sorted by GFN
*/
mutex_lock(&kvm->slots_lock);
npages = kvm_s390_get_gfn_end(kvm_memslots(kvm));
mutex_unlock(&kvm->slots_lock);
kvm->arch.pv.guest_len = npages * PAGE_SIZE;
/* Allocate variable storage */
vlen = ALIGN(virt * ((npages * PAGE_SIZE) / HPAGE_SIZE), PAGE_SIZE);
vlen += uv_info.guest_virt_base_stor_len;
kvm->arch.pv.stor_var = vzalloc(vlen);
if (!kvm->arch.pv.stor_var)
goto out_err;
return 0;
out_err:
kvm_s390_pv_dealloc_vm(kvm);
return -ENOMEM;
}
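/*
 * Editor's worked example (illustrative numbers only): for a guest with
 * npages = 0x100000 (4 GiB of 4 KiB pages) and an assumed
 * uv_info.guest_virt_var_stor_len of 512 bytes, the allocation above is
 *
 *	vlen = ALIGN(512 * (4 GiB / HPAGE_SIZE), PAGE_SIZE)
 *	       + uv_info.guest_virt_base_stor_len
 *	     = ALIGN(512 * 4096, 4096) + base = 2 MiB + base,
 *
 * i.e. the Ultravisor charges guest_virt_var_stor_len bytes of variable
 * storage per megabyte of guest storage (HPAGE_SIZE is 1 MiB on s390).
 */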
/**
* kvm_s390_pv_dispose_one_leftover - Clean up one leftover protected VM.
* @kvm: the KVM that was associated with this leftover protected VM
* @leftover: details about the leftover protected VM that needs a clean up
* @rc: the RC code of the Destroy Secure Configuration UVC
* @rrc: the RRC code of the Destroy Secure Configuration UVC
*
* Destroy one leftover protected VM.
* On success, kvm->mm->context.protected_count will be decremented atomically
* and all other resources used by the VM will be freed.
*
* Return: 0 in case of success, otherwise 1
*/
static int kvm_s390_pv_dispose_one_leftover(struct kvm *kvm,
struct pv_vm_to_be_destroyed *leftover,
u16 *rc, u16 *rrc)
{
int cc;
/* It used the destroy-fast UVC, nothing left to do here */
if (!leftover->handle)
goto done_fast;
cc = uv_cmd_nodata(leftover->handle, UVC_CMD_DESTROY_SEC_CONF, rc, rrc);
KVM_UV_EVENT(kvm, 3, "PROTVIRT DESTROY LEFTOVER VM: rc %x rrc %x", *rc, *rrc);
WARN_ONCE(cc, "protvirt destroy leftover vm failed rc %x rrc %x", *rc, *rrc);
if (cc)
return cc;
/*
* Intentionally leak unusable memory. If the UVC fails, the memory
* used for the VM and its metadata is permanently unusable.
* This can only happen in case of a serious KVM or hardware bug; it
* is not expected to happen in normal operation.
*/
free_pages(leftover->stor_base, get_order(uv_info.guest_base_stor_len));
free_pages(leftover->old_gmap_table, CRST_ALLOC_ORDER);
vfree(leftover->stor_var);
done_fast:
atomic_dec(&kvm->mm->context.protected_count);
return 0;
}
/**
* kvm_s390_destroy_lower_2g - Destroy the first 2GB of protected guest memory.
* @kvm: the VM whose memory is to be cleared.
*
* Destroy the first 2GB of guest memory, to avoid prefix issues after reboot.
* The CPUs of the protected VM need to be destroyed beforehand.
*/
static void kvm_s390_destroy_lower_2g(struct kvm *kvm)
{
const unsigned long pages_2g = SZ_2G / PAGE_SIZE;
struct kvm_memory_slot *slot;
unsigned long len;
int srcu_idx;
srcu_idx = srcu_read_lock(&kvm->srcu);
/* Take the memslot containing guest absolute address 0 */
slot = gfn_to_memslot(kvm, 0);
/* Clear all slots or parts thereof that are below 2GB */
while (slot && slot->base_gfn < pages_2g) {
len = min_t(u64, slot->npages, pages_2g - slot->base_gfn) * PAGE_SIZE;
s390_uv_destroy_range(kvm->mm, slot->userspace_addr, slot->userspace_addr + len);
/* Take the next memslot */
slot = gfn_to_memslot(kvm, slot->base_gfn + slot->npages);
}
srcu_read_unlock(&kvm->srcu, srcu_idx);
}
static int kvm_s390_pv_deinit_vm_fast(struct kvm *kvm, u16 *rc, u16 *rrc)
{
struct uv_cb_destroy_fast uvcb = {
.header.cmd = UVC_CMD_DESTROY_SEC_CONF_FAST,
.header.len = sizeof(uvcb),
.handle = kvm_s390_pv_get_handle(kvm),
};
int cc;
cc = uv_call_sched(0, (u64)&uvcb);
if (rc)
*rc = uvcb.header.rc;
if (rrc)
*rrc = uvcb.header.rrc;
WRITE_ONCE(kvm->arch.gmap->guest_handle, 0);
KVM_UV_EVENT(kvm, 3, "PROTVIRT DESTROY VM FAST: rc %x rrc %x",
uvcb.header.rc, uvcb.header.rrc);
WARN_ONCE(cc && uvcb.header.rc != 0x104,
"protvirt destroy vm fast failed handle %llx rc %x rrc %x",
kvm_s390_pv_get_handle(kvm), uvcb.header.rc, uvcb.header.rrc);
/* Intended memory leak on "impossible" error */
if (!cc)
kvm_s390_pv_dealloc_vm(kvm);
return cc ? -EIO : 0;
}
static inline bool is_destroy_fast_available(void)
{
return test_bit_inv(BIT_UVC_CMD_DESTROY_SEC_CONF_FAST, uv_info.inst_calls_list);
}
/**
* kvm_s390_pv_set_aside - Set aside a protected VM for later teardown.
* @kvm: the VM
* @rc: return value for the RC field of the UVCB
* @rrc: return value for the RRC field of the UVCB
*
* Set aside the protected VM for a subsequent teardown. The VM will be able
* to continue immediately as a non-secure VM, and the information needed to
* properly tear down the protected VM is set aside. If another protected VM
* was already set aside without starting its teardown, this function will
* fail.
* The CPUs of the protected VM need to be destroyed beforehand.
*
* Context: kvm->lock needs to be held
*
* Return: 0 in case of success, -EINVAL if another protected VM was already set
* aside, -ENOMEM if the system ran out of memory.
*/
int kvm_s390_pv_set_aside(struct kvm *kvm, u16 *rc, u16 *rrc)
{
struct pv_vm_to_be_destroyed *priv;
int res = 0;
lockdep_assert_held(&kvm->lock);
/*
* If another protected VM was already prepared for teardown, refuse.
* A normal deinitialization has to be performed instead.
*/
if (kvm->arch.pv.set_aside)
return -EINVAL;
/* Guest with segment type ASCE, refuse to destroy asynchronously */
if ((kvm->arch.gmap->asce & _ASCE_TYPE_MASK) == _ASCE_TYPE_SEGMENT)
return -EINVAL;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
if (is_destroy_fast_available()) {
res = kvm_s390_pv_deinit_vm_fast(kvm, rc, rrc);
} else {
priv->stor_var = kvm->arch.pv.stor_var;
priv->stor_base = kvm->arch.pv.stor_base;
priv->handle = kvm_s390_pv_get_handle(kvm);
priv->old_gmap_table = (unsigned long)kvm->arch.gmap->table;
WRITE_ONCE(kvm->arch.gmap->guest_handle, 0);
if (s390_replace_asce(kvm->arch.gmap))
res = -ENOMEM;
}
if (res) {
kfree(priv);
return res;
}
kvm_s390_destroy_lower_2g(kvm);
kvm_s390_clear_pv_state(kvm);
kvm->arch.pv.set_aside = priv;
*rc = UVC_RC_EXECUTED;
*rrc = 42;
return 0;
}
/**
* kvm_s390_pv_deinit_vm - Deinitialize the current protected VM
* @kvm: the KVM whose protected VM needs to be deinitialized
* @rc: the RC code of the UVC
* @rrc: the RRC code of the UVC
*
* Deinitialize the current protected VM. This function will destroy and
* cleanup the current protected VM, but it will not cleanup the guest
* memory. This function should only be called when the protected VM has
* just been created and therefore does not have any guest memory, or when
* the caller cleans up the guest memory separately.
*
* This function should not fail, but if it does, the donated memory must
* not be freed.
*
* Context: kvm->lock needs to be held
*
* Return: 0 in case of success, otherwise -EIO
*/
int kvm_s390_pv_deinit_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
{
int cc;
cc = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
UVC_CMD_DESTROY_SEC_CONF, rc, rrc);
WRITE_ONCE(kvm->arch.gmap->guest_handle, 0);
if (!cc) {
atomic_dec(&kvm->mm->context.protected_count);
kvm_s390_pv_dealloc_vm(kvm);
} else {
/* Intended memory leak on "impossible" error */
s390_replace_asce(kvm->arch.gmap);
}
KVM_UV_EVENT(kvm, 3, "PROTVIRT DESTROY VM: rc %x rrc %x", *rc, *rrc);
WARN_ONCE(cc, "protvirt destroy vm failed rc %x rrc %x", *rc, *rrc);
return cc ? -EIO : 0;
}
/**
* kvm_s390_pv_deinit_cleanup_all - Clean up all protected VMs associated
* with a specific KVM.
* @kvm: the KVM to be cleaned up
* @rc: the RC code of the first failing UVC
* @rrc: the RRC code of the first failing UVC
*
* This function will clean up all protected VMs associated with a KVM.
* This includes the active one, the one prepared for deinitialization with
* kvm_s390_pv_set_aside, and any still pending in the need_cleanup list.
*
* Context: kvm->lock needs to be held unless being called from
* kvm_arch_destroy_vm.
*
* Return: 0 if all VMs are successfully cleaned up, otherwise -EIO
*/
int kvm_s390_pv_deinit_cleanup_all(struct kvm *kvm, u16 *rc, u16 *rrc)
{
struct pv_vm_to_be_destroyed *cur;
bool need_zap = false;
u16 _rc, _rrc;
int cc = 0;
/*
* Nothing to do if the counter was already 0. Otherwise make sure
* the counter does not reach 0 before calling s390_uv_destroy_range.
*/
if (!atomic_inc_not_zero(&kvm->mm->context.protected_count))
return 0;
*rc = 1;
/* If the current VM is protected, destroy it */
if (kvm_s390_pv_get_handle(kvm)) {
cc = kvm_s390_pv_deinit_vm(kvm, rc, rrc);
need_zap = true;
}
/* If a previous protected VM was set aside, put it in the need_cleanup list */
if (kvm->arch.pv.set_aside) {
list_add(kvm->arch.pv.set_aside, &kvm->arch.pv.need_cleanup);
kvm->arch.pv.set_aside = NULL;
}
/* Cleanup all protected VMs in the need_cleanup list */
while (!list_empty(&kvm->arch.pv.need_cleanup)) {
cur = list_first_entry(&kvm->arch.pv.need_cleanup, typeof(*cur), list);
need_zap = true;
if (kvm_s390_pv_dispose_one_leftover(kvm, cur, &_rc, &_rrc)) {
cc = 1;
/*
* Only return the first error rc and rrc, so make
* sure it is not overwritten. All destroys will
* additionally be reported via KVM_UV_EVENT().
*/
if (*rc == UVC_RC_EXECUTED) {
*rc = _rc;
*rrc = _rrc;
}
}
list_del(&cur->list);
kfree(cur);
}
/*
* If the mm still has a mapping, try to mark all its pages as
* accessible. The counter should not reach zero before this
* cleanup has been performed.
*/
if (need_zap && mmget_not_zero(kvm->mm)) {
s390_uv_destroy_range(kvm->mm, 0, TASK_SIZE);
mmput(kvm->mm);
}
/* Now the counter can safely reach 0 */
atomic_dec(&kvm->mm->context.protected_count);
return cc ? -EIO : 0;
}
/**
* kvm_s390_pv_deinit_aside_vm - Teardown a previously set aside protected VM.
* @kvm: the VM previously associated with the protected VM
* @rc: return value for the RC field of the UVCB
* @rrc: return value for the RRC field of the UVCB
*
* Tear down the protected VM that had been previously prepared for teardown
* using kvm_s390_pv_set_aside(). Ideally this should be called by
* userspace asynchronously from a separate thread.
*
* Context: kvm->lock must not be held.
*
* Return: 0 in case of success, -EINVAL if no protected VM had been
* prepared for asynchronous teardown, -EIO in case of other errors.
*/
int kvm_s390_pv_deinit_aside_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
{
struct pv_vm_to_be_destroyed *p;
int ret = 0;
lockdep_assert_not_held(&kvm->lock);
mutex_lock(&kvm->lock);
p = kvm->arch.pv.set_aside;
kvm->arch.pv.set_aside = NULL;
mutex_unlock(&kvm->lock);
if (!p)
return -EINVAL;
/* When a fatal signal is received, stop immediately */
if (s390_uv_destroy_range_interruptible(kvm->mm, 0, TASK_SIZE_MAX))
goto done;
if (kvm_s390_pv_dispose_one_leftover(kvm, p, rc, rrc))
ret = -EIO;
kfree(p);
p = NULL;
done:
/*
* p is not NULL if we aborted because of a fatal signal, in which
* case queue the leftover for later cleanup.
*/
if (p) {
mutex_lock(&kvm->lock);
list_add(&p->list, &kvm->arch.pv.need_cleanup);
mutex_unlock(&kvm->lock);
/* Did not finish, but pretend things went well */
*rc = UVC_RC_EXECUTED;
*rrc = 42;
}
return ret;
}
static void kvm_s390_pv_mmu_notifier_release(struct mmu_notifier *subscription,
struct mm_struct *mm)
{
struct kvm *kvm = container_of(subscription, struct kvm, arch.pv.mmu_notifier);
u16 dummy;
int r;
/*
* No locking is needed since this is the last thread of the last user of this
* struct mm.
* When the struct kvm gets deinitialized, this notifier is also
* unregistered. This means that if this notifier runs, then the
* struct kvm is still valid.
*/
r = kvm_s390_cpus_from_pv(kvm, &dummy, &dummy);
if (!r && is_destroy_fast_available() && kvm_s390_pv_get_handle(kvm))
kvm_s390_pv_deinit_vm_fast(kvm, &dummy, &dummy);
}
static const struct mmu_notifier_ops kvm_s390_pv_mmu_notifier_ops = {
.release = kvm_s390_pv_mmu_notifier_release,
};
int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
{
struct uv_cb_cgc uvcb = {
.header.cmd = UVC_CMD_CREATE_SEC_CONF,
.header.len = sizeof(uvcb)
};
int cc, ret;
u16 dummy;
ret = kvm_s390_pv_alloc_vm(kvm);
if (ret)
return ret;
/* Inputs */
uvcb.guest_stor_origin = 0; /* MSO is 0 for KVM */
uvcb.guest_stor_len = kvm->arch.pv.guest_len;
uvcb.guest_asce = kvm->arch.gmap->asce;
uvcb.guest_sca = virt_to_phys(kvm->arch.sca);
uvcb.conf_base_stor_origin =
virt_to_phys((void *)kvm->arch.pv.stor_base);
uvcb.conf_virt_stor_origin = (u64)kvm->arch.pv.stor_var;
uvcb.flags.ap_allow_instr = kvm->arch.model.uv_feat_guest.ap;
uvcb.flags.ap_instr_intr = kvm->arch.model.uv_feat_guest.ap_intr;
cc = uv_call_sched(0, (u64)&uvcb);
*rc = uvcb.header.rc;
*rrc = uvcb.header.rrc;
KVM_UV_EVENT(kvm, 3, "PROTVIRT CREATE VM: handle %llx len %llx rc %x rrc %x flags %04x",
uvcb.guest_handle, uvcb.guest_stor_len, *rc, *rrc, uvcb.flags.raw);
/* Outputs */
kvm->arch.pv.handle = uvcb.guest_handle;
atomic_inc(&kvm->mm->context.protected_count);
if (cc) {
if (uvcb.header.rc & UVC_RC_NEED_DESTROY) {
kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy);
} else {
atomic_dec(&kvm->mm->context.protected_count);
kvm_s390_pv_dealloc_vm(kvm);
}
return -EIO;
}
kvm->arch.gmap->guest_handle = uvcb.guest_handle;
/* Add the notifier only once. No races because we hold kvm->lock */
if (kvm->arch.pv.mmu_notifier.ops != &kvm_s390_pv_mmu_notifier_ops) {
kvm->arch.pv.mmu_notifier.ops = &kvm_s390_pv_mmu_notifier_ops;
mmu_notifier_register(&kvm->arch.pv.mmu_notifier, kvm->mm);
}
return 0;
}
int kvm_s390_pv_set_sec_parms(struct kvm *kvm, void *hdr, u64 length, u16 *rc,
u16 *rrc)
{
struct uv_cb_ssc uvcb = {
.header.cmd = UVC_CMD_SET_SEC_CONF_PARAMS,
.header.len = sizeof(uvcb),
.sec_header_origin = (u64)hdr,
.sec_header_len = length,
.guest_handle = kvm_s390_pv_get_handle(kvm),
};
int cc = uv_call(0, (u64)&uvcb);
*rc = uvcb.header.rc;
*rrc = uvcb.header.rrc;
KVM_UV_EVENT(kvm, 3, "PROTVIRT VM SET PARMS: rc %x rrc %x",
*rc, *rrc);
return cc ? -EINVAL : 0;
}
static int unpack_one(struct kvm *kvm, unsigned long addr, u64 tweak,
u64 offset, u16 *rc, u16 *rrc)
{
struct uv_cb_unp uvcb = {
.header.cmd = UVC_CMD_UNPACK_IMG,
.header.len = sizeof(uvcb),
.guest_handle = kvm_s390_pv_get_handle(kvm),
.gaddr = addr,
.tweak[0] = tweak,
.tweak[1] = offset,
};
int ret = gmap_make_secure(kvm->arch.gmap, addr, &uvcb);
*rc = uvcb.header.rc;
*rrc = uvcb.header.rrc;
if (ret && ret != -EAGAIN)
KVM_UV_EVENT(kvm, 3, "PROTVIRT VM UNPACK: failed addr %llx with rc %x rrc %x",
uvcb.gaddr, *rc, *rrc);
return ret;
}
int kvm_s390_pv_unpack(struct kvm *kvm, unsigned long addr, unsigned long size,
unsigned long tweak, u16 *rc, u16 *rrc)
{
u64 offset = 0;
int ret = 0;
if (addr & ~PAGE_MASK || !size || size & ~PAGE_MASK)
return -EINVAL;
KVM_UV_EVENT(kvm, 3, "PROTVIRT VM UNPACK: start addr %lx size %lx",
addr, size);
while (offset < size) {
ret = unpack_one(kvm, addr, tweak, offset, rc, rrc);
if (ret == -EAGAIN) {
cond_resched();
if (fatal_signal_pending(current))
break;
continue;
}
if (ret)
break;
addr += PAGE_SIZE;
offset += PAGE_SIZE;
}
if (!ret)
KVM_UV_EVENT(kvm, 3, "%s", "PROTVIRT VM UNPACK: successful");
return ret;
}
int kvm_s390_pv_set_cpu_state(struct kvm_vcpu *vcpu, u8 state)
{
struct uv_cb_cpu_set_state uvcb = {
.header.cmd = UVC_CMD_CPU_SET_STATE,
.header.len = sizeof(uvcb),
.cpu_handle = kvm_s390_pv_cpu_get_handle(vcpu),
.state = state,
};
int cc;
cc = uv_call(0, (u64)&uvcb);
KVM_UV_EVENT(vcpu->kvm, 3, "PROTVIRT SET CPU %d STATE %d rc %x rrc %x",
vcpu->vcpu_id, state, uvcb.header.rc, uvcb.header.rrc);
if (cc)
return -EINVAL;
return 0;
}
int kvm_s390_pv_dump_cpu(struct kvm_vcpu *vcpu, void *buff, u16 *rc, u16 *rrc)
{
struct uv_cb_dump_cpu uvcb = {
.header.cmd = UVC_CMD_DUMP_CPU,
.header.len = sizeof(uvcb),
.cpu_handle = vcpu->arch.pv.handle,
.dump_area_origin = (u64)buff,
};
int cc;
cc = uv_call_sched(0, (u64)&uvcb);
*rc = uvcb.header.rc;
*rrc = uvcb.header.rrc;
return cc;
}
/* Size of the cache for the storage state dump data. 1MB for now */
#define DUMP_BUFF_LEN HPAGE_SIZE
/**
* kvm_s390_pv_dump_stor_state
*
* @kvm: pointer to the guest's KVM struct
* @buff_user: Userspace pointer where we will write the results to
* @gaddr: Starting absolute guest address for which the storage state
* is requested.
* @buff_user_len: Length of the buff_user buffer
* @rc: Pointer to where the uvcb return code is stored
* @rrc: Pointer to where the uvcb return reason code is stored
*
* Stores buff_user_len bytes of tweak component values to buff_user
* starting with the 1MB block specified by the absolute guest address
* (gaddr). The gaddr pointer will be updated with the last address
* for which data was written when returning to userspace. buff_user
* might be written to even if an error rc is returned, for instance
* if we encounter a fault after writing the first page of data.
*
* Context: kvm->lock needs to be held
*
* Return:
* 0 on success
* -ENOMEM if allocating the cache fails
* -EINVAL if gaddr is not aligned to 1MB
* -EINVAL if buff_user_len is not aligned to uv_info.conf_dump_storage_state_len
* -EINVAL if the UV call fails, rc and rrc will be set in this case
* -EFAULT if copying the result to buff_user failed
*/
int kvm_s390_pv_dump_stor_state(struct kvm *kvm, void __user *buff_user,
u64 *gaddr, u64 buff_user_len, u16 *rc, u16 *rrc)
{
struct uv_cb_dump_stor_state uvcb = {
.header.cmd = UVC_CMD_DUMP_CONF_STOR_STATE,
.header.len = sizeof(uvcb),
.config_handle = kvm->arch.pv.handle,
.gaddr = *gaddr,
.dump_area_origin = 0,
};
const u64 increment_len = uv_info.conf_dump_storage_state_len;
size_t buff_kvm_size;
size_t size_done = 0;
u8 *buff_kvm = NULL;
int cc, ret;
ret = -EINVAL;
/* UV call processes 1MB guest storage chunks at a time */
if (!IS_ALIGNED(*gaddr, HPAGE_SIZE))
goto out;
/*
* We provide the storage state for 1MB chunks of guest
* storage. The buffer will need to be aligned to
* conf_dump_storage_state_len so we don't end on a partial
* chunk.
*/
if (!buff_user_len ||
!IS_ALIGNED(buff_user_len, increment_len))
goto out;
/*
* Allocate a buffer from which we will later copy to the user
* process. We don't want userspace to dictate our buffer size
* so we limit it to DUMP_BUFF_LEN.
*/
ret = -ENOMEM;
buff_kvm_size = min_t(u64, buff_user_len, DUMP_BUFF_LEN);
buff_kvm = vzalloc(buff_kvm_size);
if (!buff_kvm)
goto out;
ret = 0;
uvcb.dump_area_origin = (u64)buff_kvm;
/* We will loop until the user buffer is filled or an error occurs */
do {
/* Get 1MB worth of guest storage state data */
cc = uv_call_sched(0, (u64)&uvcb);
/* All or nothing */
if (cc) {
ret = -EINVAL;
break;
}
size_done += increment_len;
uvcb.dump_area_origin += increment_len;
buff_user_len -= increment_len;
uvcb.gaddr += HPAGE_SIZE;
/* KVM Buffer full, time to copy to the process */
if (!buff_user_len || size_done == DUMP_BUFF_LEN) {
if (copy_to_user(buff_user, buff_kvm, size_done)) {
ret = -EFAULT;
break;
}
buff_user += size_done;
size_done = 0;
uvcb.dump_area_origin = (u64)buff_kvm;
}
} while (buff_user_len);
/* Report back where we ended dumping */
*gaddr = uvcb.gaddr;
/* Let's only log errors; we don't want to spam */
out:
if (ret)
KVM_UV_EVENT(kvm, 3,
"PROTVIRT DUMP STORAGE STATE: addr %llx ret %d, uvcb rc %x rrc %x",
uvcb.gaddr, ret, uvcb.header.rc, uvcb.header.rrc);
*rc = uvcb.header.rc;
*rrc = uvcb.header.rrc;
vfree(buff_kvm);
return ret;
}
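/*
* Worked example with assumed numbers (illustration only): if
* uv_info.conf_dump_storage_state_len were 64, a buff_user_len of
* 1024 would span 1024 / 64 = 16 UV calls, i.e. the state of
* 16 * HPAGE_SIZE = 16MB of guest storage, while the kernel bounce
* buffer stays capped at min(buff_user_len, DUMP_BUFF_LEN).
*/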
/**
* kvm_s390_pv_dump_complete
*
* @kvm: pointer to the guest's KVM struct
* @buff_user: Userspace pointer where we will write the results to
* @rc: Pointer to where the uvcb return code is stored
* @rrc: Pointer to where the uvcb return reason code is stored
*
* Completes the dumping operation and writes the completion data to
* user space.
*
* Context: kvm->lock needs to be held
*
* Return:
* 0 on success
* -ENOMEM if allocating the completion buffer fails
* -EINVAL if the UV call fails, rc and rrc will be set in this case
* -EFAULT if copying the result to buff_user failed
*/
int kvm_s390_pv_dump_complete(struct kvm *kvm, void __user *buff_user,
u16 *rc, u16 *rrc)
{
struct uv_cb_dump_complete complete = {
.header.len = sizeof(complete),
.header.cmd = UVC_CMD_DUMP_COMPLETE,
.config_handle = kvm_s390_pv_get_handle(kvm),
};
u64 *compl_data;
int ret;
/* Allocate dump area */
compl_data = vzalloc(uv_info.conf_dump_finalize_len);
if (!compl_data)
return -ENOMEM;
complete.dump_area_origin = (u64)compl_data;
ret = uv_call_sched(0, (u64)&complete);
*rc = complete.header.rc;
*rrc = complete.header.rrc;
KVM_UV_EVENT(kvm, 3, "PROTVIRT DUMP COMPLETE: rc %x rrc %x",
complete.header.rc, complete.header.rrc);
if (!ret) {
/*
* kvm_s390_pv_dealloc_vm() will also (mem)set
* this to false on a reboot or other destroy
* operation for this vm.
*/
kvm->arch.pv.dumping = false;
kvm_s390_vcpu_unblock_all(kvm);
ret = copy_to_user(buff_user, compl_data, uv_info.conf_dump_finalize_len);
if (ret)
ret = -EFAULT;
}
vfree(compl_data);
/* If the UVC returned an error, translate it to -EINVAL */
if (ret > 0)
ret = -EINVAL;
return ret;
}
| linux-master | arch/s390/kvm/pv.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Test module for unwind_for_each_frame
*/
#include <kunit/test.h>
#include <asm/unwind.h>
#include <linux/completion.h>
#include <linux/kallsyms.h>
#include <linux/kthread.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kprobes.h>
#include <linux/wait.h>
#include <asm/irq.h>
static struct kunit *current_test;
#define BT_BUF_SIZE (PAGE_SIZE * 4)
static bool force_bt;
module_param_named(backtrace, force_bt, bool, 0444);
MODULE_PARM_DESC(backtrace, "print backtraces for all tests");
/*
* To avoid the printk line limit, split the backtrace into lines
*/
static void print_backtrace(char *bt)
{
char *p;
while (true) {
p = strsep(&bt, "\n");
if (!p)
break;
kunit_err(current_test, "%s\n", p);
}
}
/*
* Calls unwind_for_each_frame(task, regs, sp) and verifies that the result
* contains unwindme_func2 followed by unwindme_func1.
*/
static noinline int test_unwind(struct task_struct *task, struct pt_regs *regs,
unsigned long sp)
{
int frame_count, prev_is_func2, seen_func2_func1, seen_arch_rethook_trampoline;
const int max_frames = 128;
struct unwind_state state;
size_t bt_pos = 0;
int ret = 0;
char *bt;
bt = kmalloc(BT_BUF_SIZE, GFP_ATOMIC);
if (!bt) {
kunit_err(current_test, "failed to allocate backtrace buffer\n");
return -ENOMEM;
}
/* Unwind. */
frame_count = 0;
prev_is_func2 = 0;
seen_func2_func1 = 0;
seen_arch_rethook_trampoline = 0;
unwind_for_each_frame(&state, task, regs, sp) {
unsigned long addr = unwind_get_return_address(&state);
char sym[KSYM_SYMBOL_LEN];
if (frame_count++ == max_frames)
break;
if (state.reliable && !addr) {
kunit_err(current_test, "unwind state reliable but addr is 0\n");
ret = -EINVAL;
break;
}
sprint_symbol(sym, addr);
if (bt_pos < BT_BUF_SIZE) {
bt_pos += snprintf(bt + bt_pos, BT_BUF_SIZE - bt_pos,
state.reliable ? " [%-7s%px] %pSR\n" :
"([%-7s%px] %pSR)\n",
stack_type_name(state.stack_info.type),
(void *)state.sp, (void *)state.ip);
if (bt_pos >= BT_BUF_SIZE)
kunit_err(current_test, "backtrace buffer is too small\n");
}
if (prev_is_func2 && str_has_prefix(sym, "unwindme_func1"))
seen_func2_func1 = 1;
prev_is_func2 = str_has_prefix(sym, "unwindme_func2");
if (str_has_prefix(sym, "arch_rethook_trampoline+0x0/"))
seen_arch_rethook_trampoline = 1;
}
/* Check the results. */
if (unwind_error(&state)) {
kunit_err(current_test, "unwind error\n");
ret = -EINVAL;
}
if (!seen_func2_func1) {
kunit_err(current_test, "unwindme_func2 and unwindme_func1 not found\n");
ret = -EINVAL;
}
if (frame_count == max_frames) {
kunit_err(current_test, "Maximum number of frames exceeded\n");
ret = -EINVAL;
}
if (seen_arch_rethook_trampoline) {
kunit_err(current_test, "arch_rethook_trampoline+0x0 in unwinding results\n");
ret = -EINVAL;
}
if (ret || force_bt)
print_backtrace(bt);
kfree(bt);
return ret;
}
/* State of the task being unwound. */
struct unwindme {
int flags;
int ret;
struct task_struct *task;
struct completion task_ready;
wait_queue_head_t task_wq;
unsigned long sp;
};
static struct unwindme *unwindme;
/* Values of unwindme.flags. */
#define UWM_DEFAULT 0x0
#define UWM_THREAD 0x1 /* Unwind a separate task. */
#define UWM_REGS 0x2 /* Pass regs to test_unwind(). */
#define UWM_SP 0x4 /* Pass sp to test_unwind(). */
#define UWM_CALLER 0x8 /* Unwind starting from caller. */
#define UWM_SWITCH_STACK 0x10 /* Use call_on_stack. */
#define UWM_IRQ 0x20 /* Unwind from irq context. */
#define UWM_PGM 0x40 /* Unwind from program check handler */
#define UWM_KPROBE_ON_FTRACE 0x80 /* Unwind from kprobe handler called via ftrace. */
#define UWM_FTRACE 0x100 /* Unwind from ftrace handler. */
#define UWM_KRETPROBE 0x200 /* Unwind through kretprobed function. */
#define UWM_KRETPROBE_HANDLER 0x400 /* Unwind from kretprobe handler. */
static __always_inline struct pt_regs fake_pt_regs(void)
{
struct pt_regs regs;
memset(&regs, 0, sizeof(regs));
regs.gprs[15] = current_stack_pointer;
asm volatile(
"basr %[psw_addr],0\n"
: [psw_addr] "=d" (regs.psw.addr));
return regs;
}
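/*
* Note on the asm above (added explanation): "basr %[psw_addr],0" with
* a second operand of 0 does not branch; it only stores the address of
* the next sequential instruction in the target register. That yields
* a valid in-kernel PSW address for the fake pt_regs.
*/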
static int kretprobe_ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
struct unwindme *u = unwindme;
if (!(u->flags & UWM_KRETPROBE_HANDLER))
return 0;
u->ret = test_unwind(NULL, (u->flags & UWM_REGS) ? regs : NULL,
(u->flags & UWM_SP) ? u->sp : 0);
return 0;
}
static noinline notrace int test_unwind_kretprobed_func(struct unwindme *u)
{
struct pt_regs regs;
if (!(u->flags & UWM_KRETPROBE))
return 0;
regs = fake_pt_regs();
return test_unwind(NULL, (u->flags & UWM_REGS) ? &regs : NULL,
(u->flags & UWM_SP) ? u->sp : 0);
}
static noinline int test_unwind_kretprobed_func_caller(struct unwindme *u)
{
return test_unwind_kretprobed_func(u);
}
static int test_unwind_kretprobe(struct unwindme *u)
{
int ret;
struct kretprobe my_kretprobe;
if (!IS_ENABLED(CONFIG_KPROBES))
kunit_skip(current_test, "requires CONFIG_KPROBES");
u->ret = -1; /* make sure kprobe is called */
unwindme = u;
memset(&my_kretprobe, 0, sizeof(my_kretprobe));
my_kretprobe.handler = kretprobe_ret_handler;
my_kretprobe.maxactive = 1;
my_kretprobe.kp.addr = (kprobe_opcode_t *)test_unwind_kretprobed_func;
ret = register_kretprobe(&my_kretprobe);
if (ret < 0) {
kunit_err(current_test, "register_kretprobe failed %d\n", ret);
return -EINVAL;
}
ret = test_unwind_kretprobed_func_caller(u);
unregister_kretprobe(&my_kretprobe);
unwindme = NULL;
if (u->flags & UWM_KRETPROBE_HANDLER)
ret = u->ret;
return ret;
}
static int kprobe_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
struct unwindme *u = unwindme;
u->ret = test_unwind(NULL, (u->flags & UWM_REGS) ? regs : NULL,
(u->flags & UWM_SP) ? u->sp : 0);
return 0;
}
extern const char test_unwind_kprobed_insn[];
static noinline void test_unwind_kprobed_func(void)
{
asm volatile(
" nopr %%r7\n"
"test_unwind_kprobed_insn:\n"
" nopr %%r7\n"
:);
}
static int test_unwind_kprobe(struct unwindme *u)
{
struct kprobe kp;
int ret;
if (!IS_ENABLED(CONFIG_KPROBES))
kunit_skip(current_test, "requires CONFIG_KPROBES");
if (!IS_ENABLED(CONFIG_KPROBES_ON_FTRACE) && u->flags & UWM_KPROBE_ON_FTRACE)
kunit_skip(current_test, "requires CONFIG_KPROBES_ON_FTRACE");
u->ret = -1; /* make sure kprobe is called */
unwindme = u;
memset(&kp, 0, sizeof(kp));
kp.pre_handler = kprobe_pre_handler;
kp.addr = u->flags & UWM_KPROBE_ON_FTRACE ?
(kprobe_opcode_t *)test_unwind_kprobed_func :
(kprobe_opcode_t *)test_unwind_kprobed_insn;
ret = register_kprobe(&kp);
if (ret < 0) {
kunit_err(current_test, "register_kprobe failed %d\n", ret);
return -EINVAL;
}
test_unwind_kprobed_func();
unregister_kprobe(&kp);
unwindme = NULL;
return u->ret;
}
static void notrace __used test_unwind_ftrace_handler(unsigned long ip,
unsigned long parent_ip,
struct ftrace_ops *fops,
struct ftrace_regs *fregs)
{
struct unwindme *u = (struct unwindme *)fregs->regs.gprs[2];
u->ret = test_unwind(NULL, (u->flags & UWM_REGS) ? &fregs->regs : NULL,
(u->flags & UWM_SP) ? u->sp : 0);
}
static noinline int test_unwind_ftraced_func(struct unwindme *u)
{
return READ_ONCE(u)->ret;
}
static int test_unwind_ftrace(struct unwindme *u)
{
struct ftrace_ops *fops;
int ret;
#ifdef CONFIG_DYNAMIC_FTRACE
fops = kunit_kzalloc(current_test, sizeof(*fops), GFP_KERNEL);
fops->func = test_unwind_ftrace_handler;
fops->flags = FTRACE_OPS_FL_DYNAMIC |
FTRACE_OPS_FL_RECURSION |
FTRACE_OPS_FL_SAVE_REGS |
FTRACE_OPS_FL_PERMANENT;
#else
kunit_skip(current_test, "requires CONFIG_DYNAMIC_FTRACE");
#endif
ret = ftrace_set_filter_ip(fops, (unsigned long)test_unwind_ftraced_func, 0, 0);
if (ret) {
kunit_err(current_test, "failed to set ftrace filter (%d)\n", ret);
return -1;
}
ret = register_ftrace_function(fops);
if (!ret) {
ret = test_unwind_ftraced_func(u);
unregister_ftrace_function(fops);
} else {
kunit_err(current_test, "failed to register ftrace handler (%d)\n", ret);
}
ftrace_set_filter_ip(fops, (unsigned long)test_unwind_ftraced_func, 1, 0);
return ret;
}
/* This function may or may not appear in the backtrace. */
static noinline int unwindme_func4(struct unwindme *u)
{
if (!(u->flags & UWM_CALLER))
u->sp = current_frame_address();
if (u->flags & UWM_THREAD) {
complete(&u->task_ready);
wait_event(u->task_wq, kthread_should_park());
kthread_parkme();
return 0;
} else if (u->flags & (UWM_PGM | UWM_KPROBE_ON_FTRACE)) {
return test_unwind_kprobe(u);
} else if (u->flags & (UWM_KRETPROBE | UWM_KRETPROBE_HANDLER)) {
return test_unwind_kretprobe(u);
} else if (u->flags & UWM_FTRACE) {
return test_unwind_ftrace(u);
} else {
struct pt_regs regs = fake_pt_regs();
return test_unwind(NULL,
(u->flags & UWM_REGS) ? &regs : NULL,
(u->flags & UWM_SP) ? u->sp : 0);
}
}
/* This function may or may not appear in the backtrace. */
static noinline int unwindme_func3(struct unwindme *u)
{
u->sp = current_frame_address();
return unwindme_func4(u);
}
/* This function must appear in the backtrace. */
static noinline int unwindme_func2(struct unwindme *u)
{
unsigned long flags;
int rc;
if (u->flags & UWM_SWITCH_STACK) {
local_irq_save(flags);
local_mcck_disable();
rc = call_on_stack(1, S390_lowcore.nodat_stack,
int, unwindme_func3, struct unwindme *, u);
local_mcck_enable();
local_irq_restore(flags);
return rc;
} else {
return unwindme_func3(u);
}
}
/* This function must follow unwindme_func2 in the backtrace. */
static noinline int unwindme_func1(void *u)
{
return unwindme_func2((struct unwindme *)u);
}
static void unwindme_timer_fn(struct timer_list *unused)
{
struct unwindme *u = READ_ONCE(unwindme);
if (u) {
unwindme = NULL;
u->task = NULL;
u->ret = unwindme_func1(u);
complete(&u->task_ready);
}
}
static struct timer_list unwind_timer;
static int test_unwind_irq(struct unwindme *u)
{
unwindme = u;
init_completion(&u->task_ready);
timer_setup(&unwind_timer, unwindme_timer_fn, 0);
mod_timer(&unwind_timer, jiffies + 1);
wait_for_completion(&u->task_ready);
return u->ret;
}
/* Spawns a task and passes it to test_unwind(). */
static int test_unwind_task(struct unwindme *u)
{
struct task_struct *task;
int ret;
/* Initialize thread-related fields. */
init_completion(&u->task_ready);
init_waitqueue_head(&u->task_wq);
/*
* Start the task and wait until it reaches unwindme_func4() and sleeps
* in (task_ready, unwind_done] range.
*/
task = kthread_run(unwindme_func1, u, "%s", __func__);
if (IS_ERR(task)) {
kunit_err(current_test, "kthread_run() failed\n");
return PTR_ERR(task);
}
/*
* Make sure the task reaches unwindme_func4 before parking it;
* otherwise we might park it before the kthread function has been executed.
*/
wait_for_completion(&u->task_ready);
kthread_park(task);
/* Unwind. */
ret = test_unwind(task, NULL, (u->flags & UWM_SP) ? u->sp : 0);
kthread_stop(task);
return ret;
}
struct test_params {
int flags;
char *name;
};
/*
* Create required parameter list for tests
*/
#define TEST_WITH_FLAGS(f) { .flags = f, .name = #f }
static const struct test_params param_list[] = {
TEST_WITH_FLAGS(UWM_DEFAULT),
TEST_WITH_FLAGS(UWM_SP),
TEST_WITH_FLAGS(UWM_REGS),
TEST_WITH_FLAGS(UWM_SWITCH_STACK),
TEST_WITH_FLAGS(UWM_SP | UWM_REGS),
TEST_WITH_FLAGS(UWM_CALLER | UWM_SP),
TEST_WITH_FLAGS(UWM_CALLER | UWM_SP | UWM_REGS),
TEST_WITH_FLAGS(UWM_CALLER | UWM_SP | UWM_REGS | UWM_SWITCH_STACK),
TEST_WITH_FLAGS(UWM_THREAD),
TEST_WITH_FLAGS(UWM_THREAD | UWM_SP),
TEST_WITH_FLAGS(UWM_THREAD | UWM_CALLER | UWM_SP),
TEST_WITH_FLAGS(UWM_IRQ),
TEST_WITH_FLAGS(UWM_IRQ | UWM_SWITCH_STACK),
TEST_WITH_FLAGS(UWM_IRQ | UWM_SP),
TEST_WITH_FLAGS(UWM_IRQ | UWM_REGS),
TEST_WITH_FLAGS(UWM_IRQ | UWM_SP | UWM_REGS),
TEST_WITH_FLAGS(UWM_IRQ | UWM_CALLER | UWM_SP),
TEST_WITH_FLAGS(UWM_IRQ | UWM_CALLER | UWM_SP | UWM_REGS),
TEST_WITH_FLAGS(UWM_IRQ | UWM_CALLER | UWM_SP | UWM_REGS | UWM_SWITCH_STACK),
TEST_WITH_FLAGS(UWM_PGM),
TEST_WITH_FLAGS(UWM_PGM | UWM_SP),
TEST_WITH_FLAGS(UWM_PGM | UWM_REGS),
TEST_WITH_FLAGS(UWM_PGM | UWM_SP | UWM_REGS),
TEST_WITH_FLAGS(UWM_KPROBE_ON_FTRACE),
TEST_WITH_FLAGS(UWM_KPROBE_ON_FTRACE | UWM_SP),
TEST_WITH_FLAGS(UWM_KPROBE_ON_FTRACE | UWM_REGS),
TEST_WITH_FLAGS(UWM_KPROBE_ON_FTRACE | UWM_SP | UWM_REGS),
TEST_WITH_FLAGS(UWM_FTRACE),
TEST_WITH_FLAGS(UWM_FTRACE | UWM_SP),
TEST_WITH_FLAGS(UWM_FTRACE | UWM_REGS),
TEST_WITH_FLAGS(UWM_FTRACE | UWM_SP | UWM_REGS),
TEST_WITH_FLAGS(UWM_KRETPROBE),
TEST_WITH_FLAGS(UWM_KRETPROBE | UWM_SP),
TEST_WITH_FLAGS(UWM_KRETPROBE | UWM_REGS),
TEST_WITH_FLAGS(UWM_KRETPROBE | UWM_SP | UWM_REGS),
TEST_WITH_FLAGS(UWM_KRETPROBE_HANDLER),
TEST_WITH_FLAGS(UWM_KRETPROBE_HANDLER | UWM_SP),
TEST_WITH_FLAGS(UWM_KRETPROBE_HANDLER | UWM_REGS),
TEST_WITH_FLAGS(UWM_KRETPROBE_HANDLER | UWM_SP | UWM_REGS),
};
/*
* Parameter description generator: required for KUNIT_ARRAY_PARAM()
*/
static void get_desc(const struct test_params *params, char *desc)
{
strscpy(desc, params->name, KUNIT_PARAM_DESC_SIZE);
}
/*
* Create test_unwind_gen_params
*/
KUNIT_ARRAY_PARAM(test_unwind, param_list, get_desc);
static void test_unwind_flags(struct kunit *test)
{
struct unwindme u;
const struct test_params *params;
current_test = test;
params = (const struct test_params *)test->param_value;
u.flags = params->flags;
if (u.flags & UWM_THREAD)
KUNIT_EXPECT_EQ(test, 0, test_unwind_task(&u));
else if (u.flags & UWM_IRQ)
KUNIT_EXPECT_EQ(test, 0, test_unwind_irq(&u));
else
KUNIT_EXPECT_EQ(test, 0, unwindme_func1(&u));
}
static struct kunit_case unwind_test_cases[] = {
KUNIT_CASE_PARAM(test_unwind_flags, test_unwind_gen_params),
{}
};
static struct kunit_suite test_unwind_suite = {
.name = "test_unwind",
.test_cases = unwind_test_cases,
};
kunit_test_suites(&test_unwind_suite);
MODULE_LICENSE("GPL");
| linux-master | arch/s390/lib/test_unwind.c |
// SPDX-License-Identifier: GPL-2.0+
#include <linux/export.h>
#include "test_modules.h"
#define DEFINE_RETURN(i) \
int test_modules_return_ ## i(void) \
{ \
return 1 ## i - 10000; \
} \
EXPORT_SYMBOL_GPL(test_modules_return_ ## i)
REPEAT_10000(DEFINE_RETURN);
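/*
* Why "1 ## i - 10000" (added explanation): REPEAT_10000 pastes
* zero-padded suffixes, and a literal such as 0042 would be parsed as
* octal (and 0089 would not compile at all). Pasting a leading 1 and
* subtracting 10000 keeps the value decimal:
* e.g. i == 0042  ->  10042 - 10000 == 42.
*/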
| linux-master | arch/s390/lib/test_modules_helpers.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Precise Delay Loops for S390
*
* Copyright IBM Corp. 1999, 2008
* Author(s): Martin Schwidefsky <[email protected]>,
*/
#include <linux/processor.h>
#include <linux/delay.h>
#include <asm/div64.h>
#include <asm/timex.h>
void __delay(unsigned long loops)
{
/*
* Loop 'loops' times. Callers must not assume a specific
* amount of time passes before this function returns.
*/
asm volatile("0: brct %0,0b" : : "d" ((loops/2) + 1));
}
EXPORT_SYMBOL(__delay);
static void delay_loop(unsigned long delta)
{
unsigned long end;
end = get_tod_clock_monotonic() + delta;
while (!tod_after(get_tod_clock_monotonic(), end))
cpu_relax();
}
void __udelay(unsigned long usecs)
{
delay_loop(usecs << 12);
}
EXPORT_SYMBOL(__udelay);
void __ndelay(unsigned long nsecs)
{
nsecs <<= 9;
do_div(nsecs, 125);
delay_loop(nsecs);
}
EXPORT_SYMBOL(__ndelay);
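/*
* Unit conversion sketch (illustration, not part of this file): the
* TOD clock carries 12 fractional bits per microsecond, i.e. 4096 TOD
* units per microsecond. Hence "usecs << 12" in __udelay(), and
* __ndelay() computes nsecs * 4096 / 1000 as (nsecs << 9) / 125,
* since 4096 / 1000 == 512 / 125.
*/
#if 0	/* standalone demonstration of the arithmetic */
static unsigned long ns_to_tod_units(unsigned long nsecs)
{
	return (nsecs << 9) / 125;	/* == nsecs * 4096 / 1000 */
}
#endif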
| linux-master | arch/s390/lib/delay.c |
// SPDX-License-Identifier: GPL-2.0+
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/random.h>
#include <kunit/test.h>
#include "test_kprobes.h"
static struct kprobe kp;
static void setup_kprobe(struct kunit *test, struct kprobe *kp,
const char *symbol, int offset)
{
kp->offset = offset;
kp->addr = NULL;
kp->symbol_name = symbol;
}
static void test_kprobe_offset(struct kunit *test, struct kprobe *kp,
const char *target, int offset)
{
int ret;
setup_kprobe(test, kp, target, 0);
ret = register_kprobe(kp);
if (!ret)
unregister_kprobe(kp);
KUNIT_EXPECT_EQ(test, 0, ret);
setup_kprobe(test, kp, target, offset);
ret = register_kprobe(kp);
KUNIT_EXPECT_EQ(test, -EINVAL, ret);
if (!ret)
unregister_kprobe(kp);
}
static void test_kprobe_odd(struct kunit *test)
{
test_kprobe_offset(test, &kp, "kprobes_target_odd",
kprobes_target_odd_offs);
}
static void test_kprobe_in_insn4(struct kunit *test)
{
test_kprobe_offset(test, &kp, "kprobes_target_in_insn4",
kprobes_target_in_insn4_offs);
}
static void test_kprobe_in_insn6_lo(struct kunit *test)
{
test_kprobe_offset(test, &kp, "kprobes_target_in_insn6_lo",
kprobes_target_in_insn6_lo_offs);
}
static void test_kprobe_in_insn6_hi(struct kunit *test)
{
test_kprobe_offset(test, &kp, "kprobes_target_in_insn6_hi",
kprobes_target_in_insn6_hi_offs);
}
static struct kunit_case kprobes_testcases[] = {
KUNIT_CASE(test_kprobe_odd),
KUNIT_CASE(test_kprobe_in_insn4),
KUNIT_CASE(test_kprobe_in_insn6_lo),
KUNIT_CASE(test_kprobe_in_insn6_hi),
{}
};
static struct kunit_suite kprobes_test_suite = {
.name = "kprobes_test_s390",
.test_cases = kprobes_testcases,
};
kunit_test_suites(&kprobes_test_suite);
MODULE_LICENSE("GPL");
| linux-master | arch/s390/lib/test_kprobes.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Optimized string functions
*
* S390 version
* Copyright IBM Corp. 2004
* Author(s): Martin Schwidefsky ([email protected])
*/
#define IN_ARCH_STRING_C 1
#ifndef __NO_FORTIFY
# define __NO_FORTIFY
#endif
#include <linux/types.h>
#include <linux/string.h>
#include <linux/export.h>
/*
* Helper functions to find the end of a string
*/
static inline char *__strend(const char *s)
{
unsigned long e = 0;
asm volatile(
" lghi 0,0\n"
"0: srst %[e],%[s]\n"
" jo 0b\n"
: [e] "+&a" (e), [s] "+&a" (s)
:
: "cc", "memory", "0");
return (char *)e;
}
static inline char *__strnend(const char *s, size_t n)
{
const char *p = s + n;
asm volatile(
" lghi 0,0\n"
"0: srst %[p],%[s]\n"
" jo 0b\n"
: [p] "+&d" (p), [s] "+&a" (s)
:
: "cc", "memory", "0");
return (char *)p;
}
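/*
* Note on the two helpers above (added explanation): SRST searches
* storage from the second-operand address for the byte in register 0
* (loaded with 0 here, i.e. the NUL terminator) up to the
* first-operand address. __strend() passes 0 as the end address,
* which due to address wraparound makes the scan effectively
* unbounded, relying on the terminating NUL; __strnend() bounds it
* with s + n. The "jo 0b" retries when the CPU ends the instruction
* early with condition code 3.
*/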
/**
* strlen - Find the length of a string
* @s: The string to be sized
*
* returns the length of @s
*/
#ifdef __HAVE_ARCH_STRLEN
size_t strlen(const char *s)
{
return __strend(s) - s;
}
EXPORT_SYMBOL(strlen);
#endif
/**
* strnlen - Find the length of a length-limited string
* @s: The string to be sized
* @n: The maximum number of bytes to search
*
* returns the minimum of the length of @s and @n
*/
#ifdef __HAVE_ARCH_STRNLEN
size_t strnlen(const char *s, size_t n)
{
return __strnend(s, n) - s;
}
EXPORT_SYMBOL(strnlen);
#endif
/**
* strcpy - Copy a %NUL terminated string
* @dest: Where to copy the string to
* @src: Where to copy the string from
*
* returns a pointer to @dest
*/
#ifdef __HAVE_ARCH_STRCPY
char *strcpy(char *dest, const char *src)
{
char *ret = dest;
asm volatile(
" lghi 0,0\n"
"0: mvst %[dest],%[src]\n"
" jo 0b\n"
: [dest] "+&a" (dest), [src] "+&a" (src)
:
: "cc", "memory", "0");
return ret;
}
EXPORT_SYMBOL(strcpy);
#endif
/**
* strncpy - Copy a length-limited, %NUL-terminated string
* @dest: Where to copy the string to
* @src: Where to copy the string from
* @n: The maximum number of bytes to copy
*
* The result is not %NUL-terminated if the source exceeds
* @n bytes.
*/
#ifdef __HAVE_ARCH_STRNCPY
char *strncpy(char *dest, const char *src, size_t n)
{
size_t len = __strnend(src, n) - src;
memset(dest + len, 0, n - len);
memcpy(dest, src, len);
return dest;
}
EXPORT_SYMBOL(strncpy);
#endif
/**
* strcat - Append one %NUL-terminated string to another
* @dest: The string to be appended to
* @src: The string to append to it
*
* returns a pointer to @dest
*/
#ifdef __HAVE_ARCH_STRCAT
char *strcat(char *dest, const char *src)
{
unsigned long dummy = 0;
char *ret = dest;
asm volatile(
" lghi 0,0\n"
"0: srst %[dummy],%[dest]\n"
" jo 0b\n"
"1: mvst %[dummy],%[src]\n"
" jo 1b\n"
: [dummy] "+&a" (dummy), [dest] "+&a" (dest), [src] "+&a" (src)
:
: "cc", "memory", "0");
return ret;
}
EXPORT_SYMBOL(strcat);
#endif
/**
* strlcat - Append a length-limited, %NUL-terminated string to another
* @dest: The string to be appended to
* @src: The string to append to it
* @n: The size of the destination buffer.
*/
#ifdef __HAVE_ARCH_STRLCAT
size_t strlcat(char *dest, const char *src, size_t n)
{
size_t dsize = __strend(dest) - dest;
size_t len = __strend(src) - src;
size_t res = dsize + len;
if (dsize < n) {
dest += dsize;
n -= dsize;
if (len >= n)
len = n - 1;
dest[len] = '\0';
memcpy(dest, src, len);
}
return res;
}
EXPORT_SYMBOL(strlcat);
#endif
/**
* strncat - Append a length-limited, %NUL-terminated string to another
* @dest: The string to be appended to
* @src: The string to append to it
* @n: The maximum numbers of bytes to copy
*
* returns a pointer to @dest
*
* Note that in contrast to strncpy, strncat ensures the result is
* terminated.
*/
#ifdef __HAVE_ARCH_STRNCAT
char *strncat(char *dest, const char *src, size_t n)
{
size_t len = __strnend(src, n) - src;
char *p = __strend(dest);
p[len] = '\0';
memcpy(p, src, len);
return dest;
}
EXPORT_SYMBOL(strncat);
#endif
/**
* strcmp - Compare two strings
* @s1: One string
* @s2: Another string
*
* returns 0 if @s1 and @s2 are equal,
* < 0 if @s1 is less than @s2
* > 0 if @s1 is greater than @s2
*/
#ifdef __HAVE_ARCH_STRCMP
int strcmp(const char *s1, const char *s2)
{
int ret = 0;
asm volatile(
" lghi 0,0\n"
"0: clst %[s1],%[s2]\n"
" jo 0b\n"
" je 1f\n"
" ic %[ret],0(%[s1])\n"
" ic 0,0(%[s2])\n"
" sr %[ret],0\n"
"1:"
: [ret] "+&d" (ret), [s1] "+&a" (s1), [s2] "+&a" (s2)
:
: "cc", "memory", "0");
return ret;
}
EXPORT_SYMBOL(strcmp);
#endif
static inline int clcle(const char *s1, unsigned long l1,
const char *s2, unsigned long l2)
{
union register_pair r1 = { .even = (unsigned long)s1, .odd = l1, };
union register_pair r3 = { .even = (unsigned long)s2, .odd = l2, };
int cc;
asm volatile(
"0: clcle %[r1],%[r3],0\n"
" jo 0b\n"
" ipm %[cc]\n"
" srl %[cc],28\n"
: [cc] "=&d" (cc), [r1] "+&d" (r1.pair), [r3] "+&d" (r3.pair)
:
: "cc", "memory");
return cc;
}
/**
* strstr - Find the first substring in a %NUL terminated string
* @s1: The string to be searched
* @s2: The string to search for
*/
#ifdef __HAVE_ARCH_STRSTR
char *strstr(const char *s1, const char *s2)
{
int l1, l2;
l2 = __strend(s2) - s2;
if (!l2)
return (char *) s1;
l1 = __strend(s1) - s1;
while (l1-- >= l2) {
int cc;
cc = clcle(s1, l2, s2, l2);
if (!cc)
return (char *) s1;
s1++;
}
return NULL;
}
EXPORT_SYMBOL(strstr);
#endif
/**
* memchr - Find a character in an area of memory.
* @s: The memory area
* @c: The byte to search for
* @n: The size of the area.
*
* returns the address of the first occurrence of @c, or %NULL
* if @c is not found
*/
#ifdef __HAVE_ARCH_MEMCHR
void *memchr(const void *s, int c, size_t n)
{
const void *ret = s + n;
asm volatile(
" lgr 0,%[c]\n"
"0: srst %[ret],%[s]\n"
" jo 0b\n"
" jl 1f\n"
" la %[ret],0\n"
"1:"
: [ret] "+&a" (ret), [s] "+&a" (s)
: [c] "d" (c)
: "cc", "memory", "0");
return (void *) ret;
}
EXPORT_SYMBOL(memchr);
#endif
/**
* memcmp - Compare two areas of memory
* @s1: One area of memory
* @s2: Another area of memory
* @n: The size of the area.
*/
#ifdef __HAVE_ARCH_MEMCMP
int memcmp(const void *s1, const void *s2, size_t n)
{
int ret;
ret = clcle(s1, n, s2, n);
if (ret)
ret = ret == 1 ? -1 : 1;
return ret;
}
EXPORT_SYMBOL(memcmp);
#endif
/**
* memscan - Find a character in an area of memory.
* @s: The memory area
* @c: The byte to search for
* @n: The size of the area.
*
* returns the address of the first occurrence of @c, or 1 byte past
* the area if @c is not found
*/
#ifdef __HAVE_ARCH_MEMSCAN
void *memscan(void *s, int c, size_t n)
{
const void *ret = s + n;
asm volatile(
" lgr 0,%[c]\n"
"0: srst %[ret],%[s]\n"
" jo 0b\n"
: [ret] "+&a" (ret), [s] "+&a" (s)
: [c] "d" (c)
: "cc", "memory", "0");
return (void *)ret;
}
EXPORT_SYMBOL(memscan);
#endif
| linux-master | arch/s390/lib/string.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Optimized xor_block operation for RAID4/5
*
* Copyright IBM Corp. 2016
* Author(s): Martin Schwidefsky <[email protected]>
*/
#include <linux/types.h>
#include <linux/export.h>
#include <linux/raid/xor.h>
#include <asm/xor.h>
static void xor_xc_2(unsigned long bytes, unsigned long * __restrict p1,
const unsigned long * __restrict p2)
{
asm volatile(
" larl 1,2f\n"
" aghi %0,-1\n"
" jm 3f\n"
" srlg 0,%0,8\n"
" ltgr 0,0\n"
" jz 1f\n"
"0: xc 0(256,%1),0(%2)\n"
" la %1,256(%1)\n"
" la %2,256(%2)\n"
" brctg 0,0b\n"
"1: ex %0,0(1)\n"
" j 3f\n"
"2: xc 0(1,%1),0(%2)\n"
"3:\n"
: : "d" (bytes), "a" (p1), "a" (p2)
: "0", "1", "cc", "memory");
}
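/*
* Plain C sketch of what xor_xc_2() above computes (illustration, kept
* out of the build): XC handles at most 256 bytes per instruction with
* the length encoded as len - 1, so the asm processes full 256-byte
* blocks in the BRCTG loop and uses EX to patch the residual length
* into a final XC.
*/
#if 0
static void xor_xc_2_equivalent(unsigned long bytes, unsigned char *p1,
				const unsigned char *p2)
{
	unsigned long i;

	for (i = 0; i < bytes; i++)
		p1[i] ^= p2[i];
}
#endif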
static void xor_xc_3(unsigned long bytes, unsigned long * __restrict p1,
const unsigned long * __restrict p2,
const unsigned long * __restrict p3)
{
asm volatile(
" larl 1,2f\n"
" aghi %0,-1\n"
" jm 3f\n"
" srlg 0,%0,8\n"
" ltgr 0,0\n"
" jz 1f\n"
"0: xc 0(256,%1),0(%2)\n"
" xc 0(256,%1),0(%3)\n"
" la %1,256(%1)\n"
" la %2,256(%2)\n"
" la %3,256(%3)\n"
" brctg 0,0b\n"
"1: ex %0,0(1)\n"
" ex %0,6(1)\n"
" j 3f\n"
"2: xc 0(1,%1),0(%2)\n"
" xc 0(1,%1),0(%3)\n"
"3:\n"
: "+d" (bytes), "+a" (p1), "+a" (p2), "+a" (p3)
: : "0", "1", "cc", "memory");
}
static void xor_xc_4(unsigned long bytes, unsigned long * __restrict p1,
const unsigned long * __restrict p2,
const unsigned long * __restrict p3,
const unsigned long * __restrict p4)
{
asm volatile(
" larl 1,2f\n"
" aghi %0,-1\n"
" jm 3f\n"
" srlg 0,%0,8\n"
" ltgr 0,0\n"
" jz 1f\n"
"0: xc 0(256,%1),0(%2)\n"
" xc 0(256,%1),0(%3)\n"
" xc 0(256,%1),0(%4)\n"
" la %1,256(%1)\n"
" la %2,256(%2)\n"
" la %3,256(%3)\n"
" la %4,256(%4)\n"
" brctg 0,0b\n"
"1: ex %0,0(1)\n"
" ex %0,6(1)\n"
" ex %0,12(1)\n"
" j 3f\n"
"2: xc 0(1,%1),0(%2)\n"
" xc 0(1,%1),0(%3)\n"
" xc 0(1,%1),0(%4)\n"
"3:\n"
: "+d" (bytes), "+a" (p1), "+a" (p2), "+a" (p3), "+a" (p4)
: : "0", "1", "cc", "memory");
}
static void xor_xc_5(unsigned long bytes, unsigned long * __restrict p1,
const unsigned long * __restrict p2,
const unsigned long * __restrict p3,
const unsigned long * __restrict p4,
const unsigned long * __restrict p5)
{
asm volatile(
" larl 1,2f\n"
" aghi %0,-1\n"
" jm 3f\n"
" srlg 0,%0,8\n"
" ltgr 0,0\n"
" jz 1f\n"
"0: xc 0(256,%1),0(%2)\n"
" xc 0(256,%1),0(%3)\n"
" xc 0(256,%1),0(%4)\n"
" xc 0(256,%1),0(%5)\n"
" la %1,256(%1)\n"
" la %2,256(%2)\n"
" la %3,256(%3)\n"
" la %4,256(%4)\n"
" la %5,256(%5)\n"
" brctg 0,0b\n"
"1: ex %0,0(1)\n"
" ex %0,6(1)\n"
" ex %0,12(1)\n"
" ex %0,18(1)\n"
" j 3f\n"
"2: xc 0(1,%1),0(%2)\n"
" xc 0(1,%1),0(%3)\n"
" xc 0(1,%1),0(%4)\n"
" xc 0(1,%1),0(%5)\n"
"3:\n"
: "+d" (bytes), "+a" (p1), "+a" (p2), "+a" (p3), "+a" (p4),
"+a" (p5)
: : "0", "1", "cc", "memory");
}
struct xor_block_template xor_block_xc = {
.name = "xc",
.do_2 = xor_xc_2,
.do_3 = xor_xc_3,
.do_4 = xor_xc_4,
.do_5 = xor_xc_5,
};
EXPORT_SYMBOL(xor_block_xc);
| linux-master | arch/s390/lib/xor.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Common helper functions for kprobes and uprobes
*
* Copyright IBM Corp. 2014
*/
#include <linux/errno.h>
#include <asm/kprobes.h>
#include <asm/dis.h>
int probe_is_prohibited_opcode(u16 *insn)
{
if (!is_known_insn((unsigned char *)insn))
return -EINVAL;
switch (insn[0] >> 8) {
case 0x0c: /* bassm */
case 0x0b: /* bsm */
case 0x83: /* diag */
case 0x44: /* ex */
case 0xac: /* stnsm */
case 0xad: /* stosm */
return -EINVAL;
case 0xc6:
switch (insn[0] & 0x0f) {
case 0x00: /* exrl */
return -EINVAL;
}
}
switch (insn[0]) {
case 0x0101: /* pr */
case 0xb25a: /* bsa */
case 0xb240: /* bakr */
case 0xb258: /* bsg */
case 0xb218: /* pc */
case 0xb228: /* pt */
case 0xb98d: /* epsw */
case 0xe560: /* tbegin */
case 0xe561: /* tbeginc */
case 0xb2f8: /* tend */
return -EINVAL;
}
return 0;
}
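/*
* Decode example (illustration only): for an EXECUTE instruction such
* as "ex %r1,0(%r2)" the first halfword is 0x44??, so insn[0] >> 8
* yields 0x44 and the function above returns -EINVAL, refusing to
* place a probe on it.
*/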
int probe_get_fixup_type(u16 *insn)
{
/* default fixup method */
int fixup = FIXUP_PSW_NORMAL;
switch (insn[0] >> 8) {
case 0x05: /* balr */
case 0x0d: /* basr */
fixup = FIXUP_RETURN_REGISTER;
/* if r2 = 0, no branch will be taken */
if ((insn[0] & 0x0f) == 0)
fixup |= FIXUP_BRANCH_NOT_TAKEN;
break;
case 0x06: /* bctr */
case 0x07: /* bcr */
fixup = FIXUP_BRANCH_NOT_TAKEN;
break;
case 0x45: /* bal */
case 0x4d: /* bas */
fixup = FIXUP_RETURN_REGISTER;
break;
case 0x47: /* bc */
case 0x46: /* bct */
case 0x86: /* bxh */
case 0x87: /* bxle */
fixup = FIXUP_BRANCH_NOT_TAKEN;
break;
case 0x82: /* lpsw */
fixup = FIXUP_NOT_REQUIRED;
break;
case 0xb2: /* lpswe */
if ((insn[0] & 0xff) == 0xb2)
fixup = FIXUP_NOT_REQUIRED;
break;
case 0xa7: /* bras */
if ((insn[0] & 0x0f) == 0x05)
fixup |= FIXUP_RETURN_REGISTER;
break;
case 0xc0:
if ((insn[0] & 0x0f) == 0x05) /* brasl */
fixup |= FIXUP_RETURN_REGISTER;
break;
case 0xeb:
switch (insn[2] & 0xff) {
case 0x44: /* bxhg */
case 0x45: /* bxleg */
fixup = FIXUP_BRANCH_NOT_TAKEN;
break;
}
break;
case 0xe3: /* bctg */
if ((insn[2] & 0xff) == 0x46)
fixup = FIXUP_BRANCH_NOT_TAKEN;
break;
case 0xec:
switch (insn[2] & 0xff) {
case 0xe5: /* clgrb */
case 0xe6: /* cgrb */
case 0xf6: /* crb */
case 0xf7: /* clrb */
case 0xfc: /* cgib */
case 0xfd: /* cglib */
case 0xfe: /* cib */
case 0xff: /* clib */
fixup = FIXUP_BRANCH_NOT_TAKEN;
break;
}
break;
}
return fixup;
}
int probe_is_insn_relative_long(u16 *insn)
{
/* Check if we have a RIL-b or RIL-c format instruction which
* we need to modify in order to avoid instruction emulation. */
switch (insn[0] >> 8) {
case 0xc0:
if ((insn[0] & 0x0f) == 0x00) /* larl */
return true;
break;
case 0xc4:
switch (insn[0] & 0x0f) {
case 0x02: /* llhrl */
case 0x04: /* lghrl */
case 0x05: /* lhrl */
case 0x06: /* llghrl */
case 0x07: /* sthrl */
case 0x08: /* lgrl */
case 0x0b: /* stgrl */
case 0x0c: /* lgfrl */
case 0x0d: /* lrl */
case 0x0e: /* llgfrl */
case 0x0f: /* strl */
return true;
}
break;
case 0xc6:
switch (insn[0] & 0x0f) {
case 0x02: /* pfdrl */
case 0x04: /* cghrl */
case 0x05: /* chrl */
case 0x06: /* clghrl */
case 0x07: /* clhrl */
case 0x08: /* cgrl */
case 0x0a: /* clgrl */
case 0x0c: /* cgfrl */
case 0x0d: /* crl */
case 0x0e: /* clgfrl */
case 0x0f: /* clrl */
return true;
}
break;
}
return false;
}
| linux-master | arch/s390/lib/probes.c |
// SPDX-License-Identifier: GPL-2.0+
#include <kunit/test.h>
#include <linux/module.h>
#include "test_modules.h"
/*
* Test that modules with many relocations are loaded properly.
*/
static void test_modules_many_vmlinux_relocs(struct kunit *test)
{
int result = 0;
#define CALL_RETURN(i) result += test_modules_return_ ## i()
REPEAT_10000(CALL_RETURN);
KUNIT_ASSERT_EQ(test, result, 49995000);
}
static struct kunit_case modules_testcases[] = {
KUNIT_CASE(test_modules_many_vmlinux_relocs),
{}
};
static struct kunit_suite modules_test_suite = {
.name = "modules_test_s390",
.test_cases = modules_testcases,
};
kunit_test_suites(&modules_test_suite);
MODULE_LICENSE("GPL");
| linux-master | arch/s390/lib/test_modules.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Out of line spinlock code.
*
* Copyright IBM Corp. 2004, 2006
* Author(s): Martin Schwidefsky ([email protected])
*/
#include <linux/types.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <asm/alternative.h>
int spin_retry = -1;
static int __init spin_retry_init(void)
{
if (spin_retry < 0)
spin_retry = 1000;
return 0;
}
early_initcall(spin_retry_init);
/*
* spin_retry= parameter
*/
static int __init spin_retry_setup(char *str)
{
spin_retry = simple_strtoul(str, &str, 0);
return 1;
}
__setup("spin_retry=", spin_retry_setup);
struct spin_wait {
struct spin_wait *next, *prev;
int node_id;
} __aligned(32);
static DEFINE_PER_CPU_ALIGNED(struct spin_wait, spin_wait[4]);
#define _Q_LOCK_CPU_OFFSET 0
#define _Q_LOCK_STEAL_OFFSET 16
#define _Q_TAIL_IDX_OFFSET 18
#define _Q_TAIL_CPU_OFFSET 20
#define _Q_LOCK_CPU_MASK 0x0000ffff
#define _Q_LOCK_STEAL_ADD 0x00010000
#define _Q_LOCK_STEAL_MASK 0x00030000
#define _Q_TAIL_IDX_MASK 0x000c0000
#define _Q_TAIL_CPU_MASK 0xfff00000
#define _Q_LOCK_MASK (_Q_LOCK_CPU_MASK | _Q_LOCK_STEAL_MASK)
#define _Q_TAIL_MASK (_Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK)
void arch_spin_lock_setup(int cpu)
{
struct spin_wait *node;
int ix;
node = per_cpu_ptr(&spin_wait[0], cpu);
for (ix = 0; ix < 4; ix++, node++) {
memset(node, 0, sizeof(*node));
node->node_id = ((cpu + 1) << _Q_TAIL_CPU_OFFSET) +
(ix << _Q_TAIL_IDX_OFFSET);
}
}
static inline int arch_load_niai4(int *lock)
{
int owner;
asm_inline volatile(
ALTERNATIVE("nop", ".insn rre,0xb2fa0000,4,0", 49) /* NIAI 4 */
" l %0,%1\n"
: "=d" (owner) : "Q" (*lock) : "memory");
return owner;
}
static inline int arch_cmpxchg_niai8(int *lock, int old, int new)
{
int expected = old;
asm_inline volatile(
ALTERNATIVE("nop", ".insn rre,0xb2fa0000,8,0", 49) /* NIAI 8 */
" cs %0,%3,%1\n"
: "=d" (old), "=Q" (*lock)
: "0" (old), "d" (new), "Q" (*lock)
: "cc", "memory");
return expected == old;
}
static inline struct spin_wait *arch_spin_decode_tail(int lock)
{
int ix, cpu;
ix = (lock & _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET;
cpu = (lock & _Q_TAIL_CPU_MASK) >> _Q_TAIL_CPU_OFFSET;
return per_cpu_ptr(&spin_wait[ix], cpu - 1);
}
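/*
* Worked example (illustration only): arch_spin_lock_setup() encodes
* cpu 5, index 2 as node_id = (5 + 1) << _Q_TAIL_CPU_OFFSET |
* 2 << _Q_TAIL_IDX_OFFSET = 0x00680000. arch_spin_decode_tail()
* reverses this: ix = (0x00680000 & _Q_TAIL_IDX_MASK) >> 18 = 2 and
* cpu = (0x00680000 & _Q_TAIL_CPU_MASK) >> 20 = 6, hence per-cpu
* slot 2 of cpu 6 - 1 = 5.
*/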
static inline int arch_spin_yield_target(int lock, struct spin_wait *node)
{
if (lock & _Q_LOCK_CPU_MASK)
return lock & _Q_LOCK_CPU_MASK;
if (node == NULL || node->prev == NULL)
return 0; /* 0 -> no target cpu */
while (node->prev)
node = node->prev;
return node->node_id >> _Q_TAIL_CPU_OFFSET;
}
static inline void arch_spin_lock_queued(arch_spinlock_t *lp)
{
struct spin_wait *node, *next;
int lockval, ix, node_id, tail_id, old, new, owner, count;
ix = S390_lowcore.spinlock_index++;
barrier();
lockval = SPINLOCK_LOCKVAL; /* cpu + 1 */
node = this_cpu_ptr(&spin_wait[ix]);
node->prev = node->next = NULL;
node_id = node->node_id;
/* Enqueue the node for this CPU in the spinlock wait queue */
while (1) {
old = READ_ONCE(lp->lock);
if ((old & _Q_LOCK_CPU_MASK) == 0 &&
(old & _Q_LOCK_STEAL_MASK) != _Q_LOCK_STEAL_MASK) {
/*
* The lock is free but there may be waiters.
* With no waiters simply take the lock, if there
* are waiters try to steal the lock. The lock may
* be stolen three times before the next queued
* waiter will get the lock.
*/
new = (old ? (old + _Q_LOCK_STEAL_ADD) : 0) | lockval;
if (__atomic_cmpxchg_bool(&lp->lock, old, new))
/* Got the lock */
goto out;
/* lock passing in progress */
continue;
}
/* Make the node of this CPU the new tail. */
new = node_id | (old & _Q_LOCK_MASK);
if (__atomic_cmpxchg_bool(&lp->lock, old, new))
break;
}
/* Set the 'next' pointer of the tail node in the queue */
tail_id = old & _Q_TAIL_MASK;
if (tail_id != 0) {
node->prev = arch_spin_decode_tail(tail_id);
WRITE_ONCE(node->prev->next, node);
}
/* Pass the virtual CPU to the lock holder if it is not running */
owner = arch_spin_yield_target(old, node);
if (owner && arch_vcpu_is_preempted(owner - 1))
smp_yield_cpu(owner - 1);
/* Spin on the CPU local node->prev pointer */
if (tail_id != 0) {
count = spin_retry;
while (READ_ONCE(node->prev) != NULL) {
if (count-- >= 0)
continue;
count = spin_retry;
/* Query running state of lock holder again. */
owner = arch_spin_yield_target(old, node);
if (owner && arch_vcpu_is_preempted(owner - 1))
smp_yield_cpu(owner - 1);
}
}
/* Spin on the lock value in the spinlock_t */
count = spin_retry;
while (1) {
old = READ_ONCE(lp->lock);
owner = old & _Q_LOCK_CPU_MASK;
if (!owner) {
tail_id = old & _Q_TAIL_MASK;
new = ((tail_id != node_id) ? tail_id : 0) | lockval;
if (__atomic_cmpxchg_bool(&lp->lock, old, new))
/* Got the lock */
break;
continue;
}
if (count-- >= 0)
continue;
count = spin_retry;
if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(owner - 1))
smp_yield_cpu(owner - 1);
}
/* Pass lock_spin job to next CPU in the queue */
if (node_id && tail_id != node_id) {
/* Wait until the next CPU has set up the 'next' pointer */
while ((next = READ_ONCE(node->next)) == NULL)
;
next->prev = NULL;
}
out:
S390_lowcore.spinlock_index--;
}
static inline void arch_spin_lock_classic(arch_spinlock_t *lp)
{
int lockval, old, new, owner, count;
lockval = SPINLOCK_LOCKVAL; /* cpu + 1 */
/* Pass the virtual CPU to the lock holder if it is not running */
owner = arch_spin_yield_target(READ_ONCE(lp->lock), NULL);
if (owner && arch_vcpu_is_preempted(owner - 1))
smp_yield_cpu(owner - 1);
count = spin_retry;
while (1) {
old = arch_load_niai4(&lp->lock);
owner = old & _Q_LOCK_CPU_MASK;
/* Try to get the lock if it is free. */
if (!owner) {
new = (old & _Q_TAIL_MASK) | lockval;
if (arch_cmpxchg_niai8(&lp->lock, old, new)) {
/* Got the lock */
return;
}
continue;
}
if (count-- >= 0)
continue;
count = spin_retry;
if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(owner - 1))
smp_yield_cpu(owner - 1);
}
}
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
if (test_cpu_flag(CIF_DEDICATED_CPU))
arch_spin_lock_queued(lp);
else
arch_spin_lock_classic(lp);
}
EXPORT_SYMBOL(arch_spin_lock_wait);
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
int cpu = SPINLOCK_LOCKVAL;
int owner, count;
for (count = spin_retry; count > 0; count--) {
owner = READ_ONCE(lp->lock);
/* Try to get the lock if it is free. */
if (!owner) {
if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
return 1;
}
}
return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);
void arch_read_lock_wait(arch_rwlock_t *rw)
{
if (unlikely(in_interrupt())) {
while (READ_ONCE(rw->cnts) & 0x10000)
barrier();
return;
}
/* Remove this reader again to allow recursive read locking */
__atomic_add_const(-1, &rw->cnts);
/* Put the reader into the wait queue */
arch_spin_lock(&rw->wait);
/* Now add this reader to the count value again */
__atomic_add_const(1, &rw->cnts);
/* Loop until the writer is done */
while (READ_ONCE(rw->cnts) & 0x10000)
barrier();
arch_spin_unlock(&rw->wait);
}
EXPORT_SYMBOL(arch_read_lock_wait);
void arch_write_lock_wait(arch_rwlock_t *rw)
{
int old;
/* Add this CPU to the write waiters */
__atomic_add(0x20000, &rw->cnts);
/* Put the writer into the wait queue */
arch_spin_lock(&rw->wait);
while (1) {
old = READ_ONCE(rw->cnts);
if ((old & 0x1ffff) == 0 &&
__atomic_cmpxchg_bool(&rw->cnts, old, old | 0x10000))
/* Got the lock */
break;
barrier();
}
arch_spin_unlock(&rw->wait);
}
EXPORT_SYMBOL(arch_write_lock_wait);
void arch_spin_relax(arch_spinlock_t *lp)
{
int cpu;
cpu = READ_ONCE(lp->lock) & _Q_LOCK_CPU_MASK;
if (!cpu)
return;
if (MACHINE_IS_LPAR && !arch_vcpu_is_preempted(cpu - 1))
return;
smp_yield_cpu(cpu - 1);
}
EXPORT_SYMBOL(arch_spin_relax);
| linux-master | arch/s390/lib/spinlock.c |
// SPDX-License-Identifier: GPL-2.0+
#include <asm/ptrace.h>
#include <linux/error-injection.h>
#include <linux/kprobes.h>
void override_function_with_return(struct pt_regs *regs)
{
/*
* Emulate 'br 14'. 'regs' is captured by kprobes on entry to some
* kernel function.
*/
regs->psw.addr = regs->gprs[14];
}
NOKPROBE_SYMBOL(override_function_with_return);
| linux-master | arch/s390/lib/error-inject.c |
// SPDX-License-Identifier: GPL-2.0
/*
* MSB0 numbered special bitops handling.
*
* The bits are numbered:
* |0..............63|64............127|128...........191|192...........255|
*
* The reason for this bit numbering is the fact that the hardware sets bits
* in a bitmap starting at bit 0 (MSB) and we don't want to scan the bitmap
* from the 'wrong end'.
*/
#include <linux/compiler.h>
#include <linux/bitops.h>
#include <linux/export.h>
unsigned long find_first_bit_inv(const unsigned long *addr, unsigned long size)
{
const unsigned long *p = addr;
unsigned long result = 0;
unsigned long tmp;
while (size & ~(BITS_PER_LONG - 1)) {
if ((tmp = *(p++)))
goto found;
result += BITS_PER_LONG;
size -= BITS_PER_LONG;
}
if (!size)
return result;
tmp = (*p) & (~0UL << (BITS_PER_LONG - size));
if (!tmp) /* Are any bits set? */
return result + size; /* Nope. */
found:
return result + (__fls(tmp) ^ (BITS_PER_LONG - 1));
}
EXPORT_SYMBOL(find_first_bit_inv);
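/*
* Worked example (illustration only): for the single word
* 0x4000000000000000UL only MSB0 bit 1 is set. __fls() returns 62,
* so find_first_bit_inv() computes 0 + (62 ^ 63) = 1, matching the
* inverted bit numbering described at the top of this file.
*/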
unsigned long find_next_bit_inv(const unsigned long *addr, unsigned long size,
unsigned long offset)
{
const unsigned long *p = addr + (offset / BITS_PER_LONG);
unsigned long result = offset & ~(BITS_PER_LONG - 1);
unsigned long tmp;
if (offset >= size)
return size;
size -= result;
offset %= BITS_PER_LONG;
if (offset) {
tmp = *(p++);
tmp &= (~0UL >> offset);
if (size < BITS_PER_LONG)
goto found_first;
if (tmp)
goto found_middle;
size -= BITS_PER_LONG;
result += BITS_PER_LONG;
}
while (size & ~(BITS_PER_LONG-1)) {
if ((tmp = *(p++)))
goto found_middle;
result += BITS_PER_LONG;
size -= BITS_PER_LONG;
}
if (!size)
return result;
tmp = *p;
found_first:
tmp &= (~0UL << (BITS_PER_LONG - size));
if (!tmp) /* Are any bits set? */
return result + size; /* Nope. */
found_middle:
return result + (__fls(tmp) ^ (BITS_PER_LONG - 1));
}
EXPORT_SYMBOL(find_next_bit_inv);
| linux-master | arch/s390/lib/find.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Standard user space access functions based on mvcp/mvcs and doing
* interesting things in the secondary space mode.
*
* Copyright IBM Corp. 2006,2014
* Author(s): Martin Schwidefsky ([email protected]),
* Gerald Schaefer ([email protected])
*/
#include <linux/uaccess.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <asm/asm-extable.h>
#ifdef CONFIG_DEBUG_ENTRY
void debug_user_asce(int exit)
{
unsigned long cr1, cr7;
__ctl_store(cr1, 1, 1);
__ctl_store(cr7, 7, 7);
if (cr1 == S390_lowcore.kernel_asce && cr7 == S390_lowcore.user_asce)
return;
panic("incorrect ASCE on kernel %s\n"
"cr1: %016lx cr7: %016lx\n"
"kernel: %016llx user: %016llx\n",
exit ? "exit" : "entry", cr1, cr7,
S390_lowcore.kernel_asce, S390_lowcore.user_asce);
}
#endif /*CONFIG_DEBUG_ENTRY */
static unsigned long raw_copy_from_user_key(void *to, const void __user *from,
unsigned long size, unsigned long key)
{
unsigned long rem;
union oac spec = {
.oac2.key = key,
.oac2.as = PSW_BITS_AS_SECONDARY,
.oac2.k = 1,
.oac2.a = 1,
};
asm volatile(
" lr 0,%[spec]\n"
"0: mvcos 0(%[to]),0(%[from]),%[size]\n"
"1: jz 5f\n"
" algr %[size],%[val]\n"
" slgr %[from],%[val]\n"
" slgr %[to],%[val]\n"
" j 0b\n"
"2: la %[rem],4095(%[from])\n" /* rem = from + 4095 */
" nr %[rem],%[val]\n" /* rem = (from + 4095) & -4096 */
" slgr %[rem],%[from]\n"
" clgr %[size],%[rem]\n" /* copy crosses next page boundary? */
" jnh 6f\n"
"3: mvcos 0(%[to]),0(%[from]),%[rem]\n"
"4: slgr %[size],%[rem]\n"
" j 6f\n"
"5: slgr %[size],%[size]\n"
"6:\n"
EX_TABLE(0b, 2b)
EX_TABLE(1b, 2b)
EX_TABLE(3b, 6b)
EX_TABLE(4b, 6b)
: [size] "+&a" (size), [from] "+&a" (from), [to] "+&a" (to), [rem] "=&a" (rem)
: [val] "a" (-4096UL), [spec] "d" (spec.val)
: "cc", "memory", "0");
return size;
}
unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
return raw_copy_from_user_key(to, from, n, 0);
}
EXPORT_SYMBOL(raw_copy_from_user);
unsigned long _copy_from_user_key(void *to, const void __user *from,
unsigned long n, unsigned long key)
{
unsigned long res = n;
might_fault();
if (!should_fail_usercopy()) {
instrument_copy_from_user_before(to, from, n);
res = raw_copy_from_user_key(to, from, n, key);
instrument_copy_from_user_after(to, from, n, res);
}
if (unlikely(res))
memset(to + (n - res), 0, res);
return res;
}
EXPORT_SYMBOL(_copy_from_user_key);
static unsigned long raw_copy_to_user_key(void __user *to, const void *from,
unsigned long size, unsigned long key)
{
unsigned long rem;
union oac spec = {
.oac1.key = key,
.oac1.as = PSW_BITS_AS_SECONDARY,
.oac1.k = 1,
.oac1.a = 1,
};
asm volatile(
" lr 0,%[spec]\n"
"0: mvcos 0(%[to]),0(%[from]),%[size]\n"
"1: jz 5f\n"
" algr %[size],%[val]\n"
" slgr %[to],%[val]\n"
" slgr %[from],%[val]\n"
" j 0b\n"
"2: la %[rem],4095(%[to])\n" /* rem = to + 4095 */
" nr %[rem],%[val]\n" /* rem = (to + 4095) & -4096 */
" slgr %[rem],%[to]\n"
" clgr %[size],%[rem]\n" /* copy crosses next page boundary? */
" jnh 6f\n"
"3: mvcos 0(%[to]),0(%[from]),%[rem]\n"
"4: slgr %[size],%[rem]\n"
" j 6f\n"
"5: slgr %[size],%[size]\n"
"6:\n"
EX_TABLE(0b, 2b)
EX_TABLE(1b, 2b)
EX_TABLE(3b, 6b)
EX_TABLE(4b, 6b)
: [size] "+&a" (size), [to] "+&a" (to), [from] "+&a" (from), [rem] "=&a" (rem)
: [val] "a" (-4096UL), [spec] "d" (spec.val)
: "cc", "memory", "0");
return size;
}
unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
return raw_copy_to_user_key(to, from, n, 0);
}
EXPORT_SYMBOL(raw_copy_to_user);
unsigned long _copy_to_user_key(void __user *to, const void *from,
unsigned long n, unsigned long key)
{
might_fault();
if (should_fail_usercopy())
return n;
instrument_copy_to_user(to, from, n);
return raw_copy_to_user_key(to, from, n, key);
}
EXPORT_SYMBOL(_copy_to_user_key);
unsigned long __clear_user(void __user *to, unsigned long size)
{
unsigned long rem;
union oac spec = {
.oac1.as = PSW_BITS_AS_SECONDARY,
.oac1.a = 1,
};
asm volatile(
" lr 0,%[spec]\n"
"0: mvcos 0(%[to]),0(%[zeropg]),%[size]\n"
"1: jz 5f\n"
" algr %[size],%[val]\n"
" slgr %[to],%[val]\n"
" j 0b\n"
"2: la %[rem],4095(%[to])\n" /* rem = to + 4095 */
" nr %[rem],%[val]\n" /* rem = (to + 4095) & -4096 */
" slgr %[rem],%[to]\n"
" clgr %[size],%[rem]\n" /* copy crosses next page boundary? */
" jnh 6f\n"
"3: mvcos 0(%[to]),0(%[zeropg]),%[rem]\n"
"4: slgr %[size],%[rem]\n"
" j 6f\n"
"5: slgr %[size],%[size]\n"
"6:\n"
EX_TABLE(0b, 2b)
EX_TABLE(1b, 2b)
EX_TABLE(3b, 6b)
EX_TABLE(4b, 6b)
: [size] "+&a" (size), [to] "+&a" (to), [rem] "=&a" (rem)
: [val] "a" (-4096UL), [zeropg] "a" (empty_zero_page), [spec] "d" (spec.val)
: "cc", "memory", "0");
return size;
}
EXPORT_SYMBOL(__clear_user);
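/*
 * Illustrative sketch (not part of the original file): __clear_user() also
 * returns the number of bytes left unwritten; a hypothetical wrapper maps
 * that to the usual -EFAULT convention.
 */
static inline int example_zero_user(void __user *uptr, unsigned long len)
{
	return __clear_user(uptr, len) ? -EFAULT : 0;
}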
| linux-master | arch/s390/lib/uaccess.c |
// SPDX-License-Identifier: GPL-2.0
/*
* KVM guest address space mapping code
*
* Copyright IBM Corp. 2007, 2020
* Author(s): Martin Schwidefsky <[email protected]>
* David Hildenbrand <[email protected]>
* Janosch Frank <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/pagewalk.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>
#define GMAP_SHADOW_FAKE_TABLE 1ULL
/**
* gmap_alloc - allocate and initialize a guest address space
* @limit: maximum address of the gmap address space
*
* Returns a guest address space structure.
*/
static struct gmap *gmap_alloc(unsigned long limit)
{
struct gmap *gmap;
struct page *page;
unsigned long *table;
unsigned long etype, atype;
if (limit < _REGION3_SIZE) {
limit = _REGION3_SIZE - 1;
atype = _ASCE_TYPE_SEGMENT;
etype = _SEGMENT_ENTRY_EMPTY;
} else if (limit < _REGION2_SIZE) {
limit = _REGION2_SIZE - 1;
atype = _ASCE_TYPE_REGION3;
etype = _REGION3_ENTRY_EMPTY;
} else if (limit < _REGION1_SIZE) {
limit = _REGION1_SIZE - 1;
atype = _ASCE_TYPE_REGION2;
etype = _REGION2_ENTRY_EMPTY;
} else {
limit = -1UL;
atype = _ASCE_TYPE_REGION1;
etype = _REGION1_ENTRY_EMPTY;
}
gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL_ACCOUNT);
if (!gmap)
goto out;
INIT_LIST_HEAD(&gmap->crst_list);
INIT_LIST_HEAD(&gmap->children);
INIT_LIST_HEAD(&gmap->pt_list);
INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL_ACCOUNT);
INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC | __GFP_ACCOUNT);
INIT_RADIX_TREE(&gmap->host_to_rmap, GFP_ATOMIC | __GFP_ACCOUNT);
spin_lock_init(&gmap->guest_table_lock);
spin_lock_init(&gmap->shadow_lock);
refcount_set(&gmap->ref_count, 1);
page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
if (!page)
goto out_free;
page->index = 0;
list_add(&page->lru, &gmap->crst_list);
table = page_to_virt(page);
crst_table_init(table, etype);
gmap->table = table;
gmap->asce = atype | _ASCE_TABLE_LENGTH |
_ASCE_USER_BITS | __pa(table);
gmap->asce_end = limit;
return gmap;
out_free:
kfree(gmap);
out:
return NULL;
}
/**
* gmap_create - create a guest address space
* @mm: pointer to the parent mm_struct
* @limit: maximum size of the gmap address space
*
* Returns a guest address space structure.
*/
struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit)
{
struct gmap *gmap;
unsigned long gmap_asce;
gmap = gmap_alloc(limit);
if (!gmap)
return NULL;
gmap->mm = mm;
spin_lock(&mm->context.lock);
list_add_rcu(&gmap->list, &mm->context.gmap_list);
if (list_is_singular(&mm->context.gmap_list))
gmap_asce = gmap->asce;
else
gmap_asce = -1UL;
WRITE_ONCE(mm->context.gmap_asce, gmap_asce);
spin_unlock(&mm->context.lock);
return gmap;
}
EXPORT_SYMBOL_GPL(gmap_create);
static void gmap_flush_tlb(struct gmap *gmap)
{
if (MACHINE_HAS_IDTE)
__tlb_flush_idte(gmap->asce);
else
__tlb_flush_global();
}
static void gmap_radix_tree_free(struct radix_tree_root *root)
{
struct radix_tree_iter iter;
unsigned long indices[16];
unsigned long index;
void __rcu **slot;
int i, nr;
/* A radix tree is freed by deleting all of its entries */
index = 0;
do {
nr = 0;
radix_tree_for_each_slot(slot, root, &iter, index) {
indices[nr] = iter.index;
if (++nr == 16)
break;
}
for (i = 0; i < nr; i++) {
index = indices[i];
radix_tree_delete(root, index);
}
} while (nr > 0);
}
static void gmap_rmap_radix_tree_free(struct radix_tree_root *root)
{
struct gmap_rmap *rmap, *rnext, *head;
struct radix_tree_iter iter;
unsigned long indices[16];
unsigned long index;
void __rcu **slot;
int i, nr;
/* A radix tree is freed by deleting all of its entries */
index = 0;
do {
nr = 0;
radix_tree_for_each_slot(slot, root, &iter, index) {
indices[nr] = iter.index;
if (++nr == 16)
break;
}
for (i = 0; i < nr; i++) {
index = indices[i];
head = radix_tree_delete(root, index);
gmap_for_each_rmap_safe(rmap, rnext, head)
kfree(rmap);
}
} while (nr > 0);
}
/**
* gmap_free - free a guest address space
* @gmap: pointer to the guest address space structure
*
* No locks required. There are no references to this gmap anymore.
*/
static void gmap_free(struct gmap *gmap)
{
struct page *page, *next;
/* Flush tlb of all gmaps (if not already done for shadows) */
if (!(gmap_is_shadow(gmap) && gmap->removed))
gmap_flush_tlb(gmap);
/* Free all segment & region tables. */
list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
__free_pages(page, CRST_ALLOC_ORDER);
gmap_radix_tree_free(&gmap->guest_to_host);
gmap_radix_tree_free(&gmap->host_to_guest);
/* Free additional data for a shadow gmap */
if (gmap_is_shadow(gmap)) {
/* Free all page tables. */
list_for_each_entry_safe(page, next, &gmap->pt_list, lru)
page_table_free_pgste(page);
gmap_rmap_radix_tree_free(&gmap->host_to_rmap);
/* Release reference to the parent */
gmap_put(gmap->parent);
}
kfree(gmap);
}
/**
* gmap_get - increase reference counter for guest address space
* @gmap: pointer to the guest address space structure
*
* Returns the gmap pointer
*/
struct gmap *gmap_get(struct gmap *gmap)
{
refcount_inc(&gmap->ref_count);
return gmap;
}
EXPORT_SYMBOL_GPL(gmap_get);
/**
* gmap_put - decrease reference counter for guest address space
* @gmap: pointer to the guest address space structure
*
* If the reference counter reaches zero the guest address space is freed.
*/
void gmap_put(struct gmap *gmap)
{
if (refcount_dec_and_test(&gmap->ref_count))
gmap_free(gmap);
}
EXPORT_SYMBOL_GPL(gmap_put);
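/*
 * Illustrative sketch (not part of the original file): a hypothetical user
 * of the reference counting above, keeping a gmap alive across a code path
 * that may run concurrently with gmap_remove().
 */
static inline void example_use_gmap(struct gmap *gmap)
{
	gmap = gmap_get(gmap);	/* take an extra reference */
	/* ... the gmap stays valid here even if the owner drops it ... */
	gmap_put(gmap);		/* may free the gmap on the last reference */
}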
/**
* gmap_remove - remove a guest address space but do not free it yet
* @gmap: pointer to the guest address space structure
*/
void gmap_remove(struct gmap *gmap)
{
struct gmap *sg, *next;
unsigned long gmap_asce;
/* Remove all shadow gmaps linked to this gmap */
if (!list_empty(&gmap->children)) {
spin_lock(&gmap->shadow_lock);
list_for_each_entry_safe(sg, next, &gmap->children, list) {
list_del(&sg->list);
gmap_put(sg);
}
spin_unlock(&gmap->shadow_lock);
}
/* Remove gmap from the pre-mm list */
spin_lock(&gmap->mm->context.lock);
list_del_rcu(&gmap->list);
if (list_empty(&gmap->mm->context.gmap_list))
gmap_asce = 0;
else if (list_is_singular(&gmap->mm->context.gmap_list))
gmap_asce = list_first_entry(&gmap->mm->context.gmap_list,
struct gmap, list)->asce;
else
gmap_asce = -1UL;
WRITE_ONCE(gmap->mm->context.gmap_asce, gmap_asce);
spin_unlock(&gmap->mm->context.lock);
synchronize_rcu();
/* Put reference */
gmap_put(gmap);
}
EXPORT_SYMBOL_GPL(gmap_remove);
/**
* gmap_enable - switch primary space to the guest address space
* @gmap: pointer to the guest address space structure
*/
void gmap_enable(struct gmap *gmap)
{
S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);
/**
* gmap_disable - switch back to the standard primary address space
* @gmap: pointer to the guest address space structure
*/
void gmap_disable(struct gmap *gmap)
{
S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);
/**
* gmap_get_enabled - get a pointer to the currently enabled gmap
*
 * Returns a pointer to the currently enabled gmap, or NULL if none is enabled.
*/
struct gmap *gmap_get_enabled(void)
{
return (struct gmap *) S390_lowcore.gmap;
}
EXPORT_SYMBOL_GPL(gmap_get_enabled);
/*
* gmap_alloc_table is assumed to be called with mmap_lock held
*/
static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
unsigned long init, unsigned long gaddr)
{
struct page *page;
unsigned long *new;
	/* since we don't free the gmap table until gmap_free we can unlock */
page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
if (!page)
return -ENOMEM;
new = page_to_virt(page);
crst_table_init(new, init);
spin_lock(&gmap->guest_table_lock);
if (*table & _REGION_ENTRY_INVALID) {
list_add(&page->lru, &gmap->crst_list);
*table = __pa(new) | _REGION_ENTRY_LENGTH |
(*table & _REGION_ENTRY_TYPE_MASK);
page->index = gaddr;
page = NULL;
}
spin_unlock(&gmap->guest_table_lock);
if (page)
__free_pages(page, CRST_ALLOC_ORDER);
return 0;
}
/**
* __gmap_segment_gaddr - find virtual address from segment pointer
* @entry: pointer to a segment table entry in the guest address space
*
* Returns the virtual address in the guest address space for the segment
*/
static unsigned long __gmap_segment_gaddr(unsigned long *entry)
{
struct page *page;
unsigned long offset;
offset = (unsigned long) entry / sizeof(unsigned long);
offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
page = pmd_pgtable_page((pmd_t *) entry);
return page->index + offset;
}
/**
* __gmap_unlink_by_vmaddr - unlink a single segment via a host address
* @gmap: pointer to the guest address space structure
* @vmaddr: address in the host process address space
*
* Returns 1 if a TLB flush is required
*/
static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
{
unsigned long *entry;
int flush = 0;
BUG_ON(gmap_is_shadow(gmap));
spin_lock(&gmap->guest_table_lock);
entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
if (entry) {
flush = (*entry != _SEGMENT_ENTRY_EMPTY);
*entry = _SEGMENT_ENTRY_EMPTY;
}
spin_unlock(&gmap->guest_table_lock);
return flush;
}
/**
* __gmap_unmap_by_gaddr - unmap a single segment via a guest address
* @gmap: pointer to the guest address space structure
* @gaddr: address in the guest address space
*
* Returns 1 if a TLB flush is required
*/
static int __gmap_unmap_by_gaddr(struct gmap *gmap, unsigned long gaddr)
{
unsigned long vmaddr;
vmaddr = (unsigned long) radix_tree_delete(&gmap->guest_to_host,
gaddr >> PMD_SHIFT);
return vmaddr ? __gmap_unlink_by_vmaddr(gmap, vmaddr) : 0;
}
/**
* gmap_unmap_segment - unmap segment from the guest address space
* @gmap: pointer to the guest address space structure
* @to: address in the guest address space
* @len: length of the memory area to unmap
*
* Returns 0 if the unmap succeeded, -EINVAL if not.
*/
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
unsigned long off;
int flush;
BUG_ON(gmap_is_shadow(gmap));
if ((to | len) & (PMD_SIZE - 1))
return -EINVAL;
if (len == 0 || to + len < to)
return -EINVAL;
flush = 0;
mmap_write_lock(gmap->mm);
for (off = 0; off < len; off += PMD_SIZE)
flush |= __gmap_unmap_by_gaddr(gmap, to + off);
mmap_write_unlock(gmap->mm);
if (flush)
gmap_flush_tlb(gmap);
return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);
/**
* gmap_map_segment - map a segment to the guest address space
* @gmap: pointer to the guest address space structure
* @from: source address in the parent address space
* @to: target address in the guest address space
* @len: length of the memory area to map
*
* Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
*/
int gmap_map_segment(struct gmap *gmap, unsigned long from,
unsigned long to, unsigned long len)
{
unsigned long off;
int flush;
BUG_ON(gmap_is_shadow(gmap));
if ((from | to | len) & (PMD_SIZE - 1))
return -EINVAL;
if (len == 0 || from + len < from || to + len < to ||
from + len - 1 > TASK_SIZE_MAX || to + len - 1 > gmap->asce_end)
return -EINVAL;
flush = 0;
mmap_write_lock(gmap->mm);
for (off = 0; off < len; off += PMD_SIZE) {
/* Remove old translation */
flush |= __gmap_unmap_by_gaddr(gmap, to + off);
/* Store new translation */
if (radix_tree_insert(&gmap->guest_to_host,
(to + off) >> PMD_SHIFT,
(void *) from + off))
break;
}
mmap_write_unlock(gmap->mm);
if (flush)
gmap_flush_tlb(gmap);
if (off >= len)
return 0;
gmap_unmap_segment(gmap, to, len);
return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);
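/*
 * Illustrative sketch (not part of the original file): a minimal gmap life
 * cycle. All values are assumptions for illustration; host_addr and
 * guest_addr must be segment (PMD_SIZE) aligned and within TASK_SIZE_MAX
 * resp. the gmap limit.
 */
static int example_gmap_lifecycle(struct mm_struct *mm,
				  unsigned long host_addr,
				  unsigned long guest_addr)
{
	struct gmap *gmap;
	int rc;

	gmap = gmap_create(mm, (1UL << 44) - 1);	/* 16 TB guest limit */
	if (!gmap)
		return -ENOMEM;
	rc = gmap_map_segment(gmap, host_addr, guest_addr, PMD_SIZE);
	if (!rc)
		rc = gmap_unmap_segment(gmap, guest_addr, PMD_SIZE);
	gmap_remove(gmap);	/* unlink and drop the initial reference */
	return rc;
}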
/**
* __gmap_translate - translate a guest address to a user space address
* @gmap: pointer to guest mapping meta data structure
* @gaddr: guest address
*
* Returns user space address which corresponds to the guest address or
* -EFAULT if no such mapping exists.
* This function does not establish potentially missing page table entries.
* The mmap_lock of the mm that belongs to the address space must be held
* when this function gets called.
*
* Note: Can also be called for shadow gmaps.
*/
unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
unsigned long vmaddr;
vmaddr = (unsigned long)
radix_tree_lookup(&gmap->guest_to_host, gaddr >> PMD_SHIFT);
/* Note: guest_to_host is empty for a shadow gmap */
return vmaddr ? (vmaddr | (gaddr & ~PMD_MASK)) : -EFAULT;
}
EXPORT_SYMBOL_GPL(__gmap_translate);
/**
* gmap_translate - translate a guest address to a user space address
* @gmap: pointer to guest mapping meta data structure
* @gaddr: guest address
*
* Returns user space address which corresponds to the guest address or
* -EFAULT if no such mapping exists.
* This function does not establish potentially missing page table entries.
*/
unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
unsigned long rc;
mmap_read_lock(gmap->mm);
rc = __gmap_translate(gmap, gaddr);
mmap_read_unlock(gmap->mm);
return rc;
}
EXPORT_SYMBOL_GPL(gmap_translate);
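/*
 * Illustrative sketch (not part of the original file): the translation
 * helpers return either a user space address or -EFAULT encoded in the
 * same unsigned long, hence the IS_ERR_VALUE() check.
 */
static inline bool example_is_mapped(struct gmap *gmap, unsigned long gaddr)
{
	return !IS_ERR_VALUE(gmap_translate(gmap, gaddr));
}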
/**
* gmap_unlink - disconnect a page table from the gmap shadow tables
* @mm: pointer to the parent mm_struct
* @table: pointer to the host page table
* @vmaddr: vm address associated with the host page table
*/
void gmap_unlink(struct mm_struct *mm, unsigned long *table,
unsigned long vmaddr)
{
struct gmap *gmap;
int flush;
rcu_read_lock();
list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
flush = __gmap_unlink_by_vmaddr(gmap, vmaddr);
if (flush)
gmap_flush_tlb(gmap);
}
rcu_read_unlock();
}
static void gmap_pmdp_xchg(struct gmap *gmap, pmd_t *old, pmd_t new,
unsigned long gaddr);
/**
* __gmap_link - set up shadow page tables to connect a host to a guest address
* @gmap: pointer to guest mapping meta data structure
* @gaddr: guest address
* @vmaddr: vm address
*
* Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
* if the vm address is already mapped to a different guest segment.
* The mmap_lock of the mm that belongs to the address space must be held
* when this function gets called.
*/
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
{
struct mm_struct *mm;
unsigned long *table;
spinlock_t *ptl;
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
u64 unprot;
int rc;
BUG_ON(gmap_is_shadow(gmap));
/* Create higher level tables in the gmap page table */
table = gmap->table;
if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION1) {
table += (gaddr & _REGION1_INDEX) >> _REGION1_SHIFT;
if ((*table & _REGION_ENTRY_INVALID) &&
gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
gaddr & _REGION1_MASK))
return -ENOMEM;
table = __va(*table & _REGION_ENTRY_ORIGIN);
}
if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION2) {
table += (gaddr & _REGION2_INDEX) >> _REGION2_SHIFT;
if ((*table & _REGION_ENTRY_INVALID) &&
gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
gaddr & _REGION2_MASK))
return -ENOMEM;
table = __va(*table & _REGION_ENTRY_ORIGIN);
}
if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION3) {
table += (gaddr & _REGION3_INDEX) >> _REGION3_SHIFT;
if ((*table & _REGION_ENTRY_INVALID) &&
gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
gaddr & _REGION3_MASK))
return -ENOMEM;
table = __va(*table & _REGION_ENTRY_ORIGIN);
}
table += (gaddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
/* Walk the parent mm page table */
mm = gmap->mm;
pgd = pgd_offset(mm, vmaddr);
VM_BUG_ON(pgd_none(*pgd));
p4d = p4d_offset(pgd, vmaddr);
VM_BUG_ON(p4d_none(*p4d));
pud = pud_offset(p4d, vmaddr);
VM_BUG_ON(pud_none(*pud));
/* large puds cannot yet be handled */
if (pud_large(*pud))
return -EFAULT;
pmd = pmd_offset(pud, vmaddr);
VM_BUG_ON(pmd_none(*pmd));
/* Are we allowed to use huge pages? */
if (pmd_large(*pmd) && !gmap->mm->context.allow_gmap_hpage_1m)
return -EFAULT;
/* Link gmap segment table entry location to page table. */
rc = radix_tree_preload(GFP_KERNEL_ACCOUNT);
if (rc)
return rc;
ptl = pmd_lock(mm, pmd);
spin_lock(&gmap->guest_table_lock);
if (*table == _SEGMENT_ENTRY_EMPTY) {
rc = radix_tree_insert(&gmap->host_to_guest,
vmaddr >> PMD_SHIFT, table);
if (!rc) {
if (pmd_large(*pmd)) {
*table = (pmd_val(*pmd) &
_SEGMENT_ENTRY_HARDWARE_BITS_LARGE)
| _SEGMENT_ENTRY_GMAP_UC;
} else
*table = pmd_val(*pmd) &
_SEGMENT_ENTRY_HARDWARE_BITS;
}
} else if (*table & _SEGMENT_ENTRY_PROTECT &&
!(pmd_val(*pmd) & _SEGMENT_ENTRY_PROTECT)) {
unprot = (u64)*table;
unprot &= ~_SEGMENT_ENTRY_PROTECT;
unprot |= _SEGMENT_ENTRY_GMAP_UC;
gmap_pmdp_xchg(gmap, (pmd_t *)table, __pmd(unprot), gaddr);
}
spin_unlock(&gmap->guest_table_lock);
spin_unlock(ptl);
radix_tree_preload_end();
return rc;
}
/**
* gmap_fault - resolve a fault on a guest address
* @gmap: pointer to guest mapping meta data structure
* @gaddr: guest address
* @fault_flags: flags to pass down to handle_mm_fault()
*
* Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
* if the vm address is already mapped to a different guest segment.
*/
int gmap_fault(struct gmap *gmap, unsigned long gaddr,
unsigned int fault_flags)
{
unsigned long vmaddr;
int rc;
bool unlocked;
mmap_read_lock(gmap->mm);
retry:
unlocked = false;
vmaddr = __gmap_translate(gmap, gaddr);
if (IS_ERR_VALUE(vmaddr)) {
rc = vmaddr;
goto out_up;
}
if (fixup_user_fault(gmap->mm, vmaddr, fault_flags,
&unlocked)) {
rc = -EFAULT;
goto out_up;
}
/*
	 * In case fixup_user_fault() unlocked the mmap_lock during the
	 * fault-in, redo __gmap_translate() to avoid racing with a
	 * concurrent map/unmap_segment.
*/
if (unlocked)
goto retry;
rc = __gmap_link(gmap, gaddr, vmaddr);
out_up:
mmap_read_unlock(gmap->mm);
return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);
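/*
 * Illustrative sketch (not part of the original file): resolving a guest
 * write fault before the access is retried; fault_flags follow the
 * handle_mm_fault() conventions.
 */
static inline int example_resolve_write_fault(struct gmap *gmap,
					      unsigned long gaddr)
{
	return gmap_fault(gmap, gaddr, FAULT_FLAG_WRITE);
}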
/*
* this function is assumed to be called with mmap_lock held
*/
void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
{
struct vm_area_struct *vma;
unsigned long vmaddr;
spinlock_t *ptl;
pte_t *ptep;
/* Find the vm address for the guest address */
vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host,
gaddr >> PMD_SHIFT);
if (vmaddr) {
vmaddr |= gaddr & ~PMD_MASK;
vma = vma_lookup(gmap->mm, vmaddr);
if (!vma || is_vm_hugetlb_page(vma))
return;
/* Get pointer to the page table entry */
ptep = get_locked_pte(gmap->mm, vmaddr, &ptl);
if (likely(ptep)) {
ptep_zap_unused(gmap->mm, vmaddr, ptep, 0);
pte_unmap_unlock(ptep, ptl);
}
}
}
EXPORT_SYMBOL_GPL(__gmap_zap);
void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
{
unsigned long gaddr, vmaddr, size;
struct vm_area_struct *vma;
mmap_read_lock(gmap->mm);
for (gaddr = from; gaddr < to;
gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
/* Find the vm address for the guest address */
vmaddr = (unsigned long)
radix_tree_lookup(&gmap->guest_to_host,
gaddr >> PMD_SHIFT);
if (!vmaddr)
continue;
vmaddr |= gaddr & ~PMD_MASK;
/* Find vma in the parent mm */
vma = find_vma(gmap->mm, vmaddr);
if (!vma)
continue;
/*
* We do not discard pages that are backed by
* hugetlbfs, so we don't have to refault them.
*/
if (is_vm_hugetlb_page(vma))
continue;
size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
zap_page_range_single(vma, vmaddr, size, NULL);
}
mmap_read_unlock(gmap->mm);
}
EXPORT_SYMBOL_GPL(gmap_discard);
static LIST_HEAD(gmap_notifier_list);
static DEFINE_SPINLOCK(gmap_notifier_lock);
/**
* gmap_register_pte_notifier - register a pte invalidation callback
* @nb: pointer to the gmap notifier block
*/
void gmap_register_pte_notifier(struct gmap_notifier *nb)
{
spin_lock(&gmap_notifier_lock);
list_add_rcu(&nb->list, &gmap_notifier_list);
spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_register_pte_notifier);
/**
* gmap_unregister_pte_notifier - remove a pte invalidation callback
* @nb: pointer to the gmap notifier block
*/
void gmap_unregister_pte_notifier(struct gmap_notifier *nb)
{
spin_lock(&gmap_notifier_lock);
list_del_rcu(&nb->list);
spin_unlock(&gmap_notifier_lock);
synchronize_rcu();
}
EXPORT_SYMBOL_GPL(gmap_unregister_pte_notifier);
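/*
 * Illustrative sketch (not part of the original file): registering a pte
 * invalidation callback. The callback runs for every invalidated guest
 * range [start, end]; example_gmap_notifier is an assumption, not an
 * existing consumer.
 */
static void example_invalidation(struct gmap *gmap, unsigned long start,
				 unsigned long end)
{
	/* e.g. kick VCPUs that may cache translations for this range */
}

static struct gmap_notifier example_gmap_notifier = {
	.notifier_call = example_invalidation,
};

/*
 * gmap_register_pte_notifier(&example_gmap_notifier);
 * ...
 * gmap_unregister_pte_notifier(&example_gmap_notifier);
 */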
/**
* gmap_call_notifier - call all registered invalidation callbacks
* @gmap: pointer to guest mapping meta data structure
* @start: start virtual address in the guest address space
* @end: end virtual address in the guest address space
*/
static void gmap_call_notifier(struct gmap *gmap, unsigned long start,
unsigned long end)
{
struct gmap_notifier *nb;
list_for_each_entry(nb, &gmap_notifier_list, list)
nb->notifier_call(gmap, start, end);
}
/**
* gmap_table_walk - walk the gmap page tables
* @gmap: pointer to guest mapping meta data structure
* @gaddr: virtual address in the guest address space
* @level: page table level to stop at
*
* Returns a table entry pointer for the given guest address and @level
 * @level=0 : returns a pointer to a page table entry (or NULL)
* @level=1 : returns a pointer to a segment table entry (or NULL)
* @level=2 : returns a pointer to a region-3 table entry (or NULL)
* @level=3 : returns a pointer to a region-2 table entry (or NULL)
* @level=4 : returns a pointer to a region-1 table entry (or NULL)
*
* Returns NULL if the gmap page tables could not be walked to the
* requested level.
*
* Note: Can also be called for shadow gmaps.
*/
static inline unsigned long *gmap_table_walk(struct gmap *gmap,
unsigned long gaddr, int level)
{
const int asce_type = gmap->asce & _ASCE_TYPE_MASK;
unsigned long *table = gmap->table;
if (gmap_is_shadow(gmap) && gmap->removed)
return NULL;
if (WARN_ON_ONCE(level > (asce_type >> 2) + 1))
return NULL;
if (asce_type != _ASCE_TYPE_REGION1 &&
gaddr & (-1UL << (31 + (asce_type >> 2) * 11)))
return NULL;
switch (asce_type) {
case _ASCE_TYPE_REGION1:
table += (gaddr & _REGION1_INDEX) >> _REGION1_SHIFT;
if (level == 4)
break;
if (*table & _REGION_ENTRY_INVALID)
return NULL;
table = __va(*table & _REGION_ENTRY_ORIGIN);
fallthrough;
case _ASCE_TYPE_REGION2:
table += (gaddr & _REGION2_INDEX) >> _REGION2_SHIFT;
if (level == 3)
break;
if (*table & _REGION_ENTRY_INVALID)
return NULL;
table = __va(*table & _REGION_ENTRY_ORIGIN);
fallthrough;
case _ASCE_TYPE_REGION3:
table += (gaddr & _REGION3_INDEX) >> _REGION3_SHIFT;
if (level == 2)
break;
if (*table & _REGION_ENTRY_INVALID)
return NULL;
table = __va(*table & _REGION_ENTRY_ORIGIN);
fallthrough;
case _ASCE_TYPE_SEGMENT:
table += (gaddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
if (level == 1)
break;
if (*table & _REGION_ENTRY_INVALID)
return NULL;
table = __va(*table & _SEGMENT_ENTRY_ORIGIN);
table += (gaddr & _PAGE_INDEX) >> _PAGE_SHIFT;
}
return table;
}
/**
* gmap_pte_op_walk - walk the gmap page table, get the page table lock
* and return the pte pointer
* @gmap: pointer to guest mapping meta data structure
* @gaddr: virtual address in the guest address space
* @ptl: pointer to the spinlock pointer
*
* Returns a pointer to the locked pte for a guest address, or NULL
*/
static pte_t *gmap_pte_op_walk(struct gmap *gmap, unsigned long gaddr,
spinlock_t **ptl)
{
unsigned long *table;
BUG_ON(gmap_is_shadow(gmap));
/* Walk the gmap page table, lock and get pte pointer */
table = gmap_table_walk(gmap, gaddr, 1); /* get segment pointer */
if (!table || *table & _SEGMENT_ENTRY_INVALID)
return NULL;
return pte_alloc_map_lock(gmap->mm, (pmd_t *) table, gaddr, ptl);
}
/**
* gmap_pte_op_fixup - force a page in and connect the gmap page table
* @gmap: pointer to guest mapping meta data structure
* @gaddr: virtual address in the guest address space
* @vmaddr: address in the host process address space
* @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
*
* Returns 0 if the caller can retry __gmap_translate (might fail again),
* -ENOMEM if out of memory and -EFAULT if anything goes wrong while fixing
* up or connecting the gmap page table.
*/
static int gmap_pte_op_fixup(struct gmap *gmap, unsigned long gaddr,
unsigned long vmaddr, int prot)
{
struct mm_struct *mm = gmap->mm;
unsigned int fault_flags;
bool unlocked = false;
BUG_ON(gmap_is_shadow(gmap));
fault_flags = (prot == PROT_WRITE) ? FAULT_FLAG_WRITE : 0;
if (fixup_user_fault(mm, vmaddr, fault_flags, &unlocked))
return -EFAULT;
if (unlocked)
/* lost mmap_lock, caller has to retry __gmap_translate */
return 0;
/* Connect the page tables */
return __gmap_link(gmap, gaddr, vmaddr);
}
/**
* gmap_pte_op_end - release the page table lock
* @ptep: pointer to the locked pte
* @ptl: pointer to the page table spinlock
*/
static void gmap_pte_op_end(pte_t *ptep, spinlock_t *ptl)
{
pte_unmap_unlock(ptep, ptl);
}
/**
* gmap_pmd_op_walk - walk the gmap tables, get the guest table lock
* and return the pmd pointer
* @gmap: pointer to guest mapping meta data structure
* @gaddr: virtual address in the guest address space
*
* Returns a pointer to the pmd for a guest address, or NULL
*/
static inline pmd_t *gmap_pmd_op_walk(struct gmap *gmap, unsigned long gaddr)
{
pmd_t *pmdp;
BUG_ON(gmap_is_shadow(gmap));
pmdp = (pmd_t *) gmap_table_walk(gmap, gaddr, 1);
if (!pmdp)
return NULL;
/* without huge pages, there is no need to take the table lock */
if (!gmap->mm->context.allow_gmap_hpage_1m)
return pmd_none(*pmdp) ? NULL : pmdp;
spin_lock(&gmap->guest_table_lock);
if (pmd_none(*pmdp)) {
spin_unlock(&gmap->guest_table_lock);
return NULL;
}
/* 4k page table entries are locked via the pte (pte_alloc_map_lock). */
if (!pmd_large(*pmdp))
spin_unlock(&gmap->guest_table_lock);
return pmdp;
}
/**
* gmap_pmd_op_end - release the guest_table_lock if needed
* @gmap: pointer to the guest mapping meta data structure
* @pmdp: pointer to the pmd
*/
static inline void gmap_pmd_op_end(struct gmap *gmap, pmd_t *pmdp)
{
if (pmd_large(*pmdp))
spin_unlock(&gmap->guest_table_lock);
}
/*
 * gmap_protect_pmd - remove access rights to memory and set pmd notification bits
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @pmdp: pointer to the pmd to be protected
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bits: notification bits to set
 *
 * Returns:
 * 0 if successfully protected
 * -EAGAIN if a fixup is needed
 * -EINVAL if unsupported notifier bits have been specified
 *
 * Expected to be called with gmap->mm->mmap_lock in read and the
 * guest_table_lock held.
*/
static int gmap_protect_pmd(struct gmap *gmap, unsigned long gaddr,
pmd_t *pmdp, int prot, unsigned long bits)
{
int pmd_i = pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID;
int pmd_p = pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT;
pmd_t new = *pmdp;
/* Fixup needed */
if ((pmd_i && (prot != PROT_NONE)) || (pmd_p && (prot == PROT_WRITE)))
return -EAGAIN;
if (prot == PROT_NONE && !pmd_i) {
new = set_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_INVALID));
gmap_pmdp_xchg(gmap, pmdp, new, gaddr);
}
if (prot == PROT_READ && !pmd_p) {
new = clear_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_INVALID));
new = set_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_PROTECT));
gmap_pmdp_xchg(gmap, pmdp, new, gaddr);
}
if (bits & GMAP_NOTIFY_MPROT)
set_pmd(pmdp, set_pmd_bit(*pmdp, __pgprot(_SEGMENT_ENTRY_GMAP_IN)));
/* Shadow GMAP protection needs split PMDs */
if (bits & GMAP_NOTIFY_SHADOW)
return -EINVAL;
return 0;
}
/*
* gmap_protect_pte - remove access rights to memory and set pgste bits
* @gmap: pointer to guest mapping meta data structure
* @gaddr: virtual address in the guest address space
* @pmdp: pointer to the pmd associated with the pte
* @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
* @bits: notification bits to set
*
* Returns 0 if successfully protected, -ENOMEM if out of memory and
* -EAGAIN if a fixup is needed.
*
 * Expected to be called with gmap->mm->mmap_lock in read
*/
static int gmap_protect_pte(struct gmap *gmap, unsigned long gaddr,
pmd_t *pmdp, int prot, unsigned long bits)
{
int rc;
pte_t *ptep;
spinlock_t *ptl;
unsigned long pbits = 0;
if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
return -EAGAIN;
ptep = pte_alloc_map_lock(gmap->mm, pmdp, gaddr, &ptl);
if (!ptep)
return -ENOMEM;
pbits |= (bits & GMAP_NOTIFY_MPROT) ? PGSTE_IN_BIT : 0;
pbits |= (bits & GMAP_NOTIFY_SHADOW) ? PGSTE_VSIE_BIT : 0;
/* Protect and unlock. */
rc = ptep_force_prot(gmap->mm, gaddr, ptep, prot, pbits);
gmap_pte_op_end(ptep, ptl);
return rc;
}
/*
* gmap_protect_range - remove access rights to memory and set pgste bits
* @gmap: pointer to guest mapping meta data structure
* @gaddr: virtual address in the guest address space
* @len: size of area
* @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
* @bits: pgste notification bits to set
*
* Returns 0 if successfully protected, -ENOMEM if out of memory and
* -EFAULT if gaddr is invalid (or mapping for shadows is missing).
*
 * Called with gmap->mm->mmap_lock in read.
*/
static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
unsigned long len, int prot, unsigned long bits)
{
unsigned long vmaddr, dist;
pmd_t *pmdp;
int rc;
BUG_ON(gmap_is_shadow(gmap));
while (len) {
rc = -EAGAIN;
pmdp = gmap_pmd_op_walk(gmap, gaddr);
if (pmdp) {
if (!pmd_large(*pmdp)) {
rc = gmap_protect_pte(gmap, gaddr, pmdp, prot,
bits);
if (!rc) {
len -= PAGE_SIZE;
gaddr += PAGE_SIZE;
}
} else {
rc = gmap_protect_pmd(gmap, gaddr, pmdp, prot,
bits);
if (!rc) {
dist = HPAGE_SIZE - (gaddr & ~HPAGE_MASK);
len = len < dist ? 0 : len - dist;
gaddr = (gaddr & HPAGE_MASK) + HPAGE_SIZE;
}
}
gmap_pmd_op_end(gmap, pmdp);
}
if (rc) {
if (rc == -EINVAL)
return rc;
/* -EAGAIN, fixup of userspace mm and gmap */
vmaddr = __gmap_translate(gmap, gaddr);
if (IS_ERR_VALUE(vmaddr))
return vmaddr;
rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, prot);
if (rc)
return rc;
}
}
return 0;
}
/**
* gmap_mprotect_notify - change access rights for a range of ptes and
* call the notifier if any pte changes again
* @gmap: pointer to guest mapping meta data structure
* @gaddr: virtual address in the guest address space
* @len: size of area
* @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
*
* Returns 0 if for each page in the given range a gmap mapping exists,
* the new access rights could be set and the notifier could be armed.
* If the gmap mapping is missing for one or more pages -EFAULT is
* returned. If no memory could be allocated -ENOMEM is returned.
* This function establishes missing page table entries.
*/
int gmap_mprotect_notify(struct gmap *gmap, unsigned long gaddr,
unsigned long len, int prot)
{
int rc;
if ((gaddr & ~PAGE_MASK) || (len & ~PAGE_MASK) || gmap_is_shadow(gmap))
return -EINVAL;
if (!MACHINE_HAS_ESOP && prot == PROT_READ)
return -EINVAL;
mmap_read_lock(gmap->mm);
rc = gmap_protect_range(gmap, gaddr, len, prot, GMAP_NOTIFY_MPROT);
mmap_read_unlock(gmap->mm);
return rc;
}
EXPORT_SYMBOL_GPL(gmap_mprotect_notify);
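/*
 * Illustrative sketch (not part of the original file): arming the MPROT
 * notifier for a single guest page so that a callback like
 * example_invalidation() fires on the next write (PROT_READ requires
 * ESOP, see above).
 */
static inline int example_watch_guest_page(struct gmap *gmap,
					   unsigned long gaddr)
{
	return gmap_mprotect_notify(gmap, gaddr & PAGE_MASK, PAGE_SIZE,
				    PROT_READ);
}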
/**
* gmap_read_table - get an unsigned long value from a guest page table using
* absolute addressing, without marking the page referenced.
* @gmap: pointer to guest mapping meta data structure
* @gaddr: virtual address in the guest address space
* @val: pointer to the unsigned long value to return
*
* Returns 0 if the value was read, -ENOMEM if out of memory and -EFAULT
* if reading using the virtual address failed. -EINVAL if called on a gmap
* shadow.
*
* Called with gmap->mm->mmap_lock in read.
*/
int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val)
{
unsigned long address, vmaddr;
spinlock_t *ptl;
pte_t *ptep, pte;
int rc;
if (gmap_is_shadow(gmap))
return -EINVAL;
while (1) {
rc = -EAGAIN;
ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
if (ptep) {
pte = *ptep;
if (pte_present(pte) && (pte_val(pte) & _PAGE_READ)) {
address = pte_val(pte) & PAGE_MASK;
address += gaddr & ~PAGE_MASK;
*val = *(unsigned long *)__va(address);
set_pte(ptep, set_pte_bit(*ptep, __pgprot(_PAGE_YOUNG)));
/* Do *NOT* clear the _PAGE_INVALID bit! */
rc = 0;
}
gmap_pte_op_end(ptep, ptl);
}
if (!rc)
break;
vmaddr = __gmap_translate(gmap, gaddr);
if (IS_ERR_VALUE(vmaddr)) {
rc = vmaddr;
break;
}
rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, PROT_READ);
if (rc)
break;
}
return rc;
}
EXPORT_SYMBOL_GPL(gmap_read_table);
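/*
 * Illustrative sketch (not part of the original file): gmap_read_table()
 * expects the caller to hold gmap->mm->mmap_lock in read mode, as the
 * kernel-doc above states.
 */
static inline int example_peek_guest_word(struct gmap *gmap,
					  unsigned long gaddr,
					  unsigned long *val)
{
	int rc;

	mmap_read_lock(gmap->mm);
	rc = gmap_read_table(gmap, gaddr, val);
	mmap_read_unlock(gmap->mm);
	return rc;
}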
/**
* gmap_insert_rmap - add a rmap to the host_to_rmap radix tree
* @sg: pointer to the shadow guest address space structure
* @vmaddr: vm address associated with the rmap
* @rmap: pointer to the rmap structure
*
* Called with the sg->guest_table_lock
*/
static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr,
struct gmap_rmap *rmap)
{
struct gmap_rmap *temp;
void __rcu **slot;
BUG_ON(!gmap_is_shadow(sg));
slot = radix_tree_lookup_slot(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
if (slot) {
rmap->next = radix_tree_deref_slot_protected(slot,
&sg->guest_table_lock);
for (temp = rmap->next; temp; temp = temp->next) {
if (temp->raddr == rmap->raddr) {
kfree(rmap);
return;
}
}
radix_tree_replace_slot(&sg->host_to_rmap, slot, rmap);
} else {
rmap->next = NULL;
radix_tree_insert(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT,
rmap);
}
}
/**
* gmap_protect_rmap - restrict access rights to memory (RO) and create an rmap
* @sg: pointer to the shadow guest address space structure
* @raddr: rmap address in the shadow gmap
* @paddr: address in the parent guest address space
* @len: length of the memory area to protect
*
* Returns 0 if successfully protected and the rmap was created, -ENOMEM
* if out of memory and -EFAULT if paddr is invalid.
*/
static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
unsigned long paddr, unsigned long len)
{
struct gmap *parent;
struct gmap_rmap *rmap;
unsigned long vmaddr;
spinlock_t *ptl;
pte_t *ptep;
int rc;
BUG_ON(!gmap_is_shadow(sg));
parent = sg->parent;
while (len) {
vmaddr = __gmap_translate(parent, paddr);
if (IS_ERR_VALUE(vmaddr))
return vmaddr;
rmap = kzalloc(sizeof(*rmap), GFP_KERNEL_ACCOUNT);
if (!rmap)
return -ENOMEM;
rmap->raddr = raddr;
rc = radix_tree_preload(GFP_KERNEL_ACCOUNT);
if (rc) {
kfree(rmap);
return rc;
}
rc = -EAGAIN;
ptep = gmap_pte_op_walk(parent, paddr, &ptl);
if (ptep) {
spin_lock(&sg->guest_table_lock);
rc = ptep_force_prot(parent->mm, paddr, ptep, PROT_READ,
PGSTE_VSIE_BIT);
if (!rc)
gmap_insert_rmap(sg, vmaddr, rmap);
spin_unlock(&sg->guest_table_lock);
gmap_pte_op_end(ptep, ptl);
}
radix_tree_preload_end();
if (rc) {
kfree(rmap);
rc = gmap_pte_op_fixup(parent, paddr, vmaddr, PROT_READ);
if (rc)
return rc;
continue;
}
paddr += PAGE_SIZE;
len -= PAGE_SIZE;
}
return 0;
}
#define _SHADOW_RMAP_MASK 0x7
#define _SHADOW_RMAP_REGION1 0x5
#define _SHADOW_RMAP_REGION2 0x4
#define _SHADOW_RMAP_REGION3 0x3
#define _SHADOW_RMAP_SEGMENT 0x2
#define _SHADOW_RMAP_PGTABLE 0x1
/**
* gmap_idte_one - invalidate a single region or segment table entry
* @asce: region or segment table *origin* + table-type bits
* @vaddr: virtual address to identify the table entry to flush
*
* The invalid bit of a single region or segment table entry is set
* and the associated TLB entries depending on the entry are flushed.
* The table-type of the @asce identifies the portion of the @vaddr
* that is used as the invalidation index.
*/
static inline void gmap_idte_one(unsigned long asce, unsigned long vaddr)
{
asm volatile(
" idte %0,0,%1"
: : "a" (asce), "a" (vaddr) : "cc", "memory");
}
/**
* gmap_unshadow_page - remove a page from a shadow page table
* @sg: pointer to the shadow guest address space structure
* @raddr: rmap address in the shadow guest address space
*
* Called with the sg->guest_table_lock
*/
static void gmap_unshadow_page(struct gmap *sg, unsigned long raddr)
{
unsigned long *table;
BUG_ON(!gmap_is_shadow(sg));
table = gmap_table_walk(sg, raddr, 0); /* get page table pointer */
if (!table || *table & _PAGE_INVALID)
return;
gmap_call_notifier(sg, raddr, raddr + _PAGE_SIZE - 1);
ptep_unshadow_pte(sg->mm, raddr, (pte_t *) table);
}
/**
* __gmap_unshadow_pgt - remove all entries from a shadow page table
* @sg: pointer to the shadow guest address space structure
* @raddr: rmap address in the shadow guest address space
* @pgt: pointer to the start of a shadow page table
*
* Called with the sg->guest_table_lock
*/
static void __gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr,
unsigned long *pgt)
{
int i;
BUG_ON(!gmap_is_shadow(sg));
for (i = 0; i < _PAGE_ENTRIES; i++, raddr += _PAGE_SIZE)
pgt[i] = _PAGE_INVALID;
}
/**
* gmap_unshadow_pgt - remove a shadow page table from a segment entry
* @sg: pointer to the shadow guest address space structure
* @raddr: address in the shadow guest address space
*
* Called with the sg->guest_table_lock
*/
static void gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr)
{
unsigned long *ste;
phys_addr_t sto, pgt;
struct page *page;
BUG_ON(!gmap_is_shadow(sg));
ste = gmap_table_walk(sg, raddr, 1); /* get segment pointer */
if (!ste || !(*ste & _SEGMENT_ENTRY_ORIGIN))
return;
gmap_call_notifier(sg, raddr, raddr + _SEGMENT_SIZE - 1);
sto = __pa(ste - ((raddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT));
gmap_idte_one(sto | _ASCE_TYPE_SEGMENT, raddr);
pgt = *ste & _SEGMENT_ENTRY_ORIGIN;
*ste = _SEGMENT_ENTRY_EMPTY;
__gmap_unshadow_pgt(sg, raddr, __va(pgt));
/* Free page table */
page = phys_to_page(pgt);
list_del(&page->lru);
page_table_free_pgste(page);
}
/**
* __gmap_unshadow_sgt - remove all entries from a shadow segment table
* @sg: pointer to the shadow guest address space structure
* @raddr: rmap address in the shadow guest address space
* @sgt: pointer to the start of a shadow segment table
*
* Called with the sg->guest_table_lock
*/
static void __gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr,
unsigned long *sgt)
{
struct page *page;
phys_addr_t pgt;
int i;
BUG_ON(!gmap_is_shadow(sg));
for (i = 0; i < _CRST_ENTRIES; i++, raddr += _SEGMENT_SIZE) {
if (!(sgt[i] & _SEGMENT_ENTRY_ORIGIN))
continue;
pgt = sgt[i] & _REGION_ENTRY_ORIGIN;
sgt[i] = _SEGMENT_ENTRY_EMPTY;
__gmap_unshadow_pgt(sg, raddr, __va(pgt));
/* Free page table */
page = phys_to_page(pgt);
list_del(&page->lru);
page_table_free_pgste(page);
}
}
/**
* gmap_unshadow_sgt - remove a shadow segment table from a region-3 entry
* @sg: pointer to the shadow guest address space structure
* @raddr: rmap address in the shadow guest address space
*
* Called with the shadow->guest_table_lock
*/
static void gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr)
{
unsigned long r3o, *r3e;
phys_addr_t sgt;
struct page *page;
BUG_ON(!gmap_is_shadow(sg));
r3e = gmap_table_walk(sg, raddr, 2); /* get region-3 pointer */
if (!r3e || !(*r3e & _REGION_ENTRY_ORIGIN))
return;
gmap_call_notifier(sg, raddr, raddr + _REGION3_SIZE - 1);
r3o = (unsigned long) (r3e - ((raddr & _REGION3_INDEX) >> _REGION3_SHIFT));
gmap_idte_one(__pa(r3o) | _ASCE_TYPE_REGION3, raddr);
sgt = *r3e & _REGION_ENTRY_ORIGIN;
*r3e = _REGION3_ENTRY_EMPTY;
__gmap_unshadow_sgt(sg, raddr, __va(sgt));
/* Free segment table */
page = phys_to_page(sgt);
list_del(&page->lru);
__free_pages(page, CRST_ALLOC_ORDER);
}
/**
* __gmap_unshadow_r3t - remove all entries from a shadow region-3 table
* @sg: pointer to the shadow guest address space structure
* @raddr: address in the shadow guest address space
* @r3t: pointer to the start of a shadow region-3 table
*
* Called with the sg->guest_table_lock
*/
static void __gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr,
unsigned long *r3t)
{
struct page *page;
phys_addr_t sgt;
int i;
BUG_ON(!gmap_is_shadow(sg));
for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION3_SIZE) {
if (!(r3t[i] & _REGION_ENTRY_ORIGIN))
continue;
sgt = r3t[i] & _REGION_ENTRY_ORIGIN;
r3t[i] = _REGION3_ENTRY_EMPTY;
__gmap_unshadow_sgt(sg, raddr, __va(sgt));
/* Free segment table */
page = phys_to_page(sgt);
list_del(&page->lru);
__free_pages(page, CRST_ALLOC_ORDER);
}
}
/**
* gmap_unshadow_r3t - remove a shadow region-3 table from a region-2 entry
* @sg: pointer to the shadow guest address space structure
* @raddr: rmap address in the shadow guest address space
*
* Called with the sg->guest_table_lock
*/
static void gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr)
{
unsigned long r2o, *r2e;
phys_addr_t r3t;
struct page *page;
BUG_ON(!gmap_is_shadow(sg));
r2e = gmap_table_walk(sg, raddr, 3); /* get region-2 pointer */
if (!r2e || !(*r2e & _REGION_ENTRY_ORIGIN))
return;
gmap_call_notifier(sg, raddr, raddr + _REGION2_SIZE - 1);
r2o = (unsigned long) (r2e - ((raddr & _REGION2_INDEX) >> _REGION2_SHIFT));
gmap_idte_one(__pa(r2o) | _ASCE_TYPE_REGION2, raddr);
r3t = *r2e & _REGION_ENTRY_ORIGIN;
*r2e = _REGION2_ENTRY_EMPTY;
__gmap_unshadow_r3t(sg, raddr, __va(r3t));
/* Free region 3 table */
page = phys_to_page(r3t);
list_del(&page->lru);
__free_pages(page, CRST_ALLOC_ORDER);
}
/**
* __gmap_unshadow_r2t - remove all entries from a shadow region-2 table
* @sg: pointer to the shadow guest address space structure
* @raddr: rmap address in the shadow guest address space
* @r2t: pointer to the start of a shadow region-2 table
*
* Called with the sg->guest_table_lock
*/
static void __gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr,
unsigned long *r2t)
{
phys_addr_t r3t;
struct page *page;
int i;
BUG_ON(!gmap_is_shadow(sg));
for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION2_SIZE) {
if (!(r2t[i] & _REGION_ENTRY_ORIGIN))
continue;
r3t = r2t[i] & _REGION_ENTRY_ORIGIN;
r2t[i] = _REGION2_ENTRY_EMPTY;
__gmap_unshadow_r3t(sg, raddr, __va(r3t));
/* Free region 3 table */
page = phys_to_page(r3t);
list_del(&page->lru);
__free_pages(page, CRST_ALLOC_ORDER);
}
}
/**
* gmap_unshadow_r2t - remove a shadow region-2 table from a region-1 entry
* @sg: pointer to the shadow guest address space structure
* @raddr: rmap address in the shadow guest address space
*
* Called with the sg->guest_table_lock
*/
static void gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr)
{
unsigned long r1o, *r1e;
struct page *page;
phys_addr_t r2t;
BUG_ON(!gmap_is_shadow(sg));
r1e = gmap_table_walk(sg, raddr, 4); /* get region-1 pointer */
if (!r1e || !(*r1e & _REGION_ENTRY_ORIGIN))
return;
gmap_call_notifier(sg, raddr, raddr + _REGION1_SIZE - 1);
r1o = (unsigned long) (r1e - ((raddr & _REGION1_INDEX) >> _REGION1_SHIFT));
gmap_idte_one(__pa(r1o) | _ASCE_TYPE_REGION1, raddr);
r2t = *r1e & _REGION_ENTRY_ORIGIN;
*r1e = _REGION1_ENTRY_EMPTY;
__gmap_unshadow_r2t(sg, raddr, __va(r2t));
/* Free region 2 table */
page = phys_to_page(r2t);
list_del(&page->lru);
__free_pages(page, CRST_ALLOC_ORDER);
}
/**
* __gmap_unshadow_r1t - remove all entries from a shadow region-1 table
* @sg: pointer to the shadow guest address space structure
* @raddr: rmap address in the shadow guest address space
* @r1t: pointer to the start of a shadow region-1 table
*
* Called with the shadow->guest_table_lock
*/
static void __gmap_unshadow_r1t(struct gmap *sg, unsigned long raddr,
unsigned long *r1t)
{
unsigned long asce;
struct page *page;
phys_addr_t r2t;
int i;
BUG_ON(!gmap_is_shadow(sg));
asce = __pa(r1t) | _ASCE_TYPE_REGION1;
for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION1_SIZE) {
if (!(r1t[i] & _REGION_ENTRY_ORIGIN))
continue;
r2t = r1t[i] & _REGION_ENTRY_ORIGIN;
__gmap_unshadow_r2t(sg, raddr, __va(r2t));
/* Clear entry and flush translation r1t -> r2t */
gmap_idte_one(asce, raddr);
r1t[i] = _REGION1_ENTRY_EMPTY;
/* Free region 2 table */
page = phys_to_page(r2t);
list_del(&page->lru);
__free_pages(page, CRST_ALLOC_ORDER);
}
}
/**
* gmap_unshadow - remove a shadow page table completely
* @sg: pointer to the shadow guest address space structure
*
* Called with sg->guest_table_lock
*/
static void gmap_unshadow(struct gmap *sg)
{
unsigned long *table;
BUG_ON(!gmap_is_shadow(sg));
if (sg->removed)
return;
sg->removed = 1;
gmap_call_notifier(sg, 0, -1UL);
gmap_flush_tlb(sg);
table = __va(sg->asce & _ASCE_ORIGIN);
switch (sg->asce & _ASCE_TYPE_MASK) {
case _ASCE_TYPE_REGION1:
__gmap_unshadow_r1t(sg, 0, table);
break;
case _ASCE_TYPE_REGION2:
__gmap_unshadow_r2t(sg, 0, table);
break;
case _ASCE_TYPE_REGION3:
__gmap_unshadow_r3t(sg, 0, table);
break;
case _ASCE_TYPE_SEGMENT:
__gmap_unshadow_sgt(sg, 0, table);
break;
}
}
/**
* gmap_find_shadow - find a specific asce in the list of shadow tables
* @parent: pointer to the parent gmap
* @asce: ASCE for which the shadow table is created
* @edat_level: edat level to be used for the shadow translation
*
* Returns the pointer to a gmap if a shadow table with the given asce is
* already available, ERR_PTR(-EAGAIN) if another one is just being created,
* otherwise NULL
*/
static struct gmap *gmap_find_shadow(struct gmap *parent, unsigned long asce,
int edat_level)
{
struct gmap *sg;
list_for_each_entry(sg, &parent->children, list) {
if (sg->orig_asce != asce || sg->edat_level != edat_level ||
sg->removed)
continue;
if (!sg->initialized)
return ERR_PTR(-EAGAIN);
refcount_inc(&sg->ref_count);
return sg;
}
return NULL;
}
/**
* gmap_shadow_valid - check if a shadow guest address space matches the
* given properties and is still valid
* @sg: pointer to the shadow guest address space structure
* @asce: ASCE for which the shadow table is requested
* @edat_level: edat level to be used for the shadow translation
*
* Returns 1 if the gmap shadow is still valid and matches the given
* properties, the caller can continue using it. Returns 0 otherwise, the
* caller has to request a new shadow gmap in this case.
*
*/
int gmap_shadow_valid(struct gmap *sg, unsigned long asce, int edat_level)
{
if (sg->removed)
return 0;
return sg->orig_asce == asce && sg->edat_level == edat_level;
}
EXPORT_SYMBOL_GPL(gmap_shadow_valid);
/**
* gmap_shadow - create/find a shadow guest address space
* @parent: pointer to the parent gmap
* @asce: ASCE for which the shadow table is created
* @edat_level: edat level to be used for the shadow translation
*
 * The pages of the top level page table referred to by the asce parameter
* will be set to read-only and marked in the PGSTEs of the kvm process.
* The shadow table will be removed automatically on any change to the
* PTE mapping for the source table.
*
* Returns a guest address space structure, ERR_PTR(-ENOMEM) if out of memory,
* ERR_PTR(-EAGAIN) if the caller has to retry and ERR_PTR(-EFAULT) if the
* parent gmap table could not be protected.
*/
struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
int edat_level)
{
struct gmap *sg, *new;
unsigned long limit;
int rc;
BUG_ON(parent->mm->context.allow_gmap_hpage_1m);
BUG_ON(gmap_is_shadow(parent));
spin_lock(&parent->shadow_lock);
sg = gmap_find_shadow(parent, asce, edat_level);
spin_unlock(&parent->shadow_lock);
if (sg)
return sg;
/* Create a new shadow gmap */
limit = -1UL >> (33 - (((asce & _ASCE_TYPE_MASK) >> 2) * 11));
if (asce & _ASCE_REAL_SPACE)
limit = -1UL;
new = gmap_alloc(limit);
if (!new)
return ERR_PTR(-ENOMEM);
new->mm = parent->mm;
new->parent = gmap_get(parent);
new->orig_asce = asce;
new->edat_level = edat_level;
new->initialized = false;
spin_lock(&parent->shadow_lock);
/* Recheck if another CPU created the same shadow */
sg = gmap_find_shadow(parent, asce, edat_level);
if (sg) {
spin_unlock(&parent->shadow_lock);
gmap_free(new);
return sg;
}
if (asce & _ASCE_REAL_SPACE) {
/* only allow one real-space gmap shadow */
list_for_each_entry(sg, &parent->children, list) {
if (sg->orig_asce & _ASCE_REAL_SPACE) {
spin_lock(&sg->guest_table_lock);
gmap_unshadow(sg);
spin_unlock(&sg->guest_table_lock);
list_del(&sg->list);
gmap_put(sg);
break;
}
}
}
refcount_set(&new->ref_count, 2);
list_add(&new->list, &parent->children);
if (asce & _ASCE_REAL_SPACE) {
/* nothing to protect, return right away */
new->initialized = true;
spin_unlock(&parent->shadow_lock);
return new;
}
spin_unlock(&parent->shadow_lock);
/* protect after insertion, so it will get properly invalidated */
mmap_read_lock(parent->mm);
rc = gmap_protect_range(parent, asce & _ASCE_ORIGIN,
((asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE,
PROT_READ, GMAP_NOTIFY_SHADOW);
mmap_read_unlock(parent->mm);
spin_lock(&parent->shadow_lock);
new->initialized = true;
if (rc) {
list_del(&new->list);
gmap_free(new);
new = ERR_PTR(rc);
}
spin_unlock(&parent->shadow_lock);
return new;
}
EXPORT_SYMBOL_GPL(gmap_shadow);
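/*
 * Illustrative sketch (not part of the original file): looking up or
 * creating a shadow gmap for a guest ASCE, e.g. during VSIE. -EAGAIN asks
 * the caller to retry; the reference returned by gmap_shadow() must be
 * dropped with gmap_put() once the shadow is no longer needed.
 */
static int example_get_shadow(struct gmap *parent, unsigned long guest_asce,
			      int edat_level)
{
	struct gmap *sg;

	sg = gmap_shadow(parent, guest_asce, edat_level);
	if (IS_ERR(sg))
		return PTR_ERR(sg);	/* -ENOMEM, -EAGAIN or -EFAULT */
	/* ... walk and populate the shadow tables ... */
	gmap_put(sg);
	return 0;
}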
/**
* gmap_shadow_r2t - create an empty shadow region 2 table
* @sg: pointer to the shadow guest address space structure
* @saddr: faulting address in the shadow gmap
* @r2t: parent gmap address of the region 2 table to get shadowed
* @fake: r2t references contiguous guest memory block, not a r2t
*
* The r2t parameter specifies the address of the source table. The
* four pages of the source table are made read-only in the parent gmap
* address space. A write to the source table area @r2t will automatically
* remove the shadow r2 table and all of its descendants.
*
* Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
* shadow table structure is incomplete, -ENOMEM if out of memory and
* -EFAULT if an address in the parent gmap could not be resolved.
*
* Called with sg->mm->mmap_lock in read.
*/
int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
int fake)
{
unsigned long raddr, origin, offset, len;
unsigned long *table;
phys_addr_t s_r2t;
struct page *page;
int rc;
BUG_ON(!gmap_is_shadow(sg));
/* Allocate a shadow region second table */
page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
if (!page)
return -ENOMEM;
page->index = r2t & _REGION_ENTRY_ORIGIN;
if (fake)
page->index |= GMAP_SHADOW_FAKE_TABLE;
s_r2t = page_to_phys(page);
/* Install shadow region second table */
spin_lock(&sg->guest_table_lock);
table = gmap_table_walk(sg, saddr, 4); /* get region-1 pointer */
if (!table) {
rc = -EAGAIN; /* Race with unshadow */
goto out_free;
}
if (!(*table & _REGION_ENTRY_INVALID)) {
rc = 0; /* Already established */
goto out_free;
} else if (*table & _REGION_ENTRY_ORIGIN) {
rc = -EAGAIN; /* Race with shadow */
goto out_free;
}
crst_table_init(__va(s_r2t), _REGION2_ENTRY_EMPTY);
/* mark as invalid as long as the parent table is not protected */
*table = s_r2t | _REGION_ENTRY_LENGTH |
_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID;
if (sg->edat_level >= 1)
*table |= (r2t & _REGION_ENTRY_PROTECT);
list_add(&page->lru, &sg->crst_list);
if (fake) {
/* nothing to protect for fake tables */
*table &= ~_REGION_ENTRY_INVALID;
spin_unlock(&sg->guest_table_lock);
return 0;
}
spin_unlock(&sg->guest_table_lock);
/* Make r2t read-only in parent gmap page table */
raddr = (saddr & _REGION1_MASK) | _SHADOW_RMAP_REGION1;
origin = r2t & _REGION_ENTRY_ORIGIN;
offset = ((r2t & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
len = ((r2t & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
rc = gmap_protect_rmap(sg, raddr, origin + offset, len);
spin_lock(&sg->guest_table_lock);
if (!rc) {
table = gmap_table_walk(sg, saddr, 4);
if (!table || (*table & _REGION_ENTRY_ORIGIN) != s_r2t)
rc = -EAGAIN; /* Race with unshadow */
else
*table &= ~_REGION_ENTRY_INVALID;
} else {
gmap_unshadow_r2t(sg, raddr);
}
spin_unlock(&sg->guest_table_lock);
return rc;
out_free:
spin_unlock(&sg->guest_table_lock);
__free_pages(page, CRST_ALLOC_ORDER);
return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_r2t);
/**
* gmap_shadow_r3t - create a shadow region 3 table
* @sg: pointer to the shadow guest address space structure
* @saddr: faulting address in the shadow gmap
* @r3t: parent gmap address of the region 3 table to get shadowed
* @fake: r3t references contiguous guest memory block, not a r3t
*
* Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
* shadow table structure is incomplete, -ENOMEM if out of memory and
* -EFAULT if an address in the parent gmap could not be resolved.
*
* Called with sg->mm->mmap_lock in read.
*/
int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
int fake)
{
unsigned long raddr, origin, offset, len;
unsigned long *table;
phys_addr_t s_r3t;
struct page *page;
int rc;
BUG_ON(!gmap_is_shadow(sg));
	/* Allocate a shadow region third table */
page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
if (!page)
return -ENOMEM;
page->index = r3t & _REGION_ENTRY_ORIGIN;
if (fake)
page->index |= GMAP_SHADOW_FAKE_TABLE;
s_r3t = page_to_phys(page);
	/* Install shadow region third table */
spin_lock(&sg->guest_table_lock);
table = gmap_table_walk(sg, saddr, 3); /* get region-2 pointer */
if (!table) {
rc = -EAGAIN; /* Race with unshadow */
goto out_free;
}
if (!(*table & _REGION_ENTRY_INVALID)) {
rc = 0; /* Already established */
goto out_free;
} else if (*table & _REGION_ENTRY_ORIGIN) {
rc = -EAGAIN; /* Race with shadow */
goto out_free;
}
crst_table_init(__va(s_r3t), _REGION3_ENTRY_EMPTY);
/* mark as invalid as long as the parent table is not protected */
*table = s_r3t | _REGION_ENTRY_LENGTH |
_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID;
if (sg->edat_level >= 1)
*table |= (r3t & _REGION_ENTRY_PROTECT);
list_add(&page->lru, &sg->crst_list);
if (fake) {
/* nothing to protect for fake tables */
*table &= ~_REGION_ENTRY_INVALID;
spin_unlock(&sg->guest_table_lock);
return 0;
}
spin_unlock(&sg->guest_table_lock);
/* Make r3t read-only in parent gmap page table */
raddr = (saddr & _REGION2_MASK) | _SHADOW_RMAP_REGION2;
origin = r3t & _REGION_ENTRY_ORIGIN;
offset = ((r3t & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
len = ((r3t & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
rc = gmap_protect_rmap(sg, raddr, origin + offset, len);
spin_lock(&sg->guest_table_lock);
if (!rc) {
table = gmap_table_walk(sg, saddr, 3);
if (!table || (*table & _REGION_ENTRY_ORIGIN) != s_r3t)
rc = -EAGAIN; /* Race with unshadow */
else
*table &= ~_REGION_ENTRY_INVALID;
} else {
gmap_unshadow_r3t(sg, raddr);
}
spin_unlock(&sg->guest_table_lock);
return rc;
out_free:
spin_unlock(&sg->guest_table_lock);
__free_pages(page, CRST_ALLOC_ORDER);
return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_r3t);
/**
* gmap_shadow_sgt - create a shadow segment table
* @sg: pointer to the shadow guest address space structure
* @saddr: faulting address in the shadow gmap
* @sgt: parent gmap address of the segment table to get shadowed
* @fake: sgt references contiguous guest memory block, not a sgt
*
* Returns: 0 if successfully shadowed or already shadowed, -EAGAIN if the
* shadow table structure is incomplete, -ENOMEM if out of memory and
* -EFAULT if an address in the parent gmap could not be resolved.
*
* Called with sg->mm->mmap_lock in read.
*/
int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
int fake)
{
unsigned long raddr, origin, offset, len;
unsigned long *table;
phys_addr_t s_sgt;
struct page *page;
int rc;
BUG_ON(!gmap_is_shadow(sg) || (sgt & _REGION3_ENTRY_LARGE));
/* Allocate a shadow segment table */
page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
if (!page)
return -ENOMEM;
page->index = sgt & _REGION_ENTRY_ORIGIN;
if (fake)
page->index |= GMAP_SHADOW_FAKE_TABLE;
s_sgt = page_to_phys(page);
	/* Install shadow segment table */
spin_lock(&sg->guest_table_lock);
table = gmap_table_walk(sg, saddr, 2); /* get region-3 pointer */
if (!table) {
rc = -EAGAIN; /* Race with unshadow */
goto out_free;
}
if (!(*table & _REGION_ENTRY_INVALID)) {
rc = 0; /* Already established */
goto out_free;
} else if (*table & _REGION_ENTRY_ORIGIN) {
rc = -EAGAIN; /* Race with shadow */
goto out_free;
}
crst_table_init(__va(s_sgt), _SEGMENT_ENTRY_EMPTY);
/* mark as invalid as long as the parent table is not protected */
*table = s_sgt | _REGION_ENTRY_LENGTH |
_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID;
if (sg->edat_level >= 1)
*table |= sgt & _REGION_ENTRY_PROTECT;
list_add(&page->lru, &sg->crst_list);
if (fake) {
/* nothing to protect for fake tables */
*table &= ~_REGION_ENTRY_INVALID;
spin_unlock(&sg->guest_table_lock);
return 0;
}
spin_unlock(&sg->guest_table_lock);
/* Make sgt read-only in parent gmap page table */
raddr = (saddr & _REGION3_MASK) | _SHADOW_RMAP_REGION3;
origin = sgt & _REGION_ENTRY_ORIGIN;
offset = ((sgt & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
len = ((sgt & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
rc = gmap_protect_rmap(sg, raddr, origin + offset, len);
spin_lock(&sg->guest_table_lock);
if (!rc) {
table = gmap_table_walk(sg, saddr, 2);
if (!table || (*table & _REGION_ENTRY_ORIGIN) != s_sgt)
rc = -EAGAIN; /* Race with unshadow */
else
*table &= ~_REGION_ENTRY_INVALID;
} else {
gmap_unshadow_sgt(sg, raddr);
}
spin_unlock(&sg->guest_table_lock);
return rc;
out_free:
spin_unlock(&sg->guest_table_lock);
__free_pages(page, CRST_ALLOC_ORDER);
return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_sgt);
/**
* gmap_shadow_pgt_lookup - find a shadow page table
* @sg: pointer to the shadow guest address space structure
* @saddr: the address in the shadow guest address space
* @pgt: parent gmap address of the page table to get shadowed
* @dat_protection: if the pgtable is marked as protected by dat
* @fake: pgt references contiguous guest memory block, not a pgtable
*
* Returns 0 if the shadow page table was found and -EAGAIN if the page
* table was not found.
*
* Called with sg->mm->mmap_lock in read.
*/
int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
unsigned long *pgt, int *dat_protection,
int *fake)
{
unsigned long *table;
struct page *page;
int rc;
BUG_ON(!gmap_is_shadow(sg));
spin_lock(&sg->guest_table_lock);
table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
if (table && !(*table & _SEGMENT_ENTRY_INVALID)) {
/* Shadow page tables are full pages (pte+pgste) */
page = pfn_to_page(*table >> PAGE_SHIFT);
*pgt = page->index & ~GMAP_SHADOW_FAKE_TABLE;
*dat_protection = !!(*table & _SEGMENT_ENTRY_PROTECT);
*fake = !!(page->index & GMAP_SHADOW_FAKE_TABLE);
rc = 0;
} else {
rc = -EAGAIN;
}
spin_unlock(&sg->guest_table_lock);
return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_pgt_lookup);
/**
* gmap_shadow_pgt - instantiate a shadow page table
* @sg: pointer to the shadow guest address space structure
* @saddr: faulting address in the shadow gmap
* @pgt: parent gmap address of the page table to get shadowed
* @fake: pgt references contiguous guest memory block, not a pgtable
*
* Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
* shadow table structure is incomplete, -ENOMEM if out of memory and
* -EFAULT if an address in the parent gmap could not be resolved.
*
* Called with sg->mm->mmap_lock in read.
*/
int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
int fake)
{
unsigned long raddr, origin;
unsigned long *table;
struct page *page;
phys_addr_t s_pgt;
int rc;
BUG_ON(!gmap_is_shadow(sg) || (pgt & _SEGMENT_ENTRY_LARGE));
/* Allocate a shadow page table */
page = page_table_alloc_pgste(sg->mm);
if (!page)
return -ENOMEM;
page->index = pgt & _SEGMENT_ENTRY_ORIGIN;
if (fake)
page->index |= GMAP_SHADOW_FAKE_TABLE;
s_pgt = page_to_phys(page);
/* Install shadow page table */
spin_lock(&sg->guest_table_lock);
table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
if (!table) {
rc = -EAGAIN; /* Race with unshadow */
goto out_free;
}
if (!(*table & _SEGMENT_ENTRY_INVALID)) {
rc = 0; /* Already established */
goto out_free;
} else if (*table & _SEGMENT_ENTRY_ORIGIN) {
rc = -EAGAIN; /* Race with shadow */
goto out_free;
}
/* mark as invalid as long as the parent table is not protected */
*table = (unsigned long) s_pgt | _SEGMENT_ENTRY |
(pgt & _SEGMENT_ENTRY_PROTECT) | _SEGMENT_ENTRY_INVALID;
list_add(&page->lru, &sg->pt_list);
if (fake) {
/* nothing to protect for fake tables */
*table &= ~_SEGMENT_ENTRY_INVALID;
spin_unlock(&sg->guest_table_lock);
return 0;
}
spin_unlock(&sg->guest_table_lock);
/* Make pgt read-only in parent gmap page table (not the pgste) */
raddr = (saddr & _SEGMENT_MASK) | _SHADOW_RMAP_SEGMENT;
origin = pgt & _SEGMENT_ENTRY_ORIGIN & PAGE_MASK;
rc = gmap_protect_rmap(sg, raddr, origin, PAGE_SIZE);
spin_lock(&sg->guest_table_lock);
if (!rc) {
table = gmap_table_walk(sg, saddr, 1);
if (!table || (*table & _SEGMENT_ENTRY_ORIGIN) != s_pgt)
rc = -EAGAIN; /* Race with unshadow */
else
*table &= ~_SEGMENT_ENTRY_INVALID;
} else {
gmap_unshadow_pgt(sg, raddr);
}
spin_unlock(&sg->guest_table_lock);
return rc;
out_free:
spin_unlock(&sg->guest_table_lock);
page_table_free_pgste(page);
return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_pgt);
/**
* gmap_shadow_page - create a shadow page mapping
* @sg: pointer to the shadow guest address space structure
* @saddr: faulting address in the shadow gmap
* @pte: pte in parent gmap address space to get shadowed
*
* Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
* shadow table structure is incomplete, -ENOMEM if out of memory and
* -EFAULT if an address in the parent gmap could not be resolved.
*
* Called with sg->mm->mmap_lock in read.
*/
int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
{
struct gmap *parent;
struct gmap_rmap *rmap;
unsigned long vmaddr, paddr;
spinlock_t *ptl;
pte_t *sptep, *tptep;
int prot;
int rc;
BUG_ON(!gmap_is_shadow(sg));
parent = sg->parent;
prot = (pte_val(pte) & _PAGE_PROTECT) ? PROT_READ : PROT_WRITE;
rmap = kzalloc(sizeof(*rmap), GFP_KERNEL_ACCOUNT);
if (!rmap)
return -ENOMEM;
rmap->raddr = (saddr & PAGE_MASK) | _SHADOW_RMAP_PGTABLE;
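/*
* Retry loop: repeat until the parent pte is mapped with the required
* protection and the shadow pte has been set, or until a permanent
* error (e.g. -ENOMEM or -EFAULT) occurs.
*/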
while (1) {
paddr = pte_val(pte) & PAGE_MASK;
vmaddr = __gmap_translate(parent, paddr);
if (IS_ERR_VALUE(vmaddr)) {
rc = vmaddr;
break;
}
rc = radix_tree_preload(GFP_KERNEL_ACCOUNT);
if (rc)
break;
rc = -EAGAIN;
sptep = gmap_pte_op_walk(parent, paddr, &ptl);
if (sptep) {
spin_lock(&sg->guest_table_lock);
/* Get page table pointer */
tptep = (pte_t *) gmap_table_walk(sg, saddr, 0);
if (!tptep) {
spin_unlock(&sg->guest_table_lock);
gmap_pte_op_end(sptep, ptl);
radix_tree_preload_end();
break;
}
rc = ptep_shadow_pte(sg->mm, saddr, sptep, tptep, pte);
if (rc > 0) {
/* Success and a new mapping */
gmap_insert_rmap(sg, vmaddr, rmap);
rmap = NULL;
rc = 0;
}
gmap_pte_op_end(sptep, ptl);
spin_unlock(&sg->guest_table_lock);
}
radix_tree_preload_end();
if (!rc)
break;
rc = gmap_pte_op_fixup(parent, paddr, vmaddr, prot);
if (rc)
break;
}
kfree(rmap);
return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_page);
/*
* gmap_shadow_notify - handle notifications for shadow gmap
*
* Called with sg->parent->shadow_lock held.
*/
static void gmap_shadow_notify(struct gmap *sg, unsigned long vmaddr,
unsigned long gaddr)
{
struct gmap_rmap *rmap, *rnext, *head;
unsigned long start, end, bits, raddr;
BUG_ON(!gmap_is_shadow(sg));
spin_lock(&sg->guest_table_lock);
if (sg->removed) {
spin_unlock(&sg->guest_table_lock);
return;
}
/* Check for top level table */
start = sg->orig_asce & _ASCE_ORIGIN;
end = start + ((sg->orig_asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE;
if (!(sg->orig_asce & _ASCE_REAL_SPACE) && gaddr >= start &&
gaddr < end) {
/* The complete shadow table has to go */
gmap_unshadow(sg);
spin_unlock(&sg->guest_table_lock);
list_del(&sg->list);
gmap_put(sg);
return;
}
/* Remove the page table tree starting from one specific entry */
head = radix_tree_delete(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
gmap_for_each_rmap_safe(rmap, rnext, head) {
bits = rmap->raddr & _SHADOW_RMAP_MASK;
raddr = rmap->raddr ^ bits;
switch (bits) {
case _SHADOW_RMAP_REGION1:
gmap_unshadow_r2t(sg, raddr);
break;
case _SHADOW_RMAP_REGION2:
gmap_unshadow_r3t(sg, raddr);
break;
case _SHADOW_RMAP_REGION3:
gmap_unshadow_sgt(sg, raddr);
break;
case _SHADOW_RMAP_SEGMENT:
gmap_unshadow_pgt(sg, raddr);
break;
case _SHADOW_RMAP_PGTABLE:
gmap_unshadow_page(sg, raddr);
break;
}
kfree(rmap);
}
spin_unlock(&sg->guest_table_lock);
}
/**
* ptep_notify - call all invalidation callbacks for a specific pte.
* @mm: pointer to the process mm_struct
* @vmaddr: virtual address in the process address space
* @pte: pointer to the page table entry
* @bits: bits from the pgste that caused the notify call
*
* This function is assumed to be called with the page table lock held
* for the pte to notify.
*/
void ptep_notify(struct mm_struct *mm, unsigned long vmaddr,
pte_t *pte, unsigned long bits)
{
unsigned long offset, gaddr = 0;
unsigned long *table;
struct gmap *gmap, *sg, *next;
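/*
* A 2 KB page table holds 256 ptes; the byte offset of the pte within
* its table, scaled by PAGE_SIZE / sizeof(pte_t), yields the guest
* address offset within the 1 MB segment.
*/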
offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
offset = offset * (PAGE_SIZE / sizeof(pte_t));
rcu_read_lock();
list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
spin_lock(&gmap->guest_table_lock);
table = radix_tree_lookup(&gmap->host_to_guest,
vmaddr >> PMD_SHIFT);
if (table)
gaddr = __gmap_segment_gaddr(table) + offset;
spin_unlock(&gmap->guest_table_lock);
if (!table)
continue;
if (!list_empty(&gmap->children) && (bits & PGSTE_VSIE_BIT)) {
spin_lock(&gmap->shadow_lock);
list_for_each_entry_safe(sg, next,
&gmap->children, list)
gmap_shadow_notify(sg, vmaddr, gaddr);
spin_unlock(&gmap->shadow_lock);
}
if (bits & PGSTE_IN_BIT)
gmap_call_notifier(gmap, gaddr, gaddr + PAGE_SIZE - 1);
}
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(ptep_notify);
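/*
* pmdp_notify_gmap - clear the GMAP_IN bit of a huge pmd and call the
* invalidation notifier for the whole 1 MB segment. Called with the
* guest_table_lock held.
*/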
static void pmdp_notify_gmap(struct gmap *gmap, pmd_t *pmdp,
unsigned long gaddr)
{
set_pmd(pmdp, clear_pmd_bit(*pmdp, __pgprot(_SEGMENT_ENTRY_GMAP_IN)));
gmap_call_notifier(gmap, gaddr, gaddr + HPAGE_SIZE - 1);
}
/**
* gmap_pmdp_xchg - exchange a gmap pmd with another
* @gmap: pointer to the guest address space structure
* @pmdp: pointer to the pmd entry
* @new: replacement entry
* @gaddr: the affected guest address
*
* This function is assumed to be called with the guest_table_lock
* held.
*/
static void gmap_pmdp_xchg(struct gmap *gmap, pmd_t *pmdp, pmd_t new,
unsigned long gaddr)
{
gaddr &= HPAGE_MASK;
pmdp_notify_gmap(gmap, pmdp, gaddr);
new = clear_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_GMAP_IN));
if (MACHINE_HAS_TLB_GUEST)
__pmdp_idte(gaddr, (pmd_t *)pmdp, IDTE_GUEST_ASCE, gmap->asce,
IDTE_GLOBAL);
else if (MACHINE_HAS_IDTE)
__pmdp_idte(gaddr, (pmd_t *)pmdp, 0, 0, IDTE_GLOBAL);
else
__pmdp_csp(pmdp);
set_pmd(pmdp, new);
}
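/*
* gmap_pmdp_clear - remove a host vmaddr from the host_to_guest tree of
* every gmap of the mm and clear the corresponding guest pmd entries;
* with purge set, the entries are purged from the TLB via CSP as well.
*/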
static void gmap_pmdp_clear(struct mm_struct *mm, unsigned long vmaddr,
int purge)
{
pmd_t *pmdp;
struct gmap *gmap;
unsigned long gaddr;
rcu_read_lock();
list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
spin_lock(&gmap->guest_table_lock);
pmdp = (pmd_t *)radix_tree_delete(&gmap->host_to_guest,
vmaddr >> PMD_SHIFT);
if (pmdp) {
gaddr = __gmap_segment_gaddr((unsigned long *)pmdp);
pmdp_notify_gmap(gmap, pmdp, gaddr);
WARN_ON(pmd_val(*pmdp) & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
_SEGMENT_ENTRY_GMAP_UC));
if (purge)
__pmdp_csp(pmdp);
set_pmd(pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}
spin_unlock(&gmap->guest_table_lock);
}
rcu_read_unlock();
}
/**
* gmap_pmdp_invalidate - invalidate all affected guest pmd entries without
* flushing
* @mm: pointer to the process mm_struct
* @vmaddr: virtual address in the process address space
*/
void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr)
{
gmap_pmdp_clear(mm, vmaddr, 0);
}
EXPORT_SYMBOL_GPL(gmap_pmdp_invalidate);
/**
* gmap_pmdp_csp - csp all affected guest pmd entries
* @mm: pointer to the process mm_struct
* @vmaddr: virtual address in the process address space
*/
void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr)
{
gmap_pmdp_clear(mm, vmaddr, 1);
}
EXPORT_SYMBOL_GPL(gmap_pmdp_csp);
/**
* gmap_pmdp_idte_local - invalidate and clear a guest pmd entry
* @mm: pointer to the process mm_struct
* @vmaddr: virtual address in the process address space
*/
void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr)
{
unsigned long *entry, gaddr;
struct gmap *gmap;
pmd_t *pmdp;
rcu_read_lock();
list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
spin_lock(&gmap->guest_table_lock);
entry = radix_tree_delete(&gmap->host_to_guest,
vmaddr >> PMD_SHIFT);
if (entry) {
pmdp = (pmd_t *)entry;
gaddr = __gmap_segment_gaddr(entry);
pmdp_notify_gmap(gmap, pmdp, gaddr);
WARN_ON(*entry & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
_SEGMENT_ENTRY_GMAP_UC));
if (MACHINE_HAS_TLB_GUEST)
__pmdp_idte(gaddr, pmdp, IDTE_GUEST_ASCE,
gmap->asce, IDTE_LOCAL);
else if (MACHINE_HAS_IDTE)
__pmdp_idte(gaddr, pmdp, 0, 0, IDTE_LOCAL);
*entry = _SEGMENT_ENTRY_EMPTY;
}
spin_unlock(&gmap->guest_table_lock);
}
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(gmap_pmdp_idte_local);
/**
* gmap_pmdp_idte_global - invalidate and clear a guest pmd entry
* @mm: pointer to the process mm_struct
* @vmaddr: virtual address in the process address space
*/
void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr)
{
unsigned long *entry, gaddr;
struct gmap *gmap;
pmd_t *pmdp;
rcu_read_lock();
list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
spin_lock(&gmap->guest_table_lock);
entry = radix_tree_delete(&gmap->host_to_guest,
vmaddr >> PMD_SHIFT);
if (entry) {
pmdp = (pmd_t *)entry;
gaddr = __gmap_segment_gaddr(entry);
pmdp_notify_gmap(gmap, pmdp, gaddr);
WARN_ON(*entry & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
_SEGMENT_ENTRY_GMAP_UC));
if (MACHINE_HAS_TLB_GUEST)
__pmdp_idte(gaddr, pmdp, IDTE_GUEST_ASCE,
gmap->asce, IDTE_GLOBAL);
else if (MACHINE_HAS_IDTE)
__pmdp_idte(gaddr, pmdp, 0, 0, IDTE_GLOBAL);
else
__pmdp_csp(pmdp);
*entry = _SEGMENT_ENTRY_EMPTY;
}
spin_unlock(&gmap->guest_table_lock);
}
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(gmap_pmdp_idte_global);
/**
* gmap_test_and_clear_dirty_pmd - test and reset segment dirty status
* @gmap: pointer to guest address space
* @pmdp: pointer to the pmd to be tested
* @gaddr: virtual address in the guest address space
*
* This function is assumed to be called with the guest_table_lock
* held.
*/
static bool gmap_test_and_clear_dirty_pmd(struct gmap *gmap, pmd_t *pmdp,
unsigned long gaddr)
{
if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
return false;
/* Already protected memory, which did not change is clean */
if (pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT &&
!(pmd_val(*pmdp) & _SEGMENT_ENTRY_GMAP_UC))
return false;
/* Clear UC indication and reset protection */
set_pmd(pmdp, clear_pmd_bit(*pmdp, __pgprot(_SEGMENT_ENTRY_GMAP_UC)));
gmap_protect_pmd(gmap, gaddr, pmdp, PROT_READ, 0);
return true;
}
/**
* gmap_sync_dirty_log_pmd - set bitmap based on dirty status of segment
* @gmap: pointer to guest address space
* @bitmap: dirty bitmap for this pmd
* @gaddr: virtual address in the guest address space
* @vmaddr: virtual address in the host address space
*
* This function is assumed to be called with the guest_table_lock
* held.
*/
void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long bitmap[4],
unsigned long gaddr, unsigned long vmaddr)
{
int i;
pmd_t *pmdp;
pte_t *ptep;
spinlock_t *ptl;
pmdp = gmap_pmd_op_walk(gmap, gaddr);
if (!pmdp)
return;
if (pmd_large(*pmdp)) {
if (gmap_test_and_clear_dirty_pmd(gmap, pmdp, gaddr))
bitmap_fill(bitmap, _PAGE_ENTRIES);
} else {
for (i = 0; i < _PAGE_ENTRIES; i++, vmaddr += PAGE_SIZE) {
ptep = pte_alloc_map_lock(gmap->mm, pmdp, vmaddr, &ptl);
if (!ptep)
continue;
if (ptep_test_and_clear_uc(gmap->mm, vmaddr, ptep))
set_bit(i, bitmap);
pte_unmap_unlock(ptep, ptl);
}
}
gmap_pmd_op_end(gmap, pmdp);
}
EXPORT_SYMBOL_GPL(gmap_sync_dirty_log_pmd);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int thp_split_walk_pmd_entry(pmd_t *pmd, unsigned long addr,
unsigned long end, struct mm_walk *walk)
{
struct vm_area_struct *vma = walk->vma;
split_huge_pmd(vma, pmd, addr);
return 0;
}
static const struct mm_walk_ops thp_split_walk_ops = {
.pmd_entry = thp_split_walk_pmd_entry,
.walk_lock = PGWALK_WRLOCK_VERIFY,
};
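/*
* pgstes can only be attached to 4 KB page tables, so split all
* existing THP mappings and disable THP for all future mappings.
*/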
static inline void thp_split_mm(struct mm_struct *mm)
{
struct vm_area_struct *vma;
VMA_ITERATOR(vmi, mm, 0);
for_each_vma(vmi, vma) {
vm_flags_mod(vma, VM_NOHUGEPAGE, VM_HUGEPAGE);
walk_page_vma(vma, &thp_split_walk_ops, NULL);
}
mm->def_flags |= VM_NOHUGEPAGE;
}
#else
static inline void thp_split_mm(struct mm_struct *mm)
{
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/*
* Remove all empty zero pages from the mapping for lazy refaulting
* - This must be called after mm->context.has_pgste is set, to avoid
* future creation of zero pages
* - This must be called after THP was disabled.
*
* The mm layer contracts with s390 that, even if mm were to remove a page
* table, racing with the loop below and so causing pte_offset_map_lock()
* to fail, it will never insert a page table containing empty zero pages
* once mm_forbids_zeropage(mm), i.e. mm->context.has_pgste, is set.
*/
static int __zap_zero_pages(pmd_t *pmd, unsigned long start,
unsigned long end, struct mm_walk *walk)
{
unsigned long addr;
for (addr = start; addr != end; addr += PAGE_SIZE) {
pte_t *ptep;
spinlock_t *ptl;
ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
if (!ptep)
break;
if (is_zero_pfn(pte_pfn(*ptep)))
ptep_xchg_direct(walk->mm, addr, ptep, __pte(_PAGE_INVALID));
pte_unmap_unlock(ptep, ptl);
}
return 0;
}
static const struct mm_walk_ops zap_zero_walk_ops = {
.pmd_entry = __zap_zero_pages,
.walk_lock = PGWALK_WRLOCK,
};
/*
* Switch on pgstes for the userspace process (for KVM).
*/
int s390_enable_sie(void)
{
struct mm_struct *mm = current->mm;
/* Do we have pgstes? If yes, we are done. */
if (mm_has_pgste(mm))
return 0;
/* Fail if the page tables are 2K */
if (!mm_alloc_pgste(mm))
return -EINVAL;
mmap_write_lock(mm);
mm->context.has_pgste = 1;
/* split thp mappings and disable thp for future mappings */
thp_split_mm(mm);
walk_page_range(mm, 0, TASK_SIZE, &zap_zero_walk_ops, NULL);
mmap_write_unlock(mm);
return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);
int gmap_mark_unmergeable(void)
{
/*
* Make sure to disable KSM (if enabled for the whole process or
* individual VMAs). Note that nothing currently hinders user space
* from re-enabling it.
*/
return ksm_disable(current->mm);
}
EXPORT_SYMBOL_GPL(gmap_mark_unmergeable);
/*
* Enable storage key handling from now on and initialize the storage
* keys with the default key.
*/
static int __s390_enable_skey_pte(pte_t *pte, unsigned long addr,
unsigned long next, struct mm_walk *walk)
{
/* Clear storage key */
ptep_zap_key(walk->mm, addr, pte);
return 0;
}
/*
* Give a chance to schedule after setting the keys of 256 pages.
* We only hold the mm lock, which is an rwsem, and the kvm srcu.
* Both allow sleeping.
*/
static int __s390_enable_skey_pmd(pmd_t *pmd, unsigned long addr,
unsigned long next, struct mm_walk *walk)
{
cond_resched();
return 0;
}
static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr,
unsigned long hmask, unsigned long next,
struct mm_walk *walk)
{
pmd_t *pmd = (pmd_t *)pte;
unsigned long start, end;
struct page *page = pmd_page(*pmd);
/*
* The write check makes sure we do not set a key on shared
* memory. This is needed as the walker does not differentiate
* between actual guest memory and the process executable or
* shared libraries.
*/
if (pmd_val(*pmd) & _SEGMENT_ENTRY_INVALID ||
!(pmd_val(*pmd) & _SEGMENT_ENTRY_WRITE))
return 0;
start = pmd_val(*pmd) & HPAGE_MASK;
end = start + HPAGE_SIZE - 1;
__storage_key_init_range(start, end);
set_bit(PG_arch_1, &page->flags);
cond_resched();
return 0;
}
static const struct mm_walk_ops enable_skey_walk_ops = {
.hugetlb_entry = __s390_enable_skey_hugetlb,
.pte_entry = __s390_enable_skey_pte,
.pmd_entry = __s390_enable_skey_pmd,
.walk_lock = PGWALK_WRLOCK,
};
int s390_enable_skey(void)
{
struct mm_struct *mm = current->mm;
int rc = 0;
mmap_write_lock(mm);
if (mm_uses_skeys(mm))
goto out_up;
mm->context.uses_skeys = 1;
rc = gmap_mark_unmergeable();
if (rc) {
mm->context.uses_skeys = 0;
goto out_up;
}
walk_page_range(mm, 0, TASK_SIZE, &enable_skey_walk_ops, NULL);
out_up:
mmap_write_unlock(mm);
return rc;
}
EXPORT_SYMBOL_GPL(s390_enable_skey);
/*
* Reset CMMA state, make all pages stable again.
*/
static int __s390_reset_cmma(pte_t *pte, unsigned long addr,
unsigned long next, struct mm_walk *walk)
{
ptep_zap_unused(walk->mm, addr, pte, 1);
return 0;
}
static const struct mm_walk_ops reset_cmma_walk_ops = {
.pte_entry = __s390_reset_cmma,
.walk_lock = PGWALK_WRLOCK,
};
void s390_reset_cmma(struct mm_struct *mm)
{
mmap_write_lock(mm);
walk_page_range(mm, 0, TASK_SIZE, &reset_cmma_walk_ops, NULL);
mmap_write_unlock(mm);
}
EXPORT_SYMBOL_GPL(s390_reset_cmma);
#define GATHER_GET_PAGES 32
struct reset_walk_state {
unsigned long next;
unsigned long count;
unsigned long pfns[GATHER_GET_PAGES];
};
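/*
* Take an extra reference on up to GATHER_GET_PAGES present pages and
* record their pfns. Returning non-zero once the array is full stops
* walk_page_range() with a positive return value, which the caller
* uses to restart the walk at the saved next address.
*/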
static int s390_gather_pages(pte_t *ptep, unsigned long addr,
unsigned long next, struct mm_walk *walk)
{
struct reset_walk_state *p = walk->private;
pte_t pte = READ_ONCE(*ptep);
if (pte_present(pte)) {
/* we have a reference from the mapping, take an extra one */
get_page(phys_to_page(pte_val(pte)));
p->pfns[p->count] = phys_to_pfn(pte_val(pte));
p->next = next;
p->count++;
}
return p->count >= GATHER_GET_PAGES;
}
static const struct mm_walk_ops gather_pages_ops = {
.pte_entry = s390_gather_pages,
.walk_lock = PGWALK_RDLOCK,
};
/*
* Call the Destroy secure page UVC on each page in the given array of PFNs.
* Each page needs to have an extra reference, which will be released here.
*/
void s390_uv_destroy_pfns(unsigned long count, unsigned long *pfns)
{
unsigned long i;
for (i = 0; i < count; i++) {
/* we always have an extra reference */
uv_destroy_owned_page(pfn_to_phys(pfns[i]));
/* get rid of the extra reference */
put_page(pfn_to_page(pfns[i]));
cond_resched();
}
}
EXPORT_SYMBOL_GPL(s390_uv_destroy_pfns);
/**
* __s390_uv_destroy_range - Call the destroy secure page UVC on each page
* in the given range of the given address space.
* @mm: the mm to operate on
* @start: the start of the range
* @end: the end of the range
* @interruptible: if not 0, stop when a fatal signal is received
*
* Walk the given range of the given address space and call the destroy
* secure page UVC on each page. Optionally exit early if a fatal signal is
* pending.
*
* Return: 0 on success, -EINTR if the function stopped before completing
*/
int __s390_uv_destroy_range(struct mm_struct *mm, unsigned long start,
unsigned long end, bool interruptible)
{
struct reset_walk_state state = { .next = start };
int r = 1;
while (r > 0) {
state.count = 0;
mmap_read_lock(mm);
r = walk_page_range(mm, state.next, end, &gather_pages_ops, &state);
mmap_read_unlock(mm);
cond_resched();
s390_uv_destroy_pfns(state.count, state.pfns);
if (interruptible && fatal_signal_pending(current))
return -EINTR;
}
return 0;
}
EXPORT_SYMBOL_GPL(__s390_uv_destroy_range);
/**
* s390_unlist_old_asce - Remove the topmost level of page tables from the
* list of page tables of the gmap.
* @gmap: the gmap whose table is to be removed
*
* On s390x, KVM keeps a list of all pages containing the page tables of the
* gmap (the CRST list). This list is used at tear down time to free all
* pages that are now not needed anymore.
*
* This function removes the topmost page of the tree (the one pointed to by
* the ASCE) from the CRST list.
*
* This means that it will not be freed when the VM is torn down, and needs
* to be handled separately by the caller, unless a leak is actually
* intended. Notice that this function will only remove the page from the
* list, the page will still be used as a top level page table (and ASCE).
*/
void s390_unlist_old_asce(struct gmap *gmap)
{
struct page *old;
old = virt_to_page(gmap->table);
spin_lock(&gmap->guest_table_lock);
list_del(&old->lru);
/*
* Sometimes the topmost page might need to be "removed" multiple
* times, for example if the VM is rebooted into secure mode several
* times concurrently, or if s390_replace_asce fails after calling
* s390_unlist_old_asce and is attempted again later. In that case
* the old asce has been removed from the list, and therefore it
* will not be freed when the VM terminates, but the ASCE is still
* in use and still pointed to.
* A subsequent call to s390_replace_asce will follow the pointer and
* try to remove the same page from the list again.
* Therefore it's necessary that the page of the ASCE has valid
* pointers, so list_del can work (and do nothing) without
* dereferencing stale or invalid pointers.
*/
INIT_LIST_HEAD(&old->lru);
spin_unlock(&gmap->guest_table_lock);
}
EXPORT_SYMBOL_GPL(s390_unlist_old_asce);
/**
* s390_replace_asce - Try to replace the current ASCE of a gmap with a copy
* @gmap: the gmap whose ASCE needs to be replaced
*
* If the ASCE is a SEGMENT type then this function will return -EINVAL:
* replacing a segment table is not supported, since the pointers in the
* host_to_guest radix tree would keep pointing to the old pages, causing
* use-after-free and memory corruption.
* If the allocation of the new top level page table fails, the ASCE is not
* replaced.
* In any case, the old ASCE is always removed from the gmap CRST list.
* Therefore the caller has to make sure to save a pointer to it
* beforehand, unless a leak is actually intended.
*/
int s390_replace_asce(struct gmap *gmap)
{
unsigned long asce;
struct page *page;
void *table;
s390_unlist_old_asce(gmap);
/* Replacing segment type ASCEs would cause serious issues */
if ((gmap->asce & _ASCE_TYPE_MASK) == _ASCE_TYPE_SEGMENT)
return -EINVAL;
page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
if (!page)
return -ENOMEM;
page->index = 0;
table = page_to_virt(page);
memcpy(table, gmap->table, 1UL << (CRST_ALLOC_ORDER + PAGE_SHIFT));
/*
* The caller has to deal with the old ASCE, but here we make sure
* the new one is properly added to the CRST list, so that
* it will be freed when the VM is torn down.
*/
spin_lock(&gmap->guest_table_lock);
list_add(&page->lru, &gmap->crst_list);
spin_unlock(&gmap->guest_table_lock);
/* Set new table origin while preserving existing ASCE control bits */
asce = (gmap->asce & ~_ASCE_ORIGIN) | __pa(table);
WRITE_ONCE(gmap->asce, asce);
WRITE_ONCE(gmap->mm->context.gmap_asce, asce);
WRITE_ONCE(gmap->table, table);
return 0;
}
EXPORT_SYMBOL_GPL(s390_replace_asce);
| linux-master | arch/s390/mm/gmap.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2011
* Author(s): Jan Glauber <[email protected]>
*/
#include <linux/hugetlb.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/facility.h>
#include <asm/pgalloc.h>
#include <asm/kfence.h>
#include <asm/page.h>
#include <asm/set_memory.h>
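/*
* SSKE with the multiple-block control set: this sets the storage key
* for all 4 KB blocks up to the next 1 MB boundary and returns the
* address of the next frame to continue with.
*/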
static inline unsigned long sske_frame(unsigned long addr, unsigned char skey)
{
asm volatile(".insn rrf,0xb22b0000,%[skey],%[addr],1,0"
: [addr] "+a" (addr) : [skey] "d" (skey));
return addr;
}
void __storage_key_init_range(unsigned long start, unsigned long end)
{
unsigned long boundary, size;
while (start < end) {
if (MACHINE_HAS_EDAT1) {
/* set storage keys for a 1MB frame */
size = 1UL << 20;
boundary = (start + size) & ~(size - 1);
if (boundary <= end) {
do {
start = sske_frame(start, PAGE_DEFAULT_KEY);
} while (start < boundary);
continue;
}
}
page_set_storage_key(start, PAGE_DEFAULT_KEY, 1);
start += PAGE_SIZE;
}
}
#ifdef CONFIG_PROC_FS
atomic_long_t __bootdata_preserved(direct_pages_count[PG_DIRECT_MAP_MAX]);
void arch_report_meminfo(struct seq_file *m)
{
seq_printf(m, "DirectMap4k: %8lu kB\n",
atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_4K]) << 2);
seq_printf(m, "DirectMap1M: %8lu kB\n",
atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_1M]) << 10);
seq_printf(m, "DirectMap2G: %8lu kB\n",
atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_2G]) << 21);
}
#endif /* CONFIG_PROC_FS */
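/*
* Exchange a DAT table entry of any level: use CRDTE (compare and
* replace DAT table entry) if EDAT2 is available, otherwise fall back
* to CSPG or CSP (compare and swap and purge).
*/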
static void pgt_set(unsigned long *old, unsigned long new, unsigned long addr,
unsigned long dtt)
{
unsigned long *table, mask;
mask = 0;
if (MACHINE_HAS_EDAT2) {
switch (dtt) {
case CRDTE_DTT_REGION3:
mask = ~(PTRS_PER_PUD * sizeof(pud_t) - 1);
break;
case CRDTE_DTT_SEGMENT:
mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
break;
case CRDTE_DTT_PAGE:
mask = ~(PTRS_PER_PTE * sizeof(pte_t) - 1);
break;
}
table = (unsigned long *)((unsigned long)old & mask);
crdte(*old, new, table, dtt, addr, S390_lowcore.kernel_asce);
} else if (MACHINE_HAS_IDTE) {
cspg(old, *old, new);
} else {
csp((unsigned int *)old + 1, *old, new);
}
}
static int walk_pte_level(pmd_t *pmdp, unsigned long addr, unsigned long end,
unsigned long flags)
{
pte_t *ptep, new;
if (flags == SET_MEMORY_4K)
return 0;
ptep = pte_offset_kernel(pmdp, addr);
do {
new = *ptep;
if (pte_none(new))
return -EINVAL;
if (flags & SET_MEMORY_RO)
new = pte_wrprotect(new);
else if (flags & SET_MEMORY_RW)
new = pte_mkwrite_novma(pte_mkdirty(new));
if (flags & SET_MEMORY_NX)
new = set_pte_bit(new, __pgprot(_PAGE_NOEXEC));
else if (flags & SET_MEMORY_X)
new = clear_pte_bit(new, __pgprot(_PAGE_NOEXEC));
if (flags & SET_MEMORY_INV) {
new = set_pte_bit(new, __pgprot(_PAGE_INVALID));
} else if (flags & SET_MEMORY_DEF) {
new = __pte(pte_val(new) & PAGE_MASK);
new = set_pte_bit(new, PAGE_KERNEL);
if (!MACHINE_HAS_NX)
new = clear_pte_bit(new, __pgprot(_PAGE_NOEXEC));
}
pgt_set((unsigned long *)ptep, pte_val(new), addr, CRDTE_DTT_PAGE);
ptep++;
addr += PAGE_SIZE;
cond_resched();
} while (addr < end);
return 0;
}
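/*
* Split a 1 MB segment mapping into 4 KB pte mappings, preserving the
* read-only and no-exec attributes of the original segment.
*/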
static int split_pmd_page(pmd_t *pmdp, unsigned long addr)
{
unsigned long pte_addr, prot;
pte_t *pt_dir, *ptep;
pmd_t new;
int i, ro, nx;
pt_dir = vmem_pte_alloc();
if (!pt_dir)
return -ENOMEM;
pte_addr = pmd_pfn(*pmdp) << PAGE_SHIFT;
ro = !!(pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT);
nx = !!(pmd_val(*pmdp) & _SEGMENT_ENTRY_NOEXEC);
prot = pgprot_val(ro ? PAGE_KERNEL_RO : PAGE_KERNEL);
if (!nx)
prot &= ~_PAGE_NOEXEC;
ptep = pt_dir;
for (i = 0; i < PTRS_PER_PTE; i++) {
set_pte(ptep, __pte(pte_addr | prot));
pte_addr += PAGE_SIZE;
ptep++;
}
new = __pmd(__pa(pt_dir) | _SEGMENT_ENTRY);
pgt_set((unsigned long *)pmdp, pmd_val(new), addr, CRDTE_DTT_SEGMENT);
update_page_count(PG_DIRECT_MAP_4K, PTRS_PER_PTE);
update_page_count(PG_DIRECT_MAP_1M, -1);
return 0;
}
static void modify_pmd_page(pmd_t *pmdp, unsigned long addr,
unsigned long flags)
{
pmd_t new = *pmdp;
if (flags & SET_MEMORY_RO)
new = pmd_wrprotect(new);
else if (flags & SET_MEMORY_RW)
new = pmd_mkwrite_novma(pmd_mkdirty(new));
if (flags & SET_MEMORY_NX)
new = set_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_NOEXEC));
else if (flags & SET_MEMORY_X)
new = clear_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_NOEXEC));
if (flags & SET_MEMORY_INV) {
new = set_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_INVALID));
} else if (flags & SET_MEMORY_DEF) {
new = __pmd(pmd_val(new) & PMD_MASK);
new = set_pmd_bit(new, SEGMENT_KERNEL);
if (!MACHINE_HAS_NX)
new = clear_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_NOEXEC));
}
pgt_set((unsigned long *)pmdp, pmd_val(new), addr, CRDTE_DTT_SEGMENT);
}
static int walk_pmd_level(pud_t *pudp, unsigned long addr, unsigned long end,
unsigned long flags)
{
unsigned long next;
int need_split;
pmd_t *pmdp;
int rc = 0;
pmdp = pmd_offset(pudp, addr);
do {
if (pmd_none(*pmdp))
return -EINVAL;
next = pmd_addr_end(addr, end);
if (pmd_large(*pmdp)) {
need_split = !!(flags & SET_MEMORY_4K);
need_split |= !!(addr & ~PMD_MASK);
need_split |= !!(addr + PMD_SIZE > next);
if (need_split) {
rc = split_pmd_page(pmdp, addr);
if (rc)
return rc;
continue;
}
modify_pmd_page(pmdp, addr, flags);
} else {
rc = walk_pte_level(pmdp, addr, next, flags);
if (rc)
return rc;
}
pmdp++;
addr = next;
cond_resched();
} while (addr < end);
return rc;
}
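/*
* Split a 2 GB region-third mapping into 1 MB segment mappings,
* preserving the read-only and no-exec attributes of the original
* region.
*/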
static int split_pud_page(pud_t *pudp, unsigned long addr)
{
unsigned long pmd_addr, prot;
pmd_t *pm_dir, *pmdp;
pud_t new;
int i, ro, nx;
pm_dir = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
if (!pm_dir)
return -ENOMEM;
pmd_addr = pud_pfn(*pudp) << PAGE_SHIFT;
ro = !!(pud_val(*pudp) & _REGION_ENTRY_PROTECT);
nx = !!(pud_val(*pudp) & _REGION_ENTRY_NOEXEC);
prot = pgprot_val(ro ? SEGMENT_KERNEL_RO : SEGMENT_KERNEL);
if (!nx)
prot &= ~_SEGMENT_ENTRY_NOEXEC;
pmdp = pm_dir;
for (i = 0; i < PTRS_PER_PMD; i++) {
set_pmd(pmdp, __pmd(pmd_addr | prot));
pmd_addr += PMD_SIZE;
pmdp++;
}
new = __pud(__pa(pm_dir) | _REGION3_ENTRY);
pgt_set((unsigned long *)pudp, pud_val(new), addr, CRDTE_DTT_REGION3);
update_page_count(PG_DIRECT_MAP_1M, PTRS_PER_PMD);
update_page_count(PG_DIRECT_MAP_2G, -1);
return 0;
}
static void modify_pud_page(pud_t *pudp, unsigned long addr,
unsigned long flags)
{
pud_t new = *pudp;
if (flags & SET_MEMORY_RO)
new = pud_wrprotect(new);
else if (flags & SET_MEMORY_RW)
new = pud_mkwrite(pud_mkdirty(new));
if (flags & SET_MEMORY_NX)
new = set_pud_bit(new, __pgprot(_REGION_ENTRY_NOEXEC));
else if (flags & SET_MEMORY_X)
new = clear_pud_bit(new, __pgprot(_REGION_ENTRY_NOEXEC));
if (flags & SET_MEMORY_INV) {
new = set_pud_bit(new, __pgprot(_REGION_ENTRY_INVALID));
} else if (flags & SET_MEMORY_DEF) {
new = __pud(pud_val(new) & PUD_MASK);
new = set_pud_bit(new, REGION3_KERNEL);
if (!MACHINE_HAS_NX)
new = clear_pud_bit(new, __pgprot(_REGION_ENTRY_NOEXEC));
}
pgt_set((unsigned long *)pudp, pud_val(new), addr, CRDTE_DTT_REGION3);
}
static int walk_pud_level(p4d_t *p4d, unsigned long addr, unsigned long end,
unsigned long flags)
{
unsigned long next;
int need_split;
pud_t *pudp;
int rc = 0;
pudp = pud_offset(p4d, addr);
do {
if (pud_none(*pudp))
return -EINVAL;
next = pud_addr_end(addr, end);
if (pud_large(*pudp)) {
need_split = !!(flags & SET_MEMORY_4K);
need_split |= !!(addr & ~PUD_MASK);
need_split |= !!(addr + PUD_SIZE > next);
if (need_split) {
rc = split_pud_page(pudp, addr);
if (rc)
break;
continue;
}
modify_pud_page(pudp, addr, flags);
} else {
rc = walk_pmd_level(pudp, addr, next, flags);
}
pudp++;
addr = next;
cond_resched();
} while (addr < end && !rc);
return rc;
}
static int walk_p4d_level(pgd_t *pgd, unsigned long addr, unsigned long end,
unsigned long flags)
{
unsigned long next;
p4d_t *p4dp;
int rc = 0;
p4dp = p4d_offset(pgd, addr);
do {
if (p4d_none(*p4dp))
return -EINVAL;
next = p4d_addr_end(addr, end);
rc = walk_pud_level(p4dp, addr, next, flags);
p4dp++;
addr = next;
cond_resched();
} while (addr < end && !rc);
return rc;
}
DEFINE_MUTEX(cpa_mutex);
static int change_page_attr(unsigned long addr, unsigned long end,
unsigned long flags)
{
unsigned long next;
int rc = -EINVAL;
pgd_t *pgdp;
pgdp = pgd_offset_k(addr);
do {
if (pgd_none(*pgdp))
break;
next = pgd_addr_end(addr, end);
rc = walk_p4d_level(pgdp, addr, next, flags);
if (rc)
break;
cond_resched();
} while (pgdp++, addr = next, addr < end && !rc);
return rc;
}
static int change_page_attr_alias(unsigned long addr, unsigned long end,
unsigned long flags)
{
unsigned long alias, offset, va_start, va_end;
struct vm_struct *area;
int rc = 0;
/*
* Changes to read-only permissions on kernel VA mappings are also
* applied to the kernel direct mapping. Execute permissions are
* intentionally not transferred to keep all allocated pages within
* the direct mapping non-executable.
*/
flags &= SET_MEMORY_RO | SET_MEMORY_RW;
if (!flags)
return 0;
area = NULL;
while (addr < end) {
if (!area)
area = find_vm_area((void *)addr);
if (!area || !(area->flags & VM_ALLOC))
return 0;
va_start = (unsigned long)area->addr;
va_end = va_start + area->nr_pages * PAGE_SIZE;
offset = (addr - va_start) >> PAGE_SHIFT;
alias = (unsigned long)page_address(area->pages[offset]);
rc = change_page_attr(alias, alias + PAGE_SIZE, flags);
if (rc)
break;
addr += PAGE_SIZE;
if (addr >= va_end)
area = NULL;
}
return rc;
}
int __set_memory(unsigned long addr, unsigned long numpages, unsigned long flags)
{
unsigned long end;
int rc;
if (!MACHINE_HAS_NX)
flags &= ~(SET_MEMORY_NX | SET_MEMORY_X);
if (!flags)
return 0;
if (!numpages)
return 0;
addr &= PAGE_MASK;
end = addr + numpages * PAGE_SIZE;
mutex_lock(&cpa_mutex);
rc = change_page_attr(addr, end, flags);
if (rc)
goto out;
rc = change_page_attr_alias(addr, end, flags);
out:
mutex_unlock(&cpa_mutex);
return rc;
}
int set_direct_map_invalid_noflush(struct page *page)
{
return __set_memory((unsigned long)page_to_virt(page), 1, SET_MEMORY_INV);
}
int set_direct_map_default_noflush(struct page *page)
{
return __set_memory((unsigned long)page_to_virt(page), 1, SET_MEMORY_DEF);
}
#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
static void ipte_range(pte_t *pte, unsigned long address, int nr)
{
int i;
if (test_facility(13)) {
__ptep_ipte_range(address, nr - 1, pte, IPTE_GLOBAL);
return;
}
for (i = 0; i < nr; i++) {
__ptep_ipte(address, pte, 0, 0, IPTE_GLOBAL);
address += PAGE_SIZE;
pte++;
}
}
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
unsigned long address;
pte_t *ptep, pte;
int nr, i, j;
for (i = 0; i < numpages;) {
address = (unsigned long)page_to_virt(page + i);
ptep = virt_to_kpte(address);
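/*
* Number of consecutive ptes left in this page table; limit each
* iteration to that many pages so a single range invalidation can
* cover them.
*/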
nr = (unsigned long)ptep >> ilog2(sizeof(long));
nr = PTRS_PER_PTE - (nr & (PTRS_PER_PTE - 1));
nr = min(numpages - i, nr);
if (enable) {
for (j = 0; j < nr; j++) {
pte = clear_pte_bit(*ptep, __pgprot(_PAGE_INVALID));
set_pte(ptep, pte);
address += PAGE_SIZE;
ptep++;
}
} else {
ipte_range(ptep, address, nr);
}
i += nr;
}
}
#endif /* CONFIG_DEBUG_PAGEALLOC || CONFIG_KFENCE */
| linux-master | arch/s390/mm/pageattr.c |
// SPDX-License-Identifier: GPL-2.0
/*
* IBM System z Huge TLB Page Support for Kernel.
*
* Copyright IBM Corp. 2007,2020
* Author(s): Gerald Schaefer <[email protected]>
*/
#define KMSG_COMPONENT "hugetlb"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <asm/pgalloc.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/sched/mm.h>
#include <linux/security.h>
/*
* If the bit selected by single-bit bitmask "a" is set within "x", move
* it to the position indicated by single-bit bitmask "b".
*/
#define move_set_bit(x, a, b) (((x) & (a)) >> ilog2(a) << ilog2(b))
static inline unsigned long __pte_to_rste(pte_t pte)
{
unsigned long rste;
/*
* Convert encoding pte bits pmd / pud bits
* lIR.uswrdy.p dy..R...I...wr
* empty 010.000000.0 -> 00..0...1...00
* prot-none, clean, old 111.000000.1 -> 00..1...1...00
* prot-none, clean, young 111.000001.1 -> 01..1...1...00
* prot-none, dirty, old 111.000010.1 -> 10..1...1...00
* prot-none, dirty, young 111.000011.1 -> 11..1...1...00
* read-only, clean, old 111.000100.1 -> 00..1...1...01
* read-only, clean, young 101.000101.1 -> 01..1...0...01
* read-only, dirty, old 111.000110.1 -> 10..1...1...01
* read-only, dirty, young 101.000111.1 -> 11..1...0...01
* read-write, clean, old 111.001100.1 -> 00..1...1...11
* read-write, clean, young 101.001101.1 -> 01..1...0...11
* read-write, dirty, old 110.001110.1 -> 10..0...1...11
* read-write, dirty, young 100.001111.1 -> 11..0...0...11
* HW-bits: R read-only, I invalid
* SW-bits: p present, y young, d dirty, r read, w write, s special,
* u unused, l large
*/
if (pte_present(pte)) {
rste = pte_val(pte) & PAGE_MASK;
rste |= move_set_bit(pte_val(pte), _PAGE_READ,
_SEGMENT_ENTRY_READ);
rste |= move_set_bit(pte_val(pte), _PAGE_WRITE,
_SEGMENT_ENTRY_WRITE);
rste |= move_set_bit(pte_val(pte), _PAGE_INVALID,
_SEGMENT_ENTRY_INVALID);
rste |= move_set_bit(pte_val(pte), _PAGE_PROTECT,
_SEGMENT_ENTRY_PROTECT);
rste |= move_set_bit(pte_val(pte), _PAGE_DIRTY,
_SEGMENT_ENTRY_DIRTY);
rste |= move_set_bit(pte_val(pte), _PAGE_YOUNG,
_SEGMENT_ENTRY_YOUNG);
#ifdef CONFIG_MEM_SOFT_DIRTY
rste |= move_set_bit(pte_val(pte), _PAGE_SOFT_DIRTY,
_SEGMENT_ENTRY_SOFT_DIRTY);
#endif
rste |= move_set_bit(pte_val(pte), _PAGE_NOEXEC,
_SEGMENT_ENTRY_NOEXEC);
} else
rste = _SEGMENT_ENTRY_EMPTY;
return rste;
}
static inline pte_t __rste_to_pte(unsigned long rste)
{
unsigned long pteval;
int present;
if ((rste & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
present = pud_present(__pud(rste));
else
present = pmd_present(__pmd(rste));
/*
* Convert encoding pmd / pud bits pte bits
* dy..R...I...wr lIR.uswrdy.p
* empty 00..0...1...00 -> 010.000000.0
* prot-none, clean, old 00..1...1...00 -> 111.000000.1
* prot-none, clean, young 01..1...1...00 -> 111.000001.1
* prot-none, dirty, old 10..1...1...00 -> 111.000010.1
* prot-none, dirty, young 11..1...1...00 -> 111.000011.1
* read-only, clean, old 00..1...1...01 -> 111.000100.1
* read-only, clean, young 01..1...0...01 -> 101.000101.1
* read-only, dirty, old 10..1...1...01 -> 111.000110.1
* read-only, dirty, young 11..1...0...01 -> 101.000111.1
* read-write, clean, old 00..1...1...11 -> 111.001100.1
* read-write, clean, young 01..1...0...11 -> 101.001101.1
* read-write, dirty, old 10..0...1...11 -> 110.001110.1
* read-write, dirty, young 11..0...0...11 -> 100.001111.1
* HW-bits: R read-only, I invalid
* SW-bits: p present, y young, d dirty, r read, w write, s special,
* u unused, l large
*/
if (present) {
pteval = rste & _SEGMENT_ENTRY_ORIGIN_LARGE;
pteval |= _PAGE_LARGE | _PAGE_PRESENT;
pteval |= move_set_bit(rste, _SEGMENT_ENTRY_READ, _PAGE_READ);
pteval |= move_set_bit(rste, _SEGMENT_ENTRY_WRITE, _PAGE_WRITE);
pteval |= move_set_bit(rste, _SEGMENT_ENTRY_INVALID, _PAGE_INVALID);
pteval |= move_set_bit(rste, _SEGMENT_ENTRY_PROTECT, _PAGE_PROTECT);
pteval |= move_set_bit(rste, _SEGMENT_ENTRY_DIRTY, _PAGE_DIRTY);
pteval |= move_set_bit(rste, _SEGMENT_ENTRY_YOUNG, _PAGE_YOUNG);
#ifdef CONFIG_MEM_SOFT_DIRTY
pteval |= move_set_bit(rste, _SEGMENT_ENTRY_SOFT_DIRTY, _PAGE_SOFT_DIRTY);
#endif
pteval |= move_set_bit(rste, _SEGMENT_ENTRY_NOEXEC, _PAGE_NOEXEC);
} else
pteval = _PAGE_INVALID;
return __pte(pteval);
}
static void clear_huge_pte_skeys(struct mm_struct *mm, unsigned long rste)
{
struct page *page;
unsigned long size, paddr;
if (!mm_uses_skeys(mm) ||
rste & _SEGMENT_ENTRY_INVALID)
return;
if ((rste & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) {
page = pud_page(__pud(rste));
size = PUD_SIZE;
paddr = rste & PUD_MASK;
} else {
page = pmd_page(__pmd(rste));
size = PMD_SIZE;
paddr = rste & PMD_MASK;
}
if (!test_and_set_bit(PG_arch_1, &page->flags))
__storage_key_init_range(paddr, paddr + size - 1);
}
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
unsigned long rste;
rste = __pte_to_rste(pte);
if (!MACHINE_HAS_NX)
rste &= ~_SEGMENT_ENTRY_NOEXEC;
/* Set correct table type for 2G hugepages */
if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) {
if (likely(pte_present(pte)))
rste |= _REGION3_ENTRY_LARGE;
rste |= _REGION_ENTRY_TYPE_R3;
} else if (likely(pte_present(pte)))
rste |= _SEGMENT_ENTRY_LARGE;
clear_huge_pte_skeys(mm, rste);
set_pte(ptep, __pte(rste));
}
pte_t huge_ptep_get(pte_t *ptep)
{
return __rste_to_pte(pte_val(*ptep));
}
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
pte_t pte = huge_ptep_get(ptep);
pmd_t *pmdp = (pmd_t *) ptep;
pud_t *pudp = (pud_t *) ptep;
if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
pudp_xchg_direct(mm, addr, pudp, __pud(_REGION3_ENTRY_EMPTY));
else
pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
return pte;
}
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, unsigned long sz)
{
pgd_t *pgdp;
p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp = NULL;
pgdp = pgd_offset(mm, addr);
p4dp = p4d_alloc(mm, pgdp, addr);
if (p4dp) {
pudp = pud_alloc(mm, p4dp, addr);
if (pudp) {
if (sz == PUD_SIZE)
return (pte_t *) pudp;
else if (sz == PMD_SIZE)
pmdp = pmd_alloc(mm, pudp, addr);
}
}
return (pte_t *) pmdp;
}
pte_t *huge_pte_offset(struct mm_struct *mm,
unsigned long addr, unsigned long sz)
{
pgd_t *pgdp;
p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp = NULL;
pgdp = pgd_offset(mm, addr);
if (pgd_present(*pgdp)) {
p4dp = p4d_offset(pgdp, addr);
if (p4d_present(*p4dp)) {
pudp = pud_offset(p4dp, addr);
if (pud_present(*pudp)) {
if (pud_large(*pudp))
return (pte_t *) pudp;
pmdp = pmd_offset(pudp, addr);
}
}
}
return (pte_t *) pmdp;
}
int pmd_huge(pmd_t pmd)
{
return pmd_large(pmd);
}
int pud_huge(pud_t pud)
{
return pud_large(pud);
}
bool __init arch_hugetlb_valid_size(unsigned long size)
{
if (MACHINE_HAS_EDAT1 && size == PMD_SIZE)
return true;
else if (MACHINE_HAS_EDAT2 && size == PUD_SIZE)
return true;
else
return false;
}
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags)
{
struct hstate *h = hstate_file(file);
struct vm_unmapped_area_info info;
info.flags = 0;
info.length = len;
info.low_limit = current->mm->mmap_base;
info.high_limit = TASK_SIZE;
info.align_mask = PAGE_MASK & ~huge_page_mask(h);
info.align_offset = 0;
return vm_unmapped_area(&info);
}
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
unsigned long addr0, unsigned long len,
unsigned long pgoff, unsigned long flags)
{
struct hstate *h = hstate_file(file);
struct vm_unmapped_area_info info;
unsigned long addr;
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
info.length = len;
info.low_limit = PAGE_SIZE;
info.high_limit = current->mm->mmap_base;
info.align_mask = PAGE_MASK & ~huge_page_mask(h);
info.align_offset = 0;
addr = vm_unmapped_area(&info);
/*
* A failed mmap() very likely causes application failure,
* so fall back to the bottom-up function here. This scenario
* can happen with large stack limits and large mmap()
* allocations.
*/
if (addr & ~PAGE_MASK) {
VM_BUG_ON(addr != -ENOMEM);
info.flags = 0;
info.low_limit = TASK_UNMAPPED_BASE;
info.high_limit = TASK_SIZE;
addr = vm_unmapped_area(&info);
}
return addr;
}
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct hstate *h = hstate_file(file);
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
if (len & ~huge_page_mask(h))
return -EINVAL;
if (len > TASK_SIZE - mmap_min_addr)
return -ENOMEM;
if (flags & MAP_FIXED) {
if (prepare_hugepage_range(file, addr, len))
return -EINVAL;
goto check_asce_limit;
}
if (addr) {
addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
(!vma || addr + len <= vm_start_gap(vma)))
goto check_asce_limit;
}
if (mm->get_unmapped_area == arch_get_unmapped_area)
addr = hugetlb_get_unmapped_area_bottomup(file, addr, len,
pgoff, flags);
else
addr = hugetlb_get_unmapped_area_topdown(file, addr, len,
pgoff, flags);
if (offset_in_page(addr))
return addr;
check_asce_limit:
return check_asce_limit(mm, addr, len);
}
| linux-master | arch/s390/mm/hugetlbpage.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Collaborative memory management interface.
*
* Copyright IBM Corp 2003, 2010
* Author(s): Martin Schwidefsky <[email protected]>,
*
*/
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/string_helpers.h>
#include <linux/sysctl.h>
#include <linux/swap.h>
#include <linux/kthread.h>
#include <linux/oom.h>
#include <linux/uaccess.h>
#include <asm/diag.h>
#ifdef CONFIG_CMM_IUCV
static char *cmm_default_sender = "VMRMSVM";
#endif
static char *sender;
module_param(sender, charp, 0400);
MODULE_PARM_DESC(sender,
"Guest name that may send SMSG messages (default VMRMSVM)");
#include "../../../drivers/s390/net/smsgiucv.h"
#define CMM_NR_PAGES ((PAGE_SIZE / sizeof(unsigned long)) - 2)
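/*
* Each list node occupies exactly one page and records the addresses
* of up to CMM_NR_PAGES pages whose contents were released to the
* hypervisor with diag10_range().
*/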
struct cmm_page_array {
struct cmm_page_array *next;
unsigned long index;
unsigned long pages[CMM_NR_PAGES];
};
static long cmm_pages;
static long cmm_timed_pages;
static volatile long cmm_pages_target;
static volatile long cmm_timed_pages_target;
static long cmm_timeout_pages;
static long cmm_timeout_seconds;
static struct cmm_page_array *cmm_page_list;
static struct cmm_page_array *cmm_timed_page_list;
static DEFINE_SPINLOCK(cmm_lock);
static struct task_struct *cmm_thread_ptr;
static DECLARE_WAIT_QUEUE_HEAD(cmm_thread_wait);
static void cmm_timer_fn(struct timer_list *);
static void cmm_set_timer(void);
static DEFINE_TIMER(cmm_timer, cmm_timer_fn);
static long cmm_alloc_pages(long nr, long *counter,
struct cmm_page_array **list)
{
struct cmm_page_array *pa, *npa;
unsigned long addr;
while (nr) {
addr = __get_free_page(GFP_NOIO);
if (!addr)
break;
spin_lock(&cmm_lock);
pa = *list;
if (!pa || pa->index >= CMM_NR_PAGES) {
/* Need a new page for the page list. */
spin_unlock(&cmm_lock);
npa = (struct cmm_page_array *)
__get_free_page(GFP_NOIO);
if (!npa) {
free_page(addr);
break;
}
spin_lock(&cmm_lock);
pa = *list;
if (!pa || pa->index >= CMM_NR_PAGES) {
npa->next = pa;
npa->index = 0;
pa = npa;
*list = pa;
} else
free_page((unsigned long) npa);
}
diag10_range(virt_to_pfn((void *)addr), 1);
pa->pages[pa->index++] = addr;
(*counter)++;
spin_unlock(&cmm_lock);
nr--;
}
return nr;
}
static long cmm_free_pages(long nr, long *counter, struct cmm_page_array **list)
{
struct cmm_page_array *pa;
unsigned long addr;
spin_lock(&cmm_lock);
pa = *list;
while (nr) {
if (!pa || pa->index <= 0)
break;
addr = pa->pages[--pa->index];
if (pa->index == 0) {
pa = pa->next;
free_page((unsigned long) *list);
*list = pa;
}
free_page(addr);
(*counter)--;
nr--;
}
spin_unlock(&cmm_lock);
return nr;
}
static int cmm_oom_notify(struct notifier_block *self,
unsigned long dummy, void *parm)
{
unsigned long *freed = parm;
long nr = 256;
nr = cmm_free_pages(nr, &cmm_timed_pages, &cmm_timed_page_list);
if (nr > 0)
nr = cmm_free_pages(nr, &cmm_pages, &cmm_page_list);
cmm_pages_target = cmm_pages;
cmm_timed_pages_target = cmm_timed_pages;
*freed += 256 - nr;
return NOTIFY_OK;
}
static struct notifier_block cmm_oom_nb = {
.notifier_call = cmm_oom_notify,
};
static int cmm_thread(void *dummy)
{
int rc;
while (1) {
rc = wait_event_interruptible(cmm_thread_wait,
cmm_pages != cmm_pages_target ||
cmm_timed_pages != cmm_timed_pages_target ||
kthread_should_stop());
if (kthread_should_stop() || rc == -ERESTARTSYS) {
cmm_pages_target = cmm_pages;
cmm_timed_pages_target = cmm_timed_pages;
break;
}
if (cmm_pages_target > cmm_pages) {
if (cmm_alloc_pages(1, &cmm_pages, &cmm_page_list))
cmm_pages_target = cmm_pages;
} else if (cmm_pages_target < cmm_pages) {
cmm_free_pages(1, &cmm_pages, &cmm_page_list);
}
if (cmm_timed_pages_target > cmm_timed_pages) {
if (cmm_alloc_pages(1, &cmm_timed_pages,
&cmm_timed_page_list))
cmm_timed_pages_target = cmm_timed_pages;
} else if (cmm_timed_pages_target < cmm_timed_pages) {
cmm_free_pages(1, &cmm_timed_pages,
&cmm_timed_page_list);
}
if (cmm_timed_pages > 0 && !timer_pending(&cmm_timer))
cmm_set_timer();
}
return 0;
}
static void cmm_kick_thread(void)
{
wake_up(&cmm_thread_wait);
}
static void cmm_set_timer(void)
{
if (cmm_timed_pages_target <= 0 || cmm_timeout_seconds <= 0) {
if (timer_pending(&cmm_timer))
del_timer(&cmm_timer);
return;
}
mod_timer(&cmm_timer, jiffies + msecs_to_jiffies(cmm_timeout_seconds * MSEC_PER_SEC));
}
static void cmm_timer_fn(struct timer_list *unused)
{
long nr;
nr = cmm_timed_pages_target - cmm_timeout_pages;
if (nr < 0)
cmm_timed_pages_target = 0;
else
cmm_timed_pages_target = nr;
cmm_kick_thread();
cmm_set_timer();
}
static void cmm_set_pages(long nr)
{
cmm_pages_target = nr;
cmm_kick_thread();
}
static long cmm_get_pages(void)
{
return cmm_pages;
}
static void cmm_add_timed_pages(long nr)
{
cmm_timed_pages_target += nr;
cmm_kick_thread();
}
static long cmm_get_timed_pages(void)
{
return cmm_timed_pages;
}
static void cmm_set_timeout(long nr, long seconds)
{
cmm_timeout_pages = nr;
cmm_timeout_seconds = seconds;
cmm_set_timer();
}
static int cmm_skip_blanks(char *cp, char **endp)
{
char *str;
for (str = cp; *str == ' ' || *str == '\t'; str++)
;
*endp = str;
return str != cp;
}
static int cmm_pages_handler(struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
long nr = cmm_get_pages();
struct ctl_table ctl_entry = {
.procname = ctl->procname,
.data = &nr,
.maxlen = sizeof(long),
};
int rc;
rc = proc_doulongvec_minmax(&ctl_entry, write, buffer, lenp, ppos);
if (rc < 0 || !write)
return rc;
cmm_set_pages(nr);
return 0;
}
static int cmm_timed_pages_handler(struct ctl_table *ctl, int write,
void *buffer, size_t *lenp,
loff_t *ppos)
{
long nr = cmm_get_timed_pages();
struct ctl_table ctl_entry = {
.procname = ctl->procname,
.data = &nr,
.maxlen = sizeof(long),
};
int rc;
rc = proc_doulongvec_minmax(&ctl_entry, write, buffer, lenp, ppos);
if (rc < 0 || !write)
return rc;
cmm_add_timed_pages(nr);
return 0;
}
static int cmm_timeout_handler(struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
char buf[64], *p;
long nr, seconds;
unsigned int len;
if (!*lenp || (*ppos && !write)) {
*lenp = 0;
return 0;
}
if (write) {
len = min(*lenp, sizeof(buf));
memcpy(buf, buffer, len);
buf[len - 1] = '\0';
cmm_skip_blanks(buf, &p);
nr = simple_strtoul(p, &p, 0);
cmm_skip_blanks(p, &p);
seconds = simple_strtoul(p, &p, 0);
cmm_set_timeout(nr, seconds);
*ppos += *lenp;
} else {
len = sprintf(buf, "%ld %ld\n",
cmm_timeout_pages, cmm_timeout_seconds);
if (len > *lenp)
len = *lenp;
memcpy(buffer, buf, len);
*lenp = len;
*ppos += len;
}
return 0;
}
static struct ctl_table cmm_table[] = {
{
.procname = "cmm_pages",
.mode = 0644,
.proc_handler = cmm_pages_handler,
},
{
.procname = "cmm_timed_pages",
.mode = 0644,
.proc_handler = cmm_timed_pages_handler,
},
{
.procname = "cmm_timeout",
.mode = 0644,
.proc_handler = cmm_timeout_handler,
},
{ }
};
#ifdef CONFIG_CMM_IUCV
#define SMSG_PREFIX "CMM"
static void cmm_smsg_target(const char *from, char *msg)
{
long nr, seconds;
if (strlen(sender) > 0 && strcmp(from, sender) != 0)
return;
if (!cmm_skip_blanks(msg + strlen(SMSG_PREFIX), &msg))
return;
if (strncmp(msg, "SHRINK", 6) == 0) {
if (!cmm_skip_blanks(msg + 6, &msg))
return;
nr = simple_strtoul(msg, &msg, 0);
cmm_skip_blanks(msg, &msg);
if (*msg == '\0')
cmm_set_pages(nr);
} else if (strncmp(msg, "RELEASE", 7) == 0) {
if (!cmm_skip_blanks(msg + 7, &msg))
return;
nr = simple_strtoul(msg, &msg, 0);
cmm_skip_blanks(msg, &msg);
if (*msg == '\0')
cmm_add_timed_pages(nr);
} else if (strncmp(msg, "REUSE", 5) == 0) {
if (!cmm_skip_blanks(msg + 5, &msg))
return;
nr = simple_strtoul(msg, &msg, 0);
if (!cmm_skip_blanks(msg, &msg))
return;
seconds = simple_strtoul(msg, &msg, 0);
cmm_skip_blanks(msg, &msg);
if (*msg == '\0')
cmm_set_timeout(nr, seconds);
}
}
#endif
static struct ctl_table_header *cmm_sysctl_header;
static int __init cmm_init(void)
{
int rc = -ENOMEM;
cmm_sysctl_header = register_sysctl("vm", cmm_table);
if (!cmm_sysctl_header)
goto out_sysctl;
#ifdef CONFIG_CMM_IUCV
/* convert sender to uppercase characters */
if (sender)
string_upper(sender, sender);
else
sender = cmm_default_sender;
rc = smsg_register_callback(SMSG_PREFIX, cmm_smsg_target);
if (rc < 0)
goto out_smsg;
#endif
rc = register_oom_notifier(&cmm_oom_nb);
if (rc < 0)
goto out_oom_notify;
cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread");
if (!IS_ERR(cmm_thread_ptr))
return 0;
rc = PTR_ERR(cmm_thread_ptr);
unregister_oom_notifier(&cmm_oom_nb);
out_oom_notify:
#ifdef CONFIG_CMM_IUCV
smsg_unregister_callback(SMSG_PREFIX, cmm_smsg_target);
out_smsg:
#endif
unregister_sysctl_table(cmm_sysctl_header);
out_sysctl:
del_timer_sync(&cmm_timer);
return rc;
}
module_init(cmm_init);
static void __exit cmm_exit(void)
{
unregister_sysctl_table(cmm_sysctl_header);
#ifdef CONFIG_CMM_IUCV
smsg_unregister_callback(SMSG_PREFIX, cmm_smsg_target);
#endif
unregister_oom_notifier(&cmm_oom_nb);
kthread_stop(cmm_thread_ptr);
del_timer_sync(&cmm_timer);
cmm_free_pages(cmm_pages, &cmm_pages, &cmm_page_list);
cmm_free_pages(cmm_timed_pages, &cmm_timed_pages, &cmm_timed_page_list);
}
module_exit(cmm_exit);
MODULE_LICENSE("GPL");
| linux-master | arch/s390/mm/cmm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Access kernel memory without faulting -- s390 specific implementation.
*
* Copyright IBM Corp. 2009, 2015
*
*/
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/cpu.h>
#include <linux/uio.h>
#include <linux/io.h>
#include <asm/asm-extable.h>
#include <asm/ctl_reg.h>
#include <asm/abs_lowcore.h>
#include <asm/stacktrace.h>
#include <asm/maccess.h>
unsigned long __bootdata_preserved(__memcpy_real_area);
pte_t *__bootdata_preserved(memcpy_real_ptep);
static DEFINE_MUTEX(memcpy_real_mutex);
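/*
* Write up to eight bytes within one aligned quadword: read the
* quadword from the destination, patch the requested bytes with an
* executed MVC from the source, and store the result back with STURG,
* which uses the real address and thus bypasses DAT write protection.
*/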
static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t size)
{
unsigned long aligned, offset, count;
char tmp[8];
aligned = (unsigned long) dst & ~7UL;
offset = (unsigned long) dst & 7UL;
size = min(8UL - offset, size);
count = size - 1;
asm volatile(
" bras 1,0f\n"
" mvc 0(1,%4),0(%5)\n"
"0: mvc 0(8,%3),0(%0)\n"
" ex %1,0(1)\n"
" lg %1,0(%3)\n"
" lra %0,0(%0)\n"
" sturg %1,%0\n"
: "+&a" (aligned), "+&a" (count), "=m" (tmp)
: "a" (&tmp), "a" (&tmp[offset]), "a" (src)
: "cc", "memory", "1");
return size;
}
/*
* s390_kernel_write - write to kernel memory bypassing DAT
* @dst: destination address
* @src: source address
* @size: number of bytes to copy
*
* This function writes to kernel memory bypassing DAT and possible page table
* write protection. It writes to the destination using the sturg instruction.
* Therefore we have a read-modify-write sequence: the function reads eight
* bytes from destination at an eight byte boundary, modifies the bytes
* requested and writes the result back in a loop.
*/
static DEFINE_SPINLOCK(s390_kernel_write_lock);
notrace void *s390_kernel_write(void *dst, const void *src, size_t size)
{
void *tmp = dst;
unsigned long flags;
long copied;
spin_lock_irqsave(&s390_kernel_write_lock, flags);
while (size) {
copied = s390_kernel_write_odd(tmp, src, size);
tmp += copied;
src += copied;
size -= copied;
}
spin_unlock_irqrestore(&s390_kernel_write_lock, flags);
return dst;
}
size_t memcpy_real_iter(struct iov_iter *iter, unsigned long src, size_t count)
{
size_t len, copied, res = 0;
unsigned long phys, offset;
void *chunk;
pte_t pte;
BUILD_BUG_ON(MEMCPY_REAL_SIZE != PAGE_SIZE);
while (count) {
phys = src & MEMCPY_REAL_MASK;
offset = src & ~MEMCPY_REAL_MASK;
chunk = (void *)(__memcpy_real_area + offset);
len = min(count, MEMCPY_REAL_SIZE - offset);
pte = mk_pte_phys(phys, PAGE_KERNEL_RO);
mutex_lock(&memcpy_real_mutex);
if (pte_val(pte) != pte_val(*memcpy_real_ptep)) {
__ptep_ipte(__memcpy_real_area, memcpy_real_ptep, 0, 0, IPTE_GLOBAL);
set_pte(memcpy_real_ptep, pte);
}
copied = copy_to_iter(chunk, len, iter);
mutex_unlock(&memcpy_real_mutex);
count -= copied;
src += copied;
res += copied;
if (copied < len)
break;
}
return res;
}
int memcpy_real(void *dest, unsigned long src, size_t count)
{
struct iov_iter iter;
struct kvec kvec;
kvec.iov_base = dest;
kvec.iov_len = count;
iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, count);
if (memcpy_real_iter(&iter, src, count) < count)
return -EFAULT;
return 0;
}
/*
 * Find the CPU that owns the swapped prefix page
*/
static int get_swapped_owner(phys_addr_t addr)
{
phys_addr_t lc;
int cpu;
for_each_online_cpu(cpu) {
lc = virt_to_phys(lowcore_ptr[cpu]);
if (addr > lc + sizeof(struct lowcore) - 1 || addr < lc)
continue;
return cpu;
}
return -1;
}
/*
* Convert a physical pointer for /dev/mem access
*
* For swapped prefix pages a new buffer is returned that contains a copy of
 * the absolute memory. The buffer is at most one page in size.
*/
void *xlate_dev_mem_ptr(phys_addr_t addr)
{
void *ptr = phys_to_virt(addr);
void *bounce = ptr;
struct lowcore *abs_lc;
unsigned long size;
int this_cpu, cpu;
cpus_read_lock();
this_cpu = get_cpu();
if (addr >= sizeof(struct lowcore)) {
cpu = get_swapped_owner(addr);
if (cpu < 0)
goto out;
}
bounce = (void *)__get_free_page(GFP_ATOMIC);
if (!bounce)
goto out;
size = PAGE_SIZE - (addr & ~PAGE_MASK);
if (addr < sizeof(struct lowcore)) {
abs_lc = get_abs_lowcore();
ptr = (void *)abs_lc + addr;
memcpy(bounce, ptr, size);
put_abs_lowcore(abs_lc);
} else if (cpu == this_cpu) {
ptr = (void *)(addr - virt_to_phys(lowcore_ptr[cpu]));
memcpy(bounce, ptr, size);
} else {
memcpy(bounce, ptr, size);
}
out:
put_cpu();
cpus_read_unlock();
return bounce;
}
/*
* Free converted buffer for /dev/mem access (if necessary)
*/
void unxlate_dev_mem_ptr(phys_addr_t addr, void *ptr)
{
if (addr != virt_to_phys(ptr))
free_page((unsigned long)ptr);
}
| linux-master | arch/s390/mm/maccess.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* flexible mmap layout support
*
* Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
* All Rights Reserved.
*
* Started by Ingo Molnar <[email protected]>
*/
#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/random.h>
#include <linux/compat.h>
#include <linux/security.h>
#include <asm/elf.h>
static unsigned long stack_maxrandom_size(void)
{
if (!(current->flags & PF_RANDOMIZE))
return 0;
return STACK_RND_MASK << PAGE_SHIFT;
}
static inline int mmap_is_legacy(struct rlimit *rlim_stack)
{
if (current->personality & ADDR_COMPAT_LAYOUT)
return 1;
if (rlim_stack->rlim_cur == RLIM_INFINITY)
return 1;
return sysctl_legacy_va_layout;
}
unsigned long arch_mmap_rnd(void)
{
return (get_random_u32() & MMAP_RND_MASK) << PAGE_SHIFT;
}
static unsigned long mmap_base_legacy(unsigned long rnd)
{
return TASK_UNMAPPED_BASE + rnd;
}
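/*
 * Compute the top of the mmap area: STACK_TOP minus the stack gap
 * (clamped between 128 MB and 5/6 of STACK_TOP) and the random offset.
 */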
static inline unsigned long mmap_base(unsigned long rnd,
struct rlimit *rlim_stack)
{
unsigned long gap = rlim_stack->rlim_cur;
unsigned long pad = stack_maxrandom_size() + stack_guard_gap;
unsigned long gap_min, gap_max;
/* Values close to RLIM_INFINITY can overflow. */
if (gap + pad > gap)
gap += pad;
/*
* Top of mmap area (just below the process stack).
* Leave at least a ~128 MB hole.
*/
gap_min = SZ_128M;
gap_max = (STACK_TOP / 6) * 5;
if (gap < gap_min)
gap = gap_min;
else if (gap > gap_max)
gap = gap_max;
return PAGE_ALIGN(STACK_TOP - gap - rnd);
}
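/*
 * Bottom-up search for a free area between mm->mmap_base and TASK_SIZE;
 * shared and file-backed mappings are aligned according to MMAP_ALIGN_MASK.
 */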
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
unsigned long flags)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
struct vm_unmapped_area_info info;
if (len > TASK_SIZE - mmap_min_addr)
return -ENOMEM;
if (flags & MAP_FIXED)
goto check_asce_limit;
if (addr) {
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
(!vma || addr + len <= vm_start_gap(vma)))
goto check_asce_limit;
}
info.flags = 0;
info.length = len;
info.low_limit = mm->mmap_base;
info.high_limit = TASK_SIZE;
if (filp || (flags & MAP_SHARED))
info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
else
info.align_mask = 0;
info.align_offset = pgoff << PAGE_SHIFT;
addr = vm_unmapped_area(&info);
if (offset_in_page(addr))
return addr;
check_asce_limit:
return check_asce_limit(mm, addr, len);
}
unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
unsigned long flags)
{
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
struct vm_unmapped_area_info info;
/* requested length too big for entire address space */
if (len > TASK_SIZE - mmap_min_addr)
return -ENOMEM;
if (flags & MAP_FIXED)
goto check_asce_limit;
/* requesting a specific address */
if (addr) {
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
(!vma || addr + len <= vm_start_gap(vma)))
goto check_asce_limit;
}
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
info.length = len;
info.low_limit = PAGE_SIZE;
info.high_limit = mm->mmap_base;
if (filp || (flags & MAP_SHARED))
info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
else
info.align_mask = 0;
info.align_offset = pgoff << PAGE_SHIFT;
addr = vm_unmapped_area(&info);
/*
* A failed mmap() very likely causes application failure,
* so fall back to the bottom-up function here. This scenario
* can happen with large stack limits and large mmap()
* allocations.
*/
if (offset_in_page(addr)) {
VM_BUG_ON(addr != -ENOMEM);
info.flags = 0;
info.low_limit = TASK_UNMAPPED_BASE;
info.high_limit = TASK_SIZE;
addr = vm_unmapped_area(&info);
if (offset_in_page(addr))
return addr;
}
check_asce_limit:
return check_asce_limit(mm, addr, len);
}
/*
* This function, called very early during the creation of a new
* process VM image, sets up which VM layout function to use:
*/
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
unsigned long random_factor = 0UL;
if (current->flags & PF_RANDOMIZE)
random_factor = arch_mmap_rnd();
/*
* Fall back to the standard layout if the personality
* bit is set, or if the expected stack growth is unlimited:
*/
if (mmap_is_legacy(rlim_stack)) {
mm->mmap_base = mmap_base_legacy(random_factor);
mm->get_unmapped_area = arch_get_unmapped_area;
} else {
mm->mmap_base = mmap_base(random_factor, rlim_stack);
mm->get_unmapped_area = arch_get_unmapped_area_topdown;
}
}
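/*
 * Map VM_* protection flag combinations to s390 page protections.
 * Write access implies read access; without the shared bit, write
 * requests degrade to read-only mappings.
 */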
static const pgprot_t protection_map[16] = {
[VM_NONE] = PAGE_NONE,
[VM_READ] = PAGE_RO,
[VM_WRITE] = PAGE_RO,
[VM_WRITE | VM_READ] = PAGE_RO,
[VM_EXEC] = PAGE_RX,
[VM_EXEC | VM_READ] = PAGE_RX,
[VM_EXEC | VM_WRITE] = PAGE_RX,
[VM_EXEC | VM_WRITE | VM_READ] = PAGE_RX,
[VM_SHARED] = PAGE_NONE,
[VM_SHARED | VM_READ] = PAGE_RO,
[VM_SHARED | VM_WRITE] = PAGE_RW,
[VM_SHARED | VM_WRITE | VM_READ] = PAGE_RW,
[VM_SHARED | VM_EXEC] = PAGE_RX,
[VM_SHARED | VM_EXEC | VM_READ] = PAGE_RX,
[VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_RWX,
[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_RWX
};
DECLARE_VM_GET_PAGE_PROT
| linux-master | arch/s390/mm/mmap.c |
// SPDX-License-Identifier: GPL-2.0
/*
* S390 version
* Copyright IBM Corp. 1999
* Author(s): Hartmut Penner ([email protected])
*
* Derived from "arch/i386/mm/init.c"
* Copyright (C) 1995 Linus Torvalds
*/
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swiotlb.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/initrd.h>
#include <linux/export.h>
#include <linux/cma.h>
#include <linux/gfp.h>
#include <linux/dma-direct.h>
#include <linux/percpu.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/kfence.h>
#include <asm/ptdump.h>
#include <asm/dma.h>
#include <asm/abs_lowcore.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/ctl_reg.h>
#include <asm/sclp.h>
#include <asm/set_memory.h>
#include <asm/kasan.h>
#include <asm/dma-mapping.h>
#include <asm/uv.h>
#include <linux/virtio_anchor.h>
#include <linux/virtio_config.h>
pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".bss..swapper_pg_dir");
pgd_t invalid_pg_dir[PTRS_PER_PGD] __section(".bss..invalid_pg_dir");
unsigned long __bootdata_preserved(s390_invalid_asce);
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);
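/*
 * Allocate a contiguous block of zeroed pages to back empty_zero_page
 * and set zero_page_mask so that a page within the block can be picked
 * based on the mapping address (see the granularity comment below).
 */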
static void __init setup_zero_pages(void)
{
unsigned int order;
struct page *page;
int i;
/* Latest machines require a mapping granularity of 512KB */
order = 7;
/* Limit number of empty zero pages for small memory sizes */
while (order > 2 && (totalram_pages() >> 10) < (1UL << order))
order--;
empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
if (!empty_zero_page)
panic("Out of memory in setup_zero_pages");
page = virt_to_page((void *) empty_zero_page);
split_page(page, order);
for (i = 1 << order; i > 0; i--) {
mark_page_reserved(page);
page++;
}
zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}
/*
* paging_init() sets up the page tables
*/
void __init paging_init(void)
{
unsigned long max_zone_pfns[MAX_NR_ZONES];
vmem_map_init();
sparse_init();
zone_dma_bits = 31;
memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
max_zone_pfns[ZONE_DMA] = virt_to_pfn(MAX_DMA_ADDRESS);
max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
free_area_init(max_zone_pfns);
}
void mark_rodata_ro(void)
{
unsigned long size = __end_ro_after_init - __start_ro_after_init;
__set_memory_ro(__start_ro_after_init, __end_ro_after_init);
pr_info("Write protected read-only-after-init data: %luk\n", size >> 10);
debug_checkwx();
}
int set_memory_encrypted(unsigned long vaddr, int numpages)
{
int i;
/* make specified pages unshared (swiotlb, dma_free) */
for (i = 0; i < numpages; ++i) {
uv_remove_shared(virt_to_phys((void *)vaddr));
vaddr += PAGE_SIZE;
}
return 0;
}
int set_memory_decrypted(unsigned long vaddr, int numpages)
{
int i;
/* make specified pages shared (swiotlb, dma_alloc) */
for (i = 0; i < numpages; ++i) {
uv_set_shared(virt_to_phys((void *)vaddr));
vaddr += PAGE_SIZE;
}
return 0;
}
/* are we a protected virtualization guest? */
bool force_dma_unencrypted(struct device *dev)
{
return is_prot_virt_guest();
}
/* protected virtualization */
static void pv_init(void)
{
if (!is_prot_virt_guest())
return;
virtio_set_mem_acc_cb(virtio_require_restricted_mem_acc);
/* make sure bounce buffers are shared */
swiotlb_init(true, SWIOTLB_FORCE | SWIOTLB_VERBOSE);
swiotlb_update_mem_attributes();
}
void __init mem_init(void)
{
cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
cpumask_set_cpu(0, mm_cpumask(&init_mm));
set_max_mapnr(max_low_pfn);
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
pv_init();
kfence_split_mapping();
/* Set up guest page hinting */
cmma_init();
/* this will put all low memory onto the freelists */
memblock_free_all();
setup_zero_pages(); /* Set up zeroed pages. */
cmma_init_nodat();
}
void free_initmem(void)
{
set_memory_rwnx((unsigned long)_sinittext,
(unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT);
free_initmem_default(POISON_FREE_INITMEM);
}
unsigned long memory_block_size_bytes(void)
{
/*
 * Make sure the memory block size is always greater
 * than or equal to the memory increment size.
*/
return max_t(unsigned long, MIN_MEMORY_BLOCK_SIZE, sclp.rzm);
}
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
return LOCAL_DISTANCE;
}
static int __init pcpu_cpu_to_node(int cpu)
{
return 0;
}
void __init setup_per_cpu_areas(void)
{
unsigned long delta;
unsigned int cpu;
int rc;
/*
* Always reserve area for module percpu variables. That's
* what the legacy allocator did.
*/
rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
pcpu_cpu_distance,
pcpu_cpu_to_node);
if (rc < 0)
panic("Failed to initialize percpu areas.");
delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
for_each_possible_cpu(cpu)
__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}
#ifdef CONFIG_MEMORY_HOTPLUG
#ifdef CONFIG_CMA
/* Prevent memory blocks which contain cma regions from going offline */
struct s390_cma_mem_data {
unsigned long start;
unsigned long end;
};
static int s390_cma_check_range(struct cma *cma, void *data)
{
struct s390_cma_mem_data *mem_data;
unsigned long start, end;
mem_data = data;
start = cma_get_base(cma);
end = start + cma_get_size(cma);
if (end < mem_data->start)
return 0;
if (start >= mem_data->end)
return 0;
return -EBUSY;
}
static int s390_cma_mem_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
struct s390_cma_mem_data mem_data;
struct memory_notify *arg;
int rc = 0;
arg = data;
mem_data.start = arg->start_pfn << PAGE_SHIFT;
mem_data.end = mem_data.start + (arg->nr_pages << PAGE_SHIFT);
if (action == MEM_GOING_OFFLINE)
rc = cma_for_each_area(s390_cma_check_range, &mem_data);
return notifier_from_errno(rc);
}
static struct notifier_block s390_cma_mem_nb = {
.notifier_call = s390_cma_mem_notifier,
};
static int __init s390_cma_mem_init(void)
{
return register_memory_notifier(&s390_cma_mem_nb);
}
device_initcall(s390_cma_mem_init);
#endif /* CONFIG_CMA */
int arch_add_memory(int nid, u64 start, u64 size,
struct mhp_params *params)
{
unsigned long start_pfn = PFN_DOWN(start);
unsigned long size_pages = PFN_DOWN(size);
int rc;
if (WARN_ON_ONCE(params->altmap))
return -EINVAL;
if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot))
return -EINVAL;
VM_BUG_ON(!mhp_range_allowed(start, size, true));
rc = vmem_add_mapping(start, size);
if (rc)
return rc;
rc = __add_pages(nid, start_pfn, size_pages, params);
if (rc)
vmem_remove_mapping(start, size);
return rc;
}
void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
__remove_pages(start_pfn, nr_pages, altmap);
vmem_remove_mapping(start, size);
}
#endif /* CONFIG_MEMORY_HOTPLUG */
| linux-master | arch/s390/mm/init.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bitfield.h>
#include <linux/extable.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/panic.h>
#include <asm/asm-extable.h>
#include <asm/extable.h>
const struct exception_table_entry *s390_search_extables(unsigned long addr)
{
const struct exception_table_entry *fixup;
size_t num;
fixup = search_exception_tables(addr);
if (fixup)
return fixup;
num = __stop_amode31_ex_table - __start_amode31_ex_table;
return search_extable(__start_amode31_ex_table, num, addr);
}
static bool ex_handler_fixup(const struct exception_table_entry *ex, struct pt_regs *regs)
{
regs->psw.addr = extable_fixup(ex);
return true;
}
static bool ex_handler_ua_store(const struct exception_table_entry *ex, struct pt_regs *regs)
{
unsigned int reg_err = FIELD_GET(EX_DATA_REG_ERR, ex->data);
regs->gprs[reg_err] = -EFAULT;
regs->psw.addr = extable_fixup(ex);
return true;
}
static bool ex_handler_ua_load_mem(const struct exception_table_entry *ex, struct pt_regs *regs)
{
unsigned int reg_addr = FIELD_GET(EX_DATA_REG_ADDR, ex->data);
unsigned int reg_err = FIELD_GET(EX_DATA_REG_ERR, ex->data);
size_t len = FIELD_GET(EX_DATA_LEN, ex->data);
regs->gprs[reg_err] = -EFAULT;
memset((void *)regs->gprs[reg_addr], 0, len);
regs->psw.addr = extable_fixup(ex);
return true;
}
static bool ex_handler_ua_load_reg(const struct exception_table_entry *ex,
bool pair, struct pt_regs *regs)
{
unsigned int reg_zero = FIELD_GET(EX_DATA_REG_ADDR, ex->data);
unsigned int reg_err = FIELD_GET(EX_DATA_REG_ERR, ex->data);
regs->gprs[reg_err] = -EFAULT;
regs->gprs[reg_zero] = 0;
if (pair)
regs->gprs[reg_zero + 1] = 0;
regs->psw.addr = extable_fixup(ex);
return true;
}
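/*
 * Look up the exception table entry for the faulting instruction and
 * dispatch to the handler matching its type.
 */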
bool fixup_exception(struct pt_regs *regs)
{
const struct exception_table_entry *ex;
ex = s390_search_extables(instruction_pointer(regs));
if (!ex)
return false;
switch (ex->type) {
case EX_TYPE_FIXUP:
return ex_handler_fixup(ex, regs);
case EX_TYPE_BPF:
return ex_handler_bpf(ex, regs);
case EX_TYPE_UA_STORE:
return ex_handler_ua_store(ex, regs);
case EX_TYPE_UA_LOAD_MEM:
return ex_handler_ua_load_mem(ex, regs);
case EX_TYPE_UA_LOAD_REG:
return ex_handler_ua_load_reg(ex, false, regs);
case EX_TYPE_UA_LOAD_REGPAIR:
return ex_handler_ua_load_reg(ex, true, regs);
}
panic("invalid exception table entry");
}
| linux-master | arch/s390/mm/extable.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2006
*/
#include <linux/memory_hotplug.h>
#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <asm/cacheflush.h>
#include <asm/nospec-branch.h>
#include <asm/pgalloc.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/set_memory.h>
static DEFINE_MUTEX(vmem_mutex);
static void __ref *vmem_alloc_pages(unsigned int order)
{
unsigned long size = PAGE_SIZE << order;
if (slab_is_available())
return (void *)__get_free_pages(GFP_KERNEL, order);
return memblock_alloc(size, size);
}
static void vmem_free_pages(unsigned long addr, int order)
{
/* We never expect boot memory to be removed. */
if (!slab_is_available() ||
WARN_ON_ONCE(PageReserved(virt_to_page((void *)addr))))
return;
free_pages(addr, order);
}
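/*
 * Allocate a crst (region/segment) table and initialize all of its
 * entries to @val.
 */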
void *vmem_crst_alloc(unsigned long val)
{
unsigned long *table;
table = vmem_alloc_pages(CRST_ALLOC_ORDER);
if (table)
crst_table_init(table, val);
return table;
}
pte_t __ref *vmem_pte_alloc(void)
{
unsigned long size = PTRS_PER_PTE * sizeof(pte_t);
pte_t *pte;
if (slab_is_available())
pte = (pte_t *) page_table_alloc(&init_mm);
else
pte = (pte_t *) memblock_alloc(size, size);
if (!pte)
return NULL;
memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
return pte;
}
static void vmem_pte_free(unsigned long *table)
{
/* We never expect boot memory to be removed. */
if (!slab_is_available() ||
WARN_ON_ONCE(PageReserved(virt_to_page(table))))
return;
page_table_free(&init_mm, table);
}
#define PAGE_UNUSED 0xFD
/*
 * The unused vmemmap range, which was not yet memset(PAGE_UNUSED), ranges
 * from unused_sub_pmd_start to the next PMD_SIZE boundary.
*/
static unsigned long unused_sub_pmd_start;
static void vmemmap_flush_unused_sub_pmd(void)
{
if (!unused_sub_pmd_start)
return;
memset((void *)unused_sub_pmd_start, PAGE_UNUSED,
ALIGN(unused_sub_pmd_start, PMD_SIZE) - unused_sub_pmd_start);
unused_sub_pmd_start = 0;
}
static void vmemmap_mark_sub_pmd_used(unsigned long start, unsigned long end)
{
/*
* As we expect to add in the same granularity as we remove, it's
* sufficient to mark only some piece used to block the memmap page from
* getting removed (just in case the memmap never gets initialized,
* e.g., because the memory block never gets onlined).
*/
memset((void *)start, 0, sizeof(struct page));
}
static void vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
{
/*
* We only optimize if the new used range directly follows the
* previously unused range (esp., when populating consecutive sections).
*/
if (unused_sub_pmd_start == start) {
unused_sub_pmd_start = end;
if (likely(IS_ALIGNED(unused_sub_pmd_start, PMD_SIZE)))
unused_sub_pmd_start = 0;
return;
}
vmemmap_flush_unused_sub_pmd();
vmemmap_mark_sub_pmd_used(start, end);
}
static void vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end)
{
unsigned long page = ALIGN_DOWN(start, PMD_SIZE);
vmemmap_flush_unused_sub_pmd();
/* Our memmap page may already be filled with PAGE_UNUSED ... */
vmemmap_mark_sub_pmd_used(start, end);
/* Mark the unused parts of the new memmap page PAGE_UNUSED. */
if (!IS_ALIGNED(start, PMD_SIZE))
memset((void *)page, PAGE_UNUSED, start - page);
/*
* We want to avoid memset(PAGE_UNUSED) when populating the vmemmap of
 * consecutive sections. For the last added PMD, remember the last
 * unused range in the populated PMD.
*/
if (!IS_ALIGNED(end, PMD_SIZE))
unused_sub_pmd_start = end;
}
/* Returns true if the PMD is completely unused and can be freed. */
static bool vmemmap_unuse_sub_pmd(unsigned long start, unsigned long end)
{
unsigned long page = ALIGN_DOWN(start, PMD_SIZE);
vmemmap_flush_unused_sub_pmd();
memset((void *)start, PAGE_UNUSED, end - start);
return !memchr_inv((void *)page, PAGE_UNUSED, PMD_SIZE);
}
/* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
static int __ref modify_pte_table(pmd_t *pmd, unsigned long addr,
unsigned long end, bool add, bool direct)
{
unsigned long prot, pages = 0;
int ret = -ENOMEM;
pte_t *pte;
prot = pgprot_val(PAGE_KERNEL);
if (!MACHINE_HAS_NX)
prot &= ~_PAGE_NOEXEC;
pte = pte_offset_kernel(pmd, addr);
for (; addr < end; addr += PAGE_SIZE, pte++) {
if (!add) {
if (pte_none(*pte))
continue;
if (!direct)
vmem_free_pages((unsigned long) pfn_to_virt(pte_pfn(*pte)), 0);
pte_clear(&init_mm, addr, pte);
} else if (pte_none(*pte)) {
if (!direct) {
void *new_page = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);
if (!new_page)
goto out;
set_pte(pte, __pte(__pa(new_page) | prot));
} else {
set_pte(pte, __pte(__pa(addr) | prot));
}
} else {
continue;
}
pages++;
}
ret = 0;
out:
if (direct)
update_page_count(PG_DIRECT_MAP_4K, add ? pages : -pages);
return ret;
}
static void try_free_pte_table(pmd_t *pmd, unsigned long start)
{
pte_t *pte;
int i;
/* We can safely assume this is fully in 1:1 mapping & vmemmap area */
pte = pte_offset_kernel(pmd, start);
for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
if (!pte_none(*pte))
return;
}
vmem_pte_free((unsigned long *) pmd_deref(*pmd));
pmd_clear(pmd);
}
/* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
static int __ref modify_pmd_table(pud_t *pud, unsigned long addr,
unsigned long end, bool add, bool direct)
{
unsigned long next, prot, pages = 0;
int ret = -ENOMEM;
pmd_t *pmd;
pte_t *pte;
prot = pgprot_val(SEGMENT_KERNEL);
if (!MACHINE_HAS_NX)
prot &= ~_SEGMENT_ENTRY_NOEXEC;
pmd = pmd_offset(pud, addr);
for (; addr < end; addr = next, pmd++) {
next = pmd_addr_end(addr, end);
if (!add) {
if (pmd_none(*pmd))
continue;
if (pmd_large(*pmd)) {
if (IS_ALIGNED(addr, PMD_SIZE) &&
IS_ALIGNED(next, PMD_SIZE)) {
if (!direct)
vmem_free_pages(pmd_deref(*pmd), get_order(PMD_SIZE));
pmd_clear(pmd);
pages++;
} else if (!direct && vmemmap_unuse_sub_pmd(addr, next)) {
vmem_free_pages(pmd_deref(*pmd), get_order(PMD_SIZE));
pmd_clear(pmd);
}
continue;
}
} else if (pmd_none(*pmd)) {
if (IS_ALIGNED(addr, PMD_SIZE) &&
IS_ALIGNED(next, PMD_SIZE) &&
MACHINE_HAS_EDAT1 && direct &&
!debug_pagealloc_enabled()) {
set_pmd(pmd, __pmd(__pa(addr) | prot));
pages++;
continue;
} else if (!direct && MACHINE_HAS_EDAT1) {
void *new_page;
/*
* Use 1MB frames for vmemmap if available. We
* always use large frames even if they are only
 * partially used. Otherwise we would also have to
 * allocate page tables, since vmemmap_populate gets
 * called for each section separately.
*/
new_page = vmemmap_alloc_block(PMD_SIZE, NUMA_NO_NODE);
if (new_page) {
set_pmd(pmd, __pmd(__pa(new_page) | prot));
if (!IS_ALIGNED(addr, PMD_SIZE) ||
!IS_ALIGNED(next, PMD_SIZE)) {
vmemmap_use_new_sub_pmd(addr, next);
}
continue;
}
}
pte = vmem_pte_alloc();
if (!pte)
goto out;
pmd_populate(&init_mm, pmd, pte);
} else if (pmd_large(*pmd)) {
if (!direct)
vmemmap_use_sub_pmd(addr, next);
continue;
}
ret = modify_pte_table(pmd, addr, next, add, direct);
if (ret)
goto out;
if (!add)
try_free_pte_table(pmd, addr & PMD_MASK);
}
ret = 0;
out:
if (direct)
update_page_count(PG_DIRECT_MAP_1M, add ? pages : -pages);
return ret;
}
static void try_free_pmd_table(pud_t *pud, unsigned long start)
{
pmd_t *pmd;
int i;
pmd = pmd_offset(pud, start);
for (i = 0; i < PTRS_PER_PMD; i++, pmd++)
if (!pmd_none(*pmd))
return;
vmem_free_pages(pud_deref(*pud), CRST_ALLOC_ORDER);
pud_clear(pud);
}
static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
bool add, bool direct)
{
unsigned long next, prot, pages = 0;
int ret = -ENOMEM;
pud_t *pud;
pmd_t *pmd;
prot = pgprot_val(REGION3_KERNEL);
if (!MACHINE_HAS_NX)
prot &= ~_REGION_ENTRY_NOEXEC;
pud = pud_offset(p4d, addr);
for (; addr < end; addr = next, pud++) {
next = pud_addr_end(addr, end);
if (!add) {
if (pud_none(*pud))
continue;
if (pud_large(*pud)) {
if (IS_ALIGNED(addr, PUD_SIZE) &&
IS_ALIGNED(next, PUD_SIZE)) {
pud_clear(pud);
pages++;
}
continue;
}
} else if (pud_none(*pud)) {
if (IS_ALIGNED(addr, PUD_SIZE) &&
IS_ALIGNED(next, PUD_SIZE) &&
MACHINE_HAS_EDAT2 && direct &&
!debug_pagealloc_enabled()) {
set_pud(pud, __pud(__pa(addr) | prot));
pages++;
continue;
}
pmd = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
if (!pmd)
goto out;
pud_populate(&init_mm, pud, pmd);
} else if (pud_large(*pud)) {
continue;
}
ret = modify_pmd_table(pud, addr, next, add, direct);
if (ret)
goto out;
if (!add)
try_free_pmd_table(pud, addr & PUD_MASK);
}
ret = 0;
out:
if (direct)
update_page_count(PG_DIRECT_MAP_2G, add ? pages : -pages);
return ret;
}
static void try_free_pud_table(p4d_t *p4d, unsigned long start)
{
pud_t *pud;
int i;
pud = pud_offset(p4d, start);
for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
if (!pud_none(*pud))
return;
}
vmem_free_pages(p4d_deref(*p4d), CRST_ALLOC_ORDER);
p4d_clear(p4d);
}
static int modify_p4d_table(pgd_t *pgd, unsigned long addr, unsigned long end,
bool add, bool direct)
{
unsigned long next;
int ret = -ENOMEM;
p4d_t *p4d;
pud_t *pud;
p4d = p4d_offset(pgd, addr);
for (; addr < end; addr = next, p4d++) {
next = p4d_addr_end(addr, end);
if (!add) {
if (p4d_none(*p4d))
continue;
} else if (p4d_none(*p4d)) {
pud = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
if (!pud)
goto out;
p4d_populate(&init_mm, p4d, pud);
}
ret = modify_pud_table(p4d, addr, next, add, direct);
if (ret)
goto out;
if (!add)
try_free_pud_table(p4d, addr & P4D_MASK);
}
ret = 0;
out:
return ret;
}
static void try_free_p4d_table(pgd_t *pgd, unsigned long start)
{
p4d_t *p4d;
int i;
p4d = p4d_offset(pgd, start);
for (i = 0; i < PTRS_PER_P4D; i++, p4d++) {
if (!p4d_none(*p4d))
return;
}
vmem_free_pages(pgd_deref(*pgd), CRST_ALLOC_ORDER);
pgd_clear(pgd);
}
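/*
 * Walk the kernel page tables from the pgd down and add or remove
 * mappings for [start, end). @direct selects the 1:1 mapping (using
 * large pages where possible) as opposed to the vmemmap; on removal,
 * page tables that became empty are freed and the TLB is flushed.
 */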
static int modify_pagetable(unsigned long start, unsigned long end, bool add,
bool direct)
{
unsigned long addr, next;
int ret = -ENOMEM;
pgd_t *pgd;
p4d_t *p4d;
if (WARN_ON_ONCE(!PAGE_ALIGNED(start | end)))
return -EINVAL;
/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
if (WARN_ON_ONCE(end > VMALLOC_START))
return -EINVAL;
for (addr = start; addr < end; addr = next) {
next = pgd_addr_end(addr, end);
pgd = pgd_offset_k(addr);
if (!add) {
if (pgd_none(*pgd))
continue;
} else if (pgd_none(*pgd)) {
p4d = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
if (!p4d)
goto out;
pgd_populate(&init_mm, pgd, p4d);
}
ret = modify_p4d_table(pgd, addr, next, add, direct);
if (ret)
goto out;
if (!add)
try_free_p4d_table(pgd, addr & PGDIR_MASK);
}
ret = 0;
out:
if (!add)
flush_tlb_kernel_range(start, end);
return ret;
}
static int add_pagetable(unsigned long start, unsigned long end, bool direct)
{
return modify_pagetable(start, end, true, direct);
}
static int remove_pagetable(unsigned long start, unsigned long end, bool direct)
{
return modify_pagetable(start, end, false, direct);
}
/*
* Add a physical memory range to the 1:1 mapping.
*/
static int vmem_add_range(unsigned long start, unsigned long size)
{
start = (unsigned long)__va(start);
return add_pagetable(start, start + size, true);
}
/*
* Remove a physical memory range from the 1:1 mapping.
*/
static void vmem_remove_range(unsigned long start, unsigned long size)
{
start = (unsigned long)__va(start);
remove_pagetable(start, start + size, true);
}
/*
* Add a backed mem_map array to the virtual mem_map array.
*/
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
struct vmem_altmap *altmap)
{
int ret;
mutex_lock(&vmem_mutex);
/* We don't care about the node, just use NUMA_NO_NODE on allocations */
ret = add_pagetable(start, end, false);
if (ret)
remove_pagetable(start, end, false);
mutex_unlock(&vmem_mutex);
return ret;
}
void vmemmap_free(unsigned long start, unsigned long end,
struct vmem_altmap *altmap)
{
mutex_lock(&vmem_mutex);
remove_pagetable(start, end, false);
mutex_unlock(&vmem_mutex);
}
void vmem_remove_mapping(unsigned long start, unsigned long size)
{
mutex_lock(&vmem_mutex);
vmem_remove_range(start, size);
mutex_unlock(&vmem_mutex);
}
struct range arch_get_mappable_range(void)
{
struct range mhp_range;
mhp_range.start = 0;
mhp_range.end = max_mappable - 1;
return mhp_range;
}
int vmem_add_mapping(unsigned long start, unsigned long size)
{
struct range range = arch_get_mappable_range();
int ret;
if (start < range.start ||
start + size > range.end + 1 ||
start + size < start)
return -ERANGE;
mutex_lock(&vmem_mutex);
ret = vmem_add_range(start, size);
if (ret)
vmem_remove_range(start, size);
mutex_unlock(&vmem_mutex);
return ret;
}
/*
* Allocate new or return existing page-table entry, but do not map it
 * to any physical address. If missing, allocate the segment- and region-
 * table entries along the way. Meeting a large segment- or region-table entry
* while traversing is an error, since the function is expected to be
* called against virtual regions reserved for 4KB mappings only.
*/
pte_t *vmem_get_alloc_pte(unsigned long addr, bool alloc)
{
pte_t *ptep = NULL;
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
pgd = pgd_offset_k(addr);
if (pgd_none(*pgd)) {
if (!alloc)
goto out;
p4d = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
if (!p4d)
goto out;
pgd_populate(&init_mm, pgd, p4d);
}
p4d = p4d_offset(pgd, addr);
if (p4d_none(*p4d)) {
if (!alloc)
goto out;
pud = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
if (!pud)
goto out;
p4d_populate(&init_mm, p4d, pud);
}
pud = pud_offset(p4d, addr);
if (pud_none(*pud)) {
if (!alloc)
goto out;
pmd = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
if (!pmd)
goto out;
pud_populate(&init_mm, pud, pmd);
} else if (WARN_ON_ONCE(pud_large(*pud))) {
goto out;
}
pmd = pmd_offset(pud, addr);
if (pmd_none(*pmd)) {
if (!alloc)
goto out;
pte = vmem_pte_alloc();
if (!pte)
goto out;
pmd_populate(&init_mm, pmd, pte);
} else if (WARN_ON_ONCE(pmd_large(*pmd))) {
goto out;
}
ptep = pte_offset_kernel(pmd, addr);
out:
return ptep;
}
int __vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot, bool alloc)
{
pte_t *ptep, pte;
if (!IS_ALIGNED(addr, PAGE_SIZE))
return -EINVAL;
ptep = vmem_get_alloc_pte(addr, alloc);
if (!ptep)
return -ENOMEM;
__ptep_ipte(addr, ptep, 0, 0, IPTE_GLOBAL);
pte = mk_pte_phys(phys, prot);
set_pte(ptep, pte);
return 0;
}
int vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot)
{
int rc;
mutex_lock(&vmem_mutex);
rc = __vmem_map_4k_page(addr, phys, prot, true);
mutex_unlock(&vmem_mutex);
return rc;
}
void vmem_unmap_4k_page(unsigned long addr)
{
pte_t *ptep;
mutex_lock(&vmem_mutex);
ptep = virt_to_kpte(addr);
__ptep_ipte(addr, ptep, 0, 0, IPTE_GLOBAL);
pte_clear(&init_mm, addr, ptep);
mutex_unlock(&vmem_mutex);
}
void __init vmem_map_init(void)
{
__set_memory_rox(_stext, _etext);
__set_memory_ro(_etext, __end_rodata);
__set_memory_rox(_sinittext, _einittext);
__set_memory_rox(__stext_amode31, __etext_amode31);
/*
 * If the BEAR-enhancement facility is not installed, the first
* prefix page is used to return to the previous context with
* an LPSWE instruction and therefore must be executable.
*/
if (!static_key_enabled(&cpu_has_bear))
set_memory_x(0, 1);
if (debug_pagealloc_enabled()) {
/*
* Use RELOC_HIDE() as long as __va(0) translates to NULL,
* since performing pointer arithmetic on a NULL pointer
* has undefined behavior and generates compiler warnings.
*/
__set_memory_4k(__va(0), RELOC_HIDE(__va(0), ident_map_size));
}
if (MACHINE_HAS_NX)
ctl_set_bit(0, 20);
pr_info("Write protected kernel read-only data: %luk\n",
(unsigned long)(__end_rodata - _stext) >> 10);
}
| linux-master | arch/s390/mm/vmem.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2008
*
* Guest page hinting for unused pages.
*
* Author(s): Martin Schwidefsky <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <asm/asm-extable.h>
#include <asm/facility.h>
#include <asm/page-states.h>
static int cmma_flag = 1;
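/* Parse the "cmma=" kernel command line parameter (on/off). */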
static int __init cmma(char *str)
{
bool enabled;
if (!kstrtobool(str, &enabled))
cmma_flag = enabled;
return 1;
}
__setup("cmma=", cmma);
static inline int cmma_test_essa(void)
{
unsigned long tmp = 0;
int rc = -EOPNOTSUPP;
/* test ESSA_GET_STATE */
asm volatile(
" .insn rrf,0xb9ab0000,%[tmp],%[tmp],%[cmd],0\n"
"0: la %[rc],0\n"
"1:\n"
EX_TABLE(0b,1b)
: [rc] "+&d" (rc), [tmp] "+&d" (tmp)
: [cmd] "i" (ESSA_GET_STATE));
return rc;
}
void __init cmma_init(void)
{
if (!cmma_flag)
return;
if (cmma_test_essa()) {
cmma_flag = 0;
return;
}
if (test_facility(147))
cmma_flag = 2;
}
static inline void set_page_unused(struct page *page, int order)
{
int i, rc;
for (i = 0; i < (1 << order); i++)
asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
: "=&d" (rc)
: "a" (page_to_phys(page + i)),
"i" (ESSA_SET_UNUSED));
}
static inline void set_page_stable_dat(struct page *page, int order)
{
int i, rc;
for (i = 0; i < (1 << order); i++)
asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
: "=&d" (rc)
: "a" (page_to_phys(page + i)),
"i" (ESSA_SET_STABLE));
}
static inline void set_page_stable_nodat(struct page *page, int order)
{
int i, rc;
for (i = 0; i < (1 << order); i++)
asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
: "=&d" (rc)
: "a" (page_to_phys(page + i)),
"i" (ESSA_SET_STABLE_NODAT));
}
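/*
 * The mark_kernel_* walkers below set PG_arch_1 on all pages that hold
 * kernel page tables, so that cmma_init_nodat() can skip them when
 * marking pages stable/no-dat.
 */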
static void mark_kernel_pmd(pud_t *pud, unsigned long addr, unsigned long end)
{
unsigned long next;
struct page *page;
pmd_t *pmd;
pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
if (pmd_none(*pmd) || pmd_large(*pmd))
continue;
page = phys_to_page(pmd_val(*pmd));
set_bit(PG_arch_1, &page->flags);
} while (pmd++, addr = next, addr != end);
}
static void mark_kernel_pud(p4d_t *p4d, unsigned long addr, unsigned long end)
{
unsigned long next;
struct page *page;
pud_t *pud;
int i;
pud = pud_offset(p4d, addr);
do {
next = pud_addr_end(addr, end);
if (pud_none(*pud) || pud_large(*pud))
continue;
if (!pud_folded(*pud)) {
page = phys_to_page(pud_val(*pud));
for (i = 0; i < 3; i++)
set_bit(PG_arch_1, &page[i].flags);
}
mark_kernel_pmd(pud, addr, next);
} while (pud++, addr = next, addr != end);
}
static void mark_kernel_p4d(pgd_t *pgd, unsigned long addr, unsigned long end)
{
unsigned long next;
struct page *page;
p4d_t *p4d;
int i;
p4d = p4d_offset(pgd, addr);
do {
next = p4d_addr_end(addr, end);
if (p4d_none(*p4d))
continue;
if (!p4d_folded(*p4d)) {
page = phys_to_page(p4d_val(*p4d));
for (i = 0; i < 3; i++)
set_bit(PG_arch_1, &page[i].flags);
}
mark_kernel_pud(p4d, addr, next);
} while (p4d++, addr = next, addr != end);
}
static void mark_kernel_pgd(void)
{
unsigned long addr, next;
struct page *page;
pgd_t *pgd;
int i;
addr = 0;
pgd = pgd_offset_k(addr);
do {
next = pgd_addr_end(addr, MODULES_END);
if (pgd_none(*pgd))
continue;
if (!pgd_folded(*pgd)) {
page = phys_to_page(pgd_val(*pgd));
for (i = 0; i < 3; i++)
set_bit(PG_arch_1, &page[i].flags);
}
mark_kernel_p4d(pgd, addr, next);
} while (pgd++, addr = next, addr != MODULES_END);
}
void __init cmma_init_nodat(void)
{
struct page *page;
unsigned long start, end, ix;
int i;
if (cmma_flag < 2)
return;
/* Mark pages used in kernel page tables */
mark_kernel_pgd();
/* Set all kernel pages not used for page tables to stable/no-dat */
for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
page = pfn_to_page(start);
for (ix = start; ix < end; ix++, page++) {
if (__test_and_clear_bit(PG_arch_1, &page->flags))
continue; /* skip page table pages */
if (!list_empty(&page->lru))
continue; /* skip free pages */
set_page_stable_nodat(page, 0);
}
}
}
void arch_free_page(struct page *page, int order)
{
if (!cmma_flag)
return;
set_page_unused(page, order);
}
void arch_alloc_page(struct page *page, int order)
{
if (!cmma_flag)
return;
if (cmma_flag < 2)
set_page_stable_dat(page, order);
else
set_page_stable_nodat(page, order);
}
void arch_set_page_dat(struct page *page, int order)
{
if (!cmma_flag)
return;
set_page_stable_dat(page, order);
}
| linux-master | arch/s390/mm/page-states.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2007, 2011
* Author(s): Martin Schwidefsky <[email protected]>
*/
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/sysctl.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/page-states.h>
pgprot_t pgprot_writecombine(pgprot_t prot)
{
/*
* mio_wb_bit_mask may be set on a different CPU, but it is only set
* once at init and only read afterwards.
*/
return __pgprot(pgprot_val(prot) | mio_wb_bit_mask);
}
EXPORT_SYMBOL_GPL(pgprot_writecombine);
pgprot_t pgprot_writethrough(pgprot_t prot)
{
/*
* mio_wb_bit_mask may be set on a different CPU, but it is only set
* once at init and only read afterwards.
*/
return __pgprot(pgprot_val(prot) & ~mio_wb_bit_mask);
}
EXPORT_SYMBOL_GPL(pgprot_writethrough);
static inline void ptep_ipte_local(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, int nodat)
{
unsigned long opt, asce;
if (MACHINE_HAS_TLB_GUEST) {
opt = 0;
asce = READ_ONCE(mm->context.gmap_asce);
if (asce == 0UL || nodat)
opt |= IPTE_NODAT;
if (asce != -1UL) {
asce = asce ? : mm->context.asce;
opt |= IPTE_GUEST_ASCE;
}
__ptep_ipte(addr, ptep, opt, asce, IPTE_LOCAL);
} else {
__ptep_ipte(addr, ptep, 0, 0, IPTE_LOCAL);
}
}
static inline void ptep_ipte_global(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, int nodat)
{
unsigned long opt, asce;
if (MACHINE_HAS_TLB_GUEST) {
opt = 0;
asce = READ_ONCE(mm->context.gmap_asce);
if (asce == 0UL || nodat)
opt |= IPTE_NODAT;
if (asce != -1UL) {
asce = asce ? : mm->context.asce;
opt |= IPTE_GUEST_ASCE;
}
__ptep_ipte(addr, ptep, opt, asce, IPTE_GLOBAL);
} else {
__ptep_ipte(addr, ptep, 0, 0, IPTE_GLOBAL);
}
}
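/*
 * Flush a pte immediately with ipte, using the local form when the mm
 * is attached to this CPU only. The "lazy" variant below instead marks
 * the pte invalid and defers the TLB flush when possible.
 */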
static inline pte_t ptep_flush_direct(struct mm_struct *mm,
unsigned long addr, pte_t *ptep,
int nodat)
{
pte_t old;
old = *ptep;
if (unlikely(pte_val(old) & _PAGE_INVALID))
return old;
atomic_inc(&mm->context.flush_count);
if (MACHINE_HAS_TLB_LC &&
cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
ptep_ipte_local(mm, addr, ptep, nodat);
else
ptep_ipte_global(mm, addr, ptep, nodat);
atomic_dec(&mm->context.flush_count);
return old;
}
static inline pte_t ptep_flush_lazy(struct mm_struct *mm,
unsigned long addr, pte_t *ptep,
int nodat)
{
pte_t old;
old = *ptep;
if (unlikely(pte_val(old) & _PAGE_INVALID))
return old;
atomic_inc(&mm->context.flush_count);
if (cpumask_equal(&mm->context.cpu_attach_mask,
cpumask_of(smp_processor_id()))) {
set_pte(ptep, set_pte_bit(*ptep, __pgprot(_PAGE_INVALID)));
mm->context.flush_mm = 1;
} else
ptep_ipte_global(mm, addr, ptep, nodat);
atomic_dec(&mm->context.flush_count);
return old;
}
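/*
 * Lock the pgste that is located behind the pte in the page table
 * (at ptep + PTRS_PER_PTE) by atomically setting its PCL bit.
 */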
static inline pgste_t pgste_get_lock(pte_t *ptep)
{
unsigned long new = 0;
#ifdef CONFIG_PGSTE
unsigned long old;
asm(
" lg %0,%2\n"
"0: lgr %1,%0\n"
" nihh %0,0xff7f\n" /* clear PCL bit in old */
" oihh %1,0x0080\n" /* set PCL bit in new */
" csg %0,%1,%2\n"
" jl 0b\n"
: "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
: "Q" (ptep[PTRS_PER_PTE]) : "cc", "memory");
#endif
return __pgste(new);
}
static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
asm(
" nihh %1,0xff7f\n" /* clear PCL bit */
" stg %1,%0\n"
: "=Q" (ptep[PTRS_PER_PTE])
: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
: "cc", "memory");
#endif
}
static inline pgste_t pgste_get(pte_t *ptep)
{
unsigned long pgste = 0;
#ifdef CONFIG_PGSTE
pgste = *(unsigned long *)(ptep + PTRS_PER_PTE);
#endif
return __pgste(pgste);
}
static inline void pgste_set(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
*(pgste_t *)(ptep + PTRS_PER_PTE) = pgste;
#endif
}
static inline pgste_t pgste_update_all(pte_t pte, pgste_t pgste,
struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
unsigned long address, bits, skey;
if (!mm_uses_skeys(mm) || pte_val(pte) & _PAGE_INVALID)
return pgste;
address = pte_val(pte) & PAGE_MASK;
skey = (unsigned long) page_get_storage_key(address);
bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
/* Transfer page changed & referenced bit to guest bits in pgste */
pgste_val(pgste) |= bits << 48; /* GR bit & GC bit */
/* Copy page access key and fetch protection bit to pgste */
pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
pgste_val(pgste) |= (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
#endif
return pgste;
}
static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry,
struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
unsigned long address;
unsigned long nkey;
if (!mm_uses_skeys(mm) || pte_val(entry) & _PAGE_INVALID)
return;
VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
address = pte_val(entry) & PAGE_MASK;
/*
* Set page access key and fetch protection bit from pgste.
 * The guest C/R information is still in the PGSTE; set the real
 * key C/R to 0.
*/
nkey = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
nkey |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
page_set_storage_key(address, nkey, 0);
#endif
}
static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
{
#ifdef CONFIG_PGSTE
if ((pte_val(entry) & _PAGE_PRESENT) &&
(pte_val(entry) & _PAGE_WRITE) &&
!(pte_val(entry) & _PAGE_INVALID)) {
if (!MACHINE_HAS_ESOP) {
/*
 * Without enhanced suppression-on-protection, force
* the dirty bit on for all writable ptes.
*/
entry = set_pte_bit(entry, __pgprot(_PAGE_DIRTY));
entry = clear_pte_bit(entry, __pgprot(_PAGE_PROTECT));
}
if (!(pte_val(entry) & _PAGE_PROTECT))
/* This pte allows write access, set user-dirty */
pgste_val(pgste) |= PGSTE_UC_BIT;
}
#endif
set_pte(ptep, entry);
return pgste;
}
static inline pgste_t pgste_pte_notify(struct mm_struct *mm,
unsigned long addr,
pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
unsigned long bits;
bits = pgste_val(pgste) & (PGSTE_IN_BIT | PGSTE_VSIE_BIT);
if (bits) {
pgste_val(pgste) ^= bits;
ptep_notify(mm, addr, ptep, bits);
}
#endif
return pgste;
}
static inline pgste_t ptep_xchg_start(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
pgste_t pgste = __pgste(0);
if (mm_has_pgste(mm)) {
pgste = pgste_get_lock(ptep);
pgste = pgste_pte_notify(mm, addr, ptep, pgste);
}
return pgste;
}
static inline pte_t ptep_xchg_commit(struct mm_struct *mm,
unsigned long addr, pte_t *ptep,
pgste_t pgste, pte_t old, pte_t new)
{
if (mm_has_pgste(mm)) {
if (pte_val(old) & _PAGE_INVALID)
pgste_set_key(ptep, pgste, new, mm);
if (pte_val(new) & _PAGE_INVALID) {
pgste = pgste_update_all(old, pgste, mm);
if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) ==
_PGSTE_GPS_USAGE_UNUSED)
old = set_pte_bit(old, __pgprot(_PAGE_UNUSED));
}
pgste = pgste_set_pte(ptep, pgste, new);
pgste_set_unlock(ptep, pgste);
} else {
set_pte(ptep, new);
}
return old;
}
pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t new)
{
pgste_t pgste;
pte_t old;
int nodat;
preempt_disable();
pgste = ptep_xchg_start(mm, addr, ptep);
nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
old = ptep_flush_direct(mm, addr, ptep, nodat);
old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
preempt_enable();
return old;
}
EXPORT_SYMBOL(ptep_xchg_direct);
/*
 * Caller must check that the new PTE only differs in the _PAGE_PROTECT HW bit, so that
* RDP can be used instead of IPTE. See also comments at pte_allow_rdp().
*/
void ptep_reset_dat_prot(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
pte_t new)
{
preempt_disable();
atomic_inc(&mm->context.flush_count);
if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
__ptep_rdp(addr, ptep, 0, 0, 1);
else
__ptep_rdp(addr, ptep, 0, 0, 0);
/*
* PTE is not invalidated by RDP, only _PAGE_PROTECT is cleared. That
* means it is still valid and active, and must not be changed according
* to the architecture. But writing a new value that only differs in SW
* bits is allowed.
*/
set_pte(ptep, new);
atomic_dec(&mm->context.flush_count);
preempt_enable();
}
EXPORT_SYMBOL(ptep_reset_dat_prot);
pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t new)
{
pgste_t pgste;
pte_t old;
int nodat;
preempt_disable();
pgste = ptep_xchg_start(mm, addr, ptep);
nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
old = ptep_flush_lazy(mm, addr, ptep, nodat);
old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
preempt_enable();
return old;
}
EXPORT_SYMBOL(ptep_xchg_lazy);
pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
pte_t *ptep)
{
pgste_t pgste;
pte_t old;
int nodat;
struct mm_struct *mm = vma->vm_mm;
preempt_disable();
pgste = ptep_xchg_start(mm, addr, ptep);
nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
old = ptep_flush_lazy(mm, addr, ptep, nodat);
if (mm_has_pgste(mm)) {
pgste = pgste_update_all(old, pgste, mm);
pgste_set(ptep, pgste);
}
return old;
}
void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
pte_t *ptep, pte_t old_pte, pte_t pte)
{
pgste_t pgste;
struct mm_struct *mm = vma->vm_mm;
if (!MACHINE_HAS_NX)
pte = clear_pte_bit(pte, __pgprot(_PAGE_NOEXEC));
if (mm_has_pgste(mm)) {
pgste = pgste_get(ptep);
pgste_set_key(ptep, pgste, pte, mm);
pgste = pgste_set_pte(ptep, pgste, pte);
pgste_set_unlock(ptep, pgste);
} else {
set_pte(ptep, pte);
}
preempt_enable();
}
static inline void pmdp_idte_local(struct mm_struct *mm,
unsigned long addr, pmd_t *pmdp)
{
if (MACHINE_HAS_TLB_GUEST)
__pmdp_idte(addr, pmdp, IDTE_NODAT | IDTE_GUEST_ASCE,
mm->context.asce, IDTE_LOCAL);
else
__pmdp_idte(addr, pmdp, 0, 0, IDTE_LOCAL);
if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
gmap_pmdp_idte_local(mm, addr);
}
static inline void pmdp_idte_global(struct mm_struct *mm,
unsigned long addr, pmd_t *pmdp)
{
if (MACHINE_HAS_TLB_GUEST) {
__pmdp_idte(addr, pmdp, IDTE_NODAT | IDTE_GUEST_ASCE,
mm->context.asce, IDTE_GLOBAL);
if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
gmap_pmdp_idte_global(mm, addr);
} else if (MACHINE_HAS_IDTE) {
__pmdp_idte(addr, pmdp, 0, 0, IDTE_GLOBAL);
if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
gmap_pmdp_idte_global(mm, addr);
} else {
__pmdp_csp(pmdp);
if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
gmap_pmdp_csp(mm, addr);
}
}
static inline pmd_t pmdp_flush_direct(struct mm_struct *mm,
unsigned long addr, pmd_t *pmdp)
{
pmd_t old;
old = *pmdp;
if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
return old;
atomic_inc(&mm->context.flush_count);
if (MACHINE_HAS_TLB_LC &&
cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
pmdp_idte_local(mm, addr, pmdp);
else
pmdp_idte_global(mm, addr, pmdp);
atomic_dec(&mm->context.flush_count);
return old;
}
static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
unsigned long addr, pmd_t *pmdp)
{
pmd_t old;
old = *pmdp;
if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
return old;
atomic_inc(&mm->context.flush_count);
if (cpumask_equal(&mm->context.cpu_attach_mask,
cpumask_of(smp_processor_id()))) {
set_pmd(pmdp, set_pmd_bit(*pmdp, __pgprot(_SEGMENT_ENTRY_INVALID)));
mm->context.flush_mm = 1;
if (mm_has_pgste(mm))
gmap_pmdp_invalidate(mm, addr);
} else {
pmdp_idte_global(mm, addr, pmdp);
}
atomic_dec(&mm->context.flush_count);
return old;
}
#ifdef CONFIG_PGSTE
static int pmd_lookup(struct mm_struct *mm, unsigned long addr, pmd_t **pmdp)
{
struct vm_area_struct *vma;
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
/* We need a valid VMA, otherwise this is clearly a fault. */
vma = vma_lookup(mm, addr);
if (!vma)
return -EFAULT;
pgd = pgd_offset(mm, addr);
if (!pgd_present(*pgd))
return -ENOENT;
p4d = p4d_offset(pgd, addr);
if (!p4d_present(*p4d))
return -ENOENT;
pud = pud_offset(p4d, addr);
if (!pud_present(*pud))
return -ENOENT;
/* Large PUDs are not supported yet. */
if (pud_large(*pud))
return -EFAULT;
*pmdp = pmd_offset(pud, addr);
return 0;
}
#endif
pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t new)
{
pmd_t old;
preempt_disable();
old = pmdp_flush_direct(mm, addr, pmdp);
set_pmd(pmdp, new);
preempt_enable();
return old;
}
EXPORT_SYMBOL(pmdp_xchg_direct);
pmd_t pmdp_xchg_lazy(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t new)
{
pmd_t old;
preempt_disable();
old = pmdp_flush_lazy(mm, addr, pmdp);
set_pmd(pmdp, new);
preempt_enable();
return old;
}
EXPORT_SYMBOL(pmdp_xchg_lazy);
static inline void pudp_idte_local(struct mm_struct *mm,
unsigned long addr, pud_t *pudp)
{
if (MACHINE_HAS_TLB_GUEST)
__pudp_idte(addr, pudp, IDTE_NODAT | IDTE_GUEST_ASCE,
mm->context.asce, IDTE_LOCAL);
else
__pudp_idte(addr, pudp, 0, 0, IDTE_LOCAL);
}
static inline void pudp_idte_global(struct mm_struct *mm,
unsigned long addr, pud_t *pudp)
{
if (MACHINE_HAS_TLB_GUEST)
__pudp_idte(addr, pudp, IDTE_NODAT | IDTE_GUEST_ASCE,
mm->context.asce, IDTE_GLOBAL);
else if (MACHINE_HAS_IDTE)
__pudp_idte(addr, pudp, 0, 0, IDTE_GLOBAL);
else
/*
* Invalid bit position is the same for pmd and pud, so we can
 * re-use __pmdp_csp() here
*/
__pmdp_csp((pmd_t *) pudp);
}
static inline pud_t pudp_flush_direct(struct mm_struct *mm,
unsigned long addr, pud_t *pudp)
{
pud_t old;
old = *pudp;
if (pud_val(old) & _REGION_ENTRY_INVALID)
return old;
atomic_inc(&mm->context.flush_count);
if (MACHINE_HAS_TLB_LC &&
cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
pudp_idte_local(mm, addr, pudp);
else
pudp_idte_global(mm, addr, pudp);
atomic_dec(&mm->context.flush_count);
return old;
}
pud_t pudp_xchg_direct(struct mm_struct *mm, unsigned long addr,
pud_t *pudp, pud_t new)
{
pud_t old;
preempt_disable();
old = pudp_flush_direct(mm, addr, pudp);
set_pud(pudp, new);
preempt_enable();
return old;
}
EXPORT_SYMBOL(pudp_xchg_direct);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
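/*
 * Deposit a preallocated page table for a transparent huge page pmd.
 * Deposited tables are kept on a list headed by pmd_huge_pte() and are
 * handed back by pgtable_trans_huge_withdraw() in FIFO order.
 */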
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
pgtable_t pgtable)
{
struct list_head *lh = (struct list_head *) pgtable;
assert_spin_locked(pmd_lockptr(mm, pmdp));
/* FIFO */
if (!pmd_huge_pte(mm, pmdp))
INIT_LIST_HEAD(lh);
else
list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
pmd_huge_pte(mm, pmdp) = pgtable;
}
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
struct list_head *lh;
pgtable_t pgtable;
pte_t *ptep;
assert_spin_locked(pmd_lockptr(mm, pmdp));
/* FIFO */
pgtable = pmd_huge_pte(mm, pmdp);
lh = (struct list_head *) pgtable;
if (list_empty(lh))
pmd_huge_pte(mm, pmdp) = NULL;
else {
pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
list_del(lh);
}
ptep = (pte_t *) pgtable;
set_pte(ptep, __pte(_PAGE_INVALID));
ptep++;
set_pte(ptep, __pte(_PAGE_INVALID));
return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#ifdef CONFIG_PGSTE
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t entry)
{
pgste_t pgste;
/* the mm_has_pgste() check is done in set_pte_at() */
preempt_disable();
pgste = pgste_get_lock(ptep);
pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
pgste_set_key(ptep, pgste, entry, mm);
pgste = pgste_set_pte(ptep, pgste, entry);
pgste_set_unlock(ptep, pgste);
preempt_enable();
}
void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
pgste_t pgste;
preempt_disable();
pgste = pgste_get_lock(ptep);
pgste_val(pgste) |= PGSTE_IN_BIT;
pgste_set_unlock(ptep, pgste);
preempt_enable();
}
/**
* ptep_force_prot - change access rights of a locked pte
* @mm: pointer to the process mm_struct
* @addr: virtual address in the guest address space
* @ptep: pointer to the page table entry
* @prot: indicates guest access rights: PROT_NONE, PROT_READ or PROT_WRITE
* @bit: pgste bit to set (e.g. for notification)
*
* Returns 0 if the access rights were changed and -EAGAIN if the current
* and requested access rights are incompatible.
*/
int ptep_force_prot(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, int prot, unsigned long bit)
{
pte_t entry;
pgste_t pgste;
int pte_i, pte_p, nodat;
pgste = pgste_get_lock(ptep);
entry = *ptep;
/* Check pte entry after all locks have been acquired */
pte_i = pte_val(entry) & _PAGE_INVALID;
pte_p = pte_val(entry) & _PAGE_PROTECT;
if ((pte_i && (prot != PROT_NONE)) ||
(pte_p && (prot & PROT_WRITE))) {
pgste_set_unlock(ptep, pgste);
return -EAGAIN;
}
/* Change access rights and set pgste bit */
nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
if (prot == PROT_NONE && !pte_i) {
ptep_flush_direct(mm, addr, ptep, nodat);
pgste = pgste_update_all(entry, pgste, mm);
entry = set_pte_bit(entry, __pgprot(_PAGE_INVALID));
}
if (prot == PROT_READ && !pte_p) {
ptep_flush_direct(mm, addr, ptep, nodat);
entry = clear_pte_bit(entry, __pgprot(_PAGE_INVALID));
entry = set_pte_bit(entry, __pgprot(_PAGE_PROTECT));
}
pgste_val(pgste) |= bit;
pgste = pgste_set_pte(ptep, pgste, entry);
pgste_set_unlock(ptep, pgste);
return 0;
}
int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
pte_t *sptep, pte_t *tptep, pte_t pte)
{
pgste_t spgste, tpgste;
pte_t spte, tpte;
int rc = -EAGAIN;
if (!(pte_val(*tptep) & _PAGE_INVALID))
return 0; /* already shadowed */
spgste = pgste_get_lock(sptep);
spte = *sptep;
if (!(pte_val(spte) & _PAGE_INVALID) &&
!((pte_val(spte) & _PAGE_PROTECT) &&
!(pte_val(pte) & _PAGE_PROTECT))) {
pgste_val(spgste) |= PGSTE_VSIE_BIT;
tpgste = pgste_get_lock(tptep);
tpte = __pte((pte_val(spte) & PAGE_MASK) |
(pte_val(pte) & _PAGE_PROTECT));
/* don't touch the storage key - it belongs to parent pgste */
tpgste = pgste_set_pte(tptep, tpgste, tpte);
pgste_set_unlock(tptep, tpgste);
rc = 1;
}
pgste_set_unlock(sptep, spgste);
return rc;
}
void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep)
{
pgste_t pgste;
int nodat;
pgste = pgste_get_lock(ptep);
/* notifier is called by the caller */
nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
ptep_flush_direct(mm, saddr, ptep, nodat);
/* don't touch the storage key - it belongs to parent pgste */
pgste = pgste_set_pte(ptep, pgste, __pte(_PAGE_INVALID));
pgste_set_unlock(ptep, pgste);
}
static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry)
{
if (!non_swap_entry(entry))
dec_mm_counter(mm, MM_SWAPENTS);
else if (is_migration_entry(entry)) {
struct page *page = pfn_swap_entry_to_page(entry);
dec_mm_counter(mm, mm_counter(page));
}
free_swap_and_cache(entry);
}
void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, int reset)
{
unsigned long pgstev;
pgste_t pgste;
pte_t pte;
/* Zap unused and logically-zero pages */
preempt_disable();
pgste = pgste_get_lock(ptep);
pgstev = pgste_val(pgste);
pte = *ptep;
if (!reset && pte_swap(pte) &&
((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED ||
(pgstev & _PGSTE_GPS_ZERO))) {
ptep_zap_swap_entry(mm, pte_to_swp_entry(pte));
pte_clear(mm, addr, ptep);
}
if (reset)
pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
pgste_set_unlock(ptep, pgste);
preempt_enable();
}
void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
unsigned long ptev;
pgste_t pgste;
/* Clear storage key ACC and F, but set R/C */
preempt_disable();
pgste = pgste_get_lock(ptep);
pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
pgste_val(pgste) |= PGSTE_GR_BIT | PGSTE_GC_BIT;
ptev = pte_val(*ptep);
if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 0);
pgste_set_unlock(ptep, pgste);
preempt_enable();
}
/*
 * Test and reset whether a guest page is dirty
*/
bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
pgste_t pgste;
pte_t pte;
bool dirty;
int nodat;
pgste = pgste_get_lock(ptep);
dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
pgste_val(pgste) &= ~PGSTE_UC_BIT;
pte = *ptep;
if (dirty && (pte_val(pte) & _PAGE_PRESENT)) {
pgste = pgste_pte_notify(mm, addr, ptep, pgste);
nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
ptep_ipte_global(mm, addr, ptep, nodat);
if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
pte = set_pte_bit(pte, __pgprot(_PAGE_PROTECT));
else
pte = set_pte_bit(pte, __pgprot(_PAGE_INVALID));
set_pte(ptep, pte);
}
pgste_set_unlock(ptep, pgste);
return dirty;
}
EXPORT_SYMBOL_GPL(ptep_test_and_clear_uc);
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
unsigned char key, bool nq)
{
unsigned long keyul, paddr;
spinlock_t *ptl;
pgste_t old, new;
pmd_t *pmdp;
pte_t *ptep;
/*
* If we don't have a PTE table and if there is no huge page mapped,
* we can ignore attempts to set the key to 0, because it already is 0.
*/
switch (pmd_lookup(mm, addr, &pmdp)) {
case -ENOENT:
return key ? -EFAULT : 0;
case 0:
break;
default:
return -EFAULT;
}
again:
ptl = pmd_lock(mm, pmdp);
if (!pmd_present(*pmdp)) {
spin_unlock(ptl);
return key ? -EFAULT : 0;
}
if (pmd_large(*pmdp)) {
paddr = pmd_val(*pmdp) & HPAGE_MASK;
paddr |= addr & ~HPAGE_MASK;
/*
* Huge pmds need quiescing operations, as they are
* always mapped.
*/
page_set_storage_key(paddr, key, 1);
spin_unlock(ptl);
return 0;
}
spin_unlock(ptl);
ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
if (!ptep)
goto again;
new = old = pgste_get_lock(ptep);
pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
PGSTE_ACC_BITS | PGSTE_FP_BIT);
keyul = (unsigned long) key;
pgste_val(new) |= (keyul & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
pgste_val(new) |= (keyul & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
if (!(pte_val(*ptep) & _PAGE_INVALID)) {
unsigned long bits, skey;
paddr = pte_val(*ptep) & PAGE_MASK;
skey = (unsigned long) page_get_storage_key(paddr);
bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
/* Set storage key ACC and FP */
page_set_storage_key(paddr, skey, !nq);
/* Merge host changed & referenced into pgste */
pgste_val(new) |= bits << 52;
}
/* changing the guest storage key is considered a change of the page */
if ((pgste_val(new) ^ pgste_val(old)) &
(PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT))
pgste_val(new) |= PGSTE_UC_BIT;
pgste_set_unlock(ptep, new);
pte_unmap_unlock(ptep, ptl);
return 0;
}
EXPORT_SYMBOL(set_guest_storage_key);
/*
* Conditionally set a guest storage key (handling csske).
* oldkey will be updated when either mr or mc is set and a pointer is given.
*
* Returns 0 if a guest storage key update wasn't necessary, 1 if the guest
* storage key was updated and -EFAULT on access errors.
*/
int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
unsigned char key, unsigned char *oldkey,
bool nq, bool mr, bool mc)
{
unsigned char tmp, mask = _PAGE_ACC_BITS | _PAGE_FP_BIT;
int rc;
/* we can drop the pgste lock between getting and setting the key */
if (mr | mc) {
rc = get_guest_storage_key(current->mm, addr, &tmp);
if (rc)
return rc;
if (oldkey)
*oldkey = tmp;
if (!mr)
mask |= _PAGE_REFERENCED;
if (!mc)
mask |= _PAGE_CHANGED;
if (!((tmp ^ key) & mask))
return 0;
}
rc = set_guest_storage_key(current->mm, addr, key, nq);
return rc < 0 ? rc : 1;
}
EXPORT_SYMBOL(cond_set_guest_storage_key);
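/*
 * Illustrative usage sketch, not part of the original source: emulate a
 * CSSKE-style conditional key update with the helper above. The function
 * name and the pr_debug() reporting are assumptions for this example.
 */
static int example_cond_set_key(struct mm_struct *mm, unsigned long addr,
				unsigned char new_key, bool mr, bool mc)
{
	unsigned char old_key = 0;
	int rc;

	rc = cond_set_guest_storage_key(mm, addr, new_key, &old_key,
					false, mr, mc);
	/* old_key holds the previous key only when mr or mc is set */
	if (rc >= 0 && (mr || mc))
		pr_debug("previous guest storage key: %02x\n", old_key);
	/* rc == 1: key was updated, rc == 0: no update was necessary */
	return rc;
}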
/*
* Reset a guest reference bit (rrbe), returning the reference and changed bit.
*
* Returns < 0 in case of error, otherwise the cc to be reported to the guest.
*/
int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr)
{
spinlock_t *ptl;
unsigned long paddr;
pgste_t old, new;
pmd_t *pmdp;
pte_t *ptep;
int cc = 0;
/*
* If we don't have a PTE table and if there is no huge page mapped,
* the storage key is 0 and there is nothing for us to do.
*/
switch (pmd_lookup(mm, addr, &pmdp)) {
case -ENOENT:
return 0;
case 0:
break;
default:
return -EFAULT;
}
again:
ptl = pmd_lock(mm, pmdp);
if (!pmd_present(*pmdp)) {
spin_unlock(ptl);
return 0;
}
if (pmd_large(*pmdp)) {
paddr = pmd_val(*pmdp) & HPAGE_MASK;
paddr |= addr & ~HPAGE_MASK;
cc = page_reset_referenced(paddr);
spin_unlock(ptl);
return cc;
}
spin_unlock(ptl);
ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
if (!ptep)
goto again;
new = old = pgste_get_lock(ptep);
/* Reset guest reference bit only */
pgste_val(new) &= ~PGSTE_GR_BIT;
if (!(pte_val(*ptep) & _PAGE_INVALID)) {
paddr = pte_val(*ptep) & PAGE_MASK;
cc = page_reset_referenced(paddr);
/* Merge real referenced bit into host-set */
pgste_val(new) |= ((unsigned long) cc << 53) & PGSTE_HR_BIT;
}
/* Reflect guest's logical view, not physical */
cc |= (pgste_val(old) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 49;
/* Changing the guest storage key is considered a change of the page */
if ((pgste_val(new) ^ pgste_val(old)) & PGSTE_GR_BIT)
pgste_val(new) |= PGSTE_UC_BIT;
pgste_set_unlock(ptep, new);
pte_unmap_unlock(ptep, ptl);
return cc;
}
EXPORT_SYMBOL(reset_guest_reference_bit);
int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
unsigned char *key)
{
unsigned long paddr;
spinlock_t *ptl;
pgste_t pgste;
pmd_t *pmdp;
pte_t *ptep;
/*
* If we don't have a PTE table and if there is no huge page mapped,
* the storage key is 0.
*/
*key = 0;
switch (pmd_lookup(mm, addr, &pmdp)) {
case -ENOENT:
return 0;
case 0:
break;
default:
return -EFAULT;
}
again:
ptl = pmd_lock(mm, pmdp);
if (!pmd_present(*pmdp)) {
spin_unlock(ptl);
return 0;
}
if (pmd_large(*pmdp)) {
paddr = pmd_val(*pmdp) & HPAGE_MASK;
paddr |= addr & ~HPAGE_MASK;
*key = page_get_storage_key(paddr);
spin_unlock(ptl);
return 0;
}
spin_unlock(ptl);
ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
if (!ptep)
goto again;
pgste = pgste_get_lock(ptep);
*key = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
paddr = pte_val(*ptep) & PAGE_MASK;
if (!(pte_val(*ptep) & _PAGE_INVALID))
*key = page_get_storage_key(paddr);
/* Reflect guest's logical view, not physical */
*key |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
pgste_set_unlock(ptep, pgste);
pte_unmap_unlock(ptep, ptl);
return 0;
}
EXPORT_SYMBOL(get_guest_storage_key);
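/*
 * Illustrative sketch, not part of the original source: round-trip a guest
 * storage key, clearing the fetch-protection bit while keeping the access
 * control bits. The helper name is an assumption for this example.
 */
static int example_clear_fetch_protection(struct mm_struct *mm,
					  unsigned long addr)
{
	unsigned char key;
	int rc;

	rc = get_guest_storage_key(mm, addr, &key);
	if (rc)
		return rc;
	/* _PAGE_FP_BIT masks the fetch-protection bit of the key byte */
	return set_guest_storage_key(mm, addr, key & ~_PAGE_FP_BIT, false);
}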
/**
* pgste_perform_essa - perform ESSA actions on the PGSTE.
* @mm: the memory context. It must have PGSTEs, no check is performed here!
* @hva: the host virtual address of the page whose PGSTE is to be processed
* @orc: the specific action to perform, see the ESSA_SET_* macros.
* @oldpte: the PTE will be saved there if the pointer is not NULL.
* @oldpgste: the old PGSTE will be saved there if the pointer is not NULL.
*
* Return: 1 if the page is to be added to the CBRL, otherwise 0,
* or < 0 in case of error. -EINVAL is returned for invalid values
* of orc, -EFAULT for invalid addresses.
*/
int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
unsigned long *oldpte, unsigned long *oldpgste)
{
struct vm_area_struct *vma;
unsigned long pgstev;
spinlock_t *ptl;
pgste_t pgste;
pte_t *ptep;
int res = 0;
WARN_ON_ONCE(orc > ESSA_MAX);
if (unlikely(orc > ESSA_MAX))
return -EINVAL;
vma = vma_lookup(mm, hva);
if (!vma || is_vm_hugetlb_page(vma))
return -EFAULT;
ptep = get_locked_pte(mm, hva, &ptl);
if (unlikely(!ptep))
return -EFAULT;
pgste = pgste_get_lock(ptep);
pgstev = pgste_val(pgste);
if (oldpte)
*oldpte = pte_val(*ptep);
if (oldpgste)
*oldpgste = pgstev;
switch (orc) {
case ESSA_GET_STATE:
break;
case ESSA_SET_STABLE:
pgstev &= ~(_PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT);
pgstev |= _PGSTE_GPS_USAGE_STABLE;
break;
case ESSA_SET_UNUSED:
pgstev &= ~_PGSTE_GPS_USAGE_MASK;
pgstev |= _PGSTE_GPS_USAGE_UNUSED;
if (pte_val(*ptep) & _PAGE_INVALID)
res = 1;
break;
case ESSA_SET_VOLATILE:
pgstev &= ~_PGSTE_GPS_USAGE_MASK;
pgstev |= _PGSTE_GPS_USAGE_VOLATILE;
if (pte_val(*ptep) & _PAGE_INVALID)
res = 1;
break;
case ESSA_SET_POT_VOLATILE:
pgstev &= ~_PGSTE_GPS_USAGE_MASK;
if (!(pte_val(*ptep) & _PAGE_INVALID)) {
pgstev |= _PGSTE_GPS_USAGE_POT_VOLATILE;
break;
}
if (pgstev & _PGSTE_GPS_ZERO) {
pgstev |= _PGSTE_GPS_USAGE_VOLATILE;
break;
}
if (!(pgstev & PGSTE_GC_BIT)) {
pgstev |= _PGSTE_GPS_USAGE_VOLATILE;
res = 1;
break;
}
break;
case ESSA_SET_STABLE_RESIDENT:
pgstev &= ~_PGSTE_GPS_USAGE_MASK;
pgstev |= _PGSTE_GPS_USAGE_STABLE;
/*
* Since the resident state can go away any time after this
* call, we will not make this page resident. We can revisit
* this decision if a guest ever starts using this.
*/
break;
case ESSA_SET_STABLE_IF_RESIDENT:
if (!(pte_val(*ptep) & _PAGE_INVALID)) {
pgstev &= ~_PGSTE_GPS_USAGE_MASK;
pgstev |= _PGSTE_GPS_USAGE_STABLE;
}
break;
case ESSA_SET_STABLE_NODAT:
pgstev &= ~_PGSTE_GPS_USAGE_MASK;
pgstev |= _PGSTE_GPS_USAGE_STABLE | _PGSTE_GPS_NODAT;
break;
default:
/* we should never get here! */
break;
}
/* If we are discarding a page, set it to logical zero */
if (res)
pgstev |= _PGSTE_GPS_ZERO;
pgste_val(pgste) = pgstev;
pgste_set_unlock(ptep, pgste);
pte_unmap_unlock(ptep, ptl);
return res;
}
EXPORT_SYMBOL(pgste_perform_essa);
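/*
 * Illustrative sketch, not part of the original source: mark a guest page
 * stable via ESSA and propagate the CBRL indication to the caller. The
 * function name is an assumption for this example.
 */
static int example_essa_set_stable(struct mm_struct *mm, unsigned long hva)
{
	unsigned long old_pte, old_pgste;
	int rc;

	rc = pgste_perform_essa(mm, hva, ESSA_SET_STABLE, &old_pte, &old_pgste);
	if (rc < 0)
		return rc;	/* -EINVAL or -EFAULT */
	/* rc == 1: add the page to the CBRL, rc == 0: nothing to do */
	return rc;
}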
/**
* set_pgste_bits - set specific PGSTE bits.
* @mm: the memory context. It must have PGSTEs, no check is performed here!
* @hva: the host virtual address of the page whose PGSTE is to be processed
* @bits: a bitmask representing the bits that will be touched
* @value: the values of the bits to be written. Only the bits in the mask
* will be written.
*
* Return: 0 on success, < 0 in case of error.
*/
int set_pgste_bits(struct mm_struct *mm, unsigned long hva,
unsigned long bits, unsigned long value)
{
struct vm_area_struct *vma;
spinlock_t *ptl;
pgste_t new;
pte_t *ptep;
vma = vma_lookup(mm, hva);
if (!vma || is_vm_hugetlb_page(vma))
return -EFAULT;
ptep = get_locked_pte(mm, hva, &ptl);
if (unlikely(!ptep))
return -EFAULT;
new = pgste_get_lock(ptep);
pgste_val(new) &= ~bits;
pgste_val(new) |= value & bits;
pgste_set_unlock(ptep, new);
pte_unmap_unlock(ptep, ptl);
return 0;
}
EXPORT_SYMBOL(set_pgste_bits);
/**
* get_pgste - get the current PGSTE for the given address.
* @mm: the memory context. It must have PGSTEs, no check is performed here!
* @hva: the host virtual address of the page whose PGSTE is to be processed
* @pgstep: will be written with the current PGSTE for the given address.
*
* Return: 0 on success, < 0 in case of error.
*/
int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep)
{
struct vm_area_struct *vma;
spinlock_t *ptl;
pte_t *ptep;
vma = vma_lookup(mm, hva);
if (!vma || is_vm_hugetlb_page(vma))
return -EFAULT;
ptep = get_locked_pte(mm, hva, &ptl);
if (unlikely(!ptep))
return -EFAULT;
*pgstep = pgste_val(pgste_get(ptep));
pte_unmap_unlock(ptep, ptl);
return 0;
}
EXPORT_SYMBOL(get_pgste);
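/*
 * Illustrative sketch, not part of the original source: read the current
 * PGSTE of a page and rewrite only its usage state via set_pgste_bits().
 * The function name and the pr_debug() call are assumptions for this example.
 */
static int example_mark_pgste_unused(struct mm_struct *mm, unsigned long hva)
{
	unsigned long pgstev;
	int rc;

	rc = get_pgste(mm, hva, &pgstev);
	if (rc)
		return rc;
	pr_debug("old pgste: %016lx\n", pgstev);
	/* only the bits in the mask are written, the rest stays untouched */
	return set_pgste_bits(mm, hva, _PGSTE_GPS_USAGE_MASK,
			      _PGSTE_GPS_USAGE_UNUSED);
}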
#endif
| linux-master | arch/s390/mm/pgtable.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/set_memory.h>
#include <linux/ptdump.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/mm.h>
#include <linux/kfence.h>
#include <linux/kasan.h>
#include <asm/ptdump.h>
#include <asm/kasan.h>
#include <asm/abs_lowcore.h>
#include <asm/nospec-branch.h>
#include <asm/sections.h>
#include <asm/maccess.h>
static unsigned long max_addr;
struct addr_marker {
unsigned long start_address;
const char *name;
};
enum address_markers_idx {
IDENTITY_BEFORE_NR = 0,
IDENTITY_BEFORE_END_NR,
AMODE31_START_NR,
AMODE31_END_NR,
KERNEL_START_NR,
KERNEL_END_NR,
#ifdef CONFIG_KFENCE
KFENCE_START_NR,
KFENCE_END_NR,
#endif
IDENTITY_AFTER_NR,
IDENTITY_AFTER_END_NR,
VMEMMAP_NR,
VMEMMAP_END_NR,
VMALLOC_NR,
VMALLOC_END_NR,
MODULES_NR,
MODULES_END_NR,
ABS_LOWCORE_NR,
ABS_LOWCORE_END_NR,
MEMCPY_REAL_NR,
MEMCPY_REAL_END_NR,
#ifdef CONFIG_KASAN
KASAN_SHADOW_START_NR,
KASAN_SHADOW_END_NR,
#endif
};
static struct addr_marker address_markers[] = {
[IDENTITY_BEFORE_NR] = {0, "Identity Mapping Start"},
[IDENTITY_BEFORE_END_NR] = {(unsigned long)_stext, "Identity Mapping End"},
[AMODE31_START_NR] = {0, "Amode31 Area Start"},
[AMODE31_END_NR] = {0, "Amode31 Area End"},
[KERNEL_START_NR] = {(unsigned long)_stext, "Kernel Image Start"},
[KERNEL_END_NR] = {(unsigned long)_end, "Kernel Image End"},
#ifdef CONFIG_KFENCE
[KFENCE_START_NR] = {0, "KFence Pool Start"},
[KFENCE_END_NR] = {0, "KFence Pool End"},
#endif
[IDENTITY_AFTER_NR] = {(unsigned long)_end, "Identity Mapping Start"},
[IDENTITY_AFTER_END_NR] = {0, "Identity Mapping End"},
[VMEMMAP_NR] = {0, "vmemmap Area Start"},
[VMEMMAP_END_NR] = {0, "vmemmap Area End"},
[VMALLOC_NR] = {0, "vmalloc Area Start"},
[VMALLOC_END_NR] = {0, "vmalloc Area End"},
[MODULES_NR] = {0, "Modules Area Start"},
[MODULES_END_NR] = {0, "Modules Area End"},
[ABS_LOWCORE_NR] = {0, "Lowcore Area Start"},
[ABS_LOWCORE_END_NR] = {0, "Lowcore Area End"},
[MEMCPY_REAL_NR] = {0, "Real Memory Copy Area Start"},
[MEMCPY_REAL_END_NR] = {0, "Real Memory Copy Area End"},
#ifdef CONFIG_KASAN
[KASAN_SHADOW_START_NR] = {KASAN_SHADOW_START, "Kasan Shadow Start"},
[KASAN_SHADOW_END_NR] = {KASAN_SHADOW_END, "Kasan Shadow End"},
#endif
{ -1, NULL }
};
struct pg_state {
struct ptdump_state ptdump;
struct seq_file *seq;
int level;
unsigned int current_prot;
bool check_wx;
unsigned long wx_pages;
unsigned long start_address;
const struct addr_marker *marker;
};
#define pt_dump_seq_printf(m, fmt, args...) \
({ \
struct seq_file *__m = (m); \
\
if (__m) \
seq_printf(__m, fmt, ##args); \
})
#define pt_dump_seq_puts(m, fmt) \
({ \
struct seq_file *__m = (m); \
\
if (__m) \
seq_printf(__m, fmt); \
})
static void print_prot(struct seq_file *m, unsigned int pr, int level)
{
static const char * const level_name[] =
{ "ASCE", "PGD", "PUD", "PMD", "PTE" };
pt_dump_seq_printf(m, "%s ", level_name[level]);
if (pr & _PAGE_INVALID) {
pt_dump_seq_printf(m, "I\n");
return;
}
pt_dump_seq_puts(m, (pr & _PAGE_PROTECT) ? "RO " : "RW ");
pt_dump_seq_puts(m, (pr & _PAGE_NOEXEC) ? "NX\n" : "X\n");
}
static void note_prot_wx(struct pg_state *st, unsigned long addr)
{
#ifdef CONFIG_DEBUG_WX
if (!st->check_wx)
return;
if (st->current_prot & _PAGE_INVALID)
return;
if (st->current_prot & _PAGE_PROTECT)
return;
if (st->current_prot & _PAGE_NOEXEC)
return;
/*
* The first lowcore page is W+X if spectre mitigations are using
* trampolines or the BEAR enhancements facility is not installed,
* in which case we have two lpswe instructions in lowcore that need
* to be executable.
*/
if (addr == PAGE_SIZE && (nospec_uses_trampoline() || !static_key_enabled(&cpu_has_bear)))
return;
WARN_ONCE(1, "s390/mm: Found insecure W+X mapping at address %pS\n",
(void *)st->start_address);
st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
#endif /* CONFIG_DEBUG_WX */
}
static void note_page(struct ptdump_state *pt_st, unsigned long addr, int level, u64 val)
{
int width = sizeof(unsigned long) * 2;
static const char units[] = "KMGTPE";
const char *unit = units;
unsigned long delta;
struct pg_state *st;
struct seq_file *m;
unsigned int prot;
st = container_of(pt_st, struct pg_state, ptdump);
m = st->seq;
prot = val & (_PAGE_PROTECT | _PAGE_NOEXEC);
if (level == 4 && (val & _PAGE_INVALID))
prot = _PAGE_INVALID;
/* For pmd_none() & friends val gets passed as zero. */
if (level != 4 && !val)
prot = _PAGE_INVALID;
/* Final flush from generic code. */
if (level == -1)
addr = max_addr;
if (st->level == -1) {
pt_dump_seq_printf(m, "---[ %s ]---\n", st->marker->name);
st->start_address = addr;
st->current_prot = prot;
st->level = level;
} else if (prot != st->current_prot || level != st->level ||
addr >= st->marker[1].start_address) {
note_prot_wx(st, addr);
pt_dump_seq_printf(m, "0x%0*lx-0x%0*lx ",
width, st->start_address,
width, addr);
delta = (addr - st->start_address) >> 10;
while (!(delta & 0x3ff) && unit[1]) {
delta >>= 10;
unit++;
}
pt_dump_seq_printf(m, "%9lu%c ", delta, *unit);
print_prot(m, st->current_prot, st->level);
while (addr >= st->marker[1].start_address) {
st->marker++;
pt_dump_seq_printf(m, "---[ %s ]---\n", st->marker->name);
}
st->start_address = addr;
st->current_prot = prot;
st->level = level;
}
}
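/*
 * Illustrative sketch, not part of the original source: the unit scaling
 * used by note_page() above, shown standalone. A KiB delta is divided by
 * 1024 while it stays a multiple of 1024 and a larger unit exists, so
 * e.g. 2097152 KiB prints as "2G".
 */
static unsigned long example_scale_delta(unsigned long delta_kib,
					 char *unit_out)
{
	static const char units[] = "KMGTPE";
	const char *unit = units;

	while (!(delta_kib & 0x3ff) && unit[1]) {
		delta_kib >>= 10;
		unit++;
	}
	*unit_out = *unit;
	return delta_kib;
}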
#ifdef CONFIG_DEBUG_WX
void ptdump_check_wx(void)
{
struct pg_state st = {
.ptdump = {
.note_page = note_page,
.range = (struct ptdump_range[]) {
{.start = 0, .end = max_addr},
{.start = 0, .end = 0},
}
},
.seq = NULL,
.level = -1,
.current_prot = 0,
.check_wx = true,
.wx_pages = 0,
.start_address = 0,
.marker = (struct addr_marker[]) {
{ .start_address = 0, .name = NULL},
{ .start_address = -1, .name = NULL},
},
};
if (!MACHINE_HAS_NX)
return;
ptdump_walk_pgd(&st.ptdump, &init_mm, NULL);
if (st.wx_pages)
pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found\n", st.wx_pages);
else
pr_info("Checked W+X mappings: passed, no %sW+X pages found\n",
(nospec_uses_trampoline() || !static_key_enabled(&cpu_has_bear)) ?
"unexpected " : "");
}
#endif /* CONFIG_DEBUG_WX */
#ifdef CONFIG_PTDUMP_DEBUGFS
static int ptdump_show(struct seq_file *m, void *v)
{
struct pg_state st = {
.ptdump = {
.note_page = note_page,
.range = (struct ptdump_range[]) {
{.start = 0, .end = max_addr},
{.start = 0, .end = 0},
}
},
.seq = m,
.level = -1,
.current_prot = 0,
.check_wx = false,
.wx_pages = 0,
.start_address = 0,
.marker = address_markers,
};
get_online_mems();
mutex_lock(&cpa_mutex);
ptdump_walk_pgd(&st.ptdump, &init_mm, NULL);
mutex_unlock(&cpa_mutex);
put_online_mems();
return 0;
}
DEFINE_SHOW_ATTRIBUTE(ptdump);
#endif /* CONFIG_PTDUMP_DEBUGFS */
/*
* Heapsort from lib/sort.c is not a stable sorting algorithm; do a simple
* insertion sort to preserve the original order of markers with the same
* start address.
*/
static void sort_address_markers(void)
{
struct addr_marker tmp;
int i, j;
for (i = 1; i < ARRAY_SIZE(address_markers) - 1; i++) {
tmp = address_markers[i];
for (j = i - 1; j >= 0 && address_markers[j].start_address > tmp.start_address; j--)
address_markers[j + 1] = address_markers[j];
address_markers[j + 1] = tmp;
}
}
static int pt_dump_init(void)
{
#ifdef CONFIG_KFENCE
unsigned long kfence_start = (unsigned long)__kfence_pool;
#endif
/*
* Figure out the maximum virtual address being accessible with the
* kernel ASCE. We need this to keep the page table walker functions
* from accessing non-existent entries.
*/
max_addr = (S390_lowcore.kernel_asce & _REGION_ENTRY_TYPE_MASK) >> 2;
max_addr = 1UL << (max_addr * 11 + 31);
address_markers[IDENTITY_AFTER_END_NR].start_address = ident_map_size;
address_markers[AMODE31_START_NR].start_address = (unsigned long)__samode31;
address_markers[AMODE31_END_NR].start_address = (unsigned long)__eamode31;
address_markers[MODULES_NR].start_address = MODULES_VADDR;
address_markers[MODULES_END_NR].start_address = MODULES_END;
address_markers[ABS_LOWCORE_NR].start_address = __abs_lowcore;
address_markers[ABS_LOWCORE_END_NR].start_address = __abs_lowcore + ABS_LOWCORE_MAP_SIZE;
address_markers[MEMCPY_REAL_NR].start_address = __memcpy_real_area;
address_markers[MEMCPY_REAL_END_NR].start_address = __memcpy_real_area + MEMCPY_REAL_SIZE;
address_markers[VMEMMAP_NR].start_address = (unsigned long) vmemmap;
address_markers[VMEMMAP_END_NR].start_address = (unsigned long)vmemmap + vmemmap_size;
address_markers[VMALLOC_NR].start_address = VMALLOC_START;
address_markers[VMALLOC_END_NR].start_address = VMALLOC_END;
#ifdef CONFIG_KFENCE
address_markers[KFENCE_START_NR].start_address = kfence_start;
address_markers[KFENCE_END_NR].start_address = kfence_start + KFENCE_POOL_SIZE;
#endif
sort_address_markers();
#ifdef CONFIG_PTDUMP_DEBUGFS
debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, &ptdump_fops);
#endif /* CONFIG_PTDUMP_DEBUGFS */
return 0;
}
device_initcall(pt_dump_init);
| linux-master | arch/s390/mm/dump_pagetables.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Author(s)......: Carsten Otte <[email protected]>
* Rob M van der Heij <[email protected]>
* Steven Shultz <[email protected]>
* Bugreports.to..: <[email protected]>
* Copyright IBM Corp. 2002, 2004
*/
#define KMSG_COMPONENT "extmem"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/ctype.h>
#include <linux/ioport.h>
#include <linux/refcount.h>
#include <linux/pgtable.h>
#include <asm/diag.h>
#include <asm/page.h>
#include <asm/ebcdic.h>
#include <asm/errno.h>
#include <asm/extmem.h>
#include <asm/cpcmd.h>
#include <asm/setup.h>
#define DCSS_PURGESEG 0x08
#define DCSS_LOADSHRX 0x20
#define DCSS_LOADNSRX 0x24
#define DCSS_FINDSEGX 0x2c
#define DCSS_SEGEXTX 0x38
#define DCSS_FINDSEGA 0x0c
struct qrange {
unsigned long start; /* address; the last byte holds the type */
unsigned long end; /* address; the last byte is reserved */
};
struct qout64 {
unsigned long segstart;
unsigned long segend;
int segcnt;
int segrcnt;
struct qrange range[6];
};
struct qin64 {
char qopcode;
char rsrv1[3];
char qrcode;
char rsrv2[3];
char qname[8];
unsigned int qoutptr;
short int qoutlen;
};
struct dcss_segment {
struct list_head list;
char dcss_name[8];
char res_name[16];
unsigned long start_addr;
unsigned long end;
refcount_t ref_count;
int do_nonshared;
unsigned int vm_segtype;
struct qrange range[6];
int segcnt;
struct resource *res;
};
static DEFINE_MUTEX(dcss_lock);
static LIST_HEAD(dcss_list);
static char *segtype_string[] = { "SW", "EW", "SR", "ER", "SN", "EN", "SC",
"EW/EN-MIXED" };
static int loadshr_scode = DCSS_LOADSHRX;
static int loadnsr_scode = DCSS_LOADNSRX;
static int purgeseg_scode = DCSS_PURGESEG;
static int segext_scode = DCSS_SEGEXTX;
/*
* Create the 8-byte, EBCDIC VM segment name from
* an ASCII name.
*/
static void
dcss_mkname(char *name, char *dcss_name)
{
int i;
for (i = 0; i < 8; i++) {
if (name[i] == '\0')
break;
dcss_name[i] = toupper(name[i]);
}
for (; i < 8; i++)
dcss_name[i] = ' ';
ASCEBC(dcss_name, 8);
}
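/*
 * Illustrative sketch, not part of the original source: "mydcss" is
 * upper-cased and blank-padded to "MYDCSS  " before being converted in
 * place to EBCDIC, since CP expects EBCDIC segment names.
 */
static void example_mkname_usage(void)
{
	char ascii_name[] = "mydcss";
	char dcss_name[8];

	dcss_mkname(ascii_name, dcss_name);
}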
/*
* Search all segments in dcss_list, and return the one
* named *name. If not found, return NULL.
*/
static struct dcss_segment *
segment_by_name (char *name)
{
char dcss_name[9];
struct list_head *l;
struct dcss_segment *tmp, *retval = NULL;
BUG_ON(!mutex_is_locked(&dcss_lock));
dcss_mkname (name, dcss_name);
list_for_each (l, &dcss_list) {
tmp = list_entry (l, struct dcss_segment, list);
if (memcmp(tmp->dcss_name, dcss_name, 8) == 0) {
retval = tmp;
break;
}
}
return retval;
}
/*
* Perform a function on a dcss segment.
*/
static inline int
dcss_diag(int *func, void *parameter,
unsigned long *ret1, unsigned long *ret2)
{
unsigned long rx, ry;
int rc;
rx = (unsigned long) parameter;
ry = (unsigned long) *func;
diag_stat_inc(DIAG_STAT_X064);
asm volatile(
" diag %0,%1,0x64\n"
" ipm %2\n"
" srl %2,28\n"
: "+d" (rx), "+d" (ry), "=d" (rc) : : "cc");
*ret1 = rx;
*ret2 = ry;
return rc;
}
static inline int
dcss_diag_translate_rc (int vm_rc) {
if (vm_rc == 44)
return -ENOENT;
return -EIO;
}
/* Do a diag to get info about a segment.
* Fills the start_addr, end and vm_segtype fields.
*/
static int
query_segment_type (struct dcss_segment *seg)
{
unsigned long dummy, vmrc;
int diag_cc, rc, i;
struct qout64 *qout;
struct qin64 *qin;
qin = kmalloc(sizeof(*qin), GFP_KERNEL | GFP_DMA);
qout = kmalloc(sizeof(*qout), GFP_KERNEL | GFP_DMA);
if ((qin == NULL) || (qout == NULL)) {
rc = -ENOMEM;
goto out_free;
}
/* initialize diag input parameters */
qin->qopcode = DCSS_FINDSEGA;
qin->qoutptr = (unsigned long) qout;
qin->qoutlen = sizeof(struct qout64);
memcpy (qin->qname, seg->dcss_name, 8);
diag_cc = dcss_diag(&segext_scode, qin, &dummy, &vmrc);
if (diag_cc < 0) {
rc = diag_cc;
goto out_free;
}
if (diag_cc > 1) {
pr_warn("Querying a DCSS type failed with rc=%ld\n", vmrc);
rc = dcss_diag_translate_rc (vmrc);
goto out_free;
}
if (qout->segcnt > 6) {
rc = -EOPNOTSUPP;
goto out_free;
}
if (qout->segcnt == 1) {
seg->vm_segtype = qout->range[0].start & 0xff;
} else {
/* multi-part segment. only one type supported here:
- all parts are contiguous
- all parts are either EW or EN type
- maximum 6 parts allowed */
unsigned long start = qout->segstart >> PAGE_SHIFT;
for (i=0; i<qout->segcnt; i++) {
if (((qout->range[i].start & 0xff) != SEG_TYPE_EW) &&
((qout->range[i].start & 0xff) != SEG_TYPE_EN)) {
rc = -EOPNOTSUPP;
goto out_free;
}
if (start != qout->range[i].start >> PAGE_SHIFT) {
rc = -EOPNOTSUPP;
goto out_free;
}
start = (qout->range[i].end >> PAGE_SHIFT) + 1;
}
seg->vm_segtype = SEG_TYPE_EWEN;
}
/* analyze diag output and update seg */
seg->start_addr = qout->segstart;
seg->end = qout->segend;
memcpy (seg->range, qout->range, 6*sizeof(struct qrange));
seg->segcnt = qout->segcnt;
rc = 0;
out_free:
kfree(qin);
kfree(qout);
return rc;
}
/*
* get info about a segment
* possible return values:
* -ENOSYS : we are not running on VM
* -EIO : could not perform query diagnose
* -ENOENT : no such segment
* -EOPNOTSUPP: multi-part segment cannot be used with linux
* -ENOMEM : out of memory
* 0 .. 6 : type of segment as defined in include/asm-s390/extmem.h
*/
int
segment_type (char* name)
{
int rc;
struct dcss_segment seg;
if (!MACHINE_IS_VM)
return -ENOSYS;
dcss_mkname(name, seg.dcss_name);
rc = query_segment_type (&seg);
if (rc < 0)
return rc;
return seg.vm_segtype;
}
/*
* check if segment collides with other segments that are currently loaded
* returns 1 if this is the case, 0 if no collision was found
*/
static int
segment_overlaps_others (struct dcss_segment *seg)
{
struct list_head *l;
struct dcss_segment *tmp;
BUG_ON(!mutex_is_locked(&dcss_lock));
list_for_each(l, &dcss_list) {
tmp = list_entry(l, struct dcss_segment, list);
if ((tmp->start_addr >> 20) > (seg->end >> 20))
continue;
if ((tmp->end >> 20) < (seg->start_addr >> 20))
continue;
if (seg == tmp)
continue;
return 1;
}
return 0;
}
/*
* real segment loading function, called from segment_load
* Must return either an error code < 0, or the segment type code >= 0
*/
static int
__segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long *end)
{
unsigned long start_addr, end_addr, dummy;
struct dcss_segment *seg;
int rc, diag_cc, segtype;
start_addr = end_addr = 0;
segtype = -1;
seg = kmalloc(sizeof(*seg), GFP_KERNEL | GFP_DMA);
if (seg == NULL) {
rc = -ENOMEM;
goto out;
}
dcss_mkname (name, seg->dcss_name);
rc = query_segment_type (seg);
if (rc < 0)
goto out_free;
if (segment_overlaps_others(seg)) {
rc = -EBUSY;
goto out_free;
}
seg->res = kzalloc(sizeof(struct resource), GFP_KERNEL);
if (seg->res == NULL) {
rc = -ENOMEM;
goto out_free;
}
seg->res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
seg->res->start = seg->start_addr;
seg->res->end = seg->end;
memcpy(&seg->res_name, seg->dcss_name, 8);
EBCASC(seg->res_name, 8);
seg->res_name[8] = '\0';
strlcat(seg->res_name, " (DCSS)", sizeof(seg->res_name));
seg->res->name = seg->res_name;
segtype = seg->vm_segtype;
if (segtype == SEG_TYPE_SC ||
((segtype == SEG_TYPE_SR || segtype == SEG_TYPE_ER) && !do_nonshared))
seg->res->flags |= IORESOURCE_READONLY;
/* Check for overlapping resources before adding the mapping. */
if (request_resource(&iomem_resource, seg->res)) {
rc = -EBUSY;
goto out_free_resource;
}
rc = vmem_add_mapping(seg->start_addr, seg->end - seg->start_addr + 1);
if (rc)
goto out_resource;
if (do_nonshared)
diag_cc = dcss_diag(&loadnsr_scode, seg->dcss_name,
&start_addr, &end_addr);
else
diag_cc = dcss_diag(&loadshr_scode, seg->dcss_name,
&start_addr, &end_addr);
if (diag_cc < 0) {
dcss_diag(&purgeseg_scode, seg->dcss_name,
&dummy, &dummy);
rc = diag_cc;
goto out_mapping;
}
if (diag_cc > 1) {
pr_warn("Loading DCSS %s failed with rc=%ld\n", name, end_addr);
rc = dcss_diag_translate_rc(end_addr);
dcss_diag(&purgeseg_scode, seg->dcss_name,
&dummy, &dummy);
goto out_mapping;
}
seg->start_addr = start_addr;
seg->end = end_addr;
seg->do_nonshared = do_nonshared;
refcount_set(&seg->ref_count, 1);
list_add(&seg->list, &dcss_list);
*addr = seg->start_addr;
*end = seg->end;
if (do_nonshared)
pr_info("DCSS %s of range %px to %px and type %s loaded as "
"exclusive-writable\n", name, (void*) seg->start_addr,
(void*) seg->end, segtype_string[seg->vm_segtype]);
else {
pr_info("DCSS %s of range %px to %px and type %s loaded in "
"shared access mode\n", name, (void*) seg->start_addr,
(void*) seg->end, segtype_string[seg->vm_segtype]);
}
goto out;
out_mapping:
vmem_remove_mapping(seg->start_addr, seg->end - seg->start_addr + 1);
out_resource:
release_resource(seg->res);
out_free_resource:
kfree(seg->res);
out_free:
kfree(seg);
out:
return rc < 0 ? rc : segtype;
}
/*
* this function loads a DCSS segment
* name : name of the DCSS
* do_nonshared : 0 indicates that the dcss should be shared with other linux images
* 1 indicates that the dcss should be exclusive for this linux image
* addr : will be filled with start address of the segment
* end : will be filled with end address of the segment
* return values:
* -ENOSYS : we are not running on VM
* -EIO : could not perform query or load diagnose
* -ENOENT : no such segment
* -EOPNOTSUPP: multi-part segment cannot be used with linux
* -EBUSY : segment cannot be used (overlaps with dcss or storage)
* -ERANGE : segment cannot be used (exceeds kernel mapping range)
* -EPERM : segment is currently loaded with incompatible permissions
* -ENOMEM : out of memory
* 0 .. 6 : type of segment as defined in include/asm-s390/extmem.h
*/
int
segment_load (char *name, int do_nonshared, unsigned long *addr,
unsigned long *end)
{
struct dcss_segment *seg;
int rc;
if (!MACHINE_IS_VM)
return -ENOSYS;
mutex_lock(&dcss_lock);
seg = segment_by_name (name);
if (seg == NULL)
rc = __segment_load (name, do_nonshared, addr, end);
else {
if (do_nonshared == seg->do_nonshared) {
refcount_inc(&seg->ref_count);
*addr = seg->start_addr;
*end = seg->end;
rc = seg->vm_segtype;
} else {
*addr = *end = 0;
rc = -EPERM;
}
}
mutex_unlock(&dcss_lock);
return rc;
}
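/*
 * Illustrative usage sketch, not part of the original source: load a DCSS
 * in shared mode, report failures via segment_warning(), and unload it
 * again. The segment name "MYDCSS" is an assumption for this example.
 */
static int example_use_dcss(void)
{
	unsigned long start, end;
	char name[] = "MYDCSS";
	int segtype;

	segtype = segment_load(name, 0, &start, &end);
	if (segtype < 0) {
		segment_warning(segtype, name);
		return segtype;
	}
	/* ... access the segment between start and end here ... */
	segment_unload(name);
	return 0;
}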
/*
* This function modifies the shared state of a DCSS segment.
* name : name of the DCSS
* do_nonshared : 0 indicates that the dcss should be shared with other linux images
* 1 indicates that the dcss should be exclusive for this linux image
* return values:
* -EIO : could not perform load diagnose (segment gone!)
* -ENOENT : no such segment (segment gone!)
* -EAGAIN : segment is in use by other exploiters, try later
* -EINVAL : no segment with the given name is currently loaded - name invalid
* -EBUSY : segment can temporarily not be used (overlaps with dcss)
* 0 : operation succeeded
*/
int
segment_modify_shared (char *name, int do_nonshared)
{
struct dcss_segment *seg;
unsigned long start_addr, end_addr, dummy;
int rc, diag_cc;
start_addr = end_addr = 0;
mutex_lock(&dcss_lock);
seg = segment_by_name (name);
if (seg == NULL) {
rc = -EINVAL;
goto out_unlock;
}
if (do_nonshared == seg->do_nonshared) {
pr_info("DCSS %s is already in the requested access "
"mode\n", name);
rc = 0;
goto out_unlock;
}
if (refcount_read(&seg->ref_count) != 1) {
pr_warn("DCSS %s is in use and cannot be reloaded\n", name);
rc = -EAGAIN;
goto out_unlock;
}
release_resource(seg->res);
if (do_nonshared)
seg->res->flags &= ~IORESOURCE_READONLY;
else
if (seg->vm_segtype == SEG_TYPE_SR ||
seg->vm_segtype == SEG_TYPE_ER)
seg->res->flags |= IORESOURCE_READONLY;
if (request_resource(&iomem_resource, seg->res)) {
pr_warn("DCSS %s overlaps with used memory resources and cannot be reloaded\n",
name);
rc = -EBUSY;
kfree(seg->res);
goto out_del_mem;
}
dcss_diag(&purgeseg_scode, seg->dcss_name, &dummy, &dummy);
if (do_nonshared)
diag_cc = dcss_diag(&loadnsr_scode, seg->dcss_name,
&start_addr, &end_addr);
else
diag_cc = dcss_diag(&loadshr_scode, seg->dcss_name,
&start_addr, &end_addr);
if (diag_cc < 0) {
rc = diag_cc;
goto out_del_res;
}
if (diag_cc > 1) {
pr_warn("Reloading DCSS %s failed with rc=%ld\n",
name, end_addr);
rc = dcss_diag_translate_rc(end_addr);
goto out_del_res;
}
seg->start_addr = start_addr;
seg->end = end_addr;
seg->do_nonshared = do_nonshared;
rc = 0;
goto out_unlock;
out_del_res:
release_resource(seg->res);
kfree(seg->res);
out_del_mem:
vmem_remove_mapping(seg->start_addr, seg->end - seg->start_addr + 1);
list_del(&seg->list);
dcss_diag(&purgeseg_scode, seg->dcss_name, &dummy, &dummy);
kfree(seg);
out_unlock:
mutex_unlock(&dcss_lock);
return rc;
}
/*
* Decrease the use count of a DCSS segment and remove
* it from the address space if nobody is using it
* any longer.
*/
void
segment_unload(char *name)
{
unsigned long dummy;
struct dcss_segment *seg;
if (!MACHINE_IS_VM)
return;
mutex_lock(&dcss_lock);
seg = segment_by_name (name);
if (seg == NULL) {
pr_err("Unloading unknown DCSS %s failed\n", name);
goto out_unlock;
}
if (!refcount_dec_and_test(&seg->ref_count))
goto out_unlock;
release_resource(seg->res);
kfree(seg->res);
vmem_remove_mapping(seg->start_addr, seg->end - seg->start_addr + 1);
list_del(&seg->list);
dcss_diag(&purgeseg_scode, seg->dcss_name, &dummy, &dummy);
kfree(seg);
out_unlock:
mutex_unlock(&dcss_lock);
}
/*
* save segment content permanently
*/
void
segment_save(char *name)
{
struct dcss_segment *seg;
char cmd1[160];
char cmd2[80];
int i, response;
if (!MACHINE_IS_VM)
return;
mutex_lock(&dcss_lock);
seg = segment_by_name (name);
if (seg == NULL) {
pr_err("Saving unknown DCSS %s failed\n", name);
goto out;
}
sprintf(cmd1, "DEFSEG %s", name);
for (i=0; i<seg->segcnt; i++) {
sprintf(cmd1+strlen(cmd1), " %lX-%lX %s",
seg->range[i].start >> PAGE_SHIFT,
seg->range[i].end >> PAGE_SHIFT,
segtype_string[seg->range[i].start & 0xff]);
}
sprintf(cmd2, "SAVESEG %s", name);
response = 0;
cpcmd(cmd1, NULL, 0, &response);
if (response) {
pr_err("Saving a DCSS failed with DEFSEG response code "
"%i\n", response);
goto out;
}
cpcmd(cmd2, NULL, 0, &response);
if (response) {
pr_err("Saving a DCSS failed with SAVESEG response code "
"%i\n", response);
goto out;
}
out:
mutex_unlock(&dcss_lock);
}
/*
* print appropriate error message for segment_load()/segment_type()
* return code
*/
void segment_warning(int rc, char *seg_name)
{
switch (rc) {
case -ENOENT:
pr_err("DCSS %s cannot be loaded or queried\n", seg_name);
break;
case -ENOSYS:
pr_err("DCSS %s cannot be loaded or queried without "
"z/VM\n", seg_name);
break;
case -EIO:
pr_err("Loading or querying DCSS %s resulted in a "
"hardware error\n", seg_name);
break;
case -EOPNOTSUPP:
pr_err("DCSS %s has multiple page ranges and cannot be "
"loaded or queried\n", seg_name);
break;
case -EBUSY:
pr_err("%s needs used memory resources and cannot be "
"loaded or queried\n", seg_name);
break;
case -EPERM:
pr_err("DCSS %s is already loaded in a different access "
"mode\n", seg_name);
break;
case -ENOMEM:
pr_err("There is not enough memory to load or query "
"DCSS %s\n", seg_name);
break;
case -ERANGE: {
struct range mhp_range = arch_get_mappable_range();
pr_err("DCSS %s exceeds the kernel mapping range (%llu) "
"and cannot be loaded\n", seg_name, mhp_range.end + 1);
break;
}
default:
break;
}
}
EXPORT_SYMBOL(segment_load);
EXPORT_SYMBOL(segment_unload);
EXPORT_SYMBOL(segment_save);
EXPORT_SYMBOL(segment_type);
EXPORT_SYMBOL(segment_modify_shared);
EXPORT_SYMBOL(segment_warning);
| linux-master | arch/s390/mm/extmem.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Page table allocation functions
*
* Copyright IBM Corp. 2016
* Author(s): Martin Schwidefsky <[email protected]>
*/
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#ifdef CONFIG_PGSTE
int page_table_allocate_pgste = 0;
EXPORT_SYMBOL(page_table_allocate_pgste);
static struct ctl_table page_table_sysctl[] = {
{
.procname = "allocate_pgste",
.data = &page_table_allocate_pgste,
.maxlen = sizeof(int),
.mode = S_IRUGO | S_IWUSR,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
{ }
};
static int __init page_table_register_sysctl(void)
{
return register_sysctl("vm", page_table_sysctl) ? 0 : -ENOMEM;
}
__initcall(page_table_register_sysctl);
#endif /* CONFIG_PGSTE */
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL, CRST_ALLOC_ORDER);
if (!ptdesc)
return NULL;
arch_set_page_dat(ptdesc_page(ptdesc), CRST_ALLOC_ORDER);
return (unsigned long *) ptdesc_to_virt(ptdesc);
}
void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
pagetable_free(virt_to_ptdesc(table));
}
static void __crst_table_upgrade(void *arg)
{
struct mm_struct *mm = arg;
/* change all active ASCEs to avoid the creation of new TLBs */
if (current->active_mm == mm) {
S390_lowcore.user_asce = mm->context.asce;
__ctl_load(S390_lowcore.user_asce, 7, 7);
}
__tlb_flush_local();
}
int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
{
unsigned long *pgd = NULL, *p4d = NULL, *__pgd;
unsigned long asce_limit = mm->context.asce_limit;
/* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
VM_BUG_ON(asce_limit < _REGION2_SIZE);
if (end <= asce_limit)
return 0;
if (asce_limit == _REGION2_SIZE) {
p4d = crst_table_alloc(mm);
if (unlikely(!p4d))
goto err_p4d;
crst_table_init(p4d, _REGION2_ENTRY_EMPTY);
}
if (end > _REGION1_SIZE) {
pgd = crst_table_alloc(mm);
if (unlikely(!pgd))
goto err_pgd;
crst_table_init(pgd, _REGION1_ENTRY_EMPTY);
}
spin_lock_bh(&mm->page_table_lock);
/*
* This routine gets called with the mmap_lock held and there is
* no reason to optimize for the opposite case. However, should
* that ever change, the check below will let us know.
*/
VM_BUG_ON(asce_limit != mm->context.asce_limit);
if (p4d) {
__pgd = (unsigned long *) mm->pgd;
p4d_populate(mm, (p4d_t *) p4d, (pud_t *) __pgd);
mm->pgd = (pgd_t *) p4d;
mm->context.asce_limit = _REGION1_SIZE;
mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
_ASCE_USER_BITS | _ASCE_TYPE_REGION2;
mm_inc_nr_puds(mm);
}
if (pgd) {
__pgd = (unsigned long *) mm->pgd;
pgd_populate(mm, (pgd_t *) pgd, (p4d_t *) __pgd);
mm->pgd = (pgd_t *) pgd;
mm->context.asce_limit = TASK_SIZE_MAX;
mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
_ASCE_USER_BITS | _ASCE_TYPE_REGION1;
}
spin_unlock_bh(&mm->page_table_lock);
on_each_cpu(__crst_table_upgrade, mm, 0);
return 0;
err_pgd:
crst_table_free(mm, p4d);
err_p4d:
return -ENOMEM;
}
static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
return atomic_fetch_xor(bits, v) ^ bits;
}
#ifdef CONFIG_PGSTE
struct page *page_table_alloc_pgste(struct mm_struct *mm)
{
struct ptdesc *ptdesc;
u64 *table;
ptdesc = pagetable_alloc(GFP_KERNEL, 0);
if (ptdesc) {
table = (u64 *)ptdesc_to_virt(ptdesc);
memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
}
return ptdesc_page(ptdesc);
}
void page_table_free_pgste(struct page *page)
{
pagetable_free(page_ptdesc(page));
}
#endif /* CONFIG_PGSTE */
/*
* A 2KB-pgtable is either upper or lower half of a normal page.
* The second half of the page may be unused or used as another
* 2KB-pgtable.
*
* Whenever possible the parent page for a new 2KB-pgtable is picked
* from the list of partially allocated pages mm_context_t::pgtable_list.
* In case the list is empty a new parent page is allocated and added to
* the list.
*
* When a parent page gets fully allocated it contains 2KB-pgtables in both
* upper and lower halves and is removed from mm_context_t::pgtable_list.
*
* When a 2KB-pgtable is freed from a fully allocated parent page, that
* page turns partially allocated and is added to mm_context_t::pgtable_list.
*
* If a 2KB-pgtable is freed from a partially allocated parent page, that
* page turns unused and is removed from mm_context_t::pgtable_list.
* Furthermore, the unused parent page is released.
*
* As follows from the above, no unallocated or fully allocated parent
* pages are contained in mm_context_t::pgtable_list.
*
* The upper byte (bits 24-31) of the parent page _refcount is used
* for tracking contained 2KB-pgtables and has the following format:
*
* PP AA
* 01234567 upper byte (bits 24-31) of struct page::_refcount
* || ||
* || |+--- upper 2KB-pgtable is allocated
* || +---- lower 2KB-pgtable is allocated
* |+------- upper 2KB-pgtable is pending for removal
* +-------- lower 2KB-pgtable is pending for removal
*
* (See commit 620b4e903179 ("s390: use _refcount for pgtables") on why
* using _refcount is possible).
*
* When a 2KB-pgtable is allocated the corresponding AA bit is set to 1.
* The parent page is either:
* - added to mm_context_t::pgtable_list in case the second half of the
* parent page is still unallocated;
* - removed from mm_context_t::pgtable_list in case both halves of the
* parent page are allocated;
* These operations are protected with mm_context_t::lock.
*
* When a 2KB-pgtable is deallocated the corresponding AA bit is set to 0
* and the corresponding PP bit is set to 1 in a single atomic operation.
* Thus, PP and AA bits corresponding to the same 2KB-pgtable are mutually
* exclusive and may never be both set to 1!
* The parent page is either:
* - added to mm_context_t::pgtable_list in case the second half of the
* parent page is still allocated;
* - removed from mm_context_t::pgtable_list in case the second half of
* the parent page is unallocated;
* These operations are protected with mm_context_t::lock.
*
* It is important to understand that mm_context_t::lock only protects
* mm_context_t::pgtable_list and AA bits, but not the parent page itself
* and PP bits.
*
* Releasing the parent page happens whenever the PP bit turns from 1 to 0,
* while both AA bits and the second PP bit are already unset. Then the
* parent page does not contain any 2KB-pgtable fragment anymore, and it has
* also been removed from mm_context_t::pgtable_list. It is safe to release
* the page therefore.
*
* PGSTE memory spaces use full 4KB-pgtables and do not need most of the
* logic described above. Both AA bits are set to 1 to denote a 4KB-pgtable
* while the PP bits are never used, nor such a page is added to or removed
* from mm_context_t::pgtable_list.
*
* pte_free_defer() overrides those rules: it takes the page off pgtable_list,
* and prevents both 2K fragments from being reused. pte_free_defer() has to
* guarantee that its pgtable cannot be reused before the RCU grace period
* has elapsed (which page_table_free_rcu() does not actually guarantee).
* But for simplicity, because page->rcu_head overlays page->lru, and because
* the RCU callback might not be called before the mm_context_t has been freed,
* pte_free_defer() in this implementation prevents both fragments from being
* reused, and delays making the call to RCU until both fragments are freed.
*/
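/*
 * Illustrative sketch, not part of the original source: how the PP and AA
 * nibbles of the upper _refcount byte are collapsed before picking a 2K
 * fragment, as done in page_table_alloc() below. A fragment is reusable
 * only if neither its AA (allocated) nor its PP (pending) bit is set.
 */
static inline unsigned int example_busy_fragments(unsigned int refcount_byte)
{
	/* fold the PP bits (bits 4-5) onto the AA bits (bits 0-1) */
	return (refcount_byte | (refcount_byte >> 4)) & 0x03U;
}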
unsigned long *page_table_alloc(struct mm_struct *mm)
{
unsigned long *table;
struct ptdesc *ptdesc;
unsigned int mask, bit;
/* Try to get a fragment of a 4K page as a 2K page table */
if (!mm_alloc_pgste(mm)) {
table = NULL;
spin_lock_bh(&mm->context.lock);
if (!list_empty(&mm->context.pgtable_list)) {
ptdesc = list_first_entry(&mm->context.pgtable_list,
struct ptdesc, pt_list);
mask = atomic_read(&ptdesc->_refcount) >> 24;
/*
* The pending removal bits must also be checked.
* Failure to do so might lead to an impossible
* value (e.g. 0x13 or 0x23) being written to _refcount.
* Such values violate the assumption that pending and
* allocation bits are mutually exclusive, and the rest
* of the code derails as a result. That could lead to
* a whole bunch of races and corruptions.
*/
mask = (mask | (mask >> 4)) & 0x03U;
if (mask != 0x03U) {
table = (unsigned long *) ptdesc_to_virt(ptdesc);
bit = mask & 1; /* =1 -> second 2K */
if (bit)
table += PTRS_PER_PTE;
atomic_xor_bits(&ptdesc->_refcount,
0x01U << (bit + 24));
list_del_init(&ptdesc->pt_list);
}
}
spin_unlock_bh(&mm->context.lock);
if (table)
return table;
}
/* Allocate a fresh page */
ptdesc = pagetable_alloc(GFP_KERNEL, 0);
if (!ptdesc)
return NULL;
if (!pagetable_pte_ctor(ptdesc)) {
pagetable_free(ptdesc);
return NULL;
}
arch_set_page_dat(ptdesc_page(ptdesc), 0);
/* Initialize page table */
table = (unsigned long *) ptdesc_to_virt(ptdesc);
if (mm_alloc_pgste(mm)) {
/* Return 4K page table with PGSTEs */
INIT_LIST_HEAD(&ptdesc->pt_list);
atomic_xor_bits(&ptdesc->_refcount, 0x03U << 24);
memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
} else {
/* Return the first 2K fragment of the page */
atomic_xor_bits(&ptdesc->_refcount, 0x01U << 24);
memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE);
spin_lock_bh(&mm->context.lock);
list_add(&ptdesc->pt_list, &mm->context.pgtable_list);
spin_unlock_bh(&mm->context.lock);
}
return table;
}
static void page_table_release_check(struct page *page, void *table,
unsigned int half, unsigned int mask)
{
char msg[128];
if (!IS_ENABLED(CONFIG_DEBUG_VM))
return;
if (!mask && list_empty(&page->lru))
return;
snprintf(msg, sizeof(msg),
"Invalid pgtable %p release half 0x%02x mask 0x%02x",
table, half, mask);
dump_page(page, msg);
}
static void pte_free_now(struct rcu_head *head)
{
struct ptdesc *ptdesc;
ptdesc = container_of(head, struct ptdesc, pt_rcu_head);
pagetable_pte_dtor(ptdesc);
pagetable_free(ptdesc);
}
void page_table_free(struct mm_struct *mm, unsigned long *table)
{
unsigned int mask, bit, half;
struct ptdesc *ptdesc = virt_to_ptdesc(table);
if (!mm_alloc_pgste(mm)) {
/* Free 2K page table fragment of a 4K page */
bit = ((unsigned long) table & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
spin_lock_bh(&mm->context.lock);
/*
* Mark the page for delayed release. The actual release
* will happen outside of the critical section from this
* function or from __tlb_remove_table()
*/
mask = atomic_xor_bits(&ptdesc->_refcount, 0x11U << (bit + 24));
mask >>= 24;
if ((mask & 0x03U) && !folio_test_active(ptdesc_folio(ptdesc))) {
/*
* Other half is allocated, and neither half has had
* its free deferred: add page to head of list, to make
* this freed half available for immediate reuse.
*/
list_add(&ptdesc->pt_list, &mm->context.pgtable_list);
} else {
/* If page is on list, now remove it. */
list_del_init(&ptdesc->pt_list);
}
spin_unlock_bh(&mm->context.lock);
mask = atomic_xor_bits(&ptdesc->_refcount, 0x10U << (bit + 24));
mask >>= 24;
if (mask != 0x00U)
return;
half = 0x01U << bit;
} else {
half = 0x03U;
mask = atomic_xor_bits(&ptdesc->_refcount, 0x03U << 24);
mask >>= 24;
}
page_table_release_check(ptdesc_page(ptdesc), table, half, mask);
if (folio_test_clear_active(ptdesc_folio(ptdesc)))
call_rcu(&ptdesc->pt_rcu_head, pte_free_now);
else
pte_free_now(&ptdesc->pt_rcu_head);
}
void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
unsigned long vmaddr)
{
struct mm_struct *mm;
unsigned int bit, mask;
struct ptdesc *ptdesc = virt_to_ptdesc(table);
mm = tlb->mm;
if (mm_alloc_pgste(mm)) {
gmap_unlink(mm, table, vmaddr);
table = (unsigned long *) ((unsigned long)table | 0x03U);
tlb_remove_ptdesc(tlb, table);
return;
}
bit = ((unsigned long) table & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
spin_lock_bh(&mm->context.lock);
/*
* Mark the page for delayed release. The actual release will happen
* outside of the critical section from __tlb_remove_table() or from
* page_table_free()
*/
mask = atomic_xor_bits(&ptdesc->_refcount, 0x11U << (bit + 24));
mask >>= 24;
if ((mask & 0x03U) && !folio_test_active(ptdesc_folio(ptdesc))) {
/*
* Other half is allocated, and neither half has had
* its free deferred: add page to end of list, to make
* this freed half available for reuse once its pending
* bit has been cleared by __tlb_remove_table().
*/
list_add_tail(&ptdesc->pt_list, &mm->context.pgtable_list);
} else {
/* If page is on list, now remove it. */
list_del_init(&ptdesc->pt_list);
}
spin_unlock_bh(&mm->context.lock);
table = (unsigned long *) ((unsigned long) table | (0x01U << bit));
tlb_remove_table(tlb, table);
}
void __tlb_remove_table(void *_table)
{
unsigned int mask = (unsigned long) _table & 0x03U, half = mask;
void *table = (void *)((unsigned long) _table ^ mask);
struct ptdesc *ptdesc = virt_to_ptdesc(table);
switch (half) {
case 0x00U: /* pmd, pud, or p4d */
pagetable_free(ptdesc);
return;
case 0x01U: /* lower 2K of a 4K page table */
case 0x02U: /* higher 2K of a 4K page table */
mask = atomic_xor_bits(&ptdesc->_refcount, mask << (4 + 24));
mask >>= 24;
if (mask != 0x00U)
return;
break;
case 0x03U: /* 4K page table with pgstes */
mask = atomic_xor_bits(&ptdesc->_refcount, 0x03U << 24);
mask >>= 24;
break;
}
page_table_release_check(ptdesc_page(ptdesc), table, half, mask);
if (folio_test_clear_active(ptdesc_folio(ptdesc)))
call_rcu(&ptdesc->pt_rcu_head, pte_free_now);
else
pte_free_now(&ptdesc->pt_rcu_head);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable)
{
struct page *page;
page = virt_to_page(pgtable);
SetPageActive(page);
page_table_free(mm, (unsigned long *)pgtable);
/*
* page_table_free() does not do the pgste gmap_unlink() which
* page_table_free_rcu() does: warn us if pgste ever reaches here.
*/
WARN_ON_ONCE(mm_has_pgste(mm));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/*
* Base infrastructure required to generate basic asces, region, segment,
* and page tables that do not make use of enhanced features like EDAT1.
*/
static struct kmem_cache *base_pgt_cache;
static unsigned long *base_pgt_alloc(void)
{
unsigned long *table;
table = kmem_cache_alloc(base_pgt_cache, GFP_KERNEL);
if (table)
memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
return table;
}
static void base_pgt_free(unsigned long *table)
{
kmem_cache_free(base_pgt_cache, table);
}
static unsigned long *base_crst_alloc(unsigned long val)
{
unsigned long *table;
struct ptdesc *ptdesc;
ptdesc = pagetable_alloc(GFP_KERNEL & ~__GFP_HIGHMEM, CRST_ALLOC_ORDER);
if (!ptdesc)
return NULL;
table = ptdesc_address(ptdesc);
crst_table_init(table, val);
return table;
}
static void base_crst_free(unsigned long *table)
{
pagetable_free(virt_to_ptdesc(table));
}
#define BASE_ADDR_END_FUNC(NAME, SIZE) \
static inline unsigned long base_##NAME##_addr_end(unsigned long addr, \
unsigned long end) \
{ \
unsigned long next = (addr + (SIZE)) & ~((SIZE) - 1); \
\
return (next - 1) < (end - 1) ? next : end; \
}
BASE_ADDR_END_FUNC(page, _PAGE_SIZE)
BASE_ADDR_END_FUNC(segment, _SEGMENT_SIZE)
BASE_ADDR_END_FUNC(region3, _REGION3_SIZE)
BASE_ADDR_END_FUNC(region2, _REGION2_SIZE)
BASE_ADDR_END_FUNC(region1, _REGION1_SIZE)
static inline unsigned long base_lra(unsigned long address)
{
unsigned long real;
asm volatile(
" lra %0,0(%1)\n"
: "=d" (real) : "a" (address) : "cc");
return real;
}
static int base_page_walk(unsigned long *origin, unsigned long addr,
unsigned long end, int alloc)
{
unsigned long *pte, next;
if (!alloc)
return 0;
pte = origin;
pte += (addr & _PAGE_INDEX) >> _PAGE_SHIFT;
do {
next = base_page_addr_end(addr, end);
*pte = base_lra(addr);
} while (pte++, addr = next, addr < end);
return 0;
}
static int base_segment_walk(unsigned long *origin, unsigned long addr,
unsigned long end, int alloc)
{
unsigned long *ste, next, *table;
int rc;
ste = origin;
ste += (addr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
do {
next = base_segment_addr_end(addr, end);
if (*ste & _SEGMENT_ENTRY_INVALID) {
if (!alloc)
continue;
table = base_pgt_alloc();
if (!table)
return -ENOMEM;
*ste = __pa(table) | _SEGMENT_ENTRY;
}
table = __va(*ste & _SEGMENT_ENTRY_ORIGIN);
rc = base_page_walk(table, addr, next, alloc);
if (rc)
return rc;
if (!alloc)
base_pgt_free(table);
cond_resched();
} while (ste++, addr = next, addr < end);
return 0;
}
static int base_region3_walk(unsigned long *origin, unsigned long addr,
unsigned long end, int alloc)
{
unsigned long *rtte, next, *table;
int rc;
rtte = origin;
rtte += (addr & _REGION3_INDEX) >> _REGION3_SHIFT;
do {
next = base_region3_addr_end(addr, end);
if (*rtte & _REGION_ENTRY_INVALID) {
if (!alloc)
continue;
table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
if (!table)
return -ENOMEM;
*rtte = __pa(table) | _REGION3_ENTRY;
}
table = __va(*rtte & _REGION_ENTRY_ORIGIN);
rc = base_segment_walk(table, addr, next, alloc);
if (rc)
return rc;
if (!alloc)
base_crst_free(table);
} while (rtte++, addr = next, addr < end);
return 0;
}
static int base_region2_walk(unsigned long *origin, unsigned long addr,
unsigned long end, int alloc)
{
unsigned long *rste, next, *table;
int rc;
rste = origin;
rste += (addr & _REGION2_INDEX) >> _REGION2_SHIFT;
do {
next = base_region2_addr_end(addr, end);
if (*rste & _REGION_ENTRY_INVALID) {
if (!alloc)
continue;
table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
if (!table)
return -ENOMEM;
*rste = __pa(table) | _REGION2_ENTRY;
}
table = __va(*rste & _REGION_ENTRY_ORIGIN);
rc = base_region3_walk(table, addr, next, alloc);
if (rc)
return rc;
if (!alloc)
base_crst_free(table);
} while (rste++, addr = next, addr < end);
return 0;
}
static int base_region1_walk(unsigned long *origin, unsigned long addr,
unsigned long end, int alloc)
{
unsigned long *rfte, next, *table;
int rc;
rfte = origin;
rfte += (addr & _REGION1_INDEX) >> _REGION1_SHIFT;
do {
next = base_region1_addr_end(addr, end);
if (*rfte & _REGION_ENTRY_INVALID) {
if (!alloc)
continue;
table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
if (!table)
return -ENOMEM;
*rfte = __pa(table) | _REGION1_ENTRY;
}
table = __va(*rfte & _REGION_ENTRY_ORIGIN);
rc = base_region2_walk(table, addr, next, alloc);
if (rc)
return rc;
if (!alloc)
base_crst_free(table);
} while (rfte++, addr = next, addr < end);
return 0;
}
/**
* base_asce_free - free asce and tables returned from base_asce_alloc()
* @asce: asce to be freed
*
* Frees all region, segment, and page tables that were allocated with a
* corresponding base_asce_alloc() call.
*/
void base_asce_free(unsigned long asce)
{
unsigned long *table = __va(asce & _ASCE_ORIGIN);
if (!asce)
return;
switch (asce & _ASCE_TYPE_MASK) {
case _ASCE_TYPE_SEGMENT:
base_segment_walk(table, 0, _REGION3_SIZE, 0);
break;
case _ASCE_TYPE_REGION3:
base_region3_walk(table, 0, _REGION2_SIZE, 0);
break;
case _ASCE_TYPE_REGION2:
base_region2_walk(table, 0, _REGION1_SIZE, 0);
break;
case _ASCE_TYPE_REGION1:
base_region1_walk(table, 0, TASK_SIZE_MAX, 0);
break;
}
base_crst_free(table);
}
static int base_pgt_cache_init(void)
{
static DEFINE_MUTEX(base_pgt_cache_mutex);
unsigned long sz = _PAGE_TABLE_SIZE;
if (base_pgt_cache)
return 0;
mutex_lock(&base_pgt_cache_mutex);
if (!base_pgt_cache)
base_pgt_cache = kmem_cache_create("base_pgt", sz, sz, 0, NULL);
mutex_unlock(&base_pgt_cache_mutex);
return base_pgt_cache ? 0 : -ENOMEM;
}
/**
* base_asce_alloc - create kernel mapping without enhanced DAT features
* @addr: virtual start address of kernel mapping
* @num_pages: number of consecutive pages
*
* Generate an asce, including all required region, segment and page tables,
* that can be used to access the virtual kernel mapping. The difference is
* that the returned asce does not make use of any enhanced DAT features like
* e.g. large pages. This is required for some I/O functions that pass an
* asce, like e.g. some service call requests.
*
* Note: the returned asce may NEVER be attached to any cpu. It may only be
* used for I/O requests. TLB entries that might result if the
* asce were attached to a cpu won't be cleared.
*/
unsigned long base_asce_alloc(unsigned long addr, unsigned long num_pages)
{
unsigned long asce, *table, end;
int rc;
if (base_pgt_cache_init())
return 0;
end = addr + num_pages * PAGE_SIZE;
if (end <= _REGION3_SIZE) {
table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
if (!table)
return 0;
rc = base_segment_walk(table, addr, end, 1);
asce = __pa(table) | _ASCE_TYPE_SEGMENT | _ASCE_TABLE_LENGTH;
} else if (end <= _REGION2_SIZE) {
table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
if (!table)
return 0;
rc = base_region3_walk(table, addr, end, 1);
asce = __pa(table) | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
} else if (end <= _REGION1_SIZE) {
table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
if (!table)
return 0;
rc = base_region2_walk(table, addr, end, 1);
asce = __pa(table) | _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
} else {
table = base_crst_alloc(_REGION1_ENTRY_EMPTY);
if (!table)
return 0;
rc = base_region1_walk(table, addr, end, 1);
asce = __pa(table) | _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH;
}
if (rc) {
base_asce_free(asce);
asce = 0;
}
return asce;
}
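/*
 * Illustrative usage sketch, not part of the original source: allocate a
 * basic asce for a buffer, pass it to an I/O request, and free it again.
 * The surrounding function and the elided I/O step are assumptions.
 */
static int example_base_asce_usage(void *buf, unsigned long num_pages)
{
	unsigned long asce;

	asce = base_asce_alloc((unsigned long)buf, num_pages);
	if (!asce)
		return -ENOMEM;
	/* ... pass the asce to a service call or I/O function here ... */
	base_asce_free(asce);
	return 0;
}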
| linux-master | arch/s390/mm/pgalloc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* S390 version
* Copyright IBM Corp. 1999
* Author(s): Hartmut Penner ([email protected])
* Ulrich Weigand ([email protected])
*
* Derived from "arch/i386/mm/fault.c"
* Copyright (C) 1995 Linus Torvalds
*/
#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <linux/kfence.h>
#include <asm/asm-extable.h>
#include <asm/asm-offsets.h>
#include <asm/diag.h>
#include <asm/gmap.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/facility.h>
#include <asm/uv.h>
#include "../kernel/entry.h"
#define __FAIL_ADDR_MASK -4096L
/*
* Allocate private vm_fault_reason values from the top. Please make sure
* they won't collide with the generic vm_fault_reason values.
*/
#define VM_FAULT_BADCONTEXT ((__force vm_fault_t)0x80000000)
#define VM_FAULT_BADMAP ((__force vm_fault_t)0x40000000)
#define VM_FAULT_BADACCESS ((__force vm_fault_t)0x20000000)
#define VM_FAULT_SIGNAL ((__force vm_fault_t)0x10000000)
#define VM_FAULT_PFAULT ((__force vm_fault_t)0x8000000)
enum fault_type {
KERNEL_FAULT,
USER_FAULT,
GMAP_FAULT,
};
static unsigned long store_indication __read_mostly;
static int __init fault_init(void)
{
if (test_facility(75))
store_indication = 0xc00;
return 0;
}
early_initcall(fault_init);
/*
* Find out which address space caused the exception.
*/
static enum fault_type get_fault_type(struct pt_regs *regs)
{
unsigned long trans_exc_code;
trans_exc_code = regs->int_parm_long & 3;
if (likely(trans_exc_code == 0)) {
/* primary space exception */
if (user_mode(regs))
return USER_FAULT;
if (!IS_ENABLED(CONFIG_PGSTE))
return KERNEL_FAULT;
if (test_pt_regs_flag(regs, PIF_GUEST_FAULT))
return GMAP_FAULT;
return KERNEL_FAULT;
}
if (trans_exc_code == 2)
return USER_FAULT;
if (trans_exc_code == 1) {
/* access register mode, not used in the kernel */
return USER_FAULT;
}
/* home space exception -> access via kernel ASCE */
return KERNEL_FAULT;
}
static unsigned long get_fault_address(struct pt_regs *regs)
{
unsigned long trans_exc_code = regs->int_parm_long;
return trans_exc_code & __FAIL_ADDR_MASK;
}
static bool fault_is_write(struct pt_regs *regs)
{
unsigned long trans_exc_code = regs->int_parm_long;
return (trans_exc_code & store_indication) == 0x400;
}
static int bad_address(void *p)
{
unsigned long dummy;
return get_kernel_nofault(dummy, (unsigned long *)p);
}
static void dump_pagetable(unsigned long asce, unsigned long address)
{
unsigned long *table = __va(asce & _ASCE_ORIGIN);
pr_alert("AS:%016lx ", asce);
switch (asce & _ASCE_TYPE_MASK) {
case _ASCE_TYPE_REGION1:
table += (address & _REGION1_INDEX) >> _REGION1_SHIFT;
if (bad_address(table))
goto bad;
pr_cont("R1:%016lx ", *table);
if (*table & _REGION_ENTRY_INVALID)
goto out;
table = __va(*table & _REGION_ENTRY_ORIGIN);
fallthrough;
case _ASCE_TYPE_REGION2:
table += (address & _REGION2_INDEX) >> _REGION2_SHIFT;
if (bad_address(table))
goto bad;
pr_cont("R2:%016lx ", *table);
if (*table & _REGION_ENTRY_INVALID)
goto out;
table = __va(*table & _REGION_ENTRY_ORIGIN);
fallthrough;
case _ASCE_TYPE_REGION3:
table += (address & _REGION3_INDEX) >> _REGION3_SHIFT;
if (bad_address(table))
goto bad;
pr_cont("R3:%016lx ", *table);
if (*table & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE))
goto out;
table = __va(*table & _REGION_ENTRY_ORIGIN);
fallthrough;
case _ASCE_TYPE_SEGMENT:
table += (address & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
if (bad_address(table))
goto bad;
pr_cont("S:%016lx ", *table);
if (*table & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
goto out;
table = __va(*table & _SEGMENT_ENTRY_ORIGIN);
}
table += (address & _PAGE_INDEX) >> _PAGE_SHIFT;
if (bad_address(table))
goto bad;
pr_cont("P:%016lx ", *table);
out:
pr_cont("\n");
return;
bad:
pr_cont("BAD\n");
}
static void dump_fault_info(struct pt_regs *regs)
{
unsigned long asce;
pr_alert("Failing address: %016lx TEID: %016lx\n",
regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
pr_alert("Fault in ");
switch (regs->int_parm_long & 3) {
case 3:
pr_cont("home space ");
break;
case 2:
pr_cont("secondary space ");
break;
case 1:
pr_cont("access register ");
break;
case 0:
pr_cont("primary space ");
break;
}
pr_cont("mode while using ");
switch (get_fault_type(regs)) {
case USER_FAULT:
asce = S390_lowcore.user_asce;
pr_cont("user ");
break;
case GMAP_FAULT:
asce = ((struct gmap *) S390_lowcore.gmap)->asce;
pr_cont("gmap ");
break;
case KERNEL_FAULT:
asce = S390_lowcore.kernel_asce;
pr_cont("kernel ");
break;
default:
unreachable();
}
pr_cont("ASCE.\n");
dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);
}
int show_unhandled_signals = 1;
void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault)
{
if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
return;
if (!unhandled_signal(current, signr))
return;
if (!printk_ratelimit())
return;
printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d ",
regs->int_code & 0xffff, regs->int_code >> 17);
print_vma_addr(KERN_CONT "in ", regs->psw.addr);
printk(KERN_CONT "\n");
if (is_mm_fault)
dump_fault_info(regs);
show_regs(regs);
}
/*
* Send SIGSEGV to task. This is an external routine
* to keep the stack usage of do_page_fault small.
*/
static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
{
report_user_fault(regs, SIGSEGV, 1);
force_sig_fault(SIGSEGV, si_code,
(void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
}
static noinline void do_no_context(struct pt_regs *regs, vm_fault_t fault)
{
enum fault_type fault_type;
unsigned long address;
bool is_write;
if (fixup_exception(regs))
return;
fault_type = get_fault_type(regs);
if ((fault_type == KERNEL_FAULT) && (fault == VM_FAULT_BADCONTEXT)) {
address = get_fault_address(regs);
is_write = fault_is_write(regs);
if (kfence_handle_page_fault(address, is_write, regs))
return;
}
/*
* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice.
*/
if (fault_type == KERNEL_FAULT)
printk(KERN_ALERT "Unable to handle kernel pointer dereference"
" in virtual kernel address space\n");
else
printk(KERN_ALERT "Unable to handle kernel paging request"
" in virtual user address space\n");
dump_fault_info(regs);
die(regs, "Oops");
}
static noinline void do_low_address(struct pt_regs *regs)
{
	/*
	 * Low-address protection hit in kernel mode means
	 * NULL pointer write access in kernel mode.
	 */
if (regs->psw.mask & PSW_MASK_PSTATE) {
/* Low-address protection hit in user mode 'cannot happen'. */
		die(regs, "Low-address protection");
}
do_no_context(regs, VM_FAULT_BADACCESS);
}
static noinline void do_sigbus(struct pt_regs *regs)
{
/*
* Send a sigbus, regardless of whether we were in kernel
* or user mode.
*/
force_sig_fault(SIGBUS, BUS_ADRERR,
(void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
}
static noinline void do_fault_error(struct pt_regs *regs, vm_fault_t fault)
{
int si_code;
switch (fault) {
case VM_FAULT_BADACCESS:
case VM_FAULT_BADMAP:
/* Bad memory access. Check if it is kernel or user space. */
if (user_mode(regs)) {
/* User mode accesses just cause a SIGSEGV */
si_code = (fault == VM_FAULT_BADMAP) ?
SEGV_MAPERR : SEGV_ACCERR;
do_sigsegv(regs, si_code);
break;
}
fallthrough;
case VM_FAULT_BADCONTEXT:
case VM_FAULT_PFAULT:
do_no_context(regs, fault);
break;
case VM_FAULT_SIGNAL:
if (!user_mode(regs))
do_no_context(regs, fault);
break;
default: /* fault & VM_FAULT_ERROR */
if (fault & VM_FAULT_OOM) {
if (!user_mode(regs))
do_no_context(regs, fault);
else
pagefault_out_of_memory();
} else if (fault & VM_FAULT_SIGSEGV) {
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs))
do_no_context(regs, fault);
else
do_sigsegv(regs, SEGV_MAPERR);
} else if (fault & VM_FAULT_SIGBUS) {
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs))
do_no_context(regs, fault);
else
do_sigbus(regs);
} else
BUG();
break;
}
}
/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
* routines.
*
* interruption code (int_code):
* 04 Protection -> Write-Protection (suppression)
* 10 Segment translation -> Not present (nullification)
* 11 Page translation -> Not present (nullification)
* 3b Region third trans. -> Not present (nullification)
*/
static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
{
struct gmap *gmap;
struct task_struct *tsk;
struct mm_struct *mm;
struct vm_area_struct *vma;
enum fault_type type;
unsigned long address;
unsigned int flags;
vm_fault_t fault;
bool is_write;
tsk = current;
/*
* The instruction that caused the program check has
* been nullified. Don't signal single step via SIGTRAP.
*/
clear_thread_flag(TIF_PER_TRAP);
if (kprobe_page_fault(regs, 14))
return 0;
mm = tsk->mm;
address = get_fault_address(regs);
is_write = fault_is_write(regs);
/*
* Verify that the fault happened in user space, that
* we are not in an interrupt and that there is a
* user context.
*/
fault = VM_FAULT_BADCONTEXT;
type = get_fault_type(regs);
switch (type) {
case KERNEL_FAULT:
goto out;
case USER_FAULT:
case GMAP_FAULT:
if (faulthandler_disabled() || !mm)
goto out;
break;
}
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
flags = FAULT_FLAG_DEFAULT;
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
if (is_write)
access = VM_WRITE;
if (access == VM_WRITE)
flags |= FAULT_FLAG_WRITE;
if (!(flags & FAULT_FLAG_USER))
goto lock_mmap;
vma = lock_vma_under_rcu(mm, address);
if (!vma)
goto lock_mmap;
if (!(vma->vm_flags & access)) {
vma_end_read(vma);
goto lock_mmap;
}
fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
vma_end_read(vma);
if (!(fault & VM_FAULT_RETRY)) {
count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
if (likely(!(fault & VM_FAULT_ERROR)))
fault = 0;
goto out;
}
count_vm_vma_lock_event(VMA_LOCK_RETRY);
/* Quick path to respond to signals */
if (fault_signal_pending(fault, regs)) {
fault = VM_FAULT_SIGNAL;
goto out;
}
lock_mmap:
mmap_read_lock(mm);
gmap = NULL;
if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) {
gmap = (struct gmap *) S390_lowcore.gmap;
current->thread.gmap_addr = address;
current->thread.gmap_write_flag = !!(flags & FAULT_FLAG_WRITE);
current->thread.gmap_int_code = regs->int_code & 0xffff;
address = __gmap_translate(gmap, address);
if (address == -EFAULT) {
fault = VM_FAULT_BADMAP;
goto out_up;
}
if (gmap->pfault_enabled)
flags |= FAULT_FLAG_RETRY_NOWAIT;
}
retry:
fault = VM_FAULT_BADMAP;
vma = find_vma(mm, address);
if (!vma)
goto out_up;
if (unlikely(vma->vm_start > address)) {
if (!(vma->vm_flags & VM_GROWSDOWN))
goto out_up;
vma = expand_stack(mm, address);
if (!vma)
goto out;
}
/*
* Ok, we have a good vm_area for this memory access, so
* we can handle it..
*/
fault = VM_FAULT_BADACCESS;
if (unlikely(!(vma->vm_flags & access)))
goto out_up;
/*
* If for any reason at all we couldn't handle the fault,
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
fault = handle_mm_fault(vma, address, flags, regs);
if (fault_signal_pending(fault, regs)) {
fault = VM_FAULT_SIGNAL;
if (flags & FAULT_FLAG_RETRY_NOWAIT)
goto out_up;
goto out;
}
/* The fault is fully completed (including releasing mmap lock) */
if (fault & VM_FAULT_COMPLETED) {
if (gmap) {
mmap_read_lock(mm);
goto out_gmap;
}
fault = 0;
goto out;
}
if (unlikely(fault & VM_FAULT_ERROR))
goto out_up;
if (fault & VM_FAULT_RETRY) {
if (IS_ENABLED(CONFIG_PGSTE) && gmap &&
(flags & FAULT_FLAG_RETRY_NOWAIT)) {
/*
* FAULT_FLAG_RETRY_NOWAIT has been set, mmap_lock has
* not been released
*/
current->thread.gmap_pfault = 1;
fault = VM_FAULT_PFAULT;
goto out_up;
}
flags &= ~FAULT_FLAG_RETRY_NOWAIT;
flags |= FAULT_FLAG_TRIED;
mmap_read_lock(mm);
goto retry;
}
out_gmap:
if (IS_ENABLED(CONFIG_PGSTE) && gmap) {
address = __gmap_link(gmap, current->thread.gmap_addr,
address);
if (address == -EFAULT) {
fault = VM_FAULT_BADMAP;
goto out_up;
}
if (address == -ENOMEM) {
fault = VM_FAULT_OOM;
goto out_up;
}
}
fault = 0;
out_up:
mmap_read_unlock(mm);
out:
return fault;
}
void do_protection_exception(struct pt_regs *regs)
{
unsigned long trans_exc_code;
int access;
vm_fault_t fault;
trans_exc_code = regs->int_parm_long;
/*
* Protection exceptions are suppressing, decrement psw address.
* The exception to this rule are aborted transactions, for these
* the PSW already points to the correct location.
*/
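	/* regs->int_code >> 16 is twice the ILC, i.e. the instruction length in bytes. */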
if (!(regs->int_code & 0x200))
regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
/*
* Check for low-address protection. This needs to be treated
* as a special case because the translation exception code
* field is not guaranteed to contain valid data in this case.
*/
if (unlikely(!(trans_exc_code & 4))) {
do_low_address(regs);
return;
}
if (unlikely(MACHINE_HAS_NX && (trans_exc_code & 0x80))) {
regs->int_parm_long = (trans_exc_code & ~PAGE_MASK) |
(regs->psw.addr & PAGE_MASK);
access = VM_EXEC;
fault = VM_FAULT_BADACCESS;
} else {
access = VM_WRITE;
fault = do_exception(regs, access);
}
if (unlikely(fault))
do_fault_error(regs, fault);
}
NOKPROBE_SYMBOL(do_protection_exception);
void do_dat_exception(struct pt_regs *regs)
{
int access;
vm_fault_t fault;
access = VM_ACCESS_FLAGS;
fault = do_exception(regs, access);
if (unlikely(fault))
do_fault_error(regs, fault);
}
NOKPROBE_SYMBOL(do_dat_exception);
#if IS_ENABLED(CONFIG_PGSTE)
void do_secure_storage_access(struct pt_regs *regs)
{
unsigned long addr = regs->int_parm_long & __FAIL_ADDR_MASK;
struct vm_area_struct *vma;
struct mm_struct *mm;
struct page *page;
struct gmap *gmap;
int rc;
/*
	 * Bit 61 tells us if the address is valid; if it is not, we have
	 * a major problem and should stop the kernel or send a SIGSEGV to
	 * the process. Unfortunately bit 61 is not reliable without the
	 * misc UV feature, so we need to check for that as well.
*/
if (uv_has_feature(BIT_UV_FEAT_MISC) &&
	    !test_bit_inv(61, &regs->int_parm_long)) {
/*
* When this happens, userspace did something that it
* was not supposed to do, e.g. branching into secure
* memory. Trigger a segmentation fault.
*/
if (user_mode(regs)) {
send_sig(SIGSEGV, current, 0);
return;
}
/*
* The kernel should never run into this case and we
* have no way out of this situation.
*/
panic("Unexpected PGM 0x3d with TEID bit 61=0");
}
switch (get_fault_type(regs)) {
case GMAP_FAULT:
mm = current->mm;
gmap = (struct gmap *)S390_lowcore.gmap;
mmap_read_lock(mm);
addr = __gmap_translate(gmap, addr);
mmap_read_unlock(mm);
if (IS_ERR_VALUE(addr)) {
do_fault_error(regs, VM_FAULT_BADMAP);
break;
}
fallthrough;
case USER_FAULT:
mm = current->mm;
mmap_read_lock(mm);
vma = find_vma(mm, addr);
if (!vma) {
mmap_read_unlock(mm);
do_fault_error(regs, VM_FAULT_BADMAP);
break;
}
page = follow_page(vma, addr, FOLL_WRITE | FOLL_GET);
if (IS_ERR_OR_NULL(page)) {
mmap_read_unlock(mm);
break;
}
if (arch_make_page_accessible(page))
send_sig(SIGSEGV, current, 0);
put_page(page);
mmap_read_unlock(mm);
break;
case KERNEL_FAULT:
page = phys_to_page(addr);
if (unlikely(!try_get_page(page)))
break;
rc = arch_make_page_accessible(page);
put_page(page);
if (rc)
BUG();
break;
default:
do_fault_error(regs, VM_FAULT_BADMAP);
WARN_ON_ONCE(1);
}
}
NOKPROBE_SYMBOL(do_secure_storage_access);
void do_non_secure_storage_access(struct pt_regs *regs)
{
unsigned long gaddr = regs->int_parm_long & __FAIL_ADDR_MASK;
struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;
if (get_fault_type(regs) != GMAP_FAULT) {
do_fault_error(regs, VM_FAULT_BADMAP);
WARN_ON_ONCE(1);
return;
}
if (gmap_convert_to_secure(gmap, gaddr) == -EINVAL)
send_sig(SIGSEGV, current, 0);
}
NOKPROBE_SYMBOL(do_non_secure_storage_access);
void do_secure_storage_violation(struct pt_regs *regs)
{
unsigned long gaddr = regs->int_parm_long & __FAIL_ADDR_MASK;
struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;
/*
* If the VM has been rebooted, its address space might still contain
* secure pages from the previous boot.
* Clear the page so it can be reused.
*/
if (!gmap_destroy_page(gmap, gaddr))
return;
/*
* Either KVM messed up the secure guest mapping or the same
* page is mapped into multiple secure guests.
*
* This exception is only triggered when a guest 2 is running
* and can therefore never occur in kernel context.
*/
printk_ratelimited(KERN_WARNING
"Secure storage violation in task: %s, pid %d\n",
current->comm, current->pid);
send_sig(SIGSEGV, current, 0);
}
#endif /* CONFIG_PGSTE */
| linux-master | arch/s390/mm/fault.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 1999, 2023
*/
#include <linux/cpuhotplug.h>
#include <linux/sched/task.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <asm/asm-extable.h>
#include <asm/pfault.h>
#include <asm/diag.h>
#define __SUBCODE_MASK 0x0600
#define __PF_RES_FIELD 0x8000000000000000UL
/*
* 'pfault' pseudo page faults routines.
*/
static int pfault_disable;
static int __init nopfault(char *str)
{
pfault_disable = 1;
return 1;
}
early_param("nopfault", nopfault);
struct pfault_refbk {
u16 refdiagc;
u16 reffcode;
u16 refdwlen;
u16 refversn;
u64 refgaddr;
u64 refselmk;
u64 refcmpmk;
u64 reserved;
};
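/*
 * The token address (refgaddr) points to the lowcore LPP field, so the
 * host returns the LPP value, which holds the pid of the current task,
 * with every pfault interrupt; see the LPP_PID_MASK use below.
 */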
static struct pfault_refbk pfault_init_refbk = {
.refdiagc = 0x258,
.reffcode = 0,
.refdwlen = 5,
.refversn = 2,
.refgaddr = __LC_LPP,
.refselmk = 1UL << 48,
.refcmpmk = 1UL << 48,
.reserved = __PF_RES_FIELD
};
int __pfault_init(void)
{
int rc = -EOPNOTSUPP;
if (pfault_disable)
return rc;
diag_stat_inc(DIAG_STAT_X258);
asm volatile(
" diag %[refbk],%[rc],0x258\n"
"0: nopr %%r7\n"
EX_TABLE(0b, 0b)
: [rc] "+d" (rc)
: [refbk] "a" (&pfault_init_refbk), "m" (pfault_init_refbk)
: "cc");
return rc;
}
static struct pfault_refbk pfault_fini_refbk = {
.refdiagc = 0x258,
.reffcode = 1,
.refdwlen = 5,
.refversn = 2,
};
void __pfault_fini(void)
{
if (pfault_disable)
return;
diag_stat_inc(DIAG_STAT_X258);
asm volatile(
" diag %[refbk],0,0x258\n"
"0: nopr %%r7\n"
EX_TABLE(0b, 0b)
:
: [refbk] "a" (&pfault_fini_refbk), "m" (pfault_fini_refbk)
: "cc");
}
static DEFINE_SPINLOCK(pfault_lock);
static LIST_HEAD(pfault_list);
#define PF_COMPLETE 0x0080
/*
 * The mechanism of our pfault code: if Linux is running as a guest, runs a
 * user space process and the user space process accesses a page that the
 * host has paged out, we get a pfault interrupt.
 *
 * This allows us, within the guest, to schedule a different process. Without
 * this mechanism the host would have to suspend the whole virtual cpu until
 * the page has been paged in.
 *
 * So when we get such an interrupt we set the state of the current task
 * to uninterruptible and also set the need_resched flag. Both happen within
 * interrupt context(!). If we later on want to return to user space we
 * recognize the need_resched flag and then call schedule(). It's not very
 * obvious how this works...
 *
 * Of course we have a lot of additional fun with the completion interrupt (->
 * host signals that a page of a process has been paged in and the process can
 * continue to run). This interrupt can arrive on any cpu and, since we have
 * virtual cpus, may actually arrive before the interrupt that signals that a
 * page is missing.
*/
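/*
 * thread.pfault_wait encodes the per task pfault state:
 *  0 -> no pseudo page fault pending
 *  1 -> initial interrupt seen, task is queued on pfault_list and sleeping
 * -1 -> completion interrupt arrived before the initial interrupt
 */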
static void pfault_interrupt(struct ext_code ext_code,
unsigned int param32, unsigned long param64)
{
struct task_struct *tsk;
__u16 subcode;
pid_t pid;
/*
* Get the external interruption subcode & pfault initial/completion
* signal bit. VM stores this in the 'cpu address' field associated
* with the external interrupt.
*/
subcode = ext_code.subcode;
if ((subcode & 0xff00) != __SUBCODE_MASK)
return;
inc_irq_stat(IRQEXT_PFL);
/* Get the token (= pid of the affected task). */
pid = param64 & LPP_PID_MASK;
rcu_read_lock();
tsk = find_task_by_pid_ns(pid, &init_pid_ns);
if (tsk)
get_task_struct(tsk);
rcu_read_unlock();
if (!tsk)
return;
spin_lock(&pfault_lock);
if (subcode & PF_COMPLETE) {
/* signal bit is set -> a page has been swapped in by VM */
if (tsk->thread.pfault_wait == 1) {
/*
* Initial interrupt was faster than the completion
* interrupt. pfault_wait is valid. Set pfault_wait
* back to zero and wake up the process. This can
* safely be done because the task is still sleeping
* and can't produce new pfaults.
*/
tsk->thread.pfault_wait = 0;
list_del(&tsk->thread.list);
wake_up_process(tsk);
put_task_struct(tsk);
} else {
/*
* Completion interrupt was faster than initial
* interrupt. Set pfault_wait to -1 so the initial
* interrupt doesn't put the task to sleep.
* If the task is not running, ignore the completion
* interrupt since it must be a leftover of a PFAULT
* CANCEL operation which didn't remove all pending
* completion interrupts.
*/
if (task_is_running(tsk))
tsk->thread.pfault_wait = -1;
}
} else {
/* signal bit not set -> a real page is missing. */
if (WARN_ON_ONCE(tsk != current))
goto out;
if (tsk->thread.pfault_wait == 1) {
/* Already on the list with a reference: put to sleep */
goto block;
} else if (tsk->thread.pfault_wait == -1) {
/*
* Completion interrupt was faster than the initial
* interrupt (pfault_wait == -1). Set pfault_wait
* back to zero and exit.
*/
tsk->thread.pfault_wait = 0;
} else {
/*
* Initial interrupt arrived before completion
* interrupt. Let the task sleep.
* An extra task reference is needed since a different
* cpu may set the task state to TASK_RUNNING again
* before the scheduler is reached.
*/
get_task_struct(tsk);
tsk->thread.pfault_wait = 1;
list_add(&tsk->thread.list, &pfault_list);
block:
/*
* Since this must be a userspace fault, there
* is no kernel task state to trample. Rely on the
* return to userspace schedule() to block.
*/
__set_current_state(TASK_UNINTERRUPTIBLE);
set_tsk_need_resched(tsk);
set_preempt_need_resched();
}
}
out:
spin_unlock(&pfault_lock);
put_task_struct(tsk);
}
static int pfault_cpu_dead(unsigned int cpu)
{
struct thread_struct *thread, *next;
struct task_struct *tsk;
spin_lock_irq(&pfault_lock);
list_for_each_entry_safe(thread, next, &pfault_list, list) {
thread->pfault_wait = 0;
list_del(&thread->list);
tsk = container_of(thread, struct task_struct, thread);
wake_up_process(tsk);
put_task_struct(tsk);
}
spin_unlock_irq(&pfault_lock);
return 0;
}
static int __init pfault_irq_init(void)
{
int rc;
rc = register_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
if (rc)
goto out_extint;
rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
if (rc)
goto out_pfault;
irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
cpuhp_setup_state_nocalls(CPUHP_S390_PFAULT_DEAD, "s390/pfault:dead",
NULL, pfault_cpu_dead);
return 0;
out_pfault:
unregister_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
out_extint:
pfault_disable = 1;
return rc;
}
early_initcall(pfault_irq_init);
| linux-master | arch/s390/mm/pfault.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Hypervisor filesystem for Linux on s390
*
* Diag 0C implementation
*
* Copyright IBM Corp. 2014
*/
#include <linux/slab.h>
#include <linux/cpu.h>
#include <asm/diag.h>
#include <asm/hypfs.h>
#include "hypfs.h"
#define DBFS_D0C_HDR_VERSION 0
/*
* Get hypfs_diag0c_entry from CPU vector and store diag0c data
*/
static void diag0c_fn(void *data)
{
diag_stat_inc(DIAG_STAT_X00C);
diag_amode31_ops.diag0c(((void **)data)[smp_processor_id()]);
}
/*
* Allocate buffer and store diag 0c data
*/
static void *diag0c_store(unsigned int *count)
{
struct hypfs_diag0c_data *diag0c_data;
unsigned int cpu_count, cpu, i;
void **cpu_vec;
cpus_read_lock();
cpu_count = num_online_cpus();
cpu_vec = kmalloc_array(num_possible_cpus(), sizeof(*cpu_vec),
GFP_KERNEL);
if (!cpu_vec)
goto fail_unlock_cpus;
/* Note: Diag 0c needs 8 byte alignment and real storage */
diag0c_data = kzalloc(struct_size(diag0c_data, entry, cpu_count),
GFP_KERNEL | GFP_DMA);
if (!diag0c_data)
goto fail_kfree_cpu_vec;
i = 0;
/* Fill CPU vector for each online CPU */
for_each_online_cpu(cpu) {
diag0c_data->entry[i].cpu = cpu;
cpu_vec[cpu] = &diag0c_data->entry[i++];
}
	/* Collect data on all CPUs */
on_each_cpu(diag0c_fn, cpu_vec, 1);
*count = cpu_count;
kfree(cpu_vec);
cpus_read_unlock();
return diag0c_data;
fail_kfree_cpu_vec:
kfree(cpu_vec);
fail_unlock_cpus:
cpus_read_unlock();
return ERR_PTR(-ENOMEM);
}
/*
* Hypfs DBFS callback: Free diag 0c data
*/
static void dbfs_diag0c_free(const void *data)
{
kfree(data);
}
/*
* Hypfs DBFS callback: Create diag 0c data
*/
static int dbfs_diag0c_create(void **data, void **data_free_ptr, size_t *size)
{
struct hypfs_diag0c_data *diag0c_data;
unsigned int count;
diag0c_data = diag0c_store(&count);
if (IS_ERR(diag0c_data))
return PTR_ERR(diag0c_data);
memset(&diag0c_data->hdr, 0, sizeof(diag0c_data->hdr));
store_tod_clock_ext((union tod_clock *)diag0c_data->hdr.tod_ext);
diag0c_data->hdr.len = count * sizeof(struct hypfs_diag0c_entry);
diag0c_data->hdr.version = DBFS_D0C_HDR_VERSION;
diag0c_data->hdr.count = count;
*data = diag0c_data;
*data_free_ptr = diag0c_data;
*size = diag0c_data->hdr.len + sizeof(struct hypfs_diag0c_hdr);
return 0;
}
/*
* Hypfs DBFS file structure
*/
static struct hypfs_dbfs_file dbfs_file_0c = {
.name = "diag_0c",
.data_create = dbfs_diag0c_create,
.data_free = dbfs_diag0c_free,
};
/*
* Initialize diag 0c interface for z/VM
*/
int __init hypfs_diag0c_init(void)
{
if (!MACHINE_IS_VM)
return 0;
hypfs_dbfs_create_file(&dbfs_file_0c);
return 0;
}
/*
* Shutdown diag 0c interface for z/VM
*/
void hypfs_diag0c_exit(void)
{
if (!MACHINE_IS_VM)
return;
hypfs_dbfs_remove_file(&dbfs_file_0c);
}
| linux-master | arch/s390/hypfs/hypfs_diag0c.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Hypervisor filesystem for Linux on s390. z/VM implementation.
*
* Copyright IBM Corp. 2006
* Author(s): Michael Holzheu <[email protected]>
*/
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <asm/extable.h>
#include <asm/diag.h>
#include <asm/ebcdic.h>
#include <asm/timex.h>
#include "hypfs_vm.h"
#include "hypfs.h"
#define ATTRIBUTE(dir, name, member) \
do { \
void *rc; \
rc = hypfs_create_u64(dir, name, member); \
if (IS_ERR(rc)) \
return PTR_ERR(rc); \
} while (0)
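/*
 * Note: ATTRIBUTE() returns from the enclosing function on error, so it
 * may only be used in functions that return int.
 */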
static int hypfs_vm_create_guest(struct dentry *systems_dir,
struct diag2fc_data *data)
{
char guest_name[DIAG2FC_NAME_LEN + 1] = {};
struct dentry *guest_dir, *cpus_dir, *samples_dir, *mem_dir;
int dedicated_flag, capped_value;
capped_value = (data->flags & 0x00000006) >> 1;
dedicated_flag = (data->flags & 0x00000008) >> 3;
/* guest dir */
memcpy(guest_name, data->guest_name, DIAG2FC_NAME_LEN);
EBCASC(guest_name, DIAG2FC_NAME_LEN);
strim(guest_name);
guest_dir = hypfs_mkdir(systems_dir, guest_name);
if (IS_ERR(guest_dir))
return PTR_ERR(guest_dir);
ATTRIBUTE(guest_dir, "onlinetime_us", data->el_time);
/* logical cpu information */
cpus_dir = hypfs_mkdir(guest_dir, "cpus");
if (IS_ERR(cpus_dir))
return PTR_ERR(cpus_dir);
ATTRIBUTE(cpus_dir, "cputime_us", data->used_cpu);
ATTRIBUTE(cpus_dir, "capped", capped_value);
ATTRIBUTE(cpus_dir, "dedicated", dedicated_flag);
ATTRIBUTE(cpus_dir, "count", data->vcpus);
/*
* Note: The "weight_min" attribute got the wrong name.
* The value represents the number of non-stopped (operating)
* CPUS.
*/
ATTRIBUTE(cpus_dir, "weight_min", data->ocpus);
ATTRIBUTE(cpus_dir, "weight_max", data->cpu_max);
ATTRIBUTE(cpus_dir, "weight_cur", data->cpu_shares);
/* memory information */
mem_dir = hypfs_mkdir(guest_dir, "mem");
if (IS_ERR(mem_dir))
return PTR_ERR(mem_dir);
ATTRIBUTE(mem_dir, "min_KiB", data->mem_min_kb);
ATTRIBUTE(mem_dir, "max_KiB", data->mem_max_kb);
ATTRIBUTE(mem_dir, "used_KiB", data->mem_used_kb);
ATTRIBUTE(mem_dir, "share_KiB", data->mem_share_kb);
/* samples */
samples_dir = hypfs_mkdir(guest_dir, "samples");
if (IS_ERR(samples_dir))
return PTR_ERR(samples_dir);
ATTRIBUTE(samples_dir, "cpu_using", data->cpu_use_samp);
ATTRIBUTE(samples_dir, "cpu_delay", data->cpu_delay_samp);
ATTRIBUTE(samples_dir, "mem_delay", data->page_wait_samp);
ATTRIBUTE(samples_dir, "idle", data->idle_samp);
ATTRIBUTE(samples_dir, "other", data->other_samp);
ATTRIBUTE(samples_dir, "total", data->total_samp);
return 0;
}
int hypfs_vm_create_files(struct dentry *root)
{
struct dentry *dir, *file;
struct diag2fc_data *data;
unsigned int count = 0;
int rc, i;
data = diag2fc_store(diag2fc_guest_query, &count, 0);
if (IS_ERR(data))
return PTR_ERR(data);
/* Hypervisor Info */
dir = hypfs_mkdir(root, "hyp");
if (IS_ERR(dir)) {
rc = PTR_ERR(dir);
goto failed;
}
file = hypfs_create_str(dir, "type", "z/VM Hypervisor");
if (IS_ERR(file)) {
rc = PTR_ERR(file);
goto failed;
}
/* physical cpus */
dir = hypfs_mkdir(root, "cpus");
if (IS_ERR(dir)) {
rc = PTR_ERR(dir);
goto failed;
}
file = hypfs_create_u64(dir, "count", data->lcpus);
if (IS_ERR(file)) {
rc = PTR_ERR(file);
goto failed;
}
/* guests */
dir = hypfs_mkdir(root, "systems");
if (IS_ERR(dir)) {
rc = PTR_ERR(dir);
goto failed;
}
for (i = 0; i < count; i++) {
rc = hypfs_vm_create_guest(dir, &data[i]);
if (rc)
goto failed;
}
diag2fc_free(data);
return 0;
failed:
diag2fc_free(data);
return rc;
}
| linux-master | arch/s390/hypfs/hypfs_vm_fs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Hypervisor filesystem for Linux on s390. z/VM implementation.
*
* Copyright IBM Corp. 2006
* Author(s): Michael Holzheu <[email protected]>
*/
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <asm/extable.h>
#include <asm/diag.h>
#include <asm/ebcdic.h>
#include <asm/timex.h>
#include "hypfs_vm.h"
#include "hypfs.h"
#define DBFS_D2FC_HDR_VERSION 0
/* both query strings are blank padded to DIAG2FC_NAME_LEN (8) bytes */
static char local_guest[] = "        ";
static char all_guests[] = "*       ";
static char *all_groups = all_guests;
char *diag2fc_guest_query;
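/*
 * diag2fc() reports space requirements via the residual count: a positive
 * return value is the number of bytes missing from the supplied buffer,
 * zero means the buffer matched the data size exactly. Other diagnose
 * failures are returned unchanged.
 */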
static int diag2fc(int size, char *query, void *addr)
{
unsigned long residual_cnt;
unsigned long rc;
struct diag2fc_parm_list parm_list;
memcpy(parm_list.userid, query, DIAG2FC_NAME_LEN);
ASCEBC(parm_list.userid, DIAG2FC_NAME_LEN);
memcpy(parm_list.aci_grp, all_groups, DIAG2FC_NAME_LEN);
ASCEBC(parm_list.aci_grp, DIAG2FC_NAME_LEN);
parm_list.addr = (unsigned long)addr;
parm_list.size = size;
parm_list.fmt = 0x02;
rc = -1;
diag_stat_inc(DIAG_STAT_X2FC);
asm volatile(
" diag %0,%1,0x2fc\n"
"0: nopr %%r7\n"
EX_TABLE(0b,0b)
: "=d" (residual_cnt), "+d" (rc) : "0" (&parm_list) : "memory");
if ((rc != 0 ) && (rc != -2))
return rc;
else
return -residual_cnt;
}
/*
* Allocate buffer for "query" and store diag 2fc at "offset"
*/
void *diag2fc_store(char *query, unsigned int *count, int offset)
{
void *data;
int size;
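	/*
	 * Probe with size 0 to learn the required buffer size, then
	 * allocate and retry: the number of guests may have grown
	 * between the size query and the actual data retrieval.
	 */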
do {
size = diag2fc(0, query, NULL);
if (size < 0)
return ERR_PTR(-EACCES);
data = vmalloc(size + offset);
if (!data)
return ERR_PTR(-ENOMEM);
if (diag2fc(size, query, data + offset) == 0)
break;
vfree(data);
} while (1);
*count = (size / sizeof(struct diag2fc_data));
return data;
}
void diag2fc_free(const void *data)
{
vfree(data);
}
struct dbfs_d2fc_hdr {
u64 len; /* Length of d2fc buffer without header */
u16 version; /* Version of header */
union tod_clock tod_ext; /* TOD clock for d2fc */
u64 count; /* Number of VM guests in d2fc buffer */
char reserved[30];
} __attribute__ ((packed));
struct dbfs_d2fc {
struct dbfs_d2fc_hdr hdr; /* 64 byte header */
char buf[]; /* d2fc buffer */
} __attribute__ ((packed));
static int dbfs_diag2fc_create(void **data, void **data_free_ptr, size_t *size)
{
struct dbfs_d2fc *d2fc;
unsigned int count;
d2fc = diag2fc_store(diag2fc_guest_query, &count, sizeof(d2fc->hdr));
if (IS_ERR(d2fc))
return PTR_ERR(d2fc);
store_tod_clock_ext(&d2fc->hdr.tod_ext);
d2fc->hdr.len = count * sizeof(struct diag2fc_data);
d2fc->hdr.version = DBFS_D2FC_HDR_VERSION;
d2fc->hdr.count = count;
memset(&d2fc->hdr.reserved, 0, sizeof(d2fc->hdr.reserved));
*data = d2fc;
*data_free_ptr = d2fc;
*size = d2fc->hdr.len + sizeof(struct dbfs_d2fc_hdr);
return 0;
}
static struct hypfs_dbfs_file dbfs_file_2fc = {
.name = "diag_2fc",
.data_create = dbfs_diag2fc_create,
.data_free = diag2fc_free,
};
int hypfs_vm_init(void)
{
if (!MACHINE_IS_VM)
return 0;
if (diag2fc(0, all_guests, NULL) > 0)
diag2fc_guest_query = all_guests;
else if (diag2fc(0, local_guest, NULL) > 0)
diag2fc_guest_query = local_guest;
else
return -EACCES;
hypfs_dbfs_create_file(&dbfs_file_2fc);
return 0;
}
void hypfs_vm_exit(void)
{
if (!MACHINE_IS_VM)
return;
hypfs_dbfs_remove_file(&dbfs_file_2fc);
}
| linux-master | arch/s390/hypfs/hypfs_vm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Hypervisor filesystem for Linux on s390. Diag 204 and 224
* implementation.
*
* Copyright IBM Corp. 2006, 2008
* Author(s): Michael Holzheu <[email protected]>
*/
#define KMSG_COMPONENT "hypfs"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <asm/diag.h>
#include <asm/ebcdic.h>
#include "hypfs_diag.h"
#include "hypfs.h"
#define DBFS_D204_HDR_VERSION 0
static enum diag204_sc diag204_store_sc; /* used subcode for store */
static enum diag204_format diag204_info_type; /* used diag 204 data format */
static void *diag204_buf; /* 4K aligned buffer for diag204 data */
static int diag204_buf_pages; /* number of pages for diag204 data */
static struct dentry *dbfs_d204_file;
enum diag204_format diag204_get_info_type(void)
{
return diag204_info_type;
}
static void diag204_set_info_type(enum diag204_format type)
{
diag204_info_type = type;
}
/* Diagnose 204 functions */
/*
* For the old diag subcode 4 with simple data format we have to use real
* memory. If we use subcode 6 or 7 with extended data format, we can (and
* should) use vmalloc, since we need a lot of memory in that case. Currently
* up to 93 pages!
*/
static void diag204_free_buffer(void)
{
vfree(diag204_buf);
diag204_buf = NULL;
}
void *diag204_get_buffer(enum diag204_format fmt, int *pages)
{
if (diag204_buf) {
*pages = diag204_buf_pages;
return diag204_buf;
}
if (fmt == DIAG204_INFO_SIMPLE) {
*pages = 1;
} else {/* DIAG204_INFO_EXT */
*pages = diag204((unsigned long)DIAG204_SUBC_RSI |
(unsigned long)DIAG204_INFO_EXT, 0, NULL);
if (*pages <= 0)
return ERR_PTR(-EOPNOTSUPP);
}
diag204_buf = __vmalloc_node(array_size(*pages, PAGE_SIZE),
PAGE_SIZE, GFP_KERNEL, NUMA_NO_NODE,
__builtin_return_address(0));
if (!diag204_buf)
return ERR_PTR(-ENOMEM);
diag204_buf_pages = *pages;
return diag204_buf;
}
/*
 * diag204_probe() has to find out which type of diagnose 204 implementation
 * we have on our machine. Currently there are three possible scenarios:
 * - subcode 4 + simple data format (only one page)
 * - subcode 4-6 + extended data format
 * - subcode 4-7 + extended data format
 *
 * Subcode 5 is used to retrieve the size of the data, provided by subcodes
 * 6 and 7. Subcode 7 basically has the same function as subcode 6. In addition
 * to subcode 6 it also provides information about secondary cpus.
* In order to get as much information as possible, we first try
* subcode 7, then 6 and if both fail, we use subcode 4.
*/
static int diag204_probe(void)
{
void *buf;
int pages, rc;
buf = diag204_get_buffer(DIAG204_INFO_EXT, &pages);
if (!IS_ERR(buf)) {
if (diag204((unsigned long)DIAG204_SUBC_STIB7 |
(unsigned long)DIAG204_INFO_EXT, pages, buf) >= 0) {
diag204_store_sc = DIAG204_SUBC_STIB7;
diag204_set_info_type(DIAG204_INFO_EXT);
goto out;
}
if (diag204((unsigned long)DIAG204_SUBC_STIB6 |
(unsigned long)DIAG204_INFO_EXT, pages, buf) >= 0) {
diag204_store_sc = DIAG204_SUBC_STIB6;
diag204_set_info_type(DIAG204_INFO_EXT);
goto out;
}
diag204_free_buffer();
}
/* subcodes 6 and 7 failed, now try subcode 4 */
buf = diag204_get_buffer(DIAG204_INFO_SIMPLE, &pages);
if (IS_ERR(buf)) {
rc = PTR_ERR(buf);
goto fail_alloc;
}
if (diag204((unsigned long)DIAG204_SUBC_STIB4 |
(unsigned long)DIAG204_INFO_SIMPLE, pages, buf) >= 0) {
diag204_store_sc = DIAG204_SUBC_STIB4;
diag204_set_info_type(DIAG204_INFO_SIMPLE);
goto out;
} else {
rc = -EOPNOTSUPP;
goto fail_store;
}
out:
rc = 0;
fail_store:
diag204_free_buffer();
fail_alloc:
return rc;
}
int diag204_store(void *buf, int pages)
{
int rc;
rc = diag204((unsigned long)diag204_store_sc |
(unsigned long)diag204_get_info_type(), pages, buf);
return rc < 0 ? -EOPNOTSUPP : 0;
}
struct dbfs_d204_hdr {
u64 len; /* Length of d204 buffer without header */
u16 version; /* Version of header */
u8 sc; /* Used subcode */
char reserved[53];
} __attribute__ ((packed));
struct dbfs_d204 {
struct dbfs_d204_hdr hdr; /* 64 byte header */
char buf[]; /* d204 buffer */
} __attribute__ ((packed));
static int dbfs_d204_create(void **data, void **data_free_ptr, size_t *size)
{
struct dbfs_d204 *d204;
int rc, buf_size;
void *base;
buf_size = PAGE_SIZE * (diag204_buf_pages + 1) + sizeof(d204->hdr);
base = vzalloc(buf_size);
if (!base)
return -ENOMEM;
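	/*
	 * Place the 64 byte header directly in front of a page aligned
	 * data area; diag 204 requires a page aligned data buffer.
	 */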
d204 = PTR_ALIGN(base + sizeof(d204->hdr), PAGE_SIZE) - sizeof(d204->hdr);
rc = diag204_store(d204->buf, diag204_buf_pages);
if (rc) {
vfree(base);
return rc;
}
d204->hdr.version = DBFS_D204_HDR_VERSION;
d204->hdr.len = PAGE_SIZE * diag204_buf_pages;
d204->hdr.sc = diag204_store_sc;
*data = d204;
*data_free_ptr = base;
*size = d204->hdr.len + sizeof(struct dbfs_d204_hdr);
return 0;
}
static struct hypfs_dbfs_file dbfs_file_d204 = {
.name = "diag_204",
.data_create = dbfs_d204_create,
.data_free = vfree,
};
__init int hypfs_diag_init(void)
{
int rc;
if (diag204_probe()) {
pr_info("The hardware system does not support hypfs\n");
return -ENODATA;
}
if (diag204_get_info_type() == DIAG204_INFO_EXT)
hypfs_dbfs_create_file(&dbfs_file_d204);
rc = hypfs_diag_fs_init();
if (rc) {
pr_err("The hardware system does not provide all functions required by hypfs\n");
debugfs_remove(dbfs_d204_file);
}
return rc;
}
void hypfs_diag_exit(void)
{
debugfs_remove(dbfs_d204_file);
hypfs_diag_fs_exit();
diag204_free_buffer();
hypfs_dbfs_remove_file(&dbfs_file_d204);
}
| linux-master | arch/s390/hypfs/hypfs_diag.c |
// SPDX-License-Identifier: GPL-1.0+
/*
* Hypervisor filesystem for Linux on s390.
*
* Copyright IBM Corp. 2006, 2008
* Author(s): Michael Holzheu <[email protected]>
*/
#define KMSG_COMPONENT "hypfs"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/namei.h>
#include <linux/vfs.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/time.h>
#include <linux/sysfs.h>
#include <linux/init.h>
#include <linux/kobject.h>
#include <linux/seq_file.h>
#include <linux/uio.h>
#include <asm/ebcdic.h>
#include "hypfs.h"
#define HYPFS_MAGIC 0x687970 /* ASCII 'hyp' */
#define TMP_SIZE 64 /* size of temporary buffers */
static struct dentry *hypfs_create_update_file(struct dentry *dir);
struct hypfs_sb_info {
kuid_t uid; /* uid used for files and dirs */
kgid_t gid; /* gid used for files and dirs */
struct dentry *update_file; /* file to trigger update */
time64_t last_update; /* last update, CLOCK_MONOTONIC time */
struct mutex lock; /* lock to protect update process */
};
static const struct file_operations hypfs_file_ops;
static struct file_system_type hypfs_type;
static const struct super_operations hypfs_s_ops;
/* start of list of all dentries, which have to be deleted on update */
static struct dentry *hypfs_last_dentry;
static void hypfs_update_update(struct super_block *sb)
{
struct hypfs_sb_info *sb_info = sb->s_fs_info;
struct inode *inode = d_inode(sb_info->update_file);
sb_info->last_update = ktime_get_seconds();
inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
}
/* directory tree removal functions */
static void hypfs_add_dentry(struct dentry *dentry)
{
dentry->d_fsdata = hypfs_last_dentry;
hypfs_last_dentry = dentry;
}
static void hypfs_remove(struct dentry *dentry)
{
struct dentry *parent;
parent = dentry->d_parent;
inode_lock(d_inode(parent));
if (simple_positive(dentry)) {
if (d_is_dir(dentry))
simple_rmdir(d_inode(parent), dentry);
else
simple_unlink(d_inode(parent), dentry);
}
d_drop(dentry);
dput(dentry);
inode_unlock(d_inode(parent));
}
static void hypfs_delete_tree(struct dentry *root)
{
while (hypfs_last_dentry) {
struct dentry *next_dentry;
next_dentry = hypfs_last_dentry->d_fsdata;
hypfs_remove(hypfs_last_dentry);
hypfs_last_dentry = next_dentry;
}
}
static struct inode *hypfs_make_inode(struct super_block *sb, umode_t mode)
{
struct inode *ret = new_inode(sb);
if (ret) {
struct hypfs_sb_info *hypfs_info = sb->s_fs_info;
ret->i_ino = get_next_ino();
ret->i_mode = mode;
ret->i_uid = hypfs_info->uid;
ret->i_gid = hypfs_info->gid;
ret->i_atime = ret->i_mtime = inode_set_ctime_current(ret);
if (S_ISDIR(mode))
set_nlink(ret, 2);
}
return ret;
}
static void hypfs_evict_inode(struct inode *inode)
{
clear_inode(inode);
kfree(inode->i_private);
}
static int hypfs_open(struct inode *inode, struct file *filp)
{
char *data = file_inode(filp)->i_private;
struct hypfs_sb_info *fs_info;
if (filp->f_mode & FMODE_WRITE) {
if (!(inode->i_mode & S_IWUGO))
return -EACCES;
}
if (filp->f_mode & FMODE_READ) {
if (!(inode->i_mode & S_IRUGO))
return -EACCES;
}
fs_info = inode->i_sb->s_fs_info;
	if (data) {
mutex_lock(&fs_info->lock);
filp->private_data = kstrdup(data, GFP_KERNEL);
if (!filp->private_data) {
mutex_unlock(&fs_info->lock);
return -ENOMEM;
}
mutex_unlock(&fs_info->lock);
}
return nonseekable_open(inode, filp);
}
static ssize_t hypfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
struct file *file = iocb->ki_filp;
char *data = file->private_data;
size_t available = strlen(data);
loff_t pos = iocb->ki_pos;
size_t count;
if (pos < 0)
return -EINVAL;
if (pos >= available || !iov_iter_count(to))
return 0;
count = copy_to_iter(data + pos, available - pos, to);
if (!count)
return -EFAULT;
iocb->ki_pos = pos + count;
file_accessed(file);
return count;
}
static ssize_t hypfs_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
int rc;
struct super_block *sb = file_inode(iocb->ki_filp)->i_sb;
struct hypfs_sb_info *fs_info = sb->s_fs_info;
size_t count = iov_iter_count(from);
/*
* Currently we only allow one update per second for two reasons:
* 1. diag 204 is VERY expensive
* 2. If several processes do updates in parallel and then read the
	 *    hypfs data, the likelihood of collisions is reduced if we restrict
	 *    the minimum update interval. A collision occurs if, during the
	 *    data gathering of one process, another process triggers an update.
* If the first process wants to ensure consistent data, it has
* to restart data collection in this case.
*/
mutex_lock(&fs_info->lock);
if (fs_info->last_update == ktime_get_seconds()) {
rc = -EBUSY;
goto out;
}
hypfs_delete_tree(sb->s_root);
if (MACHINE_IS_VM)
rc = hypfs_vm_create_files(sb->s_root);
else
rc = hypfs_diag_create_files(sb->s_root);
if (rc) {
pr_err("Updating the hypfs tree failed\n");
hypfs_delete_tree(sb->s_root);
goto out;
}
hypfs_update_update(sb);
rc = count;
iov_iter_advance(from, count);
out:
mutex_unlock(&fs_info->lock);
return rc;
}
static int hypfs_release(struct inode *inode, struct file *filp)
{
kfree(filp->private_data);
return 0;
}
enum { Opt_uid, Opt_gid, };
static const struct fs_parameter_spec hypfs_fs_parameters[] = {
fsparam_u32("gid", Opt_gid),
fsparam_u32("uid", Opt_uid),
{}
};
static int hypfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
struct hypfs_sb_info *hypfs_info = fc->s_fs_info;
struct fs_parse_result result;
kuid_t uid;
kgid_t gid;
int opt;
opt = fs_parse(fc, hypfs_fs_parameters, param, &result);
if (opt < 0)
return opt;
switch (opt) {
case Opt_uid:
uid = make_kuid(current_user_ns(), result.uint_32);
if (!uid_valid(uid))
return invalf(fc, "Unknown uid");
hypfs_info->uid = uid;
break;
case Opt_gid:
gid = make_kgid(current_user_ns(), result.uint_32);
if (!gid_valid(gid))
return invalf(fc, "Unknown gid");
hypfs_info->gid = gid;
break;
}
return 0;
}
static int hypfs_show_options(struct seq_file *s, struct dentry *root)
{
struct hypfs_sb_info *hypfs_info = root->d_sb->s_fs_info;
seq_printf(s, ",uid=%u", from_kuid_munged(&init_user_ns, hypfs_info->uid));
seq_printf(s, ",gid=%u", from_kgid_munged(&init_user_ns, hypfs_info->gid));
return 0;
}
static int hypfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
struct hypfs_sb_info *sbi = sb->s_fs_info;
struct inode *root_inode;
struct dentry *root_dentry, *update_file;
int rc;
sb->s_blocksize = PAGE_SIZE;
sb->s_blocksize_bits = PAGE_SHIFT;
sb->s_magic = HYPFS_MAGIC;
sb->s_op = &hypfs_s_ops;
root_inode = hypfs_make_inode(sb, S_IFDIR | 0755);
if (!root_inode)
return -ENOMEM;
root_inode->i_op = &simple_dir_inode_operations;
root_inode->i_fop = &simple_dir_operations;
sb->s_root = root_dentry = d_make_root(root_inode);
if (!root_dentry)
return -ENOMEM;
if (MACHINE_IS_VM)
rc = hypfs_vm_create_files(root_dentry);
else
rc = hypfs_diag_create_files(root_dentry);
if (rc)
return rc;
update_file = hypfs_create_update_file(root_dentry);
if (IS_ERR(update_file))
return PTR_ERR(update_file);
sbi->update_file = update_file;
hypfs_update_update(sb);
pr_info("Hypervisor filesystem mounted\n");
return 0;
}
static int hypfs_get_tree(struct fs_context *fc)
{
return get_tree_single(fc, hypfs_fill_super);
}
static void hypfs_free_fc(struct fs_context *fc)
{
kfree(fc->s_fs_info);
}
static const struct fs_context_operations hypfs_context_ops = {
.free = hypfs_free_fc,
.parse_param = hypfs_parse_param,
.get_tree = hypfs_get_tree,
};
static int hypfs_init_fs_context(struct fs_context *fc)
{
struct hypfs_sb_info *sbi;
sbi = kzalloc(sizeof(struct hypfs_sb_info), GFP_KERNEL);
if (!sbi)
return -ENOMEM;
mutex_init(&sbi->lock);
sbi->uid = current_uid();
sbi->gid = current_gid();
fc->s_fs_info = sbi;
fc->ops = &hypfs_context_ops;
return 0;
}
static void hypfs_kill_super(struct super_block *sb)
{
struct hypfs_sb_info *sb_info = sb->s_fs_info;
if (sb->s_root)
hypfs_delete_tree(sb->s_root);
if (sb_info && sb_info->update_file)
hypfs_remove(sb_info->update_file);
kfree(sb->s_fs_info);
sb->s_fs_info = NULL;
kill_litter_super(sb);
}
static struct dentry *hypfs_create_file(struct dentry *parent, const char *name,
char *data, umode_t mode)
{
struct dentry *dentry;
struct inode *inode;
inode_lock(d_inode(parent));
dentry = lookup_one_len(name, parent, strlen(name));
if (IS_ERR(dentry)) {
dentry = ERR_PTR(-ENOMEM);
goto fail;
}
inode = hypfs_make_inode(parent->d_sb, mode);
if (!inode) {
dput(dentry);
dentry = ERR_PTR(-ENOMEM);
goto fail;
}
if (S_ISREG(mode)) {
inode->i_fop = &hypfs_file_ops;
if (data)
inode->i_size = strlen(data);
else
inode->i_size = 0;
} else if (S_ISDIR(mode)) {
inode->i_op = &simple_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
inc_nlink(d_inode(parent));
} else
BUG();
inode->i_private = data;
d_instantiate(dentry, inode);
dget(dentry);
fail:
inode_unlock(d_inode(parent));
return dentry;
}
struct dentry *hypfs_mkdir(struct dentry *parent, const char *name)
{
struct dentry *dentry;
dentry = hypfs_create_file(parent, name, NULL, S_IFDIR | DIR_MODE);
if (IS_ERR(dentry))
return dentry;
hypfs_add_dentry(dentry);
return dentry;
}
static struct dentry *hypfs_create_update_file(struct dentry *dir)
{
struct dentry *dentry;
dentry = hypfs_create_file(dir, "update", NULL,
S_IFREG | UPDATE_FILE_MODE);
/*
* We do not put the update file on the 'delete' list with
* hypfs_add_dentry(), since it should not be removed when the tree
* is updated.
*/
return dentry;
}
struct dentry *hypfs_create_u64(struct dentry *dir,
const char *name, __u64 value)
{
char *buffer;
char tmp[TMP_SIZE];
struct dentry *dentry;
snprintf(tmp, TMP_SIZE, "%llu\n", (unsigned long long int)value);
buffer = kstrdup(tmp, GFP_KERNEL);
if (!buffer)
return ERR_PTR(-ENOMEM);
dentry =
hypfs_create_file(dir, name, buffer, S_IFREG | REG_FILE_MODE);
if (IS_ERR(dentry)) {
kfree(buffer);
return ERR_PTR(-ENOMEM);
}
hypfs_add_dentry(dentry);
return dentry;
}
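/*
 * Usage sketch (illustrative): creating a counter attribute below an
 * existing hypfs directory.
 *
 *	struct dentry *file = hypfs_create_u64(dir, "count", 42);
 *
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 */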
struct dentry *hypfs_create_str(struct dentry *dir,
const char *name, char *string)
{
char *buffer;
struct dentry *dentry;
buffer = kmalloc(strlen(string) + 2, GFP_KERNEL);
if (!buffer)
return ERR_PTR(-ENOMEM);
sprintf(buffer, "%s\n", string);
dentry =
hypfs_create_file(dir, name, buffer, S_IFREG | REG_FILE_MODE);
if (IS_ERR(dentry)) {
kfree(buffer);
return ERR_PTR(-ENOMEM);
}
hypfs_add_dentry(dentry);
return dentry;
}
static const struct file_operations hypfs_file_ops = {
.open = hypfs_open,
.release = hypfs_release,
.read_iter = hypfs_read_iter,
.write_iter = hypfs_write_iter,
.llseek = no_llseek,
};
static struct file_system_type hypfs_type = {
.owner = THIS_MODULE,
.name = "s390_hypfs",
.init_fs_context = hypfs_init_fs_context,
.parameters = hypfs_fs_parameters,
.kill_sb = hypfs_kill_super
};
static const struct super_operations hypfs_s_ops = {
.statfs = simple_statfs,
.evict_inode = hypfs_evict_inode,
.show_options = hypfs_show_options,
};
int __init __hypfs_fs_init(void)
{
int rc;
rc = sysfs_create_mount_point(hypervisor_kobj, "s390");
if (rc)
return rc;
rc = register_filesystem(&hypfs_type);
if (rc)
goto fail;
return 0;
fail:
sysfs_remove_mount_point(hypervisor_kobj, "s390");
return rc;
}
| linux-master | arch/s390/hypfs/inode.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Hypervisor filesystem for Linux on s390. Diag 204 and 224
* implementation.
*
* Copyright IBM Corp. 2006, 2008
* Author(s): Michael Holzheu <[email protected]>
*/
#define KMSG_COMPONENT "hypfs"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <asm/diag.h>
#include <asm/ebcdic.h>
#include "hypfs_diag.h"
#include "hypfs.h"
#define TMP_SIZE 64 /* size of temporary buffers */
static char *diag224_cpu_names; /* diag 224 name table */
static int diag224_idx2name(int index, char *name);
/*
* DIAG 204 member access functions.
*
* Since we have two different diag 204 data formats for old and new s390
* machines, we do not access the structs directly, but use getter functions for
* each struct member instead. This should make the code more readable.
*/
/* Time information block */
static inline int info_blk_hdr__size(enum diag204_format type)
{
if (type == DIAG204_INFO_SIMPLE)
return sizeof(struct diag204_info_blk_hdr);
else /* DIAG204_INFO_EXT */
return sizeof(struct diag204_x_info_blk_hdr);
}
static inline __u8 info_blk_hdr__npar(enum diag204_format type, void *hdr)
{
if (type == DIAG204_INFO_SIMPLE)
return ((struct diag204_info_blk_hdr *)hdr)->npar;
else /* DIAG204_INFO_EXT */
return ((struct diag204_x_info_blk_hdr *)hdr)->npar;
}
static inline __u8 info_blk_hdr__flags(enum diag204_format type, void *hdr)
{
if (type == DIAG204_INFO_SIMPLE)
return ((struct diag204_info_blk_hdr *)hdr)->flags;
else /* DIAG204_INFO_EXT */
return ((struct diag204_x_info_blk_hdr *)hdr)->flags;
}
/* Partition header */
static inline int part_hdr__size(enum diag204_format type)
{
if (type == DIAG204_INFO_SIMPLE)
return sizeof(struct diag204_part_hdr);
else /* DIAG204_INFO_EXT */
return sizeof(struct diag204_x_part_hdr);
}
static inline __u8 part_hdr__rcpus(enum diag204_format type, void *hdr)
{
if (type == DIAG204_INFO_SIMPLE)
return ((struct diag204_part_hdr *)hdr)->cpus;
else /* DIAG204_INFO_EXT */
return ((struct diag204_x_part_hdr *)hdr)->rcpus;
}
static inline void part_hdr__part_name(enum diag204_format type, void *hdr,
char *name)
{
if (type == DIAG204_INFO_SIMPLE)
memcpy(name, ((struct diag204_part_hdr *)hdr)->part_name,
DIAG204_LPAR_NAME_LEN);
else /* DIAG204_INFO_EXT */
memcpy(name, ((struct diag204_x_part_hdr *)hdr)->part_name,
DIAG204_LPAR_NAME_LEN);
EBCASC(name, DIAG204_LPAR_NAME_LEN);
name[DIAG204_LPAR_NAME_LEN] = 0;
strim(name);
}
/* CPU info block */
static inline int cpu_info__size(enum diag204_format type)
{
if (type == DIAG204_INFO_SIMPLE)
return sizeof(struct diag204_cpu_info);
else /* DIAG204_INFO_EXT */
return sizeof(struct diag204_x_cpu_info);
}
static inline __u8 cpu_info__ctidx(enum diag204_format type, void *hdr)
{
if (type == DIAG204_INFO_SIMPLE)
return ((struct diag204_cpu_info *)hdr)->ctidx;
else /* DIAG204_INFO_EXT */
return ((struct diag204_x_cpu_info *)hdr)->ctidx;
}
static inline __u16 cpu_info__cpu_addr(enum diag204_format type, void *hdr)
{
if (type == DIAG204_INFO_SIMPLE)
return ((struct diag204_cpu_info *)hdr)->cpu_addr;
else /* DIAG204_INFO_EXT */
return ((struct diag204_x_cpu_info *)hdr)->cpu_addr;
}
static inline __u64 cpu_info__acc_time(enum diag204_format type, void *hdr)
{
if (type == DIAG204_INFO_SIMPLE)
return ((struct diag204_cpu_info *)hdr)->acc_time;
else /* DIAG204_INFO_EXT */
return ((struct diag204_x_cpu_info *)hdr)->acc_time;
}
static inline __u64 cpu_info__lp_time(enum diag204_format type, void *hdr)
{
if (type == DIAG204_INFO_SIMPLE)
return ((struct diag204_cpu_info *)hdr)->lp_time;
else /* DIAG204_INFO_EXT */
return ((struct diag204_x_cpu_info *)hdr)->lp_time;
}
static inline __u64 cpu_info__online_time(enum diag204_format type, void *hdr)
{
if (type == DIAG204_INFO_SIMPLE)
return 0; /* online_time not available in simple info */
else /* DIAG204_INFO_EXT */
return ((struct diag204_x_cpu_info *)hdr)->online_time;
}
/* Physical header */
static inline int phys_hdr__size(enum diag204_format type)
{
if (type == DIAG204_INFO_SIMPLE)
return sizeof(struct diag204_phys_hdr);
else /* DIAG204_INFO_EXT */
return sizeof(struct diag204_x_phys_hdr);
}
static inline __u8 phys_hdr__cpus(enum diag204_format type, void *hdr)
{
if (type == DIAG204_INFO_SIMPLE)
return ((struct diag204_phys_hdr *)hdr)->cpus;
else /* DIAG204_INFO_EXT */
return ((struct diag204_x_phys_hdr *)hdr)->cpus;
}
/* Physical CPU info block */
static inline int phys_cpu__size(enum diag204_format type)
{
if (type == DIAG204_INFO_SIMPLE)
return sizeof(struct diag204_phys_cpu);
else /* DIAG204_INFO_EXT */
return sizeof(struct diag204_x_phys_cpu);
}
static inline __u16 phys_cpu__cpu_addr(enum diag204_format type, void *hdr)
{
if (type == DIAG204_INFO_SIMPLE)
return ((struct diag204_phys_cpu *)hdr)->cpu_addr;
else /* DIAG204_INFO_EXT */
return ((struct diag204_x_phys_cpu *)hdr)->cpu_addr;
}
static inline __u64 phys_cpu__mgm_time(enum diag204_format type, void *hdr)
{
if (type == DIAG204_INFO_SIMPLE)
return ((struct diag204_phys_cpu *)hdr)->mgm_time;
else /* DIAG204_INFO_EXT */
return ((struct diag204_x_phys_cpu *)hdr)->mgm_time;
}
static inline __u64 phys_cpu__ctidx(enum diag204_format type, void *hdr)
{
if (type == DIAG204_INFO_SIMPLE)
return ((struct diag204_phys_cpu *)hdr)->ctidx;
else /* DIAG204_INFO_EXT */
return ((struct diag204_x_phys_cpu *)hdr)->ctidx;
}
/*
* Functions to create the directory structure
* *******************************************
*/
static int hypfs_create_cpu_files(struct dentry *cpus_dir, void *cpu_info)
{
struct dentry *cpu_dir;
char buffer[TMP_SIZE];
void *rc;
snprintf(buffer, TMP_SIZE, "%d", cpu_info__cpu_addr(diag204_get_info_type(),
cpu_info));
cpu_dir = hypfs_mkdir(cpus_dir, buffer);
rc = hypfs_create_u64(cpu_dir, "mgmtime",
cpu_info__acc_time(diag204_get_info_type(), cpu_info) -
cpu_info__lp_time(diag204_get_info_type(), cpu_info));
if (IS_ERR(rc))
return PTR_ERR(rc);
rc = hypfs_create_u64(cpu_dir, "cputime",
cpu_info__lp_time(diag204_get_info_type(), cpu_info));
if (IS_ERR(rc))
return PTR_ERR(rc);
if (diag204_get_info_type() == DIAG204_INFO_EXT) {
rc = hypfs_create_u64(cpu_dir, "onlinetime",
cpu_info__online_time(diag204_get_info_type(),
cpu_info));
if (IS_ERR(rc))
return PTR_ERR(rc);
}
diag224_idx2name(cpu_info__ctidx(diag204_get_info_type(), cpu_info), buffer);
rc = hypfs_create_str(cpu_dir, "type", buffer);
return PTR_ERR_OR_ZERO(rc);
}
static void *hypfs_create_lpar_files(struct dentry *systems_dir, void *part_hdr)
{
struct dentry *cpus_dir;
struct dentry *lpar_dir;
char lpar_name[DIAG204_LPAR_NAME_LEN + 1];
void *cpu_info;
int i;
part_hdr__part_name(diag204_get_info_type(), part_hdr, lpar_name);
lpar_name[DIAG204_LPAR_NAME_LEN] = 0;
lpar_dir = hypfs_mkdir(systems_dir, lpar_name);
if (IS_ERR(lpar_dir))
return lpar_dir;
cpus_dir = hypfs_mkdir(lpar_dir, "cpus");
if (IS_ERR(cpus_dir))
return cpus_dir;
cpu_info = part_hdr + part_hdr__size(diag204_get_info_type());
for (i = 0; i < part_hdr__rcpus(diag204_get_info_type(), part_hdr); i++) {
int rc;
rc = hypfs_create_cpu_files(cpus_dir, cpu_info);
if (rc)
return ERR_PTR(rc);
cpu_info += cpu_info__size(diag204_get_info_type());
}
return cpu_info;
}
static int hypfs_create_phys_cpu_files(struct dentry *cpus_dir, void *cpu_info)
{
struct dentry *cpu_dir;
char buffer[TMP_SIZE];
void *rc;
snprintf(buffer, TMP_SIZE, "%i", phys_cpu__cpu_addr(diag204_get_info_type(),
cpu_info));
cpu_dir = hypfs_mkdir(cpus_dir, buffer);
if (IS_ERR(cpu_dir))
return PTR_ERR(cpu_dir);
rc = hypfs_create_u64(cpu_dir, "mgmtime",
phys_cpu__mgm_time(diag204_get_info_type(), cpu_info));
if (IS_ERR(rc))
return PTR_ERR(rc);
diag224_idx2name(phys_cpu__ctidx(diag204_get_info_type(), cpu_info), buffer);
rc = hypfs_create_str(cpu_dir, "type", buffer);
return PTR_ERR_OR_ZERO(rc);
}
static void *hypfs_create_phys_files(struct dentry *parent_dir, void *phys_hdr)
{
int i;
void *cpu_info;
struct dentry *cpus_dir;
cpus_dir = hypfs_mkdir(parent_dir, "cpus");
if (IS_ERR(cpus_dir))
return cpus_dir;
cpu_info = phys_hdr + phys_hdr__size(diag204_get_info_type());
for (i = 0; i < phys_hdr__cpus(diag204_get_info_type(), phys_hdr); i++) {
int rc;
rc = hypfs_create_phys_cpu_files(cpus_dir, cpu_info);
if (rc)
return ERR_PTR(rc);
cpu_info += phys_cpu__size(diag204_get_info_type());
}
return cpu_info;
}
int hypfs_diag_create_files(struct dentry *root)
{
struct dentry *systems_dir, *hyp_dir;
void *time_hdr, *part_hdr;
void *buffer, *ptr;
int i, rc, pages;
buffer = diag204_get_buffer(diag204_get_info_type(), &pages);
if (IS_ERR(buffer))
return PTR_ERR(buffer);
rc = diag204_store(buffer, pages);
if (rc)
return rc;
systems_dir = hypfs_mkdir(root, "systems");
if (IS_ERR(systems_dir)) {
rc = PTR_ERR(systems_dir);
goto err_out;
}
time_hdr = (struct x_info_blk_hdr *)buffer;
part_hdr = time_hdr + info_blk_hdr__size(diag204_get_info_type());
for (i = 0; i < info_blk_hdr__npar(diag204_get_info_type(), time_hdr); i++) {
part_hdr = hypfs_create_lpar_files(systems_dir, part_hdr);
if (IS_ERR(part_hdr)) {
rc = PTR_ERR(part_hdr);
goto err_out;
}
}
if (info_blk_hdr__flags(diag204_get_info_type(), time_hdr) &
DIAG204_LPAR_PHYS_FLG) {
ptr = hypfs_create_phys_files(root, part_hdr);
if (IS_ERR(ptr)) {
rc = PTR_ERR(ptr);
goto err_out;
}
}
hyp_dir = hypfs_mkdir(root, "hyp");
if (IS_ERR(hyp_dir)) {
rc = PTR_ERR(hyp_dir);
goto err_out;
}
ptr = hypfs_create_str(hyp_dir, "type", "LPAR Hypervisor");
if (IS_ERR(ptr)) {
rc = PTR_ERR(ptr);
goto err_out;
}
rc = 0;
err_out:
return rc;
}
/* Diagnose 224 functions */
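/*
 * The diag 224 name table starts with a 16 byte header whose first byte
 * contains the number of entries; the 16 byte CPU type names follow,
 * hence the "index + 1" offset below.
 */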
static int diag224_idx2name(int index, char *name)
{
memcpy(name, diag224_cpu_names + ((index + 1) * DIAG204_CPU_NAME_LEN),
DIAG204_CPU_NAME_LEN);
name[DIAG204_CPU_NAME_LEN] = 0;
strim(name);
return 0;
}
static int diag224_get_name_table(void)
{
/* memory must be below 2GB */
diag224_cpu_names = (char *)__get_free_page(GFP_KERNEL | GFP_DMA);
if (!diag224_cpu_names)
return -ENOMEM;
if (diag224(diag224_cpu_names)) {
free_page((unsigned long)diag224_cpu_names);
return -EOPNOTSUPP;
}
EBCASC(diag224_cpu_names + 16, (*diag224_cpu_names + 1) * 16);
return 0;
}
static void diag224_delete_name_table(void)
{
free_page((unsigned long)diag224_cpu_names);
}
int __init __hypfs_diag_fs_init(void)
{
if (MACHINE_IS_LPAR)
return diag224_get_name_table();
return 0;
}
void __hypfs_diag_fs_exit(void)
{
diag224_delete_name_table();
}
| linux-master | arch/s390/hypfs/hypfs_diag_fs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Hypervisor filesystem for Linux on s390.
* Set Partition-Resource Parameter interface.
*
* Copyright IBM Corp. 2013
* Author(s): Martin Schwidefsky <[email protected]>
*/
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <asm/diag.h>
#include <asm/sclp.h>
#include "hypfs.h"
#define DIAG304_SET_WEIGHTS 0
#define DIAG304_QUERY_PRP 1
#define DIAG304_SET_CAPPING 2
#define DIAG304_CMD_MAX 2
static inline unsigned long __hypfs_sprp_diag304(void *data, unsigned long cmd)
{
union register_pair r1 = { .even = (unsigned long)data, };
asm volatile("diag %[r1],%[r3],0x304\n"
: [r1] "+&d" (r1.pair)
: [r3] "d" (cmd)
: "memory");
return r1.odd;
}
static unsigned long hypfs_sprp_diag304(void *data, unsigned long cmd)
{
diag_stat_inc(DIAG_STAT_X304);
return __hypfs_sprp_diag304(data, cmd);
}
static void hypfs_sprp_free(const void *data)
{
free_page((unsigned long) data);
}
static int hypfs_sprp_create(void **data_ptr, void **free_ptr, size_t *size)
{
unsigned long rc;
void *data;
data = (void *) get_zeroed_page(GFP_KERNEL);
if (!data)
return -ENOMEM;
rc = hypfs_sprp_diag304(data, DIAG304_QUERY_PRP);
if (rc != 1) {
*data_ptr = *free_ptr = NULL;
*size = 0;
free_page((unsigned long) data);
return -EIO;
}
*data_ptr = *free_ptr = data;
*size = PAGE_SIZE;
return 0;
}
static int __hypfs_sprp_ioctl(void __user *user_area)
{
struct hypfs_diag304 *diag304;
unsigned long cmd;
void __user *udata;
void *data;
int rc;
rc = -ENOMEM;
data = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
diag304 = kzalloc(sizeof(*diag304), GFP_KERNEL);
if (!data || !diag304)
goto out;
rc = -EFAULT;
if (copy_from_user(diag304, user_area, sizeof(*diag304)))
goto out;
rc = -EINVAL;
if ((diag304->args[0] >> 8) != 0 || diag304->args[1] > DIAG304_CMD_MAX)
goto out;
rc = -EFAULT;
udata = (void __user *)(unsigned long) diag304->data;
if (diag304->args[1] == DIAG304_SET_WEIGHTS ||
diag304->args[1] == DIAG304_SET_CAPPING)
if (copy_from_user(data, udata, PAGE_SIZE))
goto out;
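/*
* args[0] and args[1] appear to be adjacent 32-bit words, so reading an
* unsigned long at &args[0] yields the combined diag 304 command word
* (s390 is big-endian, so args[0] lands in the high half).
*/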
cmd = *(unsigned long *) &diag304->args[0];
diag304->rc = hypfs_sprp_diag304(data, cmd);
if (diag304->args[1] == DIAG304_QUERY_PRP)
if (copy_to_user(udata, data, PAGE_SIZE)) {
rc = -EFAULT;
goto out;
}
rc = copy_to_user(user_area, diag304, sizeof(*diag304)) ? -EFAULT : 0;
out:
kfree(diag304);
free_page((unsigned long) data);
return rc;
}
static long hypfs_sprp_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
void __user *argp;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (is_compat_task())
argp = compat_ptr(arg);
else
argp = (void __user *) arg;
switch (cmd) {
case HYPFS_DIAG304:
return __hypfs_sprp_ioctl(argp);
default: /* unknown ioctl number */
return -ENOTTY;
}
return 0;
}
static struct hypfs_dbfs_file hypfs_sprp_file = {
.name = "diag_304",
.data_create = hypfs_sprp_create,
.data_free = hypfs_sprp_free,
.unlocked_ioctl = hypfs_sprp_ioctl,
};
void hypfs_sprp_init(void)
{
if (!sclp.has_sprp)
return;
hypfs_dbfs_create_file(&hypfs_sprp_file);
}
void hypfs_sprp_exit(void)
{
if (!sclp.has_sprp)
return;
hypfs_dbfs_remove_file(&hypfs_sprp_file);
}
| linux-master | arch/s390/hypfs/hypfs_sprp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Hypervisor filesystem for Linux on s390 - debugfs interface
*
* Copyright IBM Corp. 2010
* Author(s): Michael Holzheu <[email protected]>
*/
#include <linux/slab.h>
#include "hypfs.h"
static struct dentry *dbfs_dir;
static struct hypfs_dbfs_data *hypfs_dbfs_data_alloc(struct hypfs_dbfs_file *f)
{
struct hypfs_dbfs_data *data;
data = kmalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return NULL;
data->dbfs_file = f;
return data;
}
static void hypfs_dbfs_data_free(struct hypfs_dbfs_data *data)
{
data->dbfs_file->data_free(data->buf_free_ptr);
kfree(data);
}
static ssize_t dbfs_read(struct file *file, char __user *buf,
size_t size, loff_t *ppos)
{
struct hypfs_dbfs_data *data;
struct hypfs_dbfs_file *df;
ssize_t rc;
if (*ppos != 0)
return 0;
df = file_inode(file)->i_private;
mutex_lock(&df->lock);
data = hypfs_dbfs_data_alloc(df);
if (!data) {
mutex_unlock(&df->lock);
return -ENOMEM;
}
rc = df->data_create(&data->buf, &data->buf_free_ptr, &data->size);
if (rc) {
mutex_unlock(&df->lock);
kfree(data);
return rc;
}
mutex_unlock(&df->lock);
rc = simple_read_from_buffer(buf, size, ppos, data->buf, data->size);
hypfs_dbfs_data_free(data);
return rc;
}
static long dbfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct hypfs_dbfs_file *df = file_inode(file)->i_private;
long rc;
mutex_lock(&df->lock);
if (df->unlocked_ioctl)
rc = df->unlocked_ioctl(file, cmd, arg);
else
rc = -ENOTTY;
mutex_unlock(&df->lock);
return rc;
}
static const struct file_operations dbfs_ops = {
.read = dbfs_read,
.llseek = no_llseek,
.unlocked_ioctl = dbfs_ioctl,
};
void hypfs_dbfs_create_file(struct hypfs_dbfs_file *df)
{
df->dentry = debugfs_create_file(df->name, 0400, dbfs_dir, df,
&dbfs_ops);
mutex_init(&df->lock);
}
void hypfs_dbfs_remove_file(struct hypfs_dbfs_file *df)
{
debugfs_remove(df->dentry);
}
static int __init hypfs_dbfs_init(void)
{
int rc = -ENODATA;
dbfs_dir = debugfs_create_dir("s390_hypfs", NULL);
if (hypfs_diag_init())
goto fail_dbfs_exit;
if (hypfs_vm_init())
goto fail_hypfs_diag_exit;
hypfs_sprp_init();
if (hypfs_diag0c_init())
goto fail_hypfs_sprp_exit;
rc = hypfs_fs_init();
if (rc)
goto fail_hypfs_diag0c_exit;
return 0;
fail_hypfs_diag0c_exit:
hypfs_diag0c_exit();
fail_hypfs_sprp_exit:
hypfs_sprp_exit();
hypfs_vm_exit();
fail_hypfs_diag_exit:
hypfs_diag_exit();
pr_err("Initialization of hypfs failed with rc=%i\n", rc);
fail_dbfs_exit:
debugfs_remove(dbfs_dir);
return rc;
}
device_initcall(hypfs_dbfs_init)
| linux-master | arch/s390/hypfs/hypfs_dbfs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* In-kernel vector facility support functions
*
* Copyright IBM Corp. 2015
* Author(s): Hendrik Brueckner <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <asm/fpu/types.h>
#include <asm/fpu/api.h>
#include <asm/vx-insn.h>
void __kernel_fpu_begin(struct kernel_fpu *state, u32 flags)
{
/*
* Limit the save to the FPU/vector registers already
* in use by the previous context
*/
flags &= state->mask;
if (flags & KERNEL_FPC)
/* Save floating point control */
asm volatile("stfpc %0" : "=Q" (state->fpc));
if (!MACHINE_HAS_VX) {
if (flags & KERNEL_VXR_V0V7) {
/* Save floating-point registers */
asm volatile("std 0,%0" : "=Q" (state->fprs[0]));
asm volatile("std 1,%0" : "=Q" (state->fprs[1]));
asm volatile("std 2,%0" : "=Q" (state->fprs[2]));
asm volatile("std 3,%0" : "=Q" (state->fprs[3]));
asm volatile("std 4,%0" : "=Q" (state->fprs[4]));
asm volatile("std 5,%0" : "=Q" (state->fprs[5]));
asm volatile("std 6,%0" : "=Q" (state->fprs[6]));
asm volatile("std 7,%0" : "=Q" (state->fprs[7]));
asm volatile("std 8,%0" : "=Q" (state->fprs[8]));
asm volatile("std 9,%0" : "=Q" (state->fprs[9]));
asm volatile("std 10,%0" : "=Q" (state->fprs[10]));
asm volatile("std 11,%0" : "=Q" (state->fprs[11]));
asm volatile("std 12,%0" : "=Q" (state->fprs[12]));
asm volatile("std 13,%0" : "=Q" (state->fprs[13]));
asm volatile("std 14,%0" : "=Q" (state->fprs[14]));
asm volatile("std 15,%0" : "=Q" (state->fprs[15]));
}
return;
}
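/*
* A sketch of the flag layout implied by the test-under-mask values used
* below (an inference, not taken from a header): 0x01 KERNEL_FPC,
* 0x02 V0..V7, 0x04 V8..V15, 0x08 V16..V23, 0x10 V24..V31. Hence
* 30 covers all vector registers, 6 is KERNEL_VXR_LOW, 12 is
* KERNEL_VXR_MID and 24 is KERNEL_VXR_HIGH.
*/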
/* Test and save vector registers */
asm volatile (
/*
* Test if any vector register must be saved and, if so,
* test if all registers can be saved.
*/
" la 1,%[vxrs]\n" /* load save area */
" tmll %[m],30\n" /* KERNEL_VXR */
" jz 7f\n" /* no work -> done */
" jo 5f\n" /* -> save V0..V31 */
/*
* Test for special case KERNEL_FPU_MID only. In this
* case a vstm V8..V23 is the best instruction
*/
" chi %[m],12\n" /* KERNEL_VXR_MID */
" jne 0f\n" /* -> save V8..V23 */
" VSTM 8,23,128,1\n" /* vstm %v8,%v23,128(%r1) */
" j 7f\n"
/* Test and save the first half of 16 vector registers */
"0: tmll %[m],6\n" /* KERNEL_VXR_LOW */
" jz 3f\n" /* -> KERNEL_VXR_HIGH */
" jo 2f\n" /* 11 -> save V0..V15 */
" brc 2,1f\n" /* 10 -> save V8..V15 */
" VSTM 0,7,0,1\n" /* vstm %v0,%v7,0(%r1) */
" j 3f\n"
"1: VSTM 8,15,128,1\n" /* vstm %v8,%v15,128(%r1) */
" j 3f\n"
"2: VSTM 0,15,0,1\n" /* vstm %v0,%v15,0(%r1) */
/* Test and save the second half of 16 vector registers */
"3: tmll %[m],24\n" /* KERNEL_VXR_HIGH */
" jz 7f\n"
" jo 6f\n" /* 11 -> save V16..V31 */
" brc 2,4f\n" /* 10 -> save V24..V31 */
" VSTM 16,23,256,1\n" /* vstm %v16,%v23,256(%r1) */
" j 7f\n"
"4: VSTM 24,31,384,1\n" /* vstm %v24,%v31,384(%r1) */
" j 7f\n"
"5: VSTM 0,15,0,1\n" /* vstm %v0,%v15,0(%r1) */
"6: VSTM 16,31,256,1\n" /* vstm %v16,%v31,256(%r1) */
"7:"
: [vxrs] "=Q" (*(struct vx_array *) &state->vxrs)
: [m] "d" (flags)
: "1", "cc");
}
EXPORT_SYMBOL(__kernel_fpu_begin);
void __kernel_fpu_end(struct kernel_fpu *state, u32 flags)
{
/*
* Limit the restore to the FPU/vector registers of the
* previous context that have been overwritten by the
* current context
*/
flags &= state->mask;
if (flags & KERNEL_FPC)
/* Restore floating-point controls */
asm volatile("lfpc %0" : : "Q" (state->fpc));
if (!MACHINE_HAS_VX) {
if (flags & KERNEL_VXR_V0V7) {
/* Restore floating-point registers */
asm volatile("ld 0,%0" : : "Q" (state->fprs[0]));
asm volatile("ld 1,%0" : : "Q" (state->fprs[1]));
asm volatile("ld 2,%0" : : "Q" (state->fprs[2]));
asm volatile("ld 3,%0" : : "Q" (state->fprs[3]));
asm volatile("ld 4,%0" : : "Q" (state->fprs[4]));
asm volatile("ld 5,%0" : : "Q" (state->fprs[5]));
asm volatile("ld 6,%0" : : "Q" (state->fprs[6]));
asm volatile("ld 7,%0" : : "Q" (state->fprs[7]));
asm volatile("ld 8,%0" : : "Q" (state->fprs[8]));
asm volatile("ld 9,%0" : : "Q" (state->fprs[9]));
asm volatile("ld 10,%0" : : "Q" (state->fprs[10]));
asm volatile("ld 11,%0" : : "Q" (state->fprs[11]));
asm volatile("ld 12,%0" : : "Q" (state->fprs[12]));
asm volatile("ld 13,%0" : : "Q" (state->fprs[13]));
asm volatile("ld 14,%0" : : "Q" (state->fprs[14]));
asm volatile("ld 15,%0" : : "Q" (state->fprs[15]));
}
return;
}
/* Test and restore (load) vector registers */
asm volatile (
/*
* Test if any vector register must be loaded and, if so,
* test if all registers can be loaded at once.
*/
" la 1,%[vxrs]\n" /* load restore area */
" tmll %[m],30\n" /* KERNEL_VXR */
" jz 7f\n" /* no work -> done */
" jo 5f\n" /* -> restore V0..V31 */
/*
* Test for special case KERNEL_FPU_MID only. In this
* case a vlm V8..V23 is the best instruction
*/
" chi %[m],12\n" /* KERNEL_VXR_MID */
" jne 0f\n" /* -> restore V8..V23 */
" VLM 8,23,128,1\n" /* vlm %v8,%v23,128(%r1) */
" j 7f\n"
/* Test and restore the first half of 16 vector registers */
"0: tmll %[m],6\n" /* KERNEL_VXR_LOW */
" jz 3f\n" /* -> KERNEL_VXR_HIGH */
" jo 2f\n" /* 11 -> restore V0..V15 */
" brc 2,1f\n" /* 10 -> restore V8..V15 */
" VLM 0,7,0,1\n" /* vlm %v0,%v7,0(%r1) */
" j 3f\n"
"1: VLM 8,15,128,1\n" /* vlm %v8,%v15,128(%r1) */
" j 3f\n"
"2: VLM 0,15,0,1\n" /* vlm %v0,%v15,0(%r1) */
/* Test and restore the second half of 16 vector registers */
"3: tmll %[m],24\n" /* KERNEL_VXR_HIGH */
" jz 7f\n"
" jo 6f\n" /* 11 -> restore V16..V31 */
" brc 2,4f\n" /* 10 -> restore V24..V31 */
" VLM 16,23,256,1\n" /* vlm %v16,%v23,256(%r1) */
" j 7f\n"
"4: VLM 24,31,384,1\n" /* vlm %v24,%v31,384(%r1) */
" j 7f\n"
"5: VLM 0,15,0,1\n" /* vlm %v0,%v15,0(%r1) */
"6: VLM 16,31,256,1\n" /* vlm %v16,%v31,256(%r1) */
"7:"
: [vxrs] "=Q" (*(struct vx_array *) &state->vxrs)
: [m] "d" (flags)
: "1", "cc");
}
EXPORT_SYMBOL(__kernel_fpu_end);
void __load_fpu_regs(void)
{
struct fpu *state = &current->thread.fpu;
unsigned long *regs = current->thread.fpu.regs;
asm volatile("lfpc %0" : : "Q" (state->fpc));
if (likely(MACHINE_HAS_VX)) {
asm volatile("lgr 1,%0\n"
"VLM 0,15,0,1\n"
"VLM 16,31,256,1\n"
:
: "d" (regs)
: "1", "cc", "memory");
} else {
asm volatile("ld 0,%0" : : "Q" (regs[0]));
asm volatile("ld 1,%0" : : "Q" (regs[1]));
asm volatile("ld 2,%0" : : "Q" (regs[2]));
asm volatile("ld 3,%0" : : "Q" (regs[3]));
asm volatile("ld 4,%0" : : "Q" (regs[4]));
asm volatile("ld 5,%0" : : "Q" (regs[5]));
asm volatile("ld 6,%0" : : "Q" (regs[6]));
asm volatile("ld 7,%0" : : "Q" (regs[7]));
asm volatile("ld 8,%0" : : "Q" (regs[8]));
asm volatile("ld 9,%0" : : "Q" (regs[9]));
asm volatile("ld 10,%0" : : "Q" (regs[10]));
asm volatile("ld 11,%0" : : "Q" (regs[11]));
asm volatile("ld 12,%0" : : "Q" (regs[12]));
asm volatile("ld 13,%0" : : "Q" (regs[13]));
asm volatile("ld 14,%0" : : "Q" (regs[14]));
asm volatile("ld 15,%0" : : "Q" (regs[15]));
}
clear_cpu_flag(CIF_FPU);
}
EXPORT_SYMBOL(__load_fpu_regs);
void load_fpu_regs(void)
{
raw_local_irq_disable();
__load_fpu_regs();
raw_local_irq_enable();
}
EXPORT_SYMBOL(load_fpu_regs);
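/*
* Convention used here: CIF_FPU set means the task's FPU/vector state
* lives in current->thread.fpu rather than in the registers.
* save_fpu_regs() sets the flag after storing, __load_fpu_regs() clears
* it after reloading.
*/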
void save_fpu_regs(void)
{
unsigned long flags, *regs;
struct fpu *state;
local_irq_save(flags);
if (test_cpu_flag(CIF_FPU))
goto out;
state = &current->thread.fpu;
regs = current->thread.fpu.regs;
asm volatile("stfpc %0" : "=Q" (state->fpc));
if (likely(MACHINE_HAS_VX)) {
asm volatile("lgr 1,%0\n"
"VSTM 0,15,0,1\n"
"VSTM 16,31,256,1\n"
:
: "d" (regs)
: "1", "cc", "memory");
} else {
asm volatile("std 0,%0" : "=Q" (regs[0]));
asm volatile("std 1,%0" : "=Q" (regs[1]));
asm volatile("std 2,%0" : "=Q" (regs[2]));
asm volatile("std 3,%0" : "=Q" (regs[3]));
asm volatile("std 4,%0" : "=Q" (regs[4]));
asm volatile("std 5,%0" : "=Q" (regs[5]));
asm volatile("std 6,%0" : "=Q" (regs[6]));
asm volatile("std 7,%0" : "=Q" (regs[7]));
asm volatile("std 8,%0" : "=Q" (regs[8]));
asm volatile("std 9,%0" : "=Q" (regs[9]));
asm volatile("std 10,%0" : "=Q" (regs[10]));
asm volatile("std 11,%0" : "=Q" (regs[11]));
asm volatile("std 12,%0" : "=Q" (regs[12]));
asm volatile("std 13,%0" : "=Q" (regs[13]));
asm volatile("std 14,%0" : "=Q" (regs[14]));
asm volatile("std 15,%0" : "=Q" (regs[15]));
}
set_cpu_flag(CIF_FPU);
out:
local_irq_restore(flags);
}
EXPORT_SYMBOL(save_fpu_regs);
| linux-master | arch/s390/kernel/fpu.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Dynamic function tracer architecture backend.
*
* Copyright IBM Corp. 2009,2014
*
* Author(s): Martin Schwidefsky <[email protected]>
*/
#include <linux/moduleloader.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/kprobes.h>
#include <trace/syscall.h>
#include <asm/asm-offsets.h>
#include <asm/text-patching.h>
#include <asm/cacheflush.h>
#include <asm/ftrace.lds.h>
#include <asm/nospec-branch.h>
#include <asm/set_memory.h>
#include "entry.h"
#include "ftrace.h"
/*
* To generate the function prologue, either gcc's hotpatch feature (since
* gcc 4.8) or a combination of the -pg -mrecord-mcount -mnop-mcount
* -mfentry flags (since gcc 9 / clang 10) is used.
* In both cases the original as well as the disabled function prologue
* contains only a single six-byte instruction and looks like this:
* > brcl 0,0 # offset 0
* To enable ftrace the code gets patched like above and afterwards looks
* like this:
* > brasl %r0,ftrace_caller # offset 0
*
* The instruction will be patched by ftrace_make_call / ftrace_make_nop.
* The ftrace function gets called with a non-standard C function call ABI
* where r0 contains the return address. It is also expected that the called
* function only clobbers r0 and r1, but restores r2-r15.
* For module code we can't directly jump to ftrace caller, but need a
* trampoline (ftrace_plt), which clobbers also r1.
*/
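/*
* Illustrative byte patterns (derived from the code below, not generated
* here): the disabled prologue is the six-byte nop "brcl 0,0", i.e.
* c0 04 00 00 00 00 (see the orig[] pattern in ftrace_init_nop()).
* Enabling only rewrites the mask nibble, c0 04 -> c0 f4, which turns the
* nop into an unconditional "brcl 15,..." branch to the per-function
* trampoline (see ftrace_patch_branch_mask()).
*/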
void *ftrace_func __read_mostly = ftrace_stub;
struct ftrace_insn {
u16 opc;
s32 disp;
} __packed;
#ifdef CONFIG_MODULES
static char *ftrace_plt;
#endif /* CONFIG_MODULES */
static const char *ftrace_shared_hotpatch_trampoline(const char **end)
{
const char *tstart, *tend;
tstart = ftrace_shared_hotpatch_trampoline_br;
tend = ftrace_shared_hotpatch_trampoline_br_end;
#ifdef CONFIG_EXPOLINE
if (!nospec_disable) {
tstart = ftrace_shared_hotpatch_trampoline_exrl;
tend = ftrace_shared_hotpatch_trampoline_exrl_end;
}
#endif /* CONFIG_EXPOLINE */
if (end)
*end = tend;
return tstart;
}
bool ftrace_need_init_nop(void)
{
return true;
}
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
static struct ftrace_hotpatch_trampoline *next_vmlinux_trampoline =
__ftrace_hotpatch_trampolines_start;
static const char orig[6] = { 0xc0, 0x04, 0x00, 0x00, 0x00, 0x00 };
static struct ftrace_hotpatch_trampoline *trampoline;
struct ftrace_hotpatch_trampoline **next_trampoline;
struct ftrace_hotpatch_trampoline *trampolines_end;
struct ftrace_hotpatch_trampoline tmp;
struct ftrace_insn *insn;
const char *shared;
s32 disp;
BUILD_BUG_ON(sizeof(struct ftrace_hotpatch_trampoline) !=
SIZEOF_FTRACE_HOTPATCH_TRAMPOLINE);
next_trampoline = &next_vmlinux_trampoline;
trampolines_end = __ftrace_hotpatch_trampolines_end;
shared = ftrace_shared_hotpatch_trampoline(NULL);
#ifdef CONFIG_MODULES
if (mod) {
next_trampoline = &mod->arch.next_trampoline;
trampolines_end = mod->arch.trampolines_end;
shared = ftrace_plt;
}
#endif
if (WARN_ON_ONCE(*next_trampoline >= trampolines_end))
return -ENOMEM;
trampoline = (*next_trampoline)++;
/* Check for the compiler-generated fentry nop (brcl 0, .). */
if (WARN_ON_ONCE(memcmp((const void *)rec->ip, &orig, sizeof(orig))))
return -EINVAL;
/* Generate the trampoline. */
tmp.brasl_opc = 0xc015; /* brasl %r1, shared */
tmp.brasl_disp = (shared - (const char *)&trampoline->brasl_opc) / 2;
tmp.interceptor = FTRACE_ADDR;
tmp.rest_of_intercepted_function = rec->ip + sizeof(struct ftrace_insn);
s390_kernel_write(trampoline, &tmp, sizeof(tmp));
/* Generate a jump to the trampoline. */
disp = ((char *)trampoline - (char *)rec->ip) / 2;
insn = (struct ftrace_insn *)rec->ip;
s390_kernel_write(&insn->disp, &disp, sizeof(disp));
return 0;
}
static struct ftrace_hotpatch_trampoline *ftrace_get_trampoline(struct dyn_ftrace *rec)
{
struct ftrace_hotpatch_trampoline *trampoline;
struct ftrace_insn insn;
s64 disp;
u16 opc;
if (copy_from_kernel_nofault(&insn, (void *)rec->ip, sizeof(insn)))
return ERR_PTR(-EFAULT);
disp = (s64)insn.disp * 2;
trampoline = (void *)(rec->ip + disp);
if (get_kernel_nofault(opc, &trampoline->brasl_opc))
return ERR_PTR(-EFAULT);
if (opc != 0xc015)
return ERR_PTR(-EINVAL);
return trampoline;
}
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
unsigned long addr)
{
struct ftrace_hotpatch_trampoline *trampoline;
u64 old;
trampoline = ftrace_get_trampoline(rec);
if (IS_ERR(trampoline))
return PTR_ERR(trampoline);
if (get_kernel_nofault(old, &trampoline->interceptor))
return -EFAULT;
if (old != old_addr)
return -EINVAL;
s390_kernel_write(&trampoline->interceptor, &addr, sizeof(addr));
return 0;
}
static int ftrace_patch_branch_mask(void *addr, u16 expected, bool enable)
{
u16 old;
u8 op;
if (get_kernel_nofault(old, addr))
return -EFAULT;
if (old != expected)
return -EINVAL;
/* set mask field to all ones or zeroes */
op = enable ? 0xf4 : 0x04;
s390_kernel_write((char *)addr + 1, &op, sizeof(op));
return 0;
}
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
unsigned long addr)
{
/* Expect brcl 0xf,... */
return ftrace_patch_branch_mask((void *)rec->ip, 0xc0f4, false);
}
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
struct ftrace_hotpatch_trampoline *trampoline;
trampoline = ftrace_get_trampoline(rec);
if (IS_ERR(trampoline))
return PTR_ERR(trampoline);
s390_kernel_write(&trampoline->interceptor, &addr, sizeof(addr));
/* Expect brcl 0x0,... */
return ftrace_patch_branch_mask((void *)rec->ip, 0xc004, true);
}
int ftrace_update_ftrace_func(ftrace_func_t func)
{
ftrace_func = func;
return 0;
}
void arch_ftrace_update_code(int command)
{
ftrace_modify_all_code(command);
}
void ftrace_arch_code_modify_post_process(void)
{
/*
* Flush any pre-fetched instructions on all
* CPUs to make the new code visible.
*/
text_poke_sync_lock();
}
#ifdef CONFIG_MODULES
static int __init ftrace_plt_init(void)
{
const char *start, *end;
ftrace_plt = module_alloc(PAGE_SIZE);
if (!ftrace_plt)
panic("cannot allocate ftrace plt\n");
start = ftrace_shared_hotpatch_trampoline(&end);
memcpy(ftrace_plt, start, end - start);
set_memory_rox((unsigned long)ftrace_plt, 1);
return 0;
}
device_initcall(ftrace_plt_init);
#endif /* CONFIG_MODULES */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
* Hook the return address and push it onto the stack of return addresses
* in the current thread info.
*/
unsigned long prepare_ftrace_return(unsigned long ra, unsigned long sp,
unsigned long ip)
{
if (unlikely(ftrace_graph_is_dead()))
goto out;
if (unlikely(atomic_read(&current->tracing_graph_pause)))
goto out;
ip -= MCOUNT_INSN_SIZE;
if (!function_graph_enter(ra, ip, 0, (void *) sp))
ra = (unsigned long) return_to_handler;
out:
return ra;
}
NOKPROBE_SYMBOL(prepare_ftrace_return);
/*
* Patch the kernel code at the ftrace_graph_caller location. The instruction
* there is a branch relative on condition. To enable the ftrace graph code
* block, we simply patch the mask field of the instruction to zero and
* turn the instruction into a nop.
* To disable the ftrace graph code the mask field will be patched to
* all ones, which turns the instruction into an unconditional branch.
*/
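/*
* For illustration: "brc 0,target" encodes as a7 04 <disp16> and is a nop,
* while "brc 15,target" encodes as a7 f4 <disp16> and is always taken,
* which is why the expected halfwords below are 0xa704 and 0xa7f4.
*/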
int ftrace_enable_ftrace_graph_caller(void)
{
int rc;
/* Expect brc 0xf,... */
rc = ftrace_patch_branch_mask(ftrace_graph_caller, 0xa7f4, false);
if (rc)
return rc;
text_poke_sync_lock();
return 0;
}
int ftrace_disable_ftrace_graph_caller(void)
{
int rc;
/* Expect brc 0x0,... */
rc = ftrace_patch_branch_mask(ftrace_graph_caller, 0xa704, true);
if (rc)
return rc;
text_poke_sync_lock();
return 0;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#ifdef CONFIG_KPROBES_ON_FTRACE
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
struct kprobe_ctlblk *kcb;
struct pt_regs *regs;
struct kprobe *p;
int bit;
bit = ftrace_test_recursion_trylock(ip, parent_ip);
if (bit < 0)
return;
regs = ftrace_get_regs(fregs);
p = get_kprobe((kprobe_opcode_t *)ip);
if (!regs || unlikely(!p) || kprobe_disabled(p))
goto out;
if (kprobe_running()) {
kprobes_inc_nmissed_count(p);
goto out;
}
__this_cpu_write(current_kprobe, p);
kcb = get_kprobe_ctlblk();
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
instruction_pointer_set(regs, ip);
if (!p->pre_handler || !p->pre_handler(p, regs)) {
instruction_pointer_set(regs, ip + MCOUNT_INSN_SIZE);
if (unlikely(p->post_handler)) {
kcb->kprobe_status = KPROBE_HIT_SSDONE;
p->post_handler(p, regs, 0);
}
}
__this_cpu_write(current_kprobe, NULL);
out:
ftrace_test_recursion_unlock(bit);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);
int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
p->ainsn.insn = NULL;
return 0;
}
#endif
| linux-master | arch/s390/kernel/ftrace.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ipl/reipl/dump support for Linux on s390.
*
* Copyright IBM Corp. 2005, 2012
* Author(s): Michael Holzheu <[email protected]>
* Volker Sameske <[email protected]>
*/
#include <linux/types.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/kstrtox.h>
#include <linux/panic_notifier.h>
#include <linux/reboot.h>
#include <linux/ctype.h>
#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/crash_dump.h>
#include <linux/debug_locks.h>
#include <asm/asm-extable.h>
#include <asm/diag.h>
#include <asm/ipl.h>
#include <asm/smp.h>
#include <asm/setup.h>
#include <asm/cpcmd.h>
#include <asm/ebcdic.h>
#include <asm/sclp.h>
#include <asm/checksum.h>
#include <asm/debug.h>
#include <asm/abs_lowcore.h>
#include <asm/os_info.h>
#include <asm/sections.h>
#include <asm/boot_data.h>
#include "entry.h"
#define IPL_PARM_BLOCK_VERSION 0
#define IPL_UNKNOWN_STR "unknown"
#define IPL_CCW_STR "ccw"
#define IPL_ECKD_STR "eckd"
#define IPL_ECKD_DUMP_STR "eckd_dump"
#define IPL_FCP_STR "fcp"
#define IPL_FCP_DUMP_STR "fcp_dump"
#define IPL_NVME_STR "nvme"
#define IPL_NVME_DUMP_STR "nvme_dump"
#define IPL_NSS_STR "nss"
#define DUMP_CCW_STR "ccw"
#define DUMP_ECKD_STR "eckd"
#define DUMP_FCP_STR "fcp"
#define DUMP_NVME_STR "nvme"
#define DUMP_NONE_STR "none"
/*
* Five shutdown trigger types are supported:
* - panic
* - halt
* - power off
* - reipl
* - restart
*/
#define ON_PANIC_STR "on_panic"
#define ON_HALT_STR "on_halt"
#define ON_POFF_STR "on_poff"
#define ON_REIPL_STR "on_reboot"
#define ON_RESTART_STR "on_restart"
struct shutdown_action;
struct shutdown_trigger {
char *name;
struct shutdown_action *action;
};
/*
* The following shutdown action types are supported:
*/
#define SHUTDOWN_ACTION_IPL_STR "ipl"
#define SHUTDOWN_ACTION_REIPL_STR "reipl"
#define SHUTDOWN_ACTION_DUMP_STR "dump"
#define SHUTDOWN_ACTION_VMCMD_STR "vmcmd"
#define SHUTDOWN_ACTION_STOP_STR "stop"
#define SHUTDOWN_ACTION_DUMP_REIPL_STR "dump_reipl"
struct shutdown_action {
char *name;
void (*fn) (struct shutdown_trigger *trigger);
int (*init) (void);
int init_rc;
};
static char *ipl_type_str(enum ipl_type type)
{
switch (type) {
case IPL_TYPE_CCW:
return IPL_CCW_STR;
case IPL_TYPE_ECKD:
return IPL_ECKD_STR;
case IPL_TYPE_ECKD_DUMP:
return IPL_ECKD_DUMP_STR;
case IPL_TYPE_FCP:
return IPL_FCP_STR;
case IPL_TYPE_FCP_DUMP:
return IPL_FCP_DUMP_STR;
case IPL_TYPE_NSS:
return IPL_NSS_STR;
case IPL_TYPE_NVME:
return IPL_NVME_STR;
case IPL_TYPE_NVME_DUMP:
return IPL_NVME_DUMP_STR;
case IPL_TYPE_UNKNOWN:
default:
return IPL_UNKNOWN_STR;
}
}
enum dump_type {
DUMP_TYPE_NONE = 1,
DUMP_TYPE_CCW = 2,
DUMP_TYPE_FCP = 4,
DUMP_TYPE_NVME = 8,
DUMP_TYPE_ECKD = 16,
};
static char *dump_type_str(enum dump_type type)
{
switch (type) {
case DUMP_TYPE_NONE:
return DUMP_NONE_STR;
case DUMP_TYPE_CCW:
return DUMP_CCW_STR;
case DUMP_TYPE_ECKD:
return DUMP_ECKD_STR;
case DUMP_TYPE_FCP:
return DUMP_FCP_STR;
case DUMP_TYPE_NVME:
return DUMP_NVME_STR;
default:
return NULL;
}
}
int __bootdata_preserved(ipl_block_valid);
struct ipl_parameter_block __bootdata_preserved(ipl_block);
int __bootdata_preserved(ipl_secure_flag);
unsigned long __bootdata_preserved(ipl_cert_list_addr);
unsigned long __bootdata_preserved(ipl_cert_list_size);
unsigned long __bootdata(early_ipl_comp_list_addr);
unsigned long __bootdata(early_ipl_comp_list_size);
static int reipl_capabilities = IPL_TYPE_UNKNOWN;
static enum ipl_type reipl_type = IPL_TYPE_UNKNOWN;
static struct ipl_parameter_block *reipl_block_fcp;
static struct ipl_parameter_block *reipl_block_nvme;
static struct ipl_parameter_block *reipl_block_ccw;
static struct ipl_parameter_block *reipl_block_eckd;
static struct ipl_parameter_block *reipl_block_nss;
static struct ipl_parameter_block *reipl_block_actual;
static int dump_capabilities = DUMP_TYPE_NONE;
static enum dump_type dump_type = DUMP_TYPE_NONE;
static struct ipl_parameter_block *dump_block_fcp;
static struct ipl_parameter_block *dump_block_nvme;
static struct ipl_parameter_block *dump_block_ccw;
static struct ipl_parameter_block *dump_block_eckd;
static struct sclp_ipl_info sclp_ipl_info;
static bool reipl_nvme_clear;
static bool reipl_fcp_clear;
static bool reipl_ccw_clear;
static bool reipl_eckd_clear;
static unsigned long os_info_flags;
static inline int __diag308(unsigned long subcode, unsigned long addr)
{
union register_pair r1;
r1.even = addr;
r1.odd = 0;
asm volatile(
" diag %[r1],%[subcode],0x308\n"
"0: nopr %%r7\n"
EX_TABLE(0b,0b)
: [r1] "+&d" (r1.pair)
: [subcode] "d" (subcode)
: "cc", "memory");
return r1.odd;
}
int diag308(unsigned long subcode, void *addr)
{
diag_stat_inc(DIAG_STAT_X308);
return __diag308(subcode, addr ? virt_to_phys(addr) : 0);
}
EXPORT_SYMBOL_GPL(diag308);
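/*
* Typical usage, as in __reipl_run() below: diag308(DIAG308_SET, ipb)
* installs an IPL parameter block, then diag308(DIAG308_LOAD_CLEAR, NULL)
* or diag308(DIAG308_LOAD_NORMAL, NULL) triggers the re-IPL itself.
*/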
/* SYSFS */
#define IPL_ATTR_SHOW_FN(_prefix, _name, _format, args...) \
static ssize_t sys_##_prefix##_##_name##_show(struct kobject *kobj, \
struct kobj_attribute *attr, \
char *page) \
{ \
return scnprintf(page, PAGE_SIZE, _format, ##args); \
}
#define IPL_ATTR_CCW_STORE_FN(_prefix, _name, _ipl_blk) \
static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \
struct kobj_attribute *attr, \
const char *buf, size_t len) \
{ \
unsigned long long ssid, devno; \
\
if (sscanf(buf, "0.%llx.%llx\n", &ssid, &devno) != 2) \
return -EINVAL; \
\
if (ssid > __MAX_SSID || devno > __MAX_SUBCHANNEL) \
return -EINVAL; \
\
_ipl_blk.ssid = ssid; \
_ipl_blk.devno = devno; \
return len; \
}
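/*
* Example (device number made up for illustration): writing "0.1.4711" to
* such an attribute is parsed by the sscanf format above into ssid 1 and
* devno 0x4711, which are then stored in the given IPL parameter block.
*/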
#define DEFINE_IPL_CCW_ATTR_RW(_prefix, _name, _ipl_blk) \
IPL_ATTR_SHOW_FN(_prefix, _name, "0.%x.%04x\n", \
_ipl_blk.ssid, _ipl_blk.devno); \
IPL_ATTR_CCW_STORE_FN(_prefix, _name, _ipl_blk); \
static struct kobj_attribute sys_##_prefix##_##_name##_attr = \
__ATTR(_name, 0644, \
sys_##_prefix##_##_name##_show, \
sys_##_prefix##_##_name##_store)
#define DEFINE_IPL_ATTR_RO(_prefix, _name, _format, _value) \
IPL_ATTR_SHOW_FN(_prefix, _name, _format, _value) \
static struct kobj_attribute sys_##_prefix##_##_name##_attr = \
__ATTR(_name, 0444, sys_##_prefix##_##_name##_show, NULL)
#define DEFINE_IPL_ATTR_RW(_prefix, _name, _fmt_out, _fmt_in, _value) \
IPL_ATTR_SHOW_FN(_prefix, _name, _fmt_out, (unsigned long long) _value) \
static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \
struct kobj_attribute *attr, \
const char *buf, size_t len) \
{ \
unsigned long long value; \
if (sscanf(buf, _fmt_in, &value) != 1) \
return -EINVAL; \
_value = value; \
return len; \
} \
static struct kobj_attribute sys_##_prefix##_##_name##_attr = \
__ATTR(_name, 0644, \
sys_##_prefix##_##_name##_show, \
sys_##_prefix##_##_name##_store)
#define DEFINE_IPL_ATTR_STR_RW(_prefix, _name, _fmt_out, _fmt_in, _value)\
IPL_ATTR_SHOW_FN(_prefix, _name, _fmt_out, _value) \
static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \
struct kobj_attribute *attr, \
const char *buf, size_t len) \
{ \
strscpy(_value, buf, sizeof(_value)); \
strim(_value); \
return len; \
} \
static struct kobj_attribute sys_##_prefix##_##_name##_attr = \
__ATTR(_name, 0644, \
sys_##_prefix##_##_name##_show, \
sys_##_prefix##_##_name##_store)
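/*
* As an example, DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n",
* "%llx\n", reipl_block_fcp->fcp.wwpn) further down expands to
* sys_reipl_fcp_wwpn_show(), sys_reipl_fcp_wwpn_store() and the
* kobj_attribute sys_reipl_fcp_wwpn_attr with mode 0644.
*/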
/*
* ipl section
*/
static __init enum ipl_type get_ipl_type(void)
{
if (!ipl_block_valid)
return IPL_TYPE_UNKNOWN;
switch (ipl_block.pb0_hdr.pbt) {
case IPL_PBT_CCW:
return IPL_TYPE_CCW;
case IPL_PBT_FCP:
if (ipl_block.fcp.opt == IPL_PB0_FCP_OPT_DUMP)
return IPL_TYPE_FCP_DUMP;
else
return IPL_TYPE_FCP;
case IPL_PBT_NVME:
if (ipl_block.nvme.opt == IPL_PB0_NVME_OPT_DUMP)
return IPL_TYPE_NVME_DUMP;
else
return IPL_TYPE_NVME;
case IPL_PBT_ECKD:
if (ipl_block.eckd.opt == IPL_PB0_ECKD_OPT_DUMP)
return IPL_TYPE_ECKD_DUMP;
else
return IPL_TYPE_ECKD;
}
return IPL_TYPE_UNKNOWN;
}
struct ipl_info ipl_info;
EXPORT_SYMBOL_GPL(ipl_info);
static ssize_t ipl_type_show(struct kobject *kobj, struct kobj_attribute *attr,
char *page)
{
return sprintf(page, "%s\n", ipl_type_str(ipl_info.type));
}
static struct kobj_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type);
static ssize_t ipl_secure_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
return sprintf(page, "%i\n", !!ipl_secure_flag);
}
static struct kobj_attribute sys_ipl_secure_attr =
__ATTR(secure, 0444, ipl_secure_show, NULL);
static ssize_t ipl_has_secure_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
return sprintf(page, "%i\n", !!sclp.has_sipl);
}
static struct kobj_attribute sys_ipl_has_secure_attr =
__ATTR(has_secure, 0444, ipl_has_secure_show, NULL);
static ssize_t ipl_vm_parm_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
char parm[DIAG308_VMPARM_SIZE + 1] = {};
if (ipl_block_valid && (ipl_block.pb0_hdr.pbt == IPL_PBT_CCW))
ipl_block_get_ascii_vmparm(parm, sizeof(parm), &ipl_block);
return sprintf(page, "%s\n", parm);
}
static struct kobj_attribute sys_ipl_vm_parm_attr =
__ATTR(parm, 0444, ipl_vm_parm_show, NULL);
static ssize_t sys_ipl_device_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
switch (ipl_info.type) {
case IPL_TYPE_CCW:
return sprintf(page, "0.%x.%04x\n", ipl_block.ccw.ssid,
ipl_block.ccw.devno);
case IPL_TYPE_ECKD:
case IPL_TYPE_ECKD_DUMP:
return sprintf(page, "0.%x.%04x\n", ipl_block.eckd.ssid,
ipl_block.eckd.devno);
case IPL_TYPE_FCP:
case IPL_TYPE_FCP_DUMP:
return sprintf(page, "0.0.%04x\n", ipl_block.fcp.devno);
case IPL_TYPE_NVME:
case IPL_TYPE_NVME_DUMP:
return sprintf(page, "%08ux\n", ipl_block.nvme.fid);
default:
return 0;
}
}
static struct kobj_attribute sys_ipl_device_attr =
__ATTR(device, 0444, sys_ipl_device_show, NULL);
static ssize_t ipl_parameter_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
return memory_read_from_buffer(buf, count, &off, &ipl_block,
ipl_block.hdr.len);
}
static struct bin_attribute ipl_parameter_attr =
__BIN_ATTR(binary_parameter, 0444, ipl_parameter_read, NULL,
PAGE_SIZE);
static ssize_t ipl_scp_data_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
unsigned int size = ipl_block.fcp.scp_data_len;
void *scp_data = &ipl_block.fcp.scp_data;
return memory_read_from_buffer(buf, count, &off, scp_data, size);
}
static ssize_t ipl_nvme_scp_data_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
unsigned int size = ipl_block.nvme.scp_data_len;
void *scp_data = &ipl_block.nvme.scp_data;
return memory_read_from_buffer(buf, count, &off, scp_data, size);
}
static ssize_t ipl_eckd_scp_data_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
unsigned int size = ipl_block.eckd.scp_data_len;
void *scp_data = &ipl_block.eckd.scp_data;
return memory_read_from_buffer(buf, count, &off, scp_data, size);
}
static struct bin_attribute ipl_scp_data_attr =
__BIN_ATTR(scp_data, 0444, ipl_scp_data_read, NULL, PAGE_SIZE);
static struct bin_attribute ipl_nvme_scp_data_attr =
__BIN_ATTR(scp_data, 0444, ipl_nvme_scp_data_read, NULL, PAGE_SIZE);
static struct bin_attribute ipl_eckd_scp_data_attr =
__BIN_ATTR(scp_data, 0444, ipl_eckd_scp_data_read, NULL, PAGE_SIZE);
static struct bin_attribute *ipl_fcp_bin_attrs[] = {
&ipl_parameter_attr,
&ipl_scp_data_attr,
NULL,
};
static struct bin_attribute *ipl_nvme_bin_attrs[] = {
&ipl_parameter_attr,
&ipl_nvme_scp_data_attr,
NULL,
};
static struct bin_attribute *ipl_eckd_bin_attrs[] = {
&ipl_parameter_attr,
&ipl_eckd_scp_data_attr,
NULL,
};
/* FCP ipl device attributes */
DEFINE_IPL_ATTR_RO(ipl_fcp, wwpn, "0x%016llx\n",
(unsigned long long)ipl_block.fcp.wwpn);
DEFINE_IPL_ATTR_RO(ipl_fcp, lun, "0x%016llx\n",
(unsigned long long)ipl_block.fcp.lun);
DEFINE_IPL_ATTR_RO(ipl_fcp, bootprog, "%lld\n",
(unsigned long long)ipl_block.fcp.bootprog);
DEFINE_IPL_ATTR_RO(ipl_fcp, br_lba, "%lld\n",
(unsigned long long)ipl_block.fcp.br_lba);
/* NVMe ipl device attributes */
DEFINE_IPL_ATTR_RO(ipl_nvme, fid, "0x%08llx\n",
(unsigned long long)ipl_block.nvme.fid);
DEFINE_IPL_ATTR_RO(ipl_nvme, nsid, "0x%08llx\n",
(unsigned long long)ipl_block.nvme.nsid);
DEFINE_IPL_ATTR_RO(ipl_nvme, bootprog, "%lld\n",
(unsigned long long)ipl_block.nvme.bootprog);
DEFINE_IPL_ATTR_RO(ipl_nvme, br_lba, "%lld\n",
(unsigned long long)ipl_block.nvme.br_lba);
/* ECKD ipl device attributes */
DEFINE_IPL_ATTR_RO(ipl_eckd, bootprog, "%lld\n",
(unsigned long long)ipl_block.eckd.bootprog);
#define IPL_ATTR_BR_CHR_SHOW_FN(_name, _ipb) \
static ssize_t eckd_##_name##_br_chr_show(struct kobject *kobj, \
struct kobj_attribute *attr, \
char *buf) \
{ \
struct ipl_pb0_eckd *ipb = &(_ipb); \
\
if (!ipb->br_chr.cyl && \
!ipb->br_chr.head && \
!ipb->br_chr.record) \
return sprintf(buf, "auto\n"); \
\
return sprintf(buf, "0x%x,0x%x,0x%x\n", \
ipb->br_chr.cyl, \
ipb->br_chr.head, \
ipb->br_chr.record); \
}
#define IPL_ATTR_BR_CHR_STORE_FN(_name, _ipb) \
static ssize_t eckd_##_name##_br_chr_store(struct kobject *kobj, \
struct kobj_attribute *attr, \
const char *buf, size_t len) \
{ \
struct ipl_pb0_eckd *ipb = &(_ipb); \
unsigned long args[3] = { 0 }; \
char *p, *p1, *tmp = NULL; \
int i, rc; \
\
if (!strncmp(buf, "auto", 4)) \
goto out; \
\
tmp = kstrdup(buf, GFP_KERNEL); \
p = tmp; \
for (i = 0; i < 3; i++) { \
p1 = strsep(&p, ", "); \
if (!p1) { \
rc = -EINVAL; \
goto err; \
} \
rc = kstrtoul(p1, 0, args + i); \
if (rc) \
goto err; \
} \
\
rc = -EINVAL; \
if (i != 3) \
goto err; \
\
if ((args[0] || args[1]) && !args[2]) \
goto err; \
\
if (args[0] > UINT_MAX || args[1] > 255 || args[2] > 255) \
goto err; \
\
out: \
ipb->br_chr.cyl = args[0]; \
ipb->br_chr.head = args[1]; \
ipb->br_chr.record = args[2]; \
rc = len; \
err: \
kfree(tmp); \
return rc; \
}
IPL_ATTR_BR_CHR_SHOW_FN(ipl, ipl_block.eckd);
static struct kobj_attribute sys_ipl_eckd_br_chr_attr =
__ATTR(br_chr, 0644, eckd_ipl_br_chr_show, NULL);
IPL_ATTR_BR_CHR_SHOW_FN(reipl, reipl_block_eckd->eckd);
IPL_ATTR_BR_CHR_STORE_FN(reipl, reipl_block_eckd->eckd);
static struct kobj_attribute sys_reipl_eckd_br_chr_attr =
__ATTR(br_chr, 0644, eckd_reipl_br_chr_show, eckd_reipl_br_chr_store);
static ssize_t ipl_ccw_loadparm_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
char loadparm[LOADPARM_LEN + 1] = {};
if (!sclp_ipl_info.is_valid)
return sprintf(page, "#unknown#\n");
memcpy(loadparm, &sclp_ipl_info.loadparm, LOADPARM_LEN);
EBCASC(loadparm, LOADPARM_LEN);
strim(loadparm);
return sprintf(page, "%s\n", loadparm);
}
static struct kobj_attribute sys_ipl_ccw_loadparm_attr =
__ATTR(loadparm, 0444, ipl_ccw_loadparm_show, NULL);
static struct attribute *ipl_fcp_attrs[] = {
&sys_ipl_device_attr.attr,
&sys_ipl_fcp_wwpn_attr.attr,
&sys_ipl_fcp_lun_attr.attr,
&sys_ipl_fcp_bootprog_attr.attr,
&sys_ipl_fcp_br_lba_attr.attr,
&sys_ipl_ccw_loadparm_attr.attr,
NULL,
};
static struct attribute_group ipl_fcp_attr_group = {
.attrs = ipl_fcp_attrs,
.bin_attrs = ipl_fcp_bin_attrs,
};
static struct attribute *ipl_nvme_attrs[] = {
&sys_ipl_nvme_fid_attr.attr,
&sys_ipl_nvme_nsid_attr.attr,
&sys_ipl_nvme_bootprog_attr.attr,
&sys_ipl_nvme_br_lba_attr.attr,
&sys_ipl_ccw_loadparm_attr.attr,
NULL,
};
static struct attribute_group ipl_nvme_attr_group = {
.attrs = ipl_nvme_attrs,
.bin_attrs = ipl_nvme_bin_attrs,
};
static struct attribute *ipl_eckd_attrs[] = {
&sys_ipl_eckd_bootprog_attr.attr,
&sys_ipl_eckd_br_chr_attr.attr,
&sys_ipl_ccw_loadparm_attr.attr,
&sys_ipl_device_attr.attr,
NULL,
};
static struct attribute_group ipl_eckd_attr_group = {
.attrs = ipl_eckd_attrs,
.bin_attrs = ipl_eckd_bin_attrs,
};
/* CCW ipl device attributes */
static struct attribute *ipl_ccw_attrs_vm[] = {
&sys_ipl_device_attr.attr,
&sys_ipl_ccw_loadparm_attr.attr,
&sys_ipl_vm_parm_attr.attr,
NULL,
};
static struct attribute *ipl_ccw_attrs_lpar[] = {
&sys_ipl_device_attr.attr,
&sys_ipl_ccw_loadparm_attr.attr,
NULL,
};
static struct attribute_group ipl_ccw_attr_group_vm = {
.attrs = ipl_ccw_attrs_vm,
};
static struct attribute_group ipl_ccw_attr_group_lpar = {
.attrs = ipl_ccw_attrs_lpar
};
static struct attribute *ipl_common_attrs[] = {
&sys_ipl_type_attr.attr,
&sys_ipl_secure_attr.attr,
&sys_ipl_has_secure_attr.attr,
NULL,
};
static struct attribute_group ipl_common_attr_group = {
.attrs = ipl_common_attrs,
};
static struct kset *ipl_kset;
static void __ipl_run(void *unused)
{
diag308(DIAG308_LOAD_CLEAR, NULL);
}
static void ipl_run(struct shutdown_trigger *trigger)
{
smp_call_ipl_cpu(__ipl_run, NULL);
}
static int __init ipl_init(void)
{
int rc;
ipl_kset = kset_create_and_add("ipl", NULL, firmware_kobj);
if (!ipl_kset) {
rc = -ENOMEM;
goto out;
}
rc = sysfs_create_group(&ipl_kset->kobj, &ipl_common_attr_group);
if (rc)
goto out;
switch (ipl_info.type) {
case IPL_TYPE_CCW:
if (MACHINE_IS_VM)
rc = sysfs_create_group(&ipl_kset->kobj,
&ipl_ccw_attr_group_vm);
else
rc = sysfs_create_group(&ipl_kset->kobj,
&ipl_ccw_attr_group_lpar);
break;
case IPL_TYPE_ECKD:
rc = sysfs_create_group(&ipl_kset->kobj, &ipl_eckd_attr_group);
break;
case IPL_TYPE_FCP:
case IPL_TYPE_FCP_DUMP:
rc = sysfs_create_group(&ipl_kset->kobj, &ipl_fcp_attr_group);
break;
case IPL_TYPE_NVME:
case IPL_TYPE_NVME_DUMP:
rc = sysfs_create_group(&ipl_kset->kobj, &ipl_nvme_attr_group);
break;
default:
break;
}
out:
if (rc)
panic("ipl_init failed: rc = %i\n", rc);
return 0;
}
static struct shutdown_action __refdata ipl_action = {
.name = SHUTDOWN_ACTION_IPL_STR,
.fn = ipl_run,
.init = ipl_init,
};
/*
* reipl shutdown action: Reboot Linux on shutdown.
*/
/* VM IPL PARM attributes */
static ssize_t reipl_generic_vmparm_show(struct ipl_parameter_block *ipb,
char *page)
{
char vmparm[DIAG308_VMPARM_SIZE + 1] = {};
ipl_block_get_ascii_vmparm(vmparm, sizeof(vmparm), ipb);
return sprintf(page, "%s\n", vmparm);
}
static ssize_t reipl_generic_vmparm_store(struct ipl_parameter_block *ipb,
size_t vmparm_max,
const char *buf, size_t len)
{
int i, ip_len;
/* ignore trailing newline */
ip_len = len;
if ((len > 0) && (buf[len - 1] == '\n'))
ip_len--;
if (ip_len > vmparm_max)
return -EINVAL;
/* parm is used to store kernel options, check for common chars */
for (i = 0; i < ip_len; i++)
if (!(isalnum(buf[i]) || isascii(buf[i]) || isprint(buf[i])))
return -EINVAL;
memset(ipb->ccw.vm_parm, 0, DIAG308_VMPARM_SIZE);
ipb->ccw.vm_parm_len = ip_len;
if (ip_len > 0) {
ipb->ccw.vm_flags |= IPL_PB0_CCW_VM_FLAG_VP;
memcpy(ipb->ccw.vm_parm, buf, ip_len);
ASCEBC(ipb->ccw.vm_parm, ip_len);
} else {
ipb->ccw.vm_flags &= ~IPL_PB0_CCW_VM_FLAG_VP;
}
return len;
}
/* NSS wrapper */
static ssize_t reipl_nss_vmparm_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
return reipl_generic_vmparm_show(reipl_block_nss, page);
}
static ssize_t reipl_nss_vmparm_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t len)
{
return reipl_generic_vmparm_store(reipl_block_nss, 56, buf, len);
}
/* CCW wrapper */
static ssize_t reipl_ccw_vmparm_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
return reipl_generic_vmparm_show(reipl_block_ccw, page);
}
static ssize_t reipl_ccw_vmparm_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t len)
{
return reipl_generic_vmparm_store(reipl_block_ccw, 64, buf, len);
}
static struct kobj_attribute sys_reipl_nss_vmparm_attr =
__ATTR(parm, 0644, reipl_nss_vmparm_show,
reipl_nss_vmparm_store);
static struct kobj_attribute sys_reipl_ccw_vmparm_attr =
__ATTR(parm, 0644, reipl_ccw_vmparm_show,
reipl_ccw_vmparm_store);
/* FCP reipl device attributes */
static ssize_t reipl_fcp_scpdata_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
size_t size = reipl_block_fcp->fcp.scp_data_len;
void *scp_data = reipl_block_fcp->fcp.scp_data;
return memory_read_from_buffer(buf, count, &off, scp_data, size);
}
static ssize_t reipl_fcp_scpdata_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
size_t scpdata_len = count;
size_t padding;
if (off)
return -EINVAL;
memcpy(reipl_block_fcp->fcp.scp_data, buf, count);
if (scpdata_len % 8) {
padding = 8 - (scpdata_len % 8);
memset(reipl_block_fcp->fcp.scp_data + scpdata_len,
0, padding);
scpdata_len += padding;
}
reipl_block_fcp->hdr.len = IPL_BP_FCP_LEN + scpdata_len;
reipl_block_fcp->fcp.len = IPL_BP0_FCP_LEN + scpdata_len;
reipl_block_fcp->fcp.scp_data_len = scpdata_len;
return count;
}
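/*
* SCP data is padded to a multiple of 8 bytes: a 13-byte write, for
* example, gets 3 zero bytes appended so that scpdata_len becomes 16
* before the block header lengths are updated.
*/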
static struct bin_attribute sys_reipl_fcp_scp_data_attr =
__BIN_ATTR(scp_data, 0644, reipl_fcp_scpdata_read,
reipl_fcp_scpdata_write, DIAG308_SCPDATA_SIZE);
static struct bin_attribute *reipl_fcp_bin_attrs[] = {
&sys_reipl_fcp_scp_data_attr,
NULL,
};
DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%llx\n",
reipl_block_fcp->fcp.wwpn);
DEFINE_IPL_ATTR_RW(reipl_fcp, lun, "0x%016llx\n", "%llx\n",
reipl_block_fcp->fcp.lun);
DEFINE_IPL_ATTR_RW(reipl_fcp, bootprog, "%lld\n", "%lld\n",
reipl_block_fcp->fcp.bootprog);
DEFINE_IPL_ATTR_RW(reipl_fcp, br_lba, "%lld\n", "%lld\n",
reipl_block_fcp->fcp.br_lba);
DEFINE_IPL_ATTR_RW(reipl_fcp, device, "0.0.%04llx\n", "0.0.%llx\n",
reipl_block_fcp->fcp.devno);
static void reipl_get_ascii_loadparm(char *loadparm,
struct ipl_parameter_block *ibp)
{
memcpy(loadparm, ibp->common.loadparm, LOADPARM_LEN);
EBCASC(loadparm, LOADPARM_LEN);
loadparm[LOADPARM_LEN] = 0;
strim(loadparm);
}
static ssize_t reipl_generic_loadparm_show(struct ipl_parameter_block *ipb,
char *page)
{
char buf[LOADPARM_LEN + 1];
reipl_get_ascii_loadparm(buf, ipb);
return sprintf(page, "%s\n", buf);
}
static ssize_t reipl_generic_loadparm_store(struct ipl_parameter_block *ipb,
const char *buf, size_t len)
{
int i, lp_len;
/* ignore trailing newline */
lp_len = len;
if ((len > 0) && (buf[len - 1] == '\n'))
lp_len--;
/* loadparm can have max 8 characters and must not start with a blank */
if ((lp_len > LOADPARM_LEN) || ((lp_len > 0) && (buf[0] == ' ')))
return -EINVAL;
/* loadparm can only contain "a-z,A-Z,0-9,SP,." */
for (i = 0; i < lp_len; i++) {
if (isalpha(buf[i]) || isdigit(buf[i]) || (buf[i] == ' ') ||
(buf[i] == '.'))
continue;
return -EINVAL;
}
/* initialize loadparm with blanks */
memset(ipb->common.loadparm, ' ', LOADPARM_LEN);
/* copy and convert to ebcdic */
memcpy(ipb->common.loadparm, buf, lp_len);
ASCEBC(ipb->common.loadparm, LOADPARM_LEN);
ipb->common.flags |= IPL_PB0_FLAG_LOADPARM;
return len;
}
#define DEFINE_GENERIC_LOADPARM(name) \
static ssize_t reipl_##name##_loadparm_show(struct kobject *kobj, \
struct kobj_attribute *attr, char *page) \
{ \
return reipl_generic_loadparm_show(reipl_block_##name, page); \
} \
static ssize_t reipl_##name##_loadparm_store(struct kobject *kobj, \
struct kobj_attribute *attr, \
const char *buf, size_t len) \
{ \
return reipl_generic_loadparm_store(reipl_block_##name, buf, len); \
} \
static struct kobj_attribute sys_reipl_##name##_loadparm_attr = \
__ATTR(loadparm, 0644, reipl_##name##_loadparm_show, \
reipl_##name##_loadparm_store)
DEFINE_GENERIC_LOADPARM(fcp);
DEFINE_GENERIC_LOADPARM(nvme);
DEFINE_GENERIC_LOADPARM(ccw);
DEFINE_GENERIC_LOADPARM(nss);
DEFINE_GENERIC_LOADPARM(eckd);
static ssize_t reipl_fcp_clear_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
return sprintf(page, "%u\n", reipl_fcp_clear);
}
static ssize_t reipl_fcp_clear_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t len)
{
if (kstrtobool(buf, &reipl_fcp_clear) < 0)
return -EINVAL;
return len;
}
static struct attribute *reipl_fcp_attrs[] = {
&sys_reipl_fcp_device_attr.attr,
&sys_reipl_fcp_wwpn_attr.attr,
&sys_reipl_fcp_lun_attr.attr,
&sys_reipl_fcp_bootprog_attr.attr,
&sys_reipl_fcp_br_lba_attr.attr,
&sys_reipl_fcp_loadparm_attr.attr,
NULL,
};
static struct attribute_group reipl_fcp_attr_group = {
.attrs = reipl_fcp_attrs,
.bin_attrs = reipl_fcp_bin_attrs,
};
static struct kobj_attribute sys_reipl_fcp_clear_attr =
__ATTR(clear, 0644, reipl_fcp_clear_show, reipl_fcp_clear_store);
/* NVME reipl device attributes */
static ssize_t reipl_nvme_scpdata_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
size_t size = reipl_block_nvme->nvme.scp_data_len;
void *scp_data = reipl_block_nvme->nvme.scp_data;
return memory_read_from_buffer(buf, count, &off, scp_data, size);
}
static ssize_t reipl_nvme_scpdata_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
size_t scpdata_len = count;
size_t padding;
if (off)
return -EINVAL;
memcpy(reipl_block_nvme->nvme.scp_data, buf, count);
if (scpdata_len % 8) {
padding = 8 - (scpdata_len % 8);
memset(reipl_block_nvme->nvme.scp_data + scpdata_len,
0, padding);
scpdata_len += padding;
}
reipl_block_nvme->hdr.len = IPL_BP_FCP_LEN + scpdata_len;
reipl_block_nvme->nvme.len = IPL_BP0_FCP_LEN + scpdata_len;
reipl_block_nvme->nvme.scp_data_len = scpdata_len;
return count;
}
static struct bin_attribute sys_reipl_nvme_scp_data_attr =
__BIN_ATTR(scp_data, 0644, reipl_nvme_scpdata_read,
reipl_nvme_scpdata_write, DIAG308_SCPDATA_SIZE);
static struct bin_attribute *reipl_nvme_bin_attrs[] = {
&sys_reipl_nvme_scp_data_attr,
NULL,
};
DEFINE_IPL_ATTR_RW(reipl_nvme, fid, "0x%08llx\n", "%llx\n",
reipl_block_nvme->nvme.fid);
DEFINE_IPL_ATTR_RW(reipl_nvme, nsid, "0x%08llx\n", "%llx\n",
reipl_block_nvme->nvme.nsid);
DEFINE_IPL_ATTR_RW(reipl_nvme, bootprog, "%lld\n", "%lld\n",
reipl_block_nvme->nvme.bootprog);
DEFINE_IPL_ATTR_RW(reipl_nvme, br_lba, "%lld\n", "%lld\n",
reipl_block_nvme->nvme.br_lba);
static struct attribute *reipl_nvme_attrs[] = {
&sys_reipl_nvme_fid_attr.attr,
&sys_reipl_nvme_nsid_attr.attr,
&sys_reipl_nvme_bootprog_attr.attr,
&sys_reipl_nvme_br_lba_attr.attr,
&sys_reipl_nvme_loadparm_attr.attr,
NULL,
};
static struct attribute_group reipl_nvme_attr_group = {
.attrs = reipl_nvme_attrs,
.bin_attrs = reipl_nvme_bin_attrs
};
static ssize_t reipl_nvme_clear_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
return sprintf(page, "%u\n", reipl_nvme_clear);
}
static ssize_t reipl_nvme_clear_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t len)
{
if (kstrtobool(buf, &reipl_nvme_clear) < 0)
return -EINVAL;
return len;
}
static struct kobj_attribute sys_reipl_nvme_clear_attr =
__ATTR(clear, 0644, reipl_nvme_clear_show, reipl_nvme_clear_store);
/* CCW reipl device attributes */
DEFINE_IPL_CCW_ATTR_RW(reipl_ccw, device, reipl_block_ccw->ccw);
static ssize_t reipl_ccw_clear_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
return sprintf(page, "%u\n", reipl_ccw_clear);
}
static ssize_t reipl_ccw_clear_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t len)
{
if (kstrtobool(buf, &reipl_ccw_clear) < 0)
return -EINVAL;
return len;
}
static struct kobj_attribute sys_reipl_ccw_clear_attr =
__ATTR(clear, 0644, reipl_ccw_clear_show, reipl_ccw_clear_store);
static struct attribute *reipl_ccw_attrs_vm[] = {
&sys_reipl_ccw_device_attr.attr,
&sys_reipl_ccw_loadparm_attr.attr,
&sys_reipl_ccw_vmparm_attr.attr,
&sys_reipl_ccw_clear_attr.attr,
NULL,
};
static struct attribute *reipl_ccw_attrs_lpar[] = {
&sys_reipl_ccw_device_attr.attr,
&sys_reipl_ccw_loadparm_attr.attr,
&sys_reipl_ccw_clear_attr.attr,
NULL,
};
static struct attribute_group reipl_ccw_attr_group_vm = {
.name = IPL_CCW_STR,
.attrs = reipl_ccw_attrs_vm,
};
static struct attribute_group reipl_ccw_attr_group_lpar = {
.name = IPL_CCW_STR,
.attrs = reipl_ccw_attrs_lpar,
};
/* ECKD reipl device attributes */
static ssize_t reipl_eckd_scpdata_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
size_t size = reipl_block_eckd->eckd.scp_data_len;
void *scp_data = reipl_block_eckd->eckd.scp_data;
return memory_read_from_buffer(buf, count, &off, scp_data, size);
}
static ssize_t reipl_eckd_scpdata_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
size_t scpdata_len = count;
size_t padding;
if (off)
return -EINVAL;
memcpy(reipl_block_eckd->eckd.scp_data, buf, count);
if (scpdata_len % 8) {
padding = 8 - (scpdata_len % 8);
memset(reipl_block_eckd->eckd.scp_data + scpdata_len,
0, padding);
scpdata_len += padding;
}
reipl_block_eckd->hdr.len = IPL_BP_ECKD_LEN + scpdata_len;
reipl_block_eckd->eckd.len = IPL_BP0_ECKD_LEN + scpdata_len;
reipl_block_eckd->eckd.scp_data_len = scpdata_len;
return count;
}
static struct bin_attribute sys_reipl_eckd_scp_data_attr =
__BIN_ATTR(scp_data, 0644, reipl_eckd_scpdata_read,
reipl_eckd_scpdata_write, DIAG308_SCPDATA_SIZE);
static struct bin_attribute *reipl_eckd_bin_attrs[] = {
&sys_reipl_eckd_scp_data_attr,
NULL,
};
DEFINE_IPL_CCW_ATTR_RW(reipl_eckd, device, reipl_block_eckd->eckd);
DEFINE_IPL_ATTR_RW(reipl_eckd, bootprog, "%lld\n", "%lld\n",
reipl_block_eckd->eckd.bootprog);
static struct attribute *reipl_eckd_attrs[] = {
&sys_reipl_eckd_device_attr.attr,
&sys_reipl_eckd_bootprog_attr.attr,
&sys_reipl_eckd_br_chr_attr.attr,
&sys_reipl_eckd_loadparm_attr.attr,
NULL,
};
static struct attribute_group reipl_eckd_attr_group = {
.attrs = reipl_eckd_attrs,
.bin_attrs = reipl_eckd_bin_attrs
};
static ssize_t reipl_eckd_clear_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
return sprintf(page, "%u\n", reipl_eckd_clear);
}
static ssize_t reipl_eckd_clear_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t len)
{
if (kstrtobool(buf, &reipl_eckd_clear) < 0)
return -EINVAL;
return len;
}
static struct kobj_attribute sys_reipl_eckd_clear_attr =
__ATTR(clear, 0644, reipl_eckd_clear_show, reipl_eckd_clear_store);
/* NSS reipl device attributes */
static void reipl_get_ascii_nss_name(char *dst,
struct ipl_parameter_block *ipb)
{
memcpy(dst, ipb->ccw.nss_name, NSS_NAME_SIZE);
EBCASC(dst, NSS_NAME_SIZE);
dst[NSS_NAME_SIZE] = 0;
}
static ssize_t reipl_nss_name_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
char nss_name[NSS_NAME_SIZE + 1] = {};
reipl_get_ascii_nss_name(nss_name, reipl_block_nss);
return sprintf(page, "%s\n", nss_name);
}
static ssize_t reipl_nss_name_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t len)
{
int nss_len;
/* ignore trailing newline */
nss_len = len;
if ((len > 0) && (buf[len - 1] == '\n'))
nss_len--;
if (nss_len > NSS_NAME_SIZE)
return -EINVAL;
memset(reipl_block_nss->ccw.nss_name, 0x40, NSS_NAME_SIZE);
if (nss_len > 0) {
reipl_block_nss->ccw.vm_flags |= IPL_PB0_CCW_VM_FLAG_NSS;
memcpy(reipl_block_nss->ccw.nss_name, buf, nss_len);
ASCEBC(reipl_block_nss->ccw.nss_name, nss_len);
EBC_TOUPPER(reipl_block_nss->ccw.nss_name, nss_len);
} else {
reipl_block_nss->ccw.vm_flags &= ~IPL_PB0_CCW_VM_FLAG_NSS;
}
return len;
}
static struct kobj_attribute sys_reipl_nss_name_attr =
__ATTR(name, 0644, reipl_nss_name_show,
reipl_nss_name_store);
static struct attribute *reipl_nss_attrs[] = {
&sys_reipl_nss_name_attr.attr,
&sys_reipl_nss_loadparm_attr.attr,
&sys_reipl_nss_vmparm_attr.attr,
NULL,
};
static struct attribute_group reipl_nss_attr_group = {
.name = IPL_NSS_STR,
.attrs = reipl_nss_attrs,
};
void set_os_info_reipl_block(void)
{
os_info_entry_add(OS_INFO_REIPL_BLOCK, reipl_block_actual,
reipl_block_actual->hdr.len);
}
/* reipl type */
static int reipl_set_type(enum ipl_type type)
{
if (!(reipl_capabilities & type))
return -EINVAL;
switch(type) {
case IPL_TYPE_CCW:
reipl_block_actual = reipl_block_ccw;
break;
case IPL_TYPE_ECKD:
reipl_block_actual = reipl_block_eckd;
break;
case IPL_TYPE_FCP:
reipl_block_actual = reipl_block_fcp;
break;
case IPL_TYPE_NVME:
reipl_block_actual = reipl_block_nvme;
break;
case IPL_TYPE_NSS:
reipl_block_actual = reipl_block_nss;
break;
default:
break;
}
reipl_type = type;
return 0;
}
static ssize_t reipl_type_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
return sprintf(page, "%s\n", ipl_type_str(reipl_type));
}
static ssize_t reipl_type_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t len)
{
int rc = -EINVAL;
if (strncmp(buf, IPL_CCW_STR, strlen(IPL_CCW_STR)) == 0)
rc = reipl_set_type(IPL_TYPE_CCW);
else if (strncmp(buf, IPL_ECKD_STR, strlen(IPL_ECKD_STR)) == 0)
rc = reipl_set_type(IPL_TYPE_ECKD);
else if (strncmp(buf, IPL_FCP_STR, strlen(IPL_FCP_STR)) == 0)
rc = reipl_set_type(IPL_TYPE_FCP);
else if (strncmp(buf, IPL_NVME_STR, strlen(IPL_NVME_STR)) == 0)
rc = reipl_set_type(IPL_TYPE_NVME);
else if (strncmp(buf, IPL_NSS_STR, strlen(IPL_NSS_STR)) == 0)
rc = reipl_set_type(IPL_TYPE_NSS);
return (rc != 0) ? rc : len;
}
static struct kobj_attribute reipl_type_attr =
__ATTR(reipl_type, 0644, reipl_type_show, reipl_type_store);
static struct kset *reipl_kset;
static struct kset *reipl_fcp_kset;
static struct kset *reipl_nvme_kset;
static struct kset *reipl_eckd_kset;
static void __reipl_run(void *unused)
{
switch (reipl_type) {
case IPL_TYPE_CCW:
diag308(DIAG308_SET, reipl_block_ccw);
if (reipl_ccw_clear)
diag308(DIAG308_LOAD_CLEAR, NULL);
else
diag308(DIAG308_LOAD_NORMAL_DUMP, NULL);
break;
case IPL_TYPE_ECKD:
diag308(DIAG308_SET, reipl_block_eckd);
if (reipl_eckd_clear)
diag308(DIAG308_LOAD_CLEAR, NULL);
else
diag308(DIAG308_LOAD_NORMAL, NULL);
break;
case IPL_TYPE_FCP:
diag308(DIAG308_SET, reipl_block_fcp);
if (reipl_fcp_clear)
diag308(DIAG308_LOAD_CLEAR, NULL);
else
diag308(DIAG308_LOAD_NORMAL, NULL);
break;
case IPL_TYPE_NVME:
diag308(DIAG308_SET, reipl_block_nvme);
if (reipl_nvme_clear)
diag308(DIAG308_LOAD_CLEAR, NULL);
else
diag308(DIAG308_LOAD_NORMAL, NULL);
break;
case IPL_TYPE_NSS:
diag308(DIAG308_SET, reipl_block_nss);
diag308(DIAG308_LOAD_CLEAR, NULL);
break;
case IPL_TYPE_UNKNOWN:
diag308(DIAG308_LOAD_CLEAR, NULL);
break;
case IPL_TYPE_FCP_DUMP:
case IPL_TYPE_NVME_DUMP:
case IPL_TYPE_ECKD_DUMP:
break;
}
disabled_wait();
}
static void reipl_run(struct shutdown_trigger *trigger)
{
smp_call_ipl_cpu(__reipl_run, NULL);
}
static void reipl_block_ccw_init(struct ipl_parameter_block *ipb)
{
ipb->hdr.len = IPL_BP_CCW_LEN;
ipb->hdr.version = IPL_PARM_BLOCK_VERSION;
ipb->pb0_hdr.len = IPL_BP0_CCW_LEN;
ipb->pb0_hdr.pbt = IPL_PBT_CCW;
}
static void reipl_block_ccw_fill_parms(struct ipl_parameter_block *ipb)
{
/* LOADPARM */
/* check if read scp info worked and set loadparm */
if (sclp_ipl_info.is_valid)
memcpy(ipb->ccw.loadparm, &sclp_ipl_info.loadparm, LOADPARM_LEN);
else
/* read scp info failed: set empty loadparm (EBCDIC blanks) */
memset(ipb->ccw.loadparm, 0x40, LOADPARM_LEN);
ipb->ccw.flags = IPL_PB0_FLAG_LOADPARM;
/* VM PARM */
if (MACHINE_IS_VM && ipl_block_valid &&
(ipl_block.ccw.vm_flags & IPL_PB0_CCW_VM_FLAG_VP)) {
ipb->ccw.vm_flags |= IPL_PB0_CCW_VM_FLAG_VP;
ipb->ccw.vm_parm_len = ipl_block.ccw.vm_parm_len;
memcpy(ipb->ccw.vm_parm,
ipl_block.ccw.vm_parm, DIAG308_VMPARM_SIZE);
}
}
static int __init reipl_nss_init(void)
{
int rc;
if (!MACHINE_IS_VM)
return 0;
reipl_block_nss = (void *) get_zeroed_page(GFP_KERNEL);
if (!reipl_block_nss)
return -ENOMEM;
rc = sysfs_create_group(&reipl_kset->kobj, &reipl_nss_attr_group);
if (rc)
return rc;
reipl_block_ccw_init(reipl_block_nss);
reipl_capabilities |= IPL_TYPE_NSS;
return 0;
}
static int __init reipl_ccw_init(void)
{
int rc;
reipl_block_ccw = (void *) get_zeroed_page(GFP_KERNEL);
if (!reipl_block_ccw)
return -ENOMEM;
rc = sysfs_create_group(&reipl_kset->kobj,
MACHINE_IS_VM ? &reipl_ccw_attr_group_vm
: &reipl_ccw_attr_group_lpar);
if (rc)
return rc;
reipl_block_ccw_init(reipl_block_ccw);
if (ipl_info.type == IPL_TYPE_CCW) {
reipl_block_ccw->ccw.ssid = ipl_block.ccw.ssid;
reipl_block_ccw->ccw.devno = ipl_block.ccw.devno;
reipl_block_ccw_fill_parms(reipl_block_ccw);
}
reipl_capabilities |= IPL_TYPE_CCW;
return 0;
}
static int __init reipl_fcp_init(void)
{
int rc;
reipl_block_fcp = (void *) get_zeroed_page(GFP_KERNEL);
if (!reipl_block_fcp)
return -ENOMEM;
/* sysfs: create fcp kset for mixing attr group and bin attrs */
reipl_fcp_kset = kset_create_and_add(IPL_FCP_STR, NULL,
&reipl_kset->kobj);
if (!reipl_fcp_kset) {
free_page((unsigned long) reipl_block_fcp);
return -ENOMEM;
}
rc = sysfs_create_group(&reipl_fcp_kset->kobj, &reipl_fcp_attr_group);
if (rc)
goto out1;
if (test_facility(141)) {
rc = sysfs_create_file(&reipl_fcp_kset->kobj,
&sys_reipl_fcp_clear_attr.attr);
if (rc)
goto out2;
} else {
reipl_fcp_clear = true;
}
if (ipl_info.type == IPL_TYPE_FCP) {
memcpy(reipl_block_fcp, &ipl_block, sizeof(ipl_block));
		/*
		 * Fix loadparm: There are systems where the (SCSI) LOADPARM
		 * is invalid in the SCSI IPL parameter block, so always take
		 * it from sclp_ipl_info.
		 */
memcpy(reipl_block_fcp->fcp.loadparm, sclp_ipl_info.loadparm,
LOADPARM_LEN);
} else {
reipl_block_fcp->hdr.len = IPL_BP_FCP_LEN;
reipl_block_fcp->hdr.version = IPL_PARM_BLOCK_VERSION;
reipl_block_fcp->fcp.len = IPL_BP0_FCP_LEN;
reipl_block_fcp->fcp.pbt = IPL_PBT_FCP;
reipl_block_fcp->fcp.opt = IPL_PB0_FCP_OPT_IPL;
}
reipl_capabilities |= IPL_TYPE_FCP;
return 0;
out2:
sysfs_remove_group(&reipl_fcp_kset->kobj, &reipl_fcp_attr_group);
out1:
kset_unregister(reipl_fcp_kset);
free_page((unsigned long) reipl_block_fcp);
return rc;
}
static int __init reipl_nvme_init(void)
{
int rc;
reipl_block_nvme = (void *) get_zeroed_page(GFP_KERNEL);
if (!reipl_block_nvme)
return -ENOMEM;
/* sysfs: create kset for mixing attr group and bin attrs */
reipl_nvme_kset = kset_create_and_add(IPL_NVME_STR, NULL,
&reipl_kset->kobj);
if (!reipl_nvme_kset) {
free_page((unsigned long) reipl_block_nvme);
return -ENOMEM;
}
rc = sysfs_create_group(&reipl_nvme_kset->kobj, &reipl_nvme_attr_group);
if (rc)
goto out1;
if (test_facility(141)) {
rc = sysfs_create_file(&reipl_nvme_kset->kobj,
&sys_reipl_nvme_clear_attr.attr);
if (rc)
goto out2;
} else {
reipl_nvme_clear = true;
}
if (ipl_info.type == IPL_TYPE_NVME) {
memcpy(reipl_block_nvme, &ipl_block, sizeof(ipl_block));
		/*
		 * Fix loadparm: There are systems where the (SCSI) LOADPARM
		 * is invalid in the IPL parameter block, so always take it
		 * from sclp_ipl_info.
		 */
memcpy(reipl_block_nvme->nvme.loadparm, sclp_ipl_info.loadparm,
LOADPARM_LEN);
} else {
reipl_block_nvme->hdr.len = IPL_BP_NVME_LEN;
reipl_block_nvme->hdr.version = IPL_PARM_BLOCK_VERSION;
reipl_block_nvme->nvme.len = IPL_BP0_NVME_LEN;
reipl_block_nvme->nvme.pbt = IPL_PBT_NVME;
reipl_block_nvme->nvme.opt = IPL_PB0_NVME_OPT_IPL;
}
reipl_capabilities |= IPL_TYPE_NVME;
return 0;
out2:
sysfs_remove_group(&reipl_nvme_kset->kobj, &reipl_nvme_attr_group);
out1:
kset_unregister(reipl_nvme_kset);
free_page((unsigned long) reipl_block_nvme);
return rc;
}
static int __init reipl_eckd_init(void)
{
int rc;
if (!sclp.has_sipl_eckd)
return 0;
reipl_block_eckd = (void *)get_zeroed_page(GFP_KERNEL);
if (!reipl_block_eckd)
return -ENOMEM;
/* sysfs: create kset for mixing attr group and bin attrs */
reipl_eckd_kset = kset_create_and_add(IPL_ECKD_STR, NULL,
&reipl_kset->kobj);
if (!reipl_eckd_kset) {
free_page((unsigned long)reipl_block_eckd);
return -ENOMEM;
}
rc = sysfs_create_group(&reipl_eckd_kset->kobj, &reipl_eckd_attr_group);
if (rc)
goto out1;
if (test_facility(141)) {
rc = sysfs_create_file(&reipl_eckd_kset->kobj,
&sys_reipl_eckd_clear_attr.attr);
if (rc)
goto out2;
} else {
reipl_eckd_clear = true;
}
if (ipl_info.type == IPL_TYPE_ECKD) {
memcpy(reipl_block_eckd, &ipl_block, sizeof(ipl_block));
} else {
reipl_block_eckd->hdr.len = IPL_BP_ECKD_LEN;
reipl_block_eckd->hdr.version = IPL_PARM_BLOCK_VERSION;
reipl_block_eckd->eckd.len = IPL_BP0_ECKD_LEN;
reipl_block_eckd->eckd.pbt = IPL_PBT_ECKD;
reipl_block_eckd->eckd.opt = IPL_PB0_ECKD_OPT_IPL;
}
reipl_capabilities |= IPL_TYPE_ECKD;
return 0;
out2:
sysfs_remove_group(&reipl_eckd_kset->kobj, &reipl_eckd_attr_group);
out1:
kset_unregister(reipl_eckd_kset);
free_page((unsigned long)reipl_block_eckd);
return rc;
}
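/*
 * Pick the initial re-IPL type: a reipl block handed over in the
 * os_info area by a previous kernel takes precedence, otherwise the
 * type the system was booted with is used.
 */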
static int __init reipl_type_init(void)
{
enum ipl_type reipl_type = ipl_info.type;
struct ipl_parameter_block *reipl_block;
unsigned long size;
reipl_block = os_info_old_entry(OS_INFO_REIPL_BLOCK, &size);
if (!reipl_block)
goto out;
	/*
	 * If we have an OS info reipl block from the previous kernel,
	 * it will be used instead of the current IPL information.
	 */
if (reipl_block->pb0_hdr.pbt == IPL_PBT_FCP) {
memcpy(reipl_block_fcp, reipl_block, size);
reipl_type = IPL_TYPE_FCP;
} else if (reipl_block->pb0_hdr.pbt == IPL_PBT_NVME) {
memcpy(reipl_block_nvme, reipl_block, size);
reipl_type = IPL_TYPE_NVME;
} else if (reipl_block->pb0_hdr.pbt == IPL_PBT_CCW) {
memcpy(reipl_block_ccw, reipl_block, size);
reipl_type = IPL_TYPE_CCW;
} else if (reipl_block->pb0_hdr.pbt == IPL_PBT_ECKD) {
memcpy(reipl_block_eckd, reipl_block, size);
reipl_type = IPL_TYPE_ECKD;
}
out:
return reipl_set_type(reipl_type);
}
static int __init reipl_init(void)
{
int rc;
reipl_kset = kset_create_and_add("reipl", NULL, firmware_kobj);
if (!reipl_kset)
return -ENOMEM;
rc = sysfs_create_file(&reipl_kset->kobj, &reipl_type_attr.attr);
if (rc) {
kset_unregister(reipl_kset);
return rc;
}
rc = reipl_ccw_init();
if (rc)
return rc;
rc = reipl_eckd_init();
if (rc)
return rc;
rc = reipl_fcp_init();
if (rc)
return rc;
rc = reipl_nvme_init();
if (rc)
return rc;
rc = reipl_nss_init();
if (rc)
return rc;
return reipl_type_init();
}
static struct shutdown_action __refdata reipl_action = {
.name = SHUTDOWN_ACTION_REIPL_STR,
.fn = reipl_run,
.init = reipl_init,
};
/*
* dump shutdown action: Dump Linux on shutdown.
*/
/* FCP dump device attributes */
DEFINE_IPL_ATTR_RW(dump_fcp, wwpn, "0x%016llx\n", "%llx\n",
dump_block_fcp->fcp.wwpn);
DEFINE_IPL_ATTR_RW(dump_fcp, lun, "0x%016llx\n", "%llx\n",
dump_block_fcp->fcp.lun);
DEFINE_IPL_ATTR_RW(dump_fcp, bootprog, "%lld\n", "%lld\n",
dump_block_fcp->fcp.bootprog);
DEFINE_IPL_ATTR_RW(dump_fcp, br_lba, "%lld\n", "%lld\n",
dump_block_fcp->fcp.br_lba);
DEFINE_IPL_ATTR_RW(dump_fcp, device, "0.0.%04llx\n", "0.0.%llx\n",
dump_block_fcp->fcp.devno);
static struct attribute *dump_fcp_attrs[] = {
&sys_dump_fcp_device_attr.attr,
&sys_dump_fcp_wwpn_attr.attr,
&sys_dump_fcp_lun_attr.attr,
&sys_dump_fcp_bootprog_attr.attr,
&sys_dump_fcp_br_lba_attr.attr,
NULL,
};
static struct attribute_group dump_fcp_attr_group = {
.name = IPL_FCP_STR,
.attrs = dump_fcp_attrs,
};
/* NVME dump device attributes */
DEFINE_IPL_ATTR_RW(dump_nvme, fid, "0x%08llx\n", "%llx\n",
dump_block_nvme->nvme.fid);
DEFINE_IPL_ATTR_RW(dump_nvme, nsid, "0x%08llx\n", "%llx\n",
dump_block_nvme->nvme.nsid);
DEFINE_IPL_ATTR_RW(dump_nvme, bootprog, "%lld\n", "%llx\n",
dump_block_nvme->nvme.bootprog);
DEFINE_IPL_ATTR_RW(dump_nvme, br_lba, "%lld\n", "%llx\n",
dump_block_nvme->nvme.br_lba);
static struct attribute *dump_nvme_attrs[] = {
&sys_dump_nvme_fid_attr.attr,
&sys_dump_nvme_nsid_attr.attr,
&sys_dump_nvme_bootprog_attr.attr,
&sys_dump_nvme_br_lba_attr.attr,
NULL,
};
static struct attribute_group dump_nvme_attr_group = {
.name = IPL_NVME_STR,
.attrs = dump_nvme_attrs,
};
/* ECKD dump device attributes */
DEFINE_IPL_CCW_ATTR_RW(dump_eckd, device, dump_block_eckd->eckd);
DEFINE_IPL_ATTR_RW(dump_eckd, bootprog, "%lld\n", "%llx\n",
dump_block_eckd->eckd.bootprog);
IPL_ATTR_BR_CHR_SHOW_FN(dump, dump_block_eckd->eckd);
IPL_ATTR_BR_CHR_STORE_FN(dump, dump_block_eckd->eckd);
static struct kobj_attribute sys_dump_eckd_br_chr_attr =
__ATTR(br_chr, 0644, eckd_dump_br_chr_show, eckd_dump_br_chr_store);
static struct attribute *dump_eckd_attrs[] = {
&sys_dump_eckd_device_attr.attr,
&sys_dump_eckd_bootprog_attr.attr,
&sys_dump_eckd_br_chr_attr.attr,
NULL,
};
static struct attribute_group dump_eckd_attr_group = {
.name = IPL_ECKD_STR,
.attrs = dump_eckd_attrs,
};
/* CCW dump device attributes */
DEFINE_IPL_CCW_ATTR_RW(dump_ccw, device, dump_block_ccw->ccw);
static struct attribute *dump_ccw_attrs[] = {
&sys_dump_ccw_device_attr.attr,
NULL,
};
static struct attribute_group dump_ccw_attr_group = {
.name = IPL_CCW_STR,
.attrs = dump_ccw_attrs,
};
/* dump type */
static int dump_set_type(enum dump_type type)
{
if (!(dump_capabilities & type))
return -EINVAL;
dump_type = type;
return 0;
}
static ssize_t dump_type_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
return sprintf(page, "%s\n", dump_type_str(dump_type));
}
static ssize_t dump_type_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t len)
{
int rc = -EINVAL;
if (strncmp(buf, DUMP_NONE_STR, strlen(DUMP_NONE_STR)) == 0)
rc = dump_set_type(DUMP_TYPE_NONE);
else if (strncmp(buf, DUMP_CCW_STR, strlen(DUMP_CCW_STR)) == 0)
rc = dump_set_type(DUMP_TYPE_CCW);
else if (strncmp(buf, DUMP_ECKD_STR, strlen(DUMP_ECKD_STR)) == 0)
rc = dump_set_type(DUMP_TYPE_ECKD);
else if (strncmp(buf, DUMP_FCP_STR, strlen(DUMP_FCP_STR)) == 0)
rc = dump_set_type(DUMP_TYPE_FCP);
else if (strncmp(buf, DUMP_NVME_STR, strlen(DUMP_NVME_STR)) == 0)
rc = dump_set_type(DUMP_TYPE_NVME);
return (rc != 0) ? rc : len;
}
static struct kobj_attribute dump_type_attr =
__ATTR(dump_type, 0644, dump_type_show, dump_type_store);
static struct kset *dump_kset;
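/*
 * Install the dump parameter block and trigger the dump. The load is
 * retried as long as diag308 returns 0x302, which indicates that the
 * dump device is not ready yet.
 */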
static void diag308_dump(void *dump_block)
{
diag308(DIAG308_SET, dump_block);
while (1) {
if (diag308(DIAG308_LOAD_NORMAL_DUMP, NULL) != 0x302)
break;
udelay(USEC_PER_SEC);
}
}
static void __dump_run(void *unused)
{
switch (dump_type) {
case DUMP_TYPE_CCW:
diag308_dump(dump_block_ccw);
break;
case DUMP_TYPE_ECKD:
diag308_dump(dump_block_eckd);
break;
case DUMP_TYPE_FCP:
diag308_dump(dump_block_fcp);
break;
case DUMP_TYPE_NVME:
diag308_dump(dump_block_nvme);
break;
default:
break;
}
}
static void dump_run(struct shutdown_trigger *trigger)
{
if (dump_type == DUMP_TYPE_NONE)
return;
smp_send_stop();
smp_call_ipl_cpu(__dump_run, NULL);
}
static int __init dump_ccw_init(void)
{
int rc;
dump_block_ccw = (void *) get_zeroed_page(GFP_KERNEL);
if (!dump_block_ccw)
return -ENOMEM;
rc = sysfs_create_group(&dump_kset->kobj, &dump_ccw_attr_group);
if (rc) {
free_page((unsigned long)dump_block_ccw);
return rc;
}
dump_block_ccw->hdr.len = IPL_BP_CCW_LEN;
dump_block_ccw->hdr.version = IPL_PARM_BLOCK_VERSION;
dump_block_ccw->ccw.len = IPL_BP0_CCW_LEN;
dump_block_ccw->ccw.pbt = IPL_PBT_CCW;
dump_capabilities |= DUMP_TYPE_CCW;
return 0;
}
static int __init dump_fcp_init(void)
{
int rc;
if (!sclp_ipl_info.has_dump)
return 0; /* LDIPL DUMP is not installed */
dump_block_fcp = (void *) get_zeroed_page(GFP_KERNEL);
if (!dump_block_fcp)
return -ENOMEM;
rc = sysfs_create_group(&dump_kset->kobj, &dump_fcp_attr_group);
if (rc) {
free_page((unsigned long)dump_block_fcp);
return rc;
}
dump_block_fcp->hdr.len = IPL_BP_FCP_LEN;
dump_block_fcp->hdr.version = IPL_PARM_BLOCK_VERSION;
dump_block_fcp->fcp.len = IPL_BP0_FCP_LEN;
dump_block_fcp->fcp.pbt = IPL_PBT_FCP;
dump_block_fcp->fcp.opt = IPL_PB0_FCP_OPT_DUMP;
dump_capabilities |= DUMP_TYPE_FCP;
return 0;
}
static int __init dump_nvme_init(void)
{
int rc;
if (!sclp_ipl_info.has_dump)
return 0; /* LDIPL DUMP is not installed */
dump_block_nvme = (void *) get_zeroed_page(GFP_KERNEL);
if (!dump_block_nvme)
return -ENOMEM;
rc = sysfs_create_group(&dump_kset->kobj, &dump_nvme_attr_group);
if (rc) {
free_page((unsigned long)dump_block_nvme);
return rc;
}
dump_block_nvme->hdr.len = IPL_BP_NVME_LEN;
dump_block_nvme->hdr.version = IPL_PARM_BLOCK_VERSION;
dump_block_nvme->fcp.len = IPL_BP0_NVME_LEN;
dump_block_nvme->fcp.pbt = IPL_PBT_NVME;
dump_block_nvme->fcp.opt = IPL_PB0_NVME_OPT_DUMP;
dump_capabilities |= DUMP_TYPE_NVME;
return 0;
}
static int __init dump_eckd_init(void)
{
int rc;
if (!sclp_ipl_info.has_dump || !sclp.has_sipl_eckd)
return 0; /* LDIPL DUMP is not installed */
dump_block_eckd = (void *)get_zeroed_page(GFP_KERNEL);
if (!dump_block_eckd)
return -ENOMEM;
rc = sysfs_create_group(&dump_kset->kobj, &dump_eckd_attr_group);
if (rc) {
free_page((unsigned long)dump_block_eckd);
return rc;
}
dump_block_eckd->hdr.len = IPL_BP_ECKD_LEN;
dump_block_eckd->hdr.version = IPL_PARM_BLOCK_VERSION;
dump_block_eckd->eckd.len = IPL_BP0_ECKD_LEN;
dump_block_eckd->eckd.pbt = IPL_PBT_ECKD;
dump_block_eckd->eckd.opt = IPL_PB0_ECKD_OPT_DUMP;
dump_capabilities |= DUMP_TYPE_ECKD;
return 0;
}
static int __init dump_init(void)
{
int rc;
dump_kset = kset_create_and_add("dump", NULL, firmware_kobj);
if (!dump_kset)
return -ENOMEM;
rc = sysfs_create_file(&dump_kset->kobj, &dump_type_attr.attr);
if (rc) {
kset_unregister(dump_kset);
return rc;
}
rc = dump_ccw_init();
if (rc)
return rc;
rc = dump_eckd_init();
if (rc)
return rc;
rc = dump_fcp_init();
if (rc)
return rc;
rc = dump_nvme_init();
if (rc)
return rc;
dump_set_type(DUMP_TYPE_NONE);
return 0;
}
static struct shutdown_action __refdata dump_action = {
.name = SHUTDOWN_ACTION_DUMP_STR,
.fn = dump_run,
.init = dump_init,
};
static void dump_reipl_run(struct shutdown_trigger *trigger)
{
struct lowcore *abs_lc;
unsigned int csum;
	/*
	 * Set the REIPL_CLEAR flag in the os_info flags entry to indicate
	 * that the 'clear' sysfs attribute was set on the panicked system
	 * for the specified reipl type.
	 * It is always set for IPL_TYPE_NSS and IPL_TYPE_UNKNOWN.
	 */
if ((reipl_type == IPL_TYPE_CCW && reipl_ccw_clear) ||
(reipl_type == IPL_TYPE_ECKD && reipl_eckd_clear) ||
(reipl_type == IPL_TYPE_FCP && reipl_fcp_clear) ||
(reipl_type == IPL_TYPE_NVME && reipl_nvme_clear) ||
reipl_type == IPL_TYPE_NSS ||
reipl_type == IPL_TYPE_UNKNOWN)
os_info_flags |= OS_INFO_FLAG_REIPL_CLEAR;
os_info_entry_add(OS_INFO_FLAGS_ENTRY, &os_info_flags, sizeof(os_info_flags));
csum = (__force unsigned int)
csum_partial(reipl_block_actual, reipl_block_actual->hdr.len, 0);
abs_lc = get_abs_lowcore();
abs_lc->ipib = __pa(reipl_block_actual);
abs_lc->ipib_checksum = csum;
put_abs_lowcore(abs_lc);
dump_run(trigger);
}
static struct shutdown_action __refdata dump_reipl_action = {
.name = SHUTDOWN_ACTION_DUMP_REIPL_STR,
.fn = dump_reipl_run,
};
/*
* vmcmd shutdown action: Trigger vm command on shutdown.
*/
static char vmcmd_on_reboot[128];
static char vmcmd_on_panic[128];
static char vmcmd_on_halt[128];
static char vmcmd_on_poff[128];
static char vmcmd_on_restart[128];
DEFINE_IPL_ATTR_STR_RW(vmcmd, on_reboot, "%s\n", "%s\n", vmcmd_on_reboot);
DEFINE_IPL_ATTR_STR_RW(vmcmd, on_panic, "%s\n", "%s\n", vmcmd_on_panic);
DEFINE_IPL_ATTR_STR_RW(vmcmd, on_halt, "%s\n", "%s\n", vmcmd_on_halt);
DEFINE_IPL_ATTR_STR_RW(vmcmd, on_poff, "%s\n", "%s\n", vmcmd_on_poff);
DEFINE_IPL_ATTR_STR_RW(vmcmd, on_restart, "%s\n", "%s\n", vmcmd_on_restart);
static struct attribute *vmcmd_attrs[] = {
&sys_vmcmd_on_reboot_attr.attr,
&sys_vmcmd_on_panic_attr.attr,
&sys_vmcmd_on_halt_attr.attr,
&sys_vmcmd_on_poff_attr.attr,
&sys_vmcmd_on_restart_attr.attr,
NULL,
};
static struct attribute_group vmcmd_attr_group = {
.attrs = vmcmd_attrs,
};
static struct kset *vmcmd_kset;
static void vmcmd_run(struct shutdown_trigger *trigger)
{
char *cmd;
if (strcmp(trigger->name, ON_REIPL_STR) == 0)
cmd = vmcmd_on_reboot;
else if (strcmp(trigger->name, ON_PANIC_STR) == 0)
cmd = vmcmd_on_panic;
else if (strcmp(trigger->name, ON_HALT_STR) == 0)
cmd = vmcmd_on_halt;
else if (strcmp(trigger->name, ON_POFF_STR) == 0)
cmd = vmcmd_on_poff;
else if (strcmp(trigger->name, ON_RESTART_STR) == 0)
cmd = vmcmd_on_restart;
else
return;
if (strlen(cmd) == 0)
return;
__cpcmd(cmd, NULL, 0, NULL);
}
static int vmcmd_init(void)
{
if (!MACHINE_IS_VM)
return -EOPNOTSUPP;
vmcmd_kset = kset_create_and_add("vmcmd", NULL, firmware_kobj);
if (!vmcmd_kset)
return -ENOMEM;
return sysfs_create_group(&vmcmd_kset->kobj, &vmcmd_attr_group);
}
static struct shutdown_action vmcmd_action = {SHUTDOWN_ACTION_VMCMD_STR,
vmcmd_run, vmcmd_init};
/*
* stop shutdown action: Stop Linux on shutdown.
*/
static void stop_run(struct shutdown_trigger *trigger)
{
if (strcmp(trigger->name, ON_PANIC_STR) == 0 ||
strcmp(trigger->name, ON_RESTART_STR) == 0)
disabled_wait();
smp_stop_cpu();
}
static struct shutdown_action stop_action = {SHUTDOWN_ACTION_STOP_STR,
stop_run, NULL};
/* action list */
static struct shutdown_action *shutdown_actions_list[] = {
&ipl_action, &reipl_action, &dump_reipl_action, &dump_action,
&vmcmd_action, &stop_action};
#define SHUTDOWN_ACTIONS_COUNT ARRAY_SIZE(shutdown_actions_list)
/*
* Trigger section
*/
static struct kset *shutdown_actions_kset;
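/*
 * Attach the shutdown action named in buf to the given trigger. An
 * action whose init function failed is rejected by returning the
 * saved init error code.
 */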
static int set_trigger(const char *buf, struct shutdown_trigger *trigger,
size_t len)
{
int i;
for (i = 0; i < SHUTDOWN_ACTIONS_COUNT; i++) {
if (sysfs_streq(buf, shutdown_actions_list[i]->name)) {
if (shutdown_actions_list[i]->init_rc) {
return shutdown_actions_list[i]->init_rc;
} else {
trigger->action = shutdown_actions_list[i];
return len;
}
}
}
return -EINVAL;
}
/* on reipl */
static struct shutdown_trigger on_reboot_trigger = {ON_REIPL_STR,
&reipl_action};
static ssize_t on_reboot_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
return sprintf(page, "%s\n", on_reboot_trigger.action->name);
}
static ssize_t on_reboot_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t len)
{
return set_trigger(buf, &on_reboot_trigger, len);
}
static struct kobj_attribute on_reboot_attr = __ATTR_RW(on_reboot);
static void do_machine_restart(char *__unused)
{
smp_send_stop();
on_reboot_trigger.action->fn(&on_reboot_trigger);
reipl_run(NULL);
}
void (*_machine_restart)(char *command) = do_machine_restart;
/* on panic */
static struct shutdown_trigger on_panic_trigger = {ON_PANIC_STR, &stop_action};
static ssize_t on_panic_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
return sprintf(page, "%s\n", on_panic_trigger.action->name);
}
static ssize_t on_panic_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t len)
{
return set_trigger(buf, &on_panic_trigger, len);
}
static struct kobj_attribute on_panic_attr = __ATTR_RW(on_panic);
static void do_panic(void)
{
lgr_info_log();
on_panic_trigger.action->fn(&on_panic_trigger);
stop_run(&on_panic_trigger);
}
/* on restart */
static struct shutdown_trigger on_restart_trigger = {ON_RESTART_STR,
&stop_action};
static ssize_t on_restart_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
return sprintf(page, "%s\n", on_restart_trigger.action->name);
}
static ssize_t on_restart_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t len)
{
return set_trigger(buf, &on_restart_trigger, len);
}
static struct kobj_attribute on_restart_attr = __ATTR_RW(on_restart);
static void __do_restart(void *ignore)
{
smp_send_stop();
#ifdef CONFIG_CRASH_DUMP
crash_kexec(NULL);
#endif
on_restart_trigger.action->fn(&on_restart_trigger);
stop_run(&on_restart_trigger);
}
void do_restart(void *arg)
{
tracing_off();
debug_locks_off();
lgr_info_log();
smp_call_online_cpu(__do_restart, arg);
}
/* on halt */
static struct shutdown_trigger on_halt_trigger = {ON_HALT_STR, &stop_action};
static ssize_t on_halt_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
return sprintf(page, "%s\n", on_halt_trigger.action->name);
}
static ssize_t on_halt_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t len)
{
return set_trigger(buf, &on_halt_trigger, len);
}
static struct kobj_attribute on_halt_attr = __ATTR_RW(on_halt);
static void do_machine_halt(void)
{
smp_send_stop();
on_halt_trigger.action->fn(&on_halt_trigger);
stop_run(&on_halt_trigger);
}
void (*_machine_halt)(void) = do_machine_halt;
/* on power off */
static struct shutdown_trigger on_poff_trigger = {ON_POFF_STR, &stop_action};
static ssize_t on_poff_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
return sprintf(page, "%s\n", on_poff_trigger.action->name);
}
static ssize_t on_poff_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t len)
{
return set_trigger(buf, &on_poff_trigger, len);
}
static struct kobj_attribute on_poff_attr = __ATTR_RW(on_poff);
static void do_machine_power_off(void)
{
smp_send_stop();
on_poff_trigger.action->fn(&on_poff_trigger);
stop_run(&on_poff_trigger);
}
void (*_machine_power_off)(void) = do_machine_power_off;
static struct attribute *shutdown_action_attrs[] = {
&on_restart_attr.attr,
&on_reboot_attr.attr,
&on_panic_attr.attr,
&on_halt_attr.attr,
&on_poff_attr.attr,
NULL,
};
static struct attribute_group shutdown_action_attr_group = {
.attrs = shutdown_action_attrs,
};
static void __init shutdown_triggers_init(void)
{
shutdown_actions_kset = kset_create_and_add("shutdown_actions", NULL,
firmware_kobj);
if (!shutdown_actions_kset)
goto fail;
if (sysfs_create_group(&shutdown_actions_kset->kobj,
&shutdown_action_attr_group))
goto fail;
return;
fail:
panic("shutdown_triggers_init failed\n");
}
static void __init shutdown_actions_init(void)
{
int i;
for (i = 0; i < SHUTDOWN_ACTIONS_COUNT; i++) {
if (!shutdown_actions_list[i]->init)
continue;
shutdown_actions_list[i]->init_rc =
shutdown_actions_list[i]->init();
}
}
static int __init s390_ipl_init(void)
{
char str[8] = {0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40};
sclp_early_get_ipl_info(&sclp_ipl_info);
/*
* Fix loadparm: There are systems where the (SCSI) LOADPARM
* returned by read SCP info is invalid (contains EBCDIC blanks)
* when the system has been booted via diag308. In that case we use
* the value from diag308, if available.
*
* There are also systems where diag308 store does not work in
* case the system is booted from HMC. Fortunately in this case
* READ SCP info provides the correct value.
*/
if (memcmp(sclp_ipl_info.loadparm, str, sizeof(str)) == 0 && ipl_block_valid)
memcpy(sclp_ipl_info.loadparm, ipl_block.ccw.loadparm, LOADPARM_LEN);
shutdown_actions_init();
shutdown_triggers_init();
return 0;
}
__initcall(s390_ipl_init);
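/*
 * Copy at most n characters from src to dst, dropping double quotes.
 * Used to parse the vmreboot=/vmpanic=/vmhalt=/vmpoff= command line
 * options below; note that dst is not NUL-terminated here, the
 * callers terminate the buffer themselves.
 */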
static void __init strncpy_skip_quote(char *dst, char *src, int n)
{
int sx, dx;
dx = 0;
for (sx = 0; src[sx] != 0; sx++) {
if (src[sx] == '"')
continue;
dst[dx++] = src[sx];
if (dx >= n)
break;
}
}
static int __init vmcmd_on_reboot_setup(char *str)
{
if (!MACHINE_IS_VM)
return 1;
strncpy_skip_quote(vmcmd_on_reboot, str, 127);
vmcmd_on_reboot[127] = 0;
on_reboot_trigger.action = &vmcmd_action;
return 1;
}
__setup("vmreboot=", vmcmd_on_reboot_setup);
static int __init vmcmd_on_panic_setup(char *str)
{
if (!MACHINE_IS_VM)
return 1;
strncpy_skip_quote(vmcmd_on_panic, str, 127);
vmcmd_on_panic[127] = 0;
on_panic_trigger.action = &vmcmd_action;
return 1;
}
__setup("vmpanic=", vmcmd_on_panic_setup);
static int __init vmcmd_on_halt_setup(char *str)
{
if (!MACHINE_IS_VM)
return 1;
strncpy_skip_quote(vmcmd_on_halt, str, 127);
vmcmd_on_halt[127] = 0;
on_halt_trigger.action = &vmcmd_action;
return 1;
}
__setup("vmhalt=", vmcmd_on_halt_setup);
static int __init vmcmd_on_poff_setup(char *str)
{
if (!MACHINE_IS_VM)
return 1;
strncpy_skip_quote(vmcmd_on_poff, str, 127);
vmcmd_on_poff[127] = 0;
on_poff_trigger.action = &vmcmd_action;
return 1;
}
__setup("vmpoff=", vmcmd_on_poff_setup);
static int on_panic_notify(struct notifier_block *self,
unsigned long event, void *data)
{
do_panic();
return NOTIFY_OK;
}
static struct notifier_block on_panic_nb = {
.notifier_call = on_panic_notify,
.priority = INT_MIN,
};
void __init setup_ipl(void)
{
BUILD_BUG_ON(sizeof(struct ipl_parameter_block) != PAGE_SIZE);
ipl_info.type = get_ipl_type();
switch (ipl_info.type) {
case IPL_TYPE_CCW:
ipl_info.data.ccw.dev_id.ssid = ipl_block.ccw.ssid;
ipl_info.data.ccw.dev_id.devno = ipl_block.ccw.devno;
break;
case IPL_TYPE_ECKD:
case IPL_TYPE_ECKD_DUMP:
ipl_info.data.eckd.dev_id.ssid = ipl_block.eckd.ssid;
ipl_info.data.eckd.dev_id.devno = ipl_block.eckd.devno;
break;
case IPL_TYPE_FCP:
case IPL_TYPE_FCP_DUMP:
ipl_info.data.fcp.dev_id.ssid = 0;
ipl_info.data.fcp.dev_id.devno = ipl_block.fcp.devno;
ipl_info.data.fcp.wwpn = ipl_block.fcp.wwpn;
ipl_info.data.fcp.lun = ipl_block.fcp.lun;
break;
case IPL_TYPE_NVME:
case IPL_TYPE_NVME_DUMP:
ipl_info.data.nvme.fid = ipl_block.nvme.fid;
ipl_info.data.nvme.nsid = ipl_block.nvme.nsid;
break;
case IPL_TYPE_NSS:
case IPL_TYPE_UNKNOWN:
/* We have no info to copy */
break;
}
atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
}
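/*
 * Bring the machine into a defined state: drop the prefix register,
 * disable lowcore protection and perform the diag308 reset via the
 * amode31 helper.
 */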
void s390_reset_system(void)
{
/* Disable prefixing */
set_prefix(0);
/* Disable lowcore protection */
__ctl_clear_bit(0, 28);
diag_amode31_ops.diag308_reset();
}
#ifdef CONFIG_KEXEC_FILE
int ipl_report_add_component(struct ipl_report *report, struct kexec_buf *kbuf,
unsigned char flags, unsigned short cert)
{
struct ipl_report_component *comp;
comp = vzalloc(sizeof(*comp));
if (!comp)
return -ENOMEM;
list_add_tail(&comp->list, &report->components);
comp->entry.addr = kbuf->mem;
comp->entry.len = kbuf->memsz;
comp->entry.flags = flags;
comp->entry.certificate_index = cert;
report->size += sizeof(comp->entry);
return 0;
}
int ipl_report_add_certificate(struct ipl_report *report, void *key,
unsigned long addr, unsigned long len)
{
struct ipl_report_certificate *cert;
cert = vzalloc(sizeof(*cert));
if (!cert)
return -ENOMEM;
list_add_tail(&cert->list, &report->certificates);
cert->entry.addr = addr;
cert->entry.len = len;
cert->key = key;
report->size += sizeof(cert->entry);
report->size += cert->entry.len;
return 0;
}
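/*
 * Allocate a new IPL report for the given parameter block and
 * pre-account the size of the fixed report headers.
 */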
struct ipl_report *ipl_report_init(struct ipl_parameter_block *ipib)
{
struct ipl_report *report;
report = vzalloc(sizeof(*report));
if (!report)
return ERR_PTR(-ENOMEM);
report->ipib = ipib;
INIT_LIST_HEAD(&report->components);
INIT_LIST_HEAD(&report->certificates);
report->size = ALIGN(ipib->hdr.len, 8);
report->size += sizeof(struct ipl_rl_hdr);
report->size += sizeof(struct ipl_rb_components);
report->size += sizeof(struct ipl_rb_certificates);
return report;
}
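/*
 * Serialize the IPL report into one contiguous buffer: the parameter
 * block first, then the 8-byte aligned report list header, the
 * component and certificate entry blocks and finally the raw
 * certificate data.
 */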
void *ipl_report_finish(struct ipl_report *report)
{
struct ipl_report_certificate *cert;
struct ipl_report_component *comp;
struct ipl_rb_certificates *certs;
struct ipl_parameter_block *ipib;
struct ipl_rb_components *comps;
struct ipl_rl_hdr *rl_hdr;
void *buf, *ptr;
buf = vzalloc(report->size);
if (!buf)
goto out;
ptr = buf;
memcpy(ptr, report->ipib, report->ipib->hdr.len);
ipib = ptr;
if (ipl_secure_flag)
ipib->hdr.flags |= IPL_PL_FLAG_SIPL;
ipib->hdr.flags |= IPL_PL_FLAG_IPLSR;
ptr += report->ipib->hdr.len;
ptr = PTR_ALIGN(ptr, 8);
rl_hdr = ptr;
ptr += sizeof(*rl_hdr);
comps = ptr;
comps->rbt = IPL_RBT_COMPONENTS;
ptr += sizeof(*comps);
list_for_each_entry(comp, &report->components, list) {
memcpy(ptr, &comp->entry, sizeof(comp->entry));
ptr += sizeof(comp->entry);
}
comps->len = ptr - (void *)comps;
certs = ptr;
certs->rbt = IPL_RBT_CERTIFICATES;
ptr += sizeof(*certs);
list_for_each_entry(cert, &report->certificates, list) {
memcpy(ptr, &cert->entry, sizeof(cert->entry));
ptr += sizeof(cert->entry);
}
certs->len = ptr - (void *)certs;
rl_hdr->len = ptr - (void *)rl_hdr;
list_for_each_entry(cert, &report->certificates, list) {
memcpy(ptr, cert->key, cert->entry.len);
ptr += cert->entry.len;
}
BUG_ON(ptr > buf + report->size);
out:
return buf;
}
int ipl_report_free(struct ipl_report *report)
{
struct ipl_report_component *comp, *ncomp;
struct ipl_report_certificate *cert, *ncert;
list_for_each_entry_safe(comp, ncomp, &report->components, list)
vfree(comp);
list_for_each_entry_safe(cert, ncert, &report->certificates, list)
vfree(cert);
vfree(report);
return 0;
}
#endif
| linux-master | arch/s390/kernel/ipl.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2022
*/
#include <linux/cpufeature.h>
#include <linux/bug.h>
#include <asm/elf.h>
enum {
TYPE_HWCAP,
TYPE_FACILITY,
};
struct s390_cpu_feature {
unsigned int type : 4;
unsigned int num : 28;
};
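/*
 * Map the architecture-independent CPU feature numbers to either an
 * ELF hwcap bit or an s390 facility bit.
 */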
static struct s390_cpu_feature s390_cpu_features[MAX_CPU_FEATURES] = {
[S390_CPU_FEATURE_MSA] = {.type = TYPE_HWCAP, .num = HWCAP_NR_MSA},
[S390_CPU_FEATURE_VXRS] = {.type = TYPE_HWCAP, .num = HWCAP_NR_VXRS},
[S390_CPU_FEATURE_UV] = {.type = TYPE_FACILITY, .num = 158},
};
/*
* cpu_have_feature - Test CPU features on module initialization
*/
int cpu_have_feature(unsigned int num)
{
struct s390_cpu_feature *feature;
if (WARN_ON_ONCE(num >= MAX_CPU_FEATURES))
return 0;
feature = &s390_cpu_features[num];
switch (feature->type) {
case TYPE_HWCAP:
return !!(elf_hwcap & BIT(feature->num));
case TYPE_FACILITY:
return test_facility(feature->num);
default:
WARN_ON_ONCE(1);
return 0;
}
}
EXPORT_SYMBOL(cpu_have_feature);
| linux-master | arch/s390/kernel/cpufeature.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2017
*/
#include <linux/console.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/sclp.h>
static void sclp_early_write(struct console *con, const char *s, unsigned int len)
{
__sclp_early_printk(s, len);
}
static struct console sclp_early_console = {
.name = "earlysclp",
.write = sclp_early_write,
.flags = CON_PRINTBUFFER | CON_BOOT,
.index = -1,
};
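/*
 * Enable the early SCLP console if "earlyprintk" (optionally
 * "earlyprintk=sclp") was given on the command line and the machine
 * provides an SCLP line-mode or VT220 interface.
 */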
static int __init setup_early_printk(char *buf)
{
if (early_console)
return 0;
/* Accept only "earlyprintk" and "earlyprintk=sclp" */
if (buf && !str_has_prefix(buf, "sclp"))
return 0;
if (!sclp.has_linemode && !sclp.has_vt220)
return 0;
early_console = &sclp_early_console;
register_console(early_console);
return 0;
}
early_param("earlyprintk", setup_early_printk);
| linux-master | arch/s390/kernel/early_printk.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Tracepoint definitions for s390
*
* Copyright IBM Corp. 2015
* Author(s): Martin Schwidefsky <[email protected]>
*/
#include <linux/percpu.h>
#define CREATE_TRACE_POINTS
#include <asm/trace/diag.h>
EXPORT_TRACEPOINT_SYMBOL(s390_diagnose);
static DEFINE_PER_CPU(unsigned int, diagnose_trace_depth);
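/*
 * Emit the s390_diagnose tracepoint unless a trace event is already
 * being emitted on this CPU; on lockdep kernels the tracepoint is
 * skipped entirely to avoid lockdep recursion.
 */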
void notrace trace_s390_diagnose_norecursion(int diag_nr)
{
unsigned long flags;
unsigned int *depth;
/* Avoid lockdep recursion. */
if (IS_ENABLED(CONFIG_LOCKDEP))
return;
local_irq_save(flags);
depth = this_cpu_ptr(&diagnose_trace_depth);
if (*depth == 0) {
(*depth)++;
trace_s390_diagnose(diag_nr);
(*depth)--;
}
local_irq_restore(flags);
}
| linux-master | arch/s390/kernel/trace.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/minmax.h>
#include <linux/string.h>
#include <asm/ebcdic.h>
#include <asm/ipl.h>
/* VM IPL PARM routines */
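/*
 * Extract the z/VM IPL parameter string from an IPL parameter block,
 * convert it from EBCDIC to ASCII and return its length. At most
 * size - 1 characters are copied and dest is always NUL-terminated.
 */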
size_t ipl_block_get_ascii_vmparm(char *dest, size_t size,
const struct ipl_parameter_block *ipb)
{
int i;
size_t len;
char has_lowercase = 0;
len = 0;
if ((ipb->ccw.vm_flags & IPL_PB0_CCW_VM_FLAG_VP) &&
(ipb->ccw.vm_parm_len > 0)) {
len = min_t(size_t, size - 1, ipb->ccw.vm_parm_len);
memcpy(dest, ipb->ccw.vm_parm, len);
/* If at least one character is lowercase, we assume mixed
* case; otherwise we convert everything to lowercase.
*/
for (i = 0; i < len; i++)
if ((dest[i] > 0x80 && dest[i] < 0x8a) || /* a-i */
(dest[i] > 0x90 && dest[i] < 0x9a) || /* j-r */
(dest[i] > 0xa1 && dest[i] < 0xaa)) { /* s-z */
has_lowercase = 1;
break;
}
if (!has_lowercase)
EBC_TOLOWER(dest, len);
EBCASC(dest, len);
}
dest[len] = 0;
return len;
}
| linux-master | arch/s390/kernel/ipl_vmparm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* This file handles the architecture dependent parts of process handling.
*
* Copyright IBM Corp. 1999, 2009
* Author(s): Martin Schwidefsky <[email protected]>,
* Hartmut Penner <[email protected]>,
* Denis Joseph Barrow,
*/
#include <linux/elf-randomize.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/tick.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/kprobes.h>
#include <linux/random.h>
#include <linux/export.h>
#include <linux/init_task.h>
#include <linux/entry-common.h>
#include <linux/io.h>
#include <asm/cpu_mf.h>
#include <asm/processor.h>
#include <asm/vtimer.h>
#include <asm/exec.h>
#include <asm/irq.h>
#include <asm/nmi.h>
#include <asm/smp.h>
#include <asm/stacktrace.h>
#include <asm/switch_to.h>
#include <asm/runtime_instr.h>
#include <asm/unwind.h>
#include "entry.h"
void ret_from_fork(void) asm("ret_from_fork");
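/*
 * Common return path for newly created tasks: complete the scheduler
 * tail work, invoke the thread function for kernel threads, and
 * finally return to user space via the syscall exit path.
 */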
void __ret_from_fork(struct task_struct *prev, struct pt_regs *regs)
{
void (*func)(void *arg);
schedule_tail(prev);
if (!user_mode(regs)) {
/* Kernel thread */
func = (void *)regs->gprs[9];
func((void *)regs->gprs[10]);
}
clear_pt_regs_flag(regs, PIF_SYSCALL);
syscall_exit_to_user_mode(regs);
}
void flush_thread(void)
{
}
void arch_setup_new_exec(void)
{
if (S390_lowcore.current_pid != current->pid) {
S390_lowcore.current_pid = current->pid;
if (test_facility(40))
lpp(&S390_lowcore.lpp);
}
}
void arch_release_task_struct(struct task_struct *tsk)
{
runtime_instr_release(tsk);
guarded_storage_release(tsk);
}
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
/*
* Save the floating-point or vector register state of the current
* task and set the CIF_FPU flag to lazy restore the FPU register
* state when returning to user space.
*/
save_fpu_regs();
memcpy(dst, src, arch_task_struct_size);
dst->thread.fpu.regs = dst->thread.fpu.fprs;
/*
* Don't transfer over the runtime instrumentation or the guarded
* storage control block pointers. These fields are cleared here instead
* of in copy_thread() to avoid premature freeing of associated memory
* on fork() failure. Wait to clear the RI flag because ->stack still
* refers to the source thread.
*/
dst->thread.ri_cb = NULL;
dst->thread.gs_cb = NULL;
dst->thread.gs_bc_cb = NULL;
return 0;
}
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
unsigned long clone_flags = args->flags;
unsigned long new_stackp = args->stack;
unsigned long tls = args->tls;
struct fake_frame
{
struct stack_frame sf;
struct pt_regs childregs;
} *frame;
frame = container_of(task_pt_regs(p), struct fake_frame, childregs);
p->thread.ksp = (unsigned long) frame;
/* Save access registers to new thread structure. */
save_access_regs(&p->thread.acrs[0]);
/* start new process with ar4 pointing to the correct address space */
/* Don't copy debug registers */
memset(&p->thread.per_user, 0, sizeof(p->thread.per_user));
memset(&p->thread.per_event, 0, sizeof(p->thread.per_event));
clear_tsk_thread_flag(p, TIF_SINGLE_STEP);
p->thread.per_flags = 0;
/* Initialize per thread user and system timer values */
p->thread.user_timer = 0;
p->thread.guest_timer = 0;
p->thread.system_timer = 0;
p->thread.hardirq_timer = 0;
p->thread.softirq_timer = 0;
p->thread.last_break = 1;
frame->sf.back_chain = 0;
frame->sf.gprs[11 - 6] = (unsigned long)&frame->childregs;
frame->sf.gprs[12 - 6] = (unsigned long)p;
/* new return point is ret_from_fork */
frame->sf.gprs[14 - 6] = (unsigned long)ret_from_fork;
/* fake return stack for resume(), don't go back to schedule */
frame->sf.gprs[15 - 6] = (unsigned long)frame;
/* Store access registers to kernel stack of new process. */
if (unlikely(args->fn)) {
/* kernel thread */
memset(&frame->childregs, 0, sizeof(struct pt_regs));
frame->childregs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_IO |
PSW_MASK_EXT | PSW_MASK_MCHECK;
frame->childregs.gprs[9] = (unsigned long)args->fn;
frame->childregs.gprs[10] = (unsigned long)args->fn_arg;
frame->childregs.orig_gpr2 = -1;
frame->childregs.last_break = 1;
return 0;
}
frame->childregs = *current_pt_regs();
frame->childregs.gprs[2] = 0; /* child returns 0 on fork. */
frame->childregs.flags = 0;
if (new_stackp)
frame->childregs.gprs[15] = new_stackp;
/*
* Clear the runtime instrumentation flag after the above childregs
* copy. The CB pointer was already cleared in arch_dup_task_struct().
*/
frame->childregs.psw.mask &= ~PSW_MASK_RI;
/* Set a new TLS ? */
if (clone_flags & CLONE_SETTLS) {
if (is_compat_task()) {
p->thread.acrs[0] = (unsigned int)tls;
} else {
p->thread.acrs[0] = (unsigned int)(tls >> 32);
p->thread.acrs[1] = (unsigned int)tls;
}
}
/*
* s390 stores the svc return address in arch_data when calling
* sigreturn()/restart_syscall() via vdso. 1 means no valid address
* stored.
*/
p->restart_block.arch_data = 1;
return 0;
}
void execve_tail(void)
{
current->thread.fpu.fpc = 0;
asm volatile("sfpc %0" : : "d" (0));
}
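/*
 * Walk the task's kernel stack and return the first return address
 * outside of the scheduler, for use by /proc/<pid>/wchan.
 */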
unsigned long __get_wchan(struct task_struct *p)
{
struct unwind_state state;
unsigned long ip = 0;
if (!task_stack_page(p))
return 0;
if (!try_get_task_stack(p))
return 0;
unwind_for_each_frame(&state, p, NULL, 0) {
if (state.stack_info.type != STACK_TYPE_TASK) {
ip = 0;
break;
}
ip = unwind_get_return_address(&state);
if (!ip)
break;
if (!in_sched_functions(ip))
break;
}
put_task_stack(p);
return ip;
}
unsigned long arch_align_stack(unsigned long sp)
{
if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
sp -= get_random_u32_below(PAGE_SIZE);
return sp & ~0xf;
}
static inline unsigned long brk_rnd(void)
{
return (get_random_u16() & BRK_RND_MASK) << PAGE_SHIFT;
}
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
unsigned long ret;
ret = PAGE_ALIGN(mm->brk + brk_rnd());
return (ret > mm->brk) ? ret : mm->brk;
}
| linux-master | arch/s390/kernel/process.c |