/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Glue Code for the AVX/AES-NI/GFNI assembler implementation of the ARIA Cipher
*
* Copyright (c) 2022 Taehee Yoo <[email protected]>
*/
#include <crypto/algapi.h>
#include <crypto/internal/simd.h>
#include <crypto/aria.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/types.h>
#include "ecb_cbc_helpers.h"
#include "aria-avx.h"
asmlinkage void aria_aesni_avx_encrypt_16way(const void *ctx, u8 *dst,
const u8 *src);
EXPORT_SYMBOL_GPL(aria_aesni_avx_encrypt_16way);
asmlinkage void aria_aesni_avx_decrypt_16way(const void *ctx, u8 *dst,
const u8 *src);
EXPORT_SYMBOL_GPL(aria_aesni_avx_decrypt_16way);
asmlinkage void aria_aesni_avx_ctr_crypt_16way(const void *ctx, u8 *dst,
const u8 *src,
u8 *keystream, u8 *iv);
EXPORT_SYMBOL_GPL(aria_aesni_avx_ctr_crypt_16way);
#ifdef CONFIG_AS_GFNI
asmlinkage void aria_aesni_avx_gfni_encrypt_16way(const void *ctx, u8 *dst,
const u8 *src);
EXPORT_SYMBOL_GPL(aria_aesni_avx_gfni_encrypt_16way);
asmlinkage void aria_aesni_avx_gfni_decrypt_16way(const void *ctx, u8 *dst,
const u8 *src);
EXPORT_SYMBOL_GPL(aria_aesni_avx_gfni_decrypt_16way);
asmlinkage void aria_aesni_avx_gfni_ctr_crypt_16way(const void *ctx, u8 *dst,
const u8 *src,
u8 *keystream, u8 *iv);
EXPORT_SYMBOL_GPL(aria_aesni_avx_gfni_ctr_crypt_16way);
#endif /* CONFIG_AS_GFNI */
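/*
 * Run-time dispatch table, filled in by aria_avx_init() with either the
 * plain AES-NI/AVX or the GFNI entry points declared above.  A sketch of
 * the layout, assuming the definition in aria-avx.h mirrors the
 * assignments made in aria_avx_init():
 *
 *	struct aria_avx_ops {
 *		void (*aria_encrypt_16way)(const void *ctx, u8 *dst,
 *					   const u8 *src);
 *		void (*aria_decrypt_16way)(const void *ctx, u8 *dst,
 *					   const u8 *src);
 *		void (*aria_ctr_crypt_16way)(const void *ctx, u8 *dst,
 *					     const u8 *src, u8 *keystream,
 *					     u8 *iv);
 *	};
 */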
static struct aria_avx_ops aria_ops;
struct aria_avx_request_ctx {
u8 keystream[ARIA_AESNI_PARALLEL_BLOCK_SIZE];
};
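/*
 * The per-request keystream buffer is sized for a full 16-way batch
 * (ARIA_AESNI_PARALLEL_BLOCK_SIZE = 16 blocks * 16 bytes = 256 bytes), so
 * the CTR path never needs a large on-stack buffer; the scalar tail code
 * below only uses its first ARIA_BLOCK_SIZE bytes.
 */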
static int ecb_do_encrypt(struct skcipher_request *req, const u32 *rkey)
{
ECB_WALK_START(req, ARIA_BLOCK_SIZE, ARIA_AESNI_PARALLEL_BLOCKS);
ECB_BLOCK(ARIA_AESNI_PARALLEL_BLOCKS, aria_ops.aria_encrypt_16way);
ECB_BLOCK(1, aria_encrypt);
ECB_WALK_END();
}
static int ecb_do_decrypt(struct skcipher_request *req, const u32 *rkey)
{
ECB_WALK_START(req, ARIA_BLOCK_SIZE, ARIA_AESNI_PARALLEL_BLOCKS);
ECB_BLOCK(ARIA_AESNI_PARALLEL_BLOCKS, aria_ops.aria_decrypt_16way);
ECB_BLOCK(1, aria_decrypt);
ECB_WALK_END();
}
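/*
 * A simplified sketch of what the ECB_WALK_*()/ECB_BLOCK() helpers above
 * expand to, assuming the macro definitions in ecb_cbc_helpers.h (the
 * exact FPU-section batching is elided):
 *
 *	err = skcipher_walk_virt(&walk, req, false);
 *	while ((nbytes = walk.nbytes) > 0) {
 *		kernel_fpu_begin();
 *		while (nbytes >= 16 * ARIA_BLOCK_SIZE) {
 *			fn_16way(ctx, dst, src);
 *			src += 16 * ARIA_BLOCK_SIZE;
 *			dst += 16 * ARIA_BLOCK_SIZE;
 *			nbytes -= 16 * ARIA_BLOCK_SIZE;
 *		}
 *		kernel_fpu_end();
 *		while (nbytes >= ARIA_BLOCK_SIZE) {
 *			fn_1way(ctx, dst, src);
 *			src += ARIA_BLOCK_SIZE;
 *			dst += ARIA_BLOCK_SIZE;
 *			nbytes -= ARIA_BLOCK_SIZE;
 *		}
 *		err = skcipher_walk_done(&walk, nbytes);
 *	}
 */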
static int aria_avx_ecb_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct aria_ctx *ctx = crypto_skcipher_ctx(tfm);
return ecb_do_encrypt(req, ctx->enc_key[0]);
}
static int aria_avx_ecb_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct aria_ctx *ctx = crypto_skcipher_ctx(tfm);
return ecb_do_decrypt(req, ctx->dec_key[0]);
}
static int aria_avx_set_key(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
return aria_set_key(&tfm->base, key, keylen);
}
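/*
 * CTR mode is handled in three stages per walk step: 16-way SIMD batches
 * under kernel_fpu_begin()/kernel_fpu_end(), then any remaining whole
 * blocks through the scalar aria_encrypt(), and finally (only on the last
 * walk step) a partial tail block that XORs just the remaining nbytes of
 * keystream into the output.
 */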
static int aria_avx_ctr_encrypt(struct skcipher_request *req)
{
struct aria_avx_request_ctx *req_ctx = skcipher_request_ctx(req);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct aria_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk walk;
unsigned int nbytes;
int err;
err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes) > 0) {
const u8 *src = walk.src.virt.addr;
u8 *dst = walk.dst.virt.addr;
while (nbytes >= ARIA_AESNI_PARALLEL_BLOCK_SIZE) {
kernel_fpu_begin();
aria_ops.aria_ctr_crypt_16way(ctx, dst, src,
&req_ctx->keystream[0],
walk.iv);
kernel_fpu_end();
dst += ARIA_AESNI_PARALLEL_BLOCK_SIZE;
src += ARIA_AESNI_PARALLEL_BLOCK_SIZE;
nbytes -= ARIA_AESNI_PARALLEL_BLOCK_SIZE;
}
while (nbytes >= ARIA_BLOCK_SIZE) {
memcpy(&req_ctx->keystream[0], walk.iv, ARIA_BLOCK_SIZE);
crypto_inc(walk.iv, ARIA_BLOCK_SIZE);
aria_encrypt(ctx, &req_ctx->keystream[0],
&req_ctx->keystream[0]);
crypto_xor_cpy(dst, src, &req_ctx->keystream[0],
ARIA_BLOCK_SIZE);
dst += ARIA_BLOCK_SIZE;
src += ARIA_BLOCK_SIZE;
nbytes -= ARIA_BLOCK_SIZE;
}
if (walk.nbytes == walk.total && nbytes > 0) {
memcpy(&req_ctx->keystream[0], walk.iv,
ARIA_BLOCK_SIZE);
crypto_inc(walk.iv, ARIA_BLOCK_SIZE);
aria_encrypt(ctx, &req_ctx->keystream[0],
&req_ctx->keystream[0]);
crypto_xor_cpy(dst, src, &req_ctx->keystream[0],
nbytes);
dst += nbytes;
src += nbytes;
nbytes = 0;
}
err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
static int aria_avx_init_tfm(struct crypto_skcipher *tfm)
{
crypto_skcipher_set_reqsize(tfm, sizeof(struct aria_avx_request_ctx));
return 0;
}
static struct skcipher_alg aria_algs[] = {
{
.base.cra_name = "__ecb(aria)",
.base.cra_driver_name = "__ecb-aria-avx",
.base.cra_priority = 400,
.base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = ARIA_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct aria_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = ARIA_MIN_KEY_SIZE,
.max_keysize = ARIA_MAX_KEY_SIZE,
.setkey = aria_avx_set_key,
.encrypt = aria_avx_ecb_encrypt,
.decrypt = aria_avx_ecb_decrypt,
}, {
.base.cra_name = "__ctr(aria)",
.base.cra_driver_name = "__ctr-aria-avx",
.base.cra_priority = 400,
.base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct aria_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = ARIA_MIN_KEY_SIZE,
.max_keysize = ARIA_MAX_KEY_SIZE,
.ivsize = ARIA_BLOCK_SIZE,
.chunksize = ARIA_BLOCK_SIZE,
.walksize = 16 * ARIA_BLOCK_SIZE,
.setkey = aria_avx_set_key,
.encrypt = aria_avx_ctr_encrypt,
.decrypt = aria_avx_ctr_encrypt,
.init = aria_avx_init_tfm,
}
};
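/*
 * The "__" prefix together with CRYPTO_ALG_INTERNAL hides these FPU-only
 * algorithms from direct users; simd_register_skciphers_compat() below
 * wraps them so requests issued while the FPU is unusable get deferred to
 * a cryptd worker.  A minimal caller-side sketch against the public name
 * (error handling elided; key, iv and the scatterlists are hypothetical):
 *
 *	struct crypto_skcipher *tfm = crypto_alloc_skcipher("ctr(aria)", 0, 0);
 *
 *	crypto_skcipher_setkey(tfm, key, ARIA_MAX_KEY_SIZE);
 *	skcipher_request_set_crypt(req, src_sg, dst_sg, len, iv);
 *	crypto_skcipher_encrypt(req);
 */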
static struct simd_skcipher_alg *aria_simd_algs[ARRAY_SIZE(aria_algs)];
static int __init aria_avx_init(void)
{
const char *feature_name;
if (!boot_cpu_has(X86_FEATURE_AVX) ||
!boot_cpu_has(X86_FEATURE_AES) ||
!boot_cpu_has(X86_FEATURE_OSXSAVE)) {
pr_info("AVX or AES-NI instructions are not detected.\n");
return -ENODEV;
}
if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
&feature_name)) {
pr_info("CPU feature '%s' is not supported.\n", feature_name);
return -ENODEV;
}
if (boot_cpu_has(X86_FEATURE_GFNI) && IS_ENABLED(CONFIG_AS_GFNI)) {
aria_ops.aria_encrypt_16way = aria_aesni_avx_gfni_encrypt_16way;
aria_ops.aria_decrypt_16way = aria_aesni_avx_gfni_decrypt_16way;
aria_ops.aria_ctr_crypt_16way = aria_aesni_avx_gfni_ctr_crypt_16way;
} else {
aria_ops.aria_encrypt_16way = aria_aesni_avx_encrypt_16way;
aria_ops.aria_decrypt_16way = aria_aesni_avx_decrypt_16way;
aria_ops.aria_ctr_crypt_16way = aria_aesni_avx_ctr_crypt_16way;
}
return simd_register_skciphers_compat(aria_algs,
ARRAY_SIZE(aria_algs),
aria_simd_algs);
}
static void __exit aria_avx_exit(void)
{
simd_unregister_skciphers(aria_algs, ARRAY_SIZE(aria_algs),
aria_simd_algs);
}
module_init(aria_avx_init);
module_exit(aria_avx_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Taehee Yoo <[email protected]>");
MODULE_DESCRIPTION("ARIA Cipher Algorithm, AVX/AES-NI/GFNI optimized");
MODULE_ALIAS_CRYPTO("aria");
MODULE_ALIAS_CRYPTO("aria-aesni-avx");
/* linux-master: arch/x86/crypto/aria_aesni_avx_glue.c */
/*
* Cryptographic API.
*
* Glue code for the SHA256 Secure Hash Algorithm assembler
* implementation using supplemental SSE3 / AVX / AVX2 instructions.
*
* This file is based on sha256_generic.c
*
* Copyright (C) 2013 Intel Corporation.
*
* Author:
* Tim Chen <[email protected]>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <crypto/sha2.h>
#include <crypto/sha256_base.h>
#include <linux/string.h>
#include <asm/simd.h>
asmlinkage void sha256_transform_ssse3(struct sha256_state *state,
const u8 *data, int blocks);
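/*
 * Common update path shared by all SIMD variants below: fall back to the
 * generic C implementation when the FPU is not usable in this context, or
 * when the input cannot complete a 64-byte block (the base helper only
 * feeds whole blocks to the asm transform); otherwise run the asm block
 * function inside a kernel_fpu_begin()/kernel_fpu_end() section.
 */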
static int _sha256_update(struct shash_desc *desc, const u8 *data,
unsigned int len, sha256_block_fn *sha256_xform)
{
struct sha256_state *sctx = shash_desc_ctx(desc);
if (!crypto_simd_usable() ||
(sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE)
return crypto_sha256_update(desc, data, len);
/*
* Make sure struct sha256_state begins directly with the SHA256
* 256-bit internal state, as this is what the asm functions expect.
*/
BUILD_BUG_ON(offsetof(struct sha256_state, state) != 0);
kernel_fpu_begin();
sha256_base_do_update(desc, data, len, sha256_xform);
kernel_fpu_end();
return 0;
}
static int sha256_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out, sha256_block_fn *sha256_xform)
{
if (!crypto_simd_usable())
return crypto_sha256_finup(desc, data, len, out);
kernel_fpu_begin();
if (len)
sha256_base_do_update(desc, data, len, sha256_xform);
sha256_base_do_finalize(desc, sha256_xform);
kernel_fpu_end();
return sha256_base_finish(desc, out);
}
static int sha256_ssse3_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
return _sha256_update(desc, data, len, sha256_transform_ssse3);
}
static int sha256_ssse3_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
return sha256_finup(desc, data, len, out, sha256_transform_ssse3);
}
/* Add padding and return the message digest. */
static int sha256_ssse3_final(struct shash_desc *desc, u8 *out)
{
return sha256_ssse3_finup(desc, NULL, 0, out);
}
static struct shash_alg sha256_ssse3_algs[] = { {
.digestsize = SHA256_DIGEST_SIZE,
.init = sha256_base_init,
.update = sha256_ssse3_update,
.final = sha256_ssse3_final,
.finup = sha256_ssse3_finup,
.descsize = sizeof(struct sha256_state),
.base = {
.cra_name = "sha256",
.cra_driver_name = "sha256-ssse3",
.cra_priority = 150,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
}, {
.digestsize = SHA224_DIGEST_SIZE,
.init = sha224_base_init,
.update = sha256_ssse3_update,
.final = sha256_ssse3_final,
.finup = sha256_ssse3_finup,
.descsize = sizeof(struct sha256_state),
.base = {
.cra_name = "sha224",
.cra_driver_name = "sha224-ssse3",
.cra_priority = 150,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
} };
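/*
 * All variants register under the generic "sha256"/"sha224" names and are
 * chosen by cra_priority.  A minimal caller-side sketch using the shash
 * API (data/len/out are hypothetical buffers):
 *
 *	struct crypto_shash *tfm = crypto_alloc_shash("sha256", 0, 0);
 *	SHASH_DESC_ON_STACK(desc, tfm);
 *
 *	desc->tfm = tfm;
 *	crypto_shash_digest(desc, data, len, out);
 */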
static int register_sha256_ssse3(void)
{
if (boot_cpu_has(X86_FEATURE_SSSE3))
return crypto_register_shashes(sha256_ssse3_algs,
ARRAY_SIZE(sha256_ssse3_algs));
return 0;
}
static void unregister_sha256_ssse3(void)
{
if (boot_cpu_has(X86_FEATURE_SSSE3))
crypto_unregister_shashes(sha256_ssse3_algs,
ARRAY_SIZE(sha256_ssse3_algs));
}
asmlinkage void sha256_transform_avx(struct sha256_state *state,
const u8 *data, int blocks);
static int sha256_avx_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
return _sha256_update(desc, data, len, sha256_transform_avx);
}
static int sha256_avx_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
return sha256_finup(desc, data, len, out, sha256_transform_avx);
}
static int sha256_avx_final(struct shash_desc *desc, u8 *out)
{
return sha256_avx_finup(desc, NULL, 0, out);
}
static struct shash_alg sha256_avx_algs[] = { {
.digestsize = SHA256_DIGEST_SIZE,
.init = sha256_base_init,
.update = sha256_avx_update,
.final = sha256_avx_final,
.finup = sha256_avx_finup,
.descsize = sizeof(struct sha256_state),
.base = {
.cra_name = "sha256",
.cra_driver_name = "sha256-avx",
.cra_priority = 160,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
}, {
.digestsize = SHA224_DIGEST_SIZE,
.init = sha224_base_init,
.update = sha256_avx_update,
.final = sha256_avx_final,
.finup = sha256_avx_finup,
.descsize = sizeof(struct sha256_state),
.base = {
.cra_name = "sha224",
.cra_driver_name = "sha224-avx",
.cra_priority = 160,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
} };
static bool avx_usable(void)
{
if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) {
if (boot_cpu_has(X86_FEATURE_AVX))
pr_info("AVX detected but unusable.\n");
return false;
}
return true;
}
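/*
 * Note that boot_cpu_has(X86_FEATURE_AVX) alone is not enough: the kernel
 * must also be context-switching the SSE and YMM xstate components (the
 * OS has to enable OSXSAVE and set them in XCR0), which is what
 * cpu_has_xfeatures() checks above.
 */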
static int register_sha256_avx(void)
{
if (avx_usable())
return crypto_register_shashes(sha256_avx_algs,
ARRAY_SIZE(sha256_avx_algs));
return 0;
}
static void unregister_sha256_avx(void)
{
if (avx_usable())
crypto_unregister_shashes(sha256_avx_algs,
ARRAY_SIZE(sha256_avx_algs));
}
asmlinkage void sha256_transform_rorx(struct sha256_state *state,
const u8 *data, int blocks);
static int sha256_avx2_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
return _sha256_update(desc, data, len, sha256_transform_rorx);
}
static int sha256_avx2_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
return sha256_finup(desc, data, len, out, sha256_transform_rorx);
}
static int sha256_avx2_final(struct shash_desc *desc, u8 *out)
{
return sha256_avx2_finup(desc, NULL, 0, out);
}
static struct shash_alg sha256_avx2_algs[] = { {
.digestsize = SHA256_DIGEST_SIZE,
.init = sha256_base_init,
.update = sha256_avx2_update,
.final = sha256_avx2_final,
.finup = sha256_avx2_finup,
.descsize = sizeof(struct sha256_state),
.base = {
.cra_name = "sha256",
.cra_driver_name = "sha256-avx2",
.cra_priority = 170,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
}, {
.digestsize = SHA224_DIGEST_SIZE,
.init = sha224_base_init,
.update = sha256_avx2_update,
.final = sha256_avx2_final,
.finup = sha256_avx2_finup,
.descsize = sizeof(struct sha256_state),
.base = {
.cra_name = "sha224",
.cra_driver_name = "sha224-avx2",
.cra_priority = 170,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
} };
static bool avx2_usable(void)
{
if (avx_usable() && boot_cpu_has(X86_FEATURE_AVX2) &&
boot_cpu_has(X86_FEATURE_BMI2))
return true;
return false;
}
static int register_sha256_avx2(void)
{
if (avx2_usable())
return crypto_register_shashes(sha256_avx2_algs,
ARRAY_SIZE(sha256_avx2_algs));
return 0;
}
static void unregister_sha256_avx2(void)
{
if (avx2_usable())
crypto_unregister_shashes(sha256_avx2_algs,
ARRAY_SIZE(sha256_avx2_algs));
}
#ifdef CONFIG_AS_SHA256_NI
asmlinkage void sha256_ni_transform(struct sha256_state *digest,
const u8 *data, int rounds);
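/* Despite its name, "rounds" is the number of 64-byte blocks to process. */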
static int sha256_ni_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
return _sha256_update(desc, data, len, sha256_ni_transform);
}
static int sha256_ni_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
return sha256_finup(desc, data, len, out, sha256_ni_transform);
}
static int sha256_ni_final(struct shash_desc *desc, u8 *out)
{
return sha256_ni_finup(desc, NULL, 0, out);
}
static struct shash_alg sha256_ni_algs[] = { {
.digestsize = SHA256_DIGEST_SIZE,
.init = sha256_base_init,
.update = sha256_ni_update,
.final = sha256_ni_final,
.finup = sha256_ni_finup,
.descsize = sizeof(struct sha256_state),
.base = {
.cra_name = "sha256",
.cra_driver_name = "sha256-ni",
.cra_priority = 250,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
}, {
.digestsize = SHA224_DIGEST_SIZE,
.init = sha224_base_init,
.update = sha256_ni_update,
.final = sha256_ni_final,
.finup = sha256_ni_finup,
.descsize = sizeof(struct sha256_state),
.base = {
.cra_name = "sha224",
.cra_driver_name = "sha224-ni",
.cra_priority = 250,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
} };
static int register_sha256_ni(void)
{
if (boot_cpu_has(X86_FEATURE_SHA_NI))
return crypto_register_shashes(sha256_ni_algs,
ARRAY_SIZE(sha256_ni_algs));
return 0;
}
static void unregister_sha256_ni(void)
{
if (boot_cpu_has(X86_FEATURE_SHA_NI))
crypto_unregister_shashes(sha256_ni_algs,
ARRAY_SIZE(sha256_ni_algs));
}
#else
static inline int register_sha256_ni(void) { return 0; }
static inline void unregister_sha256_ni(void) { }
#endif
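/*
 * Registration below follows the priority ladder: sha256-generic (100) <
 * sha256-ssse3 (150) < sha256-avx (160) < sha256-avx2 (170) < sha256-ni
 * (250), so the crypto API automatically selects the fastest variant the
 * CPU supports.
 */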
static int __init sha256_ssse3_mod_init(void)
{
if (register_sha256_ssse3())
goto fail;
if (register_sha256_avx()) {
unregister_sha256_ssse3();
goto fail;
}
if (register_sha256_avx2()) {
unregister_sha256_avx();
unregister_sha256_ssse3();
goto fail;
}
if (register_sha256_ni()) {
unregister_sha256_avx2();
unregister_sha256_avx();
unregister_sha256_ssse3();
goto fail;
}
return 0;
fail:
return -ENODEV;
}
static void __exit sha256_ssse3_mod_fini(void)
{
unregister_sha256_ni();
unregister_sha256_avx2();
unregister_sha256_avx();
unregister_sha256_ssse3();
}
module_init(sha256_ssse3_mod_init);
module_exit(sha256_ssse3_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm, Supplemental SSE3 accelerated");
MODULE_ALIAS_CRYPTO("sha256");
MODULE_ALIAS_CRYPTO("sha256-ssse3");
MODULE_ALIAS_CRYPTO("sha256-avx");
MODULE_ALIAS_CRYPTO("sha256-avx2");
MODULE_ALIAS_CRYPTO("sha224");
MODULE_ALIAS_CRYPTO("sha224-ssse3");
MODULE_ALIAS_CRYPTO("sha224-avx");
MODULE_ALIAS_CRYPTO("sha224-avx2");
#ifdef CONFIG_AS_SHA256_NI
MODULE_ALIAS_CRYPTO("sha256-ni");
MODULE_ALIAS_CRYPTO("sha224-ni");
#endif
/* linux-master: arch/x86/crypto/sha256_ssse3_glue.c */
// SPDX-License-Identifier: GPL-2.0
/*
* NHPoly1305 - ε-almost-∆-universal hash function for Adiantum
* (AVX2 accelerated version)
*
* Copyright 2018 Google LLC
*/
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <crypto/nhpoly1305.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <asm/simd.h>
asmlinkage void nh_avx2(const u32 *key, const u8 *message, size_t message_len,
__le64 hash[NH_NUM_PASSES]);
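/*
 * The update hook below balances two constraints: inputs shorter than 64
 * bytes go to the generic C code, where the kernel_fpu_begin() overhead
 * would otherwise dominate, and longer inputs are processed in chunks of
 * at most SZ_4K so preemption is never disabled for too long at a stretch.
 */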
static int nhpoly1305_avx2_update(struct shash_desc *desc,
const u8 *src, unsigned int srclen)
{
if (srclen < 64 || !crypto_simd_usable())
return crypto_nhpoly1305_update(desc, src, srclen);
do {
unsigned int n = min_t(unsigned int, srclen, SZ_4K);
kernel_fpu_begin();
crypto_nhpoly1305_update_helper(desc, src, n, nh_avx2);
kernel_fpu_end();
src += n;
srclen -= n;
} while (srclen);
return 0;
}
static struct shash_alg nhpoly1305_alg = {
.base.cra_name = "nhpoly1305",
.base.cra_driver_name = "nhpoly1305-avx2",
.base.cra_priority = 300,
.base.cra_ctxsize = sizeof(struct nhpoly1305_key),
.base.cra_module = THIS_MODULE,
.digestsize = POLY1305_DIGEST_SIZE,
.init = crypto_nhpoly1305_init,
.update = nhpoly1305_avx2_update,
.final = crypto_nhpoly1305_final,
.setkey = crypto_nhpoly1305_setkey,
.descsize = sizeof(struct nhpoly1305_state),
};
static int __init nhpoly1305_mod_init(void)
{
if (!boot_cpu_has(X86_FEATURE_AVX2) ||
!boot_cpu_has(X86_FEATURE_OSXSAVE))
return -ENODEV;
return crypto_register_shash(&nhpoly1305_alg);
}
static void __exit nhpoly1305_mod_exit(void)
{
crypto_unregister_shash(&nhpoly1305_alg);
}
module_init(nhpoly1305_mod_init);
module_exit(nhpoly1305_mod_exit);
MODULE_DESCRIPTION("NHPoly1305 ε-almost-∆-universal hash function (AVX2-accelerated)");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Eric Biggers <[email protected]>");
MODULE_ALIAS_CRYPTO("nhpoly1305");
MODULE_ALIAS_CRYPTO("nhpoly1305-avx2");
/* linux-master: arch/x86/crypto/nhpoly1305-avx2-glue.c */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Glue Code for assembler optimized version of Camellia
*
* Copyright (c) 2012 Jussi Kivilinna <[email protected]>
*
* Camellia parts based on code by:
* Copyright (C) 2006 NTT (Nippon Telegraph and Telephone Corporation)
*/
#include <asm/unaligned.h>
#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <crypto/algapi.h>
#include "camellia.h"
#include "ecb_cbc_helpers.h"
/* regular block cipher functions */
asmlinkage void __camellia_enc_blk(const void *ctx, u8 *dst, const u8 *src,
bool xor);
EXPORT_SYMBOL_GPL(__camellia_enc_blk);
asmlinkage void camellia_dec_blk(const void *ctx, u8 *dst, const u8 *src);
EXPORT_SYMBOL_GPL(camellia_dec_blk);
/* 2-way parallel cipher functions */
asmlinkage void __camellia_enc_blk_2way(const void *ctx, u8 *dst, const u8 *src,
bool xor);
EXPORT_SYMBOL_GPL(__camellia_enc_blk_2way);
asmlinkage void camellia_dec_blk_2way(const void *ctx, u8 *dst, const u8 *src);
EXPORT_SYMBOL_GPL(camellia_dec_blk_2way);
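/*
 * These asm routines are plain integer-register implementations (note
 * that no kernel_fpu_begin() sections appear anywhere in this file); the
 * 2-way variants interleave two blocks per call purely for
 * instruction-level parallelism.
 */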
static void camellia_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
camellia_enc_blk(crypto_tfm_ctx(tfm), dst, src);
}
static void camellia_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
camellia_dec_blk(crypto_tfm_ctx(tfm), dst, src);
}
/* camellia sboxes */
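/*
 * Each 64-bit entry spreads one 8-bit s-box output across the byte lanes
 * named by the table suffix: digit N means "output of s-box N in this
 * byte", 0 means a zero byte.  Per the Camellia spec, s2(x) =
 * rotl8(s1(x), 1), s3(x) = rotr8(s1(x), 1) and s4(x) = s1(rotl8(x, 1)).
 * Pre-spreading the outputs this way lets the round function combine
 * per-byte table lookups with plain 64-bit XORs.
 */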
__visible const u64 camellia_sp10011110[256] = {
0x7000007070707000ULL, 0x8200008282828200ULL, 0x2c00002c2c2c2c00ULL,
0xec0000ecececec00ULL, 0xb30000b3b3b3b300ULL, 0x2700002727272700ULL,
0xc00000c0c0c0c000ULL, 0xe50000e5e5e5e500ULL, 0xe40000e4e4e4e400ULL,
0x8500008585858500ULL, 0x5700005757575700ULL, 0x3500003535353500ULL,
0xea0000eaeaeaea00ULL, 0x0c00000c0c0c0c00ULL, 0xae0000aeaeaeae00ULL,
0x4100004141414100ULL, 0x2300002323232300ULL, 0xef0000efefefef00ULL,
0x6b00006b6b6b6b00ULL, 0x9300009393939300ULL, 0x4500004545454500ULL,
0x1900001919191900ULL, 0xa50000a5a5a5a500ULL, 0x2100002121212100ULL,
0xed0000edededed00ULL, 0x0e00000e0e0e0e00ULL, 0x4f00004f4f4f4f00ULL,
0x4e00004e4e4e4e00ULL, 0x1d00001d1d1d1d00ULL, 0x6500006565656500ULL,
0x9200009292929200ULL, 0xbd0000bdbdbdbd00ULL, 0x8600008686868600ULL,
0xb80000b8b8b8b800ULL, 0xaf0000afafafaf00ULL, 0x8f00008f8f8f8f00ULL,
0x7c00007c7c7c7c00ULL, 0xeb0000ebebebeb00ULL, 0x1f00001f1f1f1f00ULL,
0xce0000cececece00ULL, 0x3e00003e3e3e3e00ULL, 0x3000003030303000ULL,
0xdc0000dcdcdcdc00ULL, 0x5f00005f5f5f5f00ULL, 0x5e00005e5e5e5e00ULL,
0xc50000c5c5c5c500ULL, 0x0b00000b0b0b0b00ULL, 0x1a00001a1a1a1a00ULL,
0xa60000a6a6a6a600ULL, 0xe10000e1e1e1e100ULL, 0x3900003939393900ULL,
0xca0000cacacaca00ULL, 0xd50000d5d5d5d500ULL, 0x4700004747474700ULL,
0x5d00005d5d5d5d00ULL, 0x3d00003d3d3d3d00ULL, 0xd90000d9d9d9d900ULL,
0x0100000101010100ULL, 0x5a00005a5a5a5a00ULL, 0xd60000d6d6d6d600ULL,
0x5100005151515100ULL, 0x5600005656565600ULL, 0x6c00006c6c6c6c00ULL,
0x4d00004d4d4d4d00ULL, 0x8b00008b8b8b8b00ULL, 0x0d00000d0d0d0d00ULL,
0x9a00009a9a9a9a00ULL, 0x6600006666666600ULL, 0xfb0000fbfbfbfb00ULL,
0xcc0000cccccccc00ULL, 0xb00000b0b0b0b000ULL, 0x2d00002d2d2d2d00ULL,
0x7400007474747400ULL, 0x1200001212121200ULL, 0x2b00002b2b2b2b00ULL,
0x2000002020202000ULL, 0xf00000f0f0f0f000ULL, 0xb10000b1b1b1b100ULL,
0x8400008484848400ULL, 0x9900009999999900ULL, 0xdf0000dfdfdfdf00ULL,
0x4c00004c4c4c4c00ULL, 0xcb0000cbcbcbcb00ULL, 0xc20000c2c2c2c200ULL,
0x3400003434343400ULL, 0x7e00007e7e7e7e00ULL, 0x7600007676767600ULL,
0x0500000505050500ULL, 0x6d00006d6d6d6d00ULL, 0xb70000b7b7b7b700ULL,
0xa90000a9a9a9a900ULL, 0x3100003131313100ULL, 0xd10000d1d1d1d100ULL,
0x1700001717171700ULL, 0x0400000404040400ULL, 0xd70000d7d7d7d700ULL,
0x1400001414141400ULL, 0x5800005858585800ULL, 0x3a00003a3a3a3a00ULL,
0x6100006161616100ULL, 0xde0000dededede00ULL, 0x1b00001b1b1b1b00ULL,
0x1100001111111100ULL, 0x1c00001c1c1c1c00ULL, 0x3200003232323200ULL,
0x0f00000f0f0f0f00ULL, 0x9c00009c9c9c9c00ULL, 0x1600001616161600ULL,
0x5300005353535300ULL, 0x1800001818181800ULL, 0xf20000f2f2f2f200ULL,
0x2200002222222200ULL, 0xfe0000fefefefe00ULL, 0x4400004444444400ULL,
0xcf0000cfcfcfcf00ULL, 0xb20000b2b2b2b200ULL, 0xc30000c3c3c3c300ULL,
0xb50000b5b5b5b500ULL, 0x7a00007a7a7a7a00ULL, 0x9100009191919100ULL,
0x2400002424242400ULL, 0x0800000808080800ULL, 0xe80000e8e8e8e800ULL,
0xa80000a8a8a8a800ULL, 0x6000006060606000ULL, 0xfc0000fcfcfcfc00ULL,
0x6900006969696900ULL, 0x5000005050505000ULL, 0xaa0000aaaaaaaa00ULL,
0xd00000d0d0d0d000ULL, 0xa00000a0a0a0a000ULL, 0x7d00007d7d7d7d00ULL,
0xa10000a1a1a1a100ULL, 0x8900008989898900ULL, 0x6200006262626200ULL,
0x9700009797979700ULL, 0x5400005454545400ULL, 0x5b00005b5b5b5b00ULL,
0x1e00001e1e1e1e00ULL, 0x9500009595959500ULL, 0xe00000e0e0e0e000ULL,
0xff0000ffffffff00ULL, 0x6400006464646400ULL, 0xd20000d2d2d2d200ULL,
0x1000001010101000ULL, 0xc40000c4c4c4c400ULL, 0x0000000000000000ULL,
0x4800004848484800ULL, 0xa30000a3a3a3a300ULL, 0xf70000f7f7f7f700ULL,
0x7500007575757500ULL, 0xdb0000dbdbdbdb00ULL, 0x8a00008a8a8a8a00ULL,
0x0300000303030300ULL, 0xe60000e6e6e6e600ULL, 0xda0000dadadada00ULL,
0x0900000909090900ULL, 0x3f00003f3f3f3f00ULL, 0xdd0000dddddddd00ULL,
0x9400009494949400ULL, 0x8700008787878700ULL, 0x5c00005c5c5c5c00ULL,
0x8300008383838300ULL, 0x0200000202020200ULL, 0xcd0000cdcdcdcd00ULL,
0x4a00004a4a4a4a00ULL, 0x9000009090909000ULL, 0x3300003333333300ULL,
0x7300007373737300ULL, 0x6700006767676700ULL, 0xf60000f6f6f6f600ULL,
0xf30000f3f3f3f300ULL, 0x9d00009d9d9d9d00ULL, 0x7f00007f7f7f7f00ULL,
0xbf0000bfbfbfbf00ULL, 0xe20000e2e2e2e200ULL, 0x5200005252525200ULL,
0x9b00009b9b9b9b00ULL, 0xd80000d8d8d8d800ULL, 0x2600002626262600ULL,
0xc80000c8c8c8c800ULL, 0x3700003737373700ULL, 0xc60000c6c6c6c600ULL,
0x3b00003b3b3b3b00ULL, 0x8100008181818100ULL, 0x9600009696969600ULL,
0x6f00006f6f6f6f00ULL, 0x4b00004b4b4b4b00ULL, 0x1300001313131300ULL,
0xbe0000bebebebe00ULL, 0x6300006363636300ULL, 0x2e00002e2e2e2e00ULL,
0xe90000e9e9e9e900ULL, 0x7900007979797900ULL, 0xa70000a7a7a7a700ULL,
0x8c00008c8c8c8c00ULL, 0x9f00009f9f9f9f00ULL, 0x6e00006e6e6e6e00ULL,
0xbc0000bcbcbcbc00ULL, 0x8e00008e8e8e8e00ULL, 0x2900002929292900ULL,
0xf50000f5f5f5f500ULL, 0xf90000f9f9f9f900ULL, 0xb60000b6b6b6b600ULL,
0x2f00002f2f2f2f00ULL, 0xfd0000fdfdfdfd00ULL, 0xb40000b4b4b4b400ULL,
0x5900005959595900ULL, 0x7800007878787800ULL, 0x9800009898989800ULL,
0x0600000606060600ULL, 0x6a00006a6a6a6a00ULL, 0xe70000e7e7e7e700ULL,
0x4600004646464600ULL, 0x7100007171717100ULL, 0xba0000babababa00ULL,
0xd40000d4d4d4d400ULL, 0x2500002525252500ULL, 0xab0000abababab00ULL,
0x4200004242424200ULL, 0x8800008888888800ULL, 0xa20000a2a2a2a200ULL,
0x8d00008d8d8d8d00ULL, 0xfa0000fafafafa00ULL, 0x7200007272727200ULL,
0x0700000707070700ULL, 0xb90000b9b9b9b900ULL, 0x5500005555555500ULL,
0xf80000f8f8f8f800ULL, 0xee0000eeeeeeee00ULL, 0xac0000acacacac00ULL,
0x0a00000a0a0a0a00ULL, 0x3600003636363600ULL, 0x4900004949494900ULL,
0x2a00002a2a2a2a00ULL, 0x6800006868686800ULL, 0x3c00003c3c3c3c00ULL,
0x3800003838383800ULL, 0xf10000f1f1f1f100ULL, 0xa40000a4a4a4a400ULL,
0x4000004040404000ULL, 0x2800002828282800ULL, 0xd30000d3d3d3d300ULL,
0x7b00007b7b7b7b00ULL, 0xbb0000bbbbbbbb00ULL, 0xc90000c9c9c9c900ULL,
0x4300004343434300ULL, 0xc10000c1c1c1c100ULL, 0x1500001515151500ULL,
0xe30000e3e3e3e300ULL, 0xad0000adadadad00ULL, 0xf40000f4f4f4f400ULL,
0x7700007777777700ULL, 0xc70000c7c7c7c700ULL, 0x8000008080808000ULL,
0x9e00009e9e9e9e00ULL,
};
__visible const u64 camellia_sp22000222[256] = {
0xe0e0000000e0e0e0ULL, 0x0505000000050505ULL, 0x5858000000585858ULL,
0xd9d9000000d9d9d9ULL, 0x6767000000676767ULL, 0x4e4e0000004e4e4eULL,
0x8181000000818181ULL, 0xcbcb000000cbcbcbULL, 0xc9c9000000c9c9c9ULL,
0x0b0b0000000b0b0bULL, 0xaeae000000aeaeaeULL, 0x6a6a0000006a6a6aULL,
0xd5d5000000d5d5d5ULL, 0x1818000000181818ULL, 0x5d5d0000005d5d5dULL,
0x8282000000828282ULL, 0x4646000000464646ULL, 0xdfdf000000dfdfdfULL,
0xd6d6000000d6d6d6ULL, 0x2727000000272727ULL, 0x8a8a0000008a8a8aULL,
0x3232000000323232ULL, 0x4b4b0000004b4b4bULL, 0x4242000000424242ULL,
0xdbdb000000dbdbdbULL, 0x1c1c0000001c1c1cULL, 0x9e9e0000009e9e9eULL,
0x9c9c0000009c9c9cULL, 0x3a3a0000003a3a3aULL, 0xcaca000000cacacaULL,
0x2525000000252525ULL, 0x7b7b0000007b7b7bULL, 0x0d0d0000000d0d0dULL,
0x7171000000717171ULL, 0x5f5f0000005f5f5fULL, 0x1f1f0000001f1f1fULL,
0xf8f8000000f8f8f8ULL, 0xd7d7000000d7d7d7ULL, 0x3e3e0000003e3e3eULL,
0x9d9d0000009d9d9dULL, 0x7c7c0000007c7c7cULL, 0x6060000000606060ULL,
0xb9b9000000b9b9b9ULL, 0xbebe000000bebebeULL, 0xbcbc000000bcbcbcULL,
0x8b8b0000008b8b8bULL, 0x1616000000161616ULL, 0x3434000000343434ULL,
0x4d4d0000004d4d4dULL, 0xc3c3000000c3c3c3ULL, 0x7272000000727272ULL,
0x9595000000959595ULL, 0xabab000000abababULL, 0x8e8e0000008e8e8eULL,
0xbaba000000bababaULL, 0x7a7a0000007a7a7aULL, 0xb3b3000000b3b3b3ULL,
0x0202000000020202ULL, 0xb4b4000000b4b4b4ULL, 0xadad000000adadadULL,
0xa2a2000000a2a2a2ULL, 0xacac000000acacacULL, 0xd8d8000000d8d8d8ULL,
0x9a9a0000009a9a9aULL, 0x1717000000171717ULL, 0x1a1a0000001a1a1aULL,
0x3535000000353535ULL, 0xcccc000000ccccccULL, 0xf7f7000000f7f7f7ULL,
0x9999000000999999ULL, 0x6161000000616161ULL, 0x5a5a0000005a5a5aULL,
0xe8e8000000e8e8e8ULL, 0x2424000000242424ULL, 0x5656000000565656ULL,
0x4040000000404040ULL, 0xe1e1000000e1e1e1ULL, 0x6363000000636363ULL,
0x0909000000090909ULL, 0x3333000000333333ULL, 0xbfbf000000bfbfbfULL,
0x9898000000989898ULL, 0x9797000000979797ULL, 0x8585000000858585ULL,
0x6868000000686868ULL, 0xfcfc000000fcfcfcULL, 0xecec000000ecececULL,
0x0a0a0000000a0a0aULL, 0xdada000000dadadaULL, 0x6f6f0000006f6f6fULL,
0x5353000000535353ULL, 0x6262000000626262ULL, 0xa3a3000000a3a3a3ULL,
0x2e2e0000002e2e2eULL, 0x0808000000080808ULL, 0xafaf000000afafafULL,
0x2828000000282828ULL, 0xb0b0000000b0b0b0ULL, 0x7474000000747474ULL,
0xc2c2000000c2c2c2ULL, 0xbdbd000000bdbdbdULL, 0x3636000000363636ULL,
0x2222000000222222ULL, 0x3838000000383838ULL, 0x6464000000646464ULL,
0x1e1e0000001e1e1eULL, 0x3939000000393939ULL, 0x2c2c0000002c2c2cULL,
0xa6a6000000a6a6a6ULL, 0x3030000000303030ULL, 0xe5e5000000e5e5e5ULL,
0x4444000000444444ULL, 0xfdfd000000fdfdfdULL, 0x8888000000888888ULL,
0x9f9f0000009f9f9fULL, 0x6565000000656565ULL, 0x8787000000878787ULL,
0x6b6b0000006b6b6bULL, 0xf4f4000000f4f4f4ULL, 0x2323000000232323ULL,
0x4848000000484848ULL, 0x1010000000101010ULL, 0xd1d1000000d1d1d1ULL,
0x5151000000515151ULL, 0xc0c0000000c0c0c0ULL, 0xf9f9000000f9f9f9ULL,
0xd2d2000000d2d2d2ULL, 0xa0a0000000a0a0a0ULL, 0x5555000000555555ULL,
0xa1a1000000a1a1a1ULL, 0x4141000000414141ULL, 0xfafa000000fafafaULL,
0x4343000000434343ULL, 0x1313000000131313ULL, 0xc4c4000000c4c4c4ULL,
0x2f2f0000002f2f2fULL, 0xa8a8000000a8a8a8ULL, 0xb6b6000000b6b6b6ULL,
0x3c3c0000003c3c3cULL, 0x2b2b0000002b2b2bULL, 0xc1c1000000c1c1c1ULL,
0xffff000000ffffffULL, 0xc8c8000000c8c8c8ULL, 0xa5a5000000a5a5a5ULL,
0x2020000000202020ULL, 0x8989000000898989ULL, 0x0000000000000000ULL,
0x9090000000909090ULL, 0x4747000000474747ULL, 0xefef000000efefefULL,
0xeaea000000eaeaeaULL, 0xb7b7000000b7b7b7ULL, 0x1515000000151515ULL,
0x0606000000060606ULL, 0xcdcd000000cdcdcdULL, 0xb5b5000000b5b5b5ULL,
0x1212000000121212ULL, 0x7e7e0000007e7e7eULL, 0xbbbb000000bbbbbbULL,
0x2929000000292929ULL, 0x0f0f0000000f0f0fULL, 0xb8b8000000b8b8b8ULL,
0x0707000000070707ULL, 0x0404000000040404ULL, 0x9b9b0000009b9b9bULL,
0x9494000000949494ULL, 0x2121000000212121ULL, 0x6666000000666666ULL,
0xe6e6000000e6e6e6ULL, 0xcece000000cececeULL, 0xeded000000edededULL,
0xe7e7000000e7e7e7ULL, 0x3b3b0000003b3b3bULL, 0xfefe000000fefefeULL,
0x7f7f0000007f7f7fULL, 0xc5c5000000c5c5c5ULL, 0xa4a4000000a4a4a4ULL,
0x3737000000373737ULL, 0xb1b1000000b1b1b1ULL, 0x4c4c0000004c4c4cULL,
0x9191000000919191ULL, 0x6e6e0000006e6e6eULL, 0x8d8d0000008d8d8dULL,
0x7676000000767676ULL, 0x0303000000030303ULL, 0x2d2d0000002d2d2dULL,
0xdede000000dededeULL, 0x9696000000969696ULL, 0x2626000000262626ULL,
0x7d7d0000007d7d7dULL, 0xc6c6000000c6c6c6ULL, 0x5c5c0000005c5c5cULL,
0xd3d3000000d3d3d3ULL, 0xf2f2000000f2f2f2ULL, 0x4f4f0000004f4f4fULL,
0x1919000000191919ULL, 0x3f3f0000003f3f3fULL, 0xdcdc000000dcdcdcULL,
0x7979000000797979ULL, 0x1d1d0000001d1d1dULL, 0x5252000000525252ULL,
0xebeb000000ebebebULL, 0xf3f3000000f3f3f3ULL, 0x6d6d0000006d6d6dULL,
0x5e5e0000005e5e5eULL, 0xfbfb000000fbfbfbULL, 0x6969000000696969ULL,
0xb2b2000000b2b2b2ULL, 0xf0f0000000f0f0f0ULL, 0x3131000000313131ULL,
0x0c0c0000000c0c0cULL, 0xd4d4000000d4d4d4ULL, 0xcfcf000000cfcfcfULL,
0x8c8c0000008c8c8cULL, 0xe2e2000000e2e2e2ULL, 0x7575000000757575ULL,
0xa9a9000000a9a9a9ULL, 0x4a4a0000004a4a4aULL, 0x5757000000575757ULL,
0x8484000000848484ULL, 0x1111000000111111ULL, 0x4545000000454545ULL,
0x1b1b0000001b1b1bULL, 0xf5f5000000f5f5f5ULL, 0xe4e4000000e4e4e4ULL,
0x0e0e0000000e0e0eULL, 0x7373000000737373ULL, 0xaaaa000000aaaaaaULL,
0xf1f1000000f1f1f1ULL, 0xdddd000000ddddddULL, 0x5959000000595959ULL,
0x1414000000141414ULL, 0x6c6c0000006c6c6cULL, 0x9292000000929292ULL,
0x5454000000545454ULL, 0xd0d0000000d0d0d0ULL, 0x7878000000787878ULL,
0x7070000000707070ULL, 0xe3e3000000e3e3e3ULL, 0x4949000000494949ULL,
0x8080000000808080ULL, 0x5050000000505050ULL, 0xa7a7000000a7a7a7ULL,
0xf6f6000000f6f6f6ULL, 0x7777000000777777ULL, 0x9393000000939393ULL,
0x8686000000868686ULL, 0x8383000000838383ULL, 0x2a2a0000002a2a2aULL,
0xc7c7000000c7c7c7ULL, 0x5b5b0000005b5b5bULL, 0xe9e9000000e9e9e9ULL,
0xeeee000000eeeeeeULL, 0x8f8f0000008f8f8fULL, 0x0101000000010101ULL,
0x3d3d0000003d3d3dULL,
};
__visible const u64 camellia_sp03303033[256] = {
0x0038380038003838ULL, 0x0041410041004141ULL, 0x0016160016001616ULL,
0x0076760076007676ULL, 0x00d9d900d900d9d9ULL, 0x0093930093009393ULL,
0x0060600060006060ULL, 0x00f2f200f200f2f2ULL, 0x0072720072007272ULL,
0x00c2c200c200c2c2ULL, 0x00abab00ab00ababULL, 0x009a9a009a009a9aULL,
0x0075750075007575ULL, 0x0006060006000606ULL, 0x0057570057005757ULL,
0x00a0a000a000a0a0ULL, 0x0091910091009191ULL, 0x00f7f700f700f7f7ULL,
0x00b5b500b500b5b5ULL, 0x00c9c900c900c9c9ULL, 0x00a2a200a200a2a2ULL,
0x008c8c008c008c8cULL, 0x00d2d200d200d2d2ULL, 0x0090900090009090ULL,
0x00f6f600f600f6f6ULL, 0x0007070007000707ULL, 0x00a7a700a700a7a7ULL,
0x0027270027002727ULL, 0x008e8e008e008e8eULL, 0x00b2b200b200b2b2ULL,
0x0049490049004949ULL, 0x00dede00de00dedeULL, 0x0043430043004343ULL,
0x005c5c005c005c5cULL, 0x00d7d700d700d7d7ULL, 0x00c7c700c700c7c7ULL,
0x003e3e003e003e3eULL, 0x00f5f500f500f5f5ULL, 0x008f8f008f008f8fULL,
0x0067670067006767ULL, 0x001f1f001f001f1fULL, 0x0018180018001818ULL,
0x006e6e006e006e6eULL, 0x00afaf00af00afafULL, 0x002f2f002f002f2fULL,
0x00e2e200e200e2e2ULL, 0x0085850085008585ULL, 0x000d0d000d000d0dULL,
0x0053530053005353ULL, 0x00f0f000f000f0f0ULL, 0x009c9c009c009c9cULL,
0x0065650065006565ULL, 0x00eaea00ea00eaeaULL, 0x00a3a300a300a3a3ULL,
0x00aeae00ae00aeaeULL, 0x009e9e009e009e9eULL, 0x00ecec00ec00ececULL,
0x0080800080008080ULL, 0x002d2d002d002d2dULL, 0x006b6b006b006b6bULL,
0x00a8a800a800a8a8ULL, 0x002b2b002b002b2bULL, 0x0036360036003636ULL,
0x00a6a600a600a6a6ULL, 0x00c5c500c500c5c5ULL, 0x0086860086008686ULL,
0x004d4d004d004d4dULL, 0x0033330033003333ULL, 0x00fdfd00fd00fdfdULL,
0x0066660066006666ULL, 0x0058580058005858ULL, 0x0096960096009696ULL,
0x003a3a003a003a3aULL, 0x0009090009000909ULL, 0x0095950095009595ULL,
0x0010100010001010ULL, 0x0078780078007878ULL, 0x00d8d800d800d8d8ULL,
0x0042420042004242ULL, 0x00cccc00cc00ccccULL, 0x00efef00ef00efefULL,
0x0026260026002626ULL, 0x00e5e500e500e5e5ULL, 0x0061610061006161ULL,
0x001a1a001a001a1aULL, 0x003f3f003f003f3fULL, 0x003b3b003b003b3bULL,
0x0082820082008282ULL, 0x00b6b600b600b6b6ULL, 0x00dbdb00db00dbdbULL,
0x00d4d400d400d4d4ULL, 0x0098980098009898ULL, 0x00e8e800e800e8e8ULL,
0x008b8b008b008b8bULL, 0x0002020002000202ULL, 0x00ebeb00eb00ebebULL,
0x000a0a000a000a0aULL, 0x002c2c002c002c2cULL, 0x001d1d001d001d1dULL,
0x00b0b000b000b0b0ULL, 0x006f6f006f006f6fULL, 0x008d8d008d008d8dULL,
0x0088880088008888ULL, 0x000e0e000e000e0eULL, 0x0019190019001919ULL,
0x0087870087008787ULL, 0x004e4e004e004e4eULL, 0x000b0b000b000b0bULL,
0x00a9a900a900a9a9ULL, 0x000c0c000c000c0cULL, 0x0079790079007979ULL,
0x0011110011001111ULL, 0x007f7f007f007f7fULL, 0x0022220022002222ULL,
0x00e7e700e700e7e7ULL, 0x0059590059005959ULL, 0x00e1e100e100e1e1ULL,
0x00dada00da00dadaULL, 0x003d3d003d003d3dULL, 0x00c8c800c800c8c8ULL,
0x0012120012001212ULL, 0x0004040004000404ULL, 0x0074740074007474ULL,
0x0054540054005454ULL, 0x0030300030003030ULL, 0x007e7e007e007e7eULL,
0x00b4b400b400b4b4ULL, 0x0028280028002828ULL, 0x0055550055005555ULL,
0x0068680068006868ULL, 0x0050500050005050ULL, 0x00bebe00be00bebeULL,
0x00d0d000d000d0d0ULL, 0x00c4c400c400c4c4ULL, 0x0031310031003131ULL,
0x00cbcb00cb00cbcbULL, 0x002a2a002a002a2aULL, 0x00adad00ad00adadULL,
0x000f0f000f000f0fULL, 0x00caca00ca00cacaULL, 0x0070700070007070ULL,
0x00ffff00ff00ffffULL, 0x0032320032003232ULL, 0x0069690069006969ULL,
0x0008080008000808ULL, 0x0062620062006262ULL, 0x0000000000000000ULL,
0x0024240024002424ULL, 0x00d1d100d100d1d1ULL, 0x00fbfb00fb00fbfbULL,
0x00baba00ba00babaULL, 0x00eded00ed00ededULL, 0x0045450045004545ULL,
0x0081810081008181ULL, 0x0073730073007373ULL, 0x006d6d006d006d6dULL,
0x0084840084008484ULL, 0x009f9f009f009f9fULL, 0x00eeee00ee00eeeeULL,
0x004a4a004a004a4aULL, 0x00c3c300c300c3c3ULL, 0x002e2e002e002e2eULL,
0x00c1c100c100c1c1ULL, 0x0001010001000101ULL, 0x00e6e600e600e6e6ULL,
0x0025250025002525ULL, 0x0048480048004848ULL, 0x0099990099009999ULL,
0x00b9b900b900b9b9ULL, 0x00b3b300b300b3b3ULL, 0x007b7b007b007b7bULL,
0x00f9f900f900f9f9ULL, 0x00cece00ce00ceceULL, 0x00bfbf00bf00bfbfULL,
0x00dfdf00df00dfdfULL, 0x0071710071007171ULL, 0x0029290029002929ULL,
0x00cdcd00cd00cdcdULL, 0x006c6c006c006c6cULL, 0x0013130013001313ULL,
0x0064640064006464ULL, 0x009b9b009b009b9bULL, 0x0063630063006363ULL,
0x009d9d009d009d9dULL, 0x00c0c000c000c0c0ULL, 0x004b4b004b004b4bULL,
0x00b7b700b700b7b7ULL, 0x00a5a500a500a5a5ULL, 0x0089890089008989ULL,
0x005f5f005f005f5fULL, 0x00b1b100b100b1b1ULL, 0x0017170017001717ULL,
0x00f4f400f400f4f4ULL, 0x00bcbc00bc00bcbcULL, 0x00d3d300d300d3d3ULL,
0x0046460046004646ULL, 0x00cfcf00cf00cfcfULL, 0x0037370037003737ULL,
0x005e5e005e005e5eULL, 0x0047470047004747ULL, 0x0094940094009494ULL,
0x00fafa00fa00fafaULL, 0x00fcfc00fc00fcfcULL, 0x005b5b005b005b5bULL,
0x0097970097009797ULL, 0x00fefe00fe00fefeULL, 0x005a5a005a005a5aULL,
0x00acac00ac00acacULL, 0x003c3c003c003c3cULL, 0x004c4c004c004c4cULL,
0x0003030003000303ULL, 0x0035350035003535ULL, 0x00f3f300f300f3f3ULL,
0x0023230023002323ULL, 0x00b8b800b800b8b8ULL, 0x005d5d005d005d5dULL,
0x006a6a006a006a6aULL, 0x0092920092009292ULL, 0x00d5d500d500d5d5ULL,
0x0021210021002121ULL, 0x0044440044004444ULL, 0x0051510051005151ULL,
0x00c6c600c600c6c6ULL, 0x007d7d007d007d7dULL, 0x0039390039003939ULL,
0x0083830083008383ULL, 0x00dcdc00dc00dcdcULL, 0x00aaaa00aa00aaaaULL,
0x007c7c007c007c7cULL, 0x0077770077007777ULL, 0x0056560056005656ULL,
0x0005050005000505ULL, 0x001b1b001b001b1bULL, 0x00a4a400a400a4a4ULL,
0x0015150015001515ULL, 0x0034340034003434ULL, 0x001e1e001e001e1eULL,
0x001c1c001c001c1cULL, 0x00f8f800f800f8f8ULL, 0x0052520052005252ULL,
0x0020200020002020ULL, 0x0014140014001414ULL, 0x00e9e900e900e9e9ULL,
0x00bdbd00bd00bdbdULL, 0x00dddd00dd00ddddULL, 0x00e4e400e400e4e4ULL,
0x00a1a100a100a1a1ULL, 0x00e0e000e000e0e0ULL, 0x008a8a008a008a8aULL,
0x00f1f100f100f1f1ULL, 0x00d6d600d600d6d6ULL, 0x007a7a007a007a7aULL,
0x00bbbb00bb00bbbbULL, 0x00e3e300e300e3e3ULL, 0x0040400040004040ULL,
0x004f4f004f004f4fULL,
};
__visible const u64 camellia_sp00444404[256] = {
0x0000707070700070ULL, 0x00002c2c2c2c002cULL, 0x0000b3b3b3b300b3ULL,
0x0000c0c0c0c000c0ULL, 0x0000e4e4e4e400e4ULL, 0x0000575757570057ULL,
0x0000eaeaeaea00eaULL, 0x0000aeaeaeae00aeULL, 0x0000232323230023ULL,
0x00006b6b6b6b006bULL, 0x0000454545450045ULL, 0x0000a5a5a5a500a5ULL,
0x0000edededed00edULL, 0x00004f4f4f4f004fULL, 0x00001d1d1d1d001dULL,
0x0000929292920092ULL, 0x0000868686860086ULL, 0x0000afafafaf00afULL,
0x00007c7c7c7c007cULL, 0x00001f1f1f1f001fULL, 0x00003e3e3e3e003eULL,
0x0000dcdcdcdc00dcULL, 0x00005e5e5e5e005eULL, 0x00000b0b0b0b000bULL,
0x0000a6a6a6a600a6ULL, 0x0000393939390039ULL, 0x0000d5d5d5d500d5ULL,
0x00005d5d5d5d005dULL, 0x0000d9d9d9d900d9ULL, 0x00005a5a5a5a005aULL,
0x0000515151510051ULL, 0x00006c6c6c6c006cULL, 0x00008b8b8b8b008bULL,
0x00009a9a9a9a009aULL, 0x0000fbfbfbfb00fbULL, 0x0000b0b0b0b000b0ULL,
0x0000747474740074ULL, 0x00002b2b2b2b002bULL, 0x0000f0f0f0f000f0ULL,
0x0000848484840084ULL, 0x0000dfdfdfdf00dfULL, 0x0000cbcbcbcb00cbULL,
0x0000343434340034ULL, 0x0000767676760076ULL, 0x00006d6d6d6d006dULL,
0x0000a9a9a9a900a9ULL, 0x0000d1d1d1d100d1ULL, 0x0000040404040004ULL,
0x0000141414140014ULL, 0x00003a3a3a3a003aULL, 0x0000dededede00deULL,
0x0000111111110011ULL, 0x0000323232320032ULL, 0x00009c9c9c9c009cULL,
0x0000535353530053ULL, 0x0000f2f2f2f200f2ULL, 0x0000fefefefe00feULL,
0x0000cfcfcfcf00cfULL, 0x0000c3c3c3c300c3ULL, 0x00007a7a7a7a007aULL,
0x0000242424240024ULL, 0x0000e8e8e8e800e8ULL, 0x0000606060600060ULL,
0x0000696969690069ULL, 0x0000aaaaaaaa00aaULL, 0x0000a0a0a0a000a0ULL,
0x0000a1a1a1a100a1ULL, 0x0000626262620062ULL, 0x0000545454540054ULL,
0x00001e1e1e1e001eULL, 0x0000e0e0e0e000e0ULL, 0x0000646464640064ULL,
0x0000101010100010ULL, 0x0000000000000000ULL, 0x0000a3a3a3a300a3ULL,
0x0000757575750075ULL, 0x00008a8a8a8a008aULL, 0x0000e6e6e6e600e6ULL,
0x0000090909090009ULL, 0x0000dddddddd00ddULL, 0x0000878787870087ULL,
0x0000838383830083ULL, 0x0000cdcdcdcd00cdULL, 0x0000909090900090ULL,
0x0000737373730073ULL, 0x0000f6f6f6f600f6ULL, 0x00009d9d9d9d009dULL,
0x0000bfbfbfbf00bfULL, 0x0000525252520052ULL, 0x0000d8d8d8d800d8ULL,
0x0000c8c8c8c800c8ULL, 0x0000c6c6c6c600c6ULL, 0x0000818181810081ULL,
0x00006f6f6f6f006fULL, 0x0000131313130013ULL, 0x0000636363630063ULL,
0x0000e9e9e9e900e9ULL, 0x0000a7a7a7a700a7ULL, 0x00009f9f9f9f009fULL,
0x0000bcbcbcbc00bcULL, 0x0000292929290029ULL, 0x0000f9f9f9f900f9ULL,
0x00002f2f2f2f002fULL, 0x0000b4b4b4b400b4ULL, 0x0000787878780078ULL,
0x0000060606060006ULL, 0x0000e7e7e7e700e7ULL, 0x0000717171710071ULL,
0x0000d4d4d4d400d4ULL, 0x0000abababab00abULL, 0x0000888888880088ULL,
0x00008d8d8d8d008dULL, 0x0000727272720072ULL, 0x0000b9b9b9b900b9ULL,
0x0000f8f8f8f800f8ULL, 0x0000acacacac00acULL, 0x0000363636360036ULL,
0x00002a2a2a2a002aULL, 0x00003c3c3c3c003cULL, 0x0000f1f1f1f100f1ULL,
0x0000404040400040ULL, 0x0000d3d3d3d300d3ULL, 0x0000bbbbbbbb00bbULL,
0x0000434343430043ULL, 0x0000151515150015ULL, 0x0000adadadad00adULL,
0x0000777777770077ULL, 0x0000808080800080ULL, 0x0000828282820082ULL,
0x0000ecececec00ecULL, 0x0000272727270027ULL, 0x0000e5e5e5e500e5ULL,
0x0000858585850085ULL, 0x0000353535350035ULL, 0x00000c0c0c0c000cULL,
0x0000414141410041ULL, 0x0000efefefef00efULL, 0x0000939393930093ULL,
0x0000191919190019ULL, 0x0000212121210021ULL, 0x00000e0e0e0e000eULL,
0x00004e4e4e4e004eULL, 0x0000656565650065ULL, 0x0000bdbdbdbd00bdULL,
0x0000b8b8b8b800b8ULL, 0x00008f8f8f8f008fULL, 0x0000ebebebeb00ebULL,
0x0000cececece00ceULL, 0x0000303030300030ULL, 0x00005f5f5f5f005fULL,
0x0000c5c5c5c500c5ULL, 0x00001a1a1a1a001aULL, 0x0000e1e1e1e100e1ULL,
0x0000cacacaca00caULL, 0x0000474747470047ULL, 0x00003d3d3d3d003dULL,
0x0000010101010001ULL, 0x0000d6d6d6d600d6ULL, 0x0000565656560056ULL,
0x00004d4d4d4d004dULL, 0x00000d0d0d0d000dULL, 0x0000666666660066ULL,
0x0000cccccccc00ccULL, 0x00002d2d2d2d002dULL, 0x0000121212120012ULL,
0x0000202020200020ULL, 0x0000b1b1b1b100b1ULL, 0x0000999999990099ULL,
0x00004c4c4c4c004cULL, 0x0000c2c2c2c200c2ULL, 0x00007e7e7e7e007eULL,
0x0000050505050005ULL, 0x0000b7b7b7b700b7ULL, 0x0000313131310031ULL,
0x0000171717170017ULL, 0x0000d7d7d7d700d7ULL, 0x0000585858580058ULL,
0x0000616161610061ULL, 0x00001b1b1b1b001bULL, 0x00001c1c1c1c001cULL,
0x00000f0f0f0f000fULL, 0x0000161616160016ULL, 0x0000181818180018ULL,
0x0000222222220022ULL, 0x0000444444440044ULL, 0x0000b2b2b2b200b2ULL,
0x0000b5b5b5b500b5ULL, 0x0000919191910091ULL, 0x0000080808080008ULL,
0x0000a8a8a8a800a8ULL, 0x0000fcfcfcfc00fcULL, 0x0000505050500050ULL,
0x0000d0d0d0d000d0ULL, 0x00007d7d7d7d007dULL, 0x0000898989890089ULL,
0x0000979797970097ULL, 0x00005b5b5b5b005bULL, 0x0000959595950095ULL,
0x0000ffffffff00ffULL, 0x0000d2d2d2d200d2ULL, 0x0000c4c4c4c400c4ULL,
0x0000484848480048ULL, 0x0000f7f7f7f700f7ULL, 0x0000dbdbdbdb00dbULL,
0x0000030303030003ULL, 0x0000dadadada00daULL, 0x00003f3f3f3f003fULL,
0x0000949494940094ULL, 0x00005c5c5c5c005cULL, 0x0000020202020002ULL,
0x00004a4a4a4a004aULL, 0x0000333333330033ULL, 0x0000676767670067ULL,
0x0000f3f3f3f300f3ULL, 0x00007f7f7f7f007fULL, 0x0000e2e2e2e200e2ULL,
0x00009b9b9b9b009bULL, 0x0000262626260026ULL, 0x0000373737370037ULL,
0x00003b3b3b3b003bULL, 0x0000969696960096ULL, 0x00004b4b4b4b004bULL,
0x0000bebebebe00beULL, 0x00002e2e2e2e002eULL, 0x0000797979790079ULL,
0x00008c8c8c8c008cULL, 0x00006e6e6e6e006eULL, 0x00008e8e8e8e008eULL,
0x0000f5f5f5f500f5ULL, 0x0000b6b6b6b600b6ULL, 0x0000fdfdfdfd00fdULL,
0x0000595959590059ULL, 0x0000989898980098ULL, 0x00006a6a6a6a006aULL,
0x0000464646460046ULL, 0x0000babababa00baULL, 0x0000252525250025ULL,
0x0000424242420042ULL, 0x0000a2a2a2a200a2ULL, 0x0000fafafafa00faULL,
0x0000070707070007ULL, 0x0000555555550055ULL, 0x0000eeeeeeee00eeULL,
0x00000a0a0a0a000aULL, 0x0000494949490049ULL, 0x0000686868680068ULL,
0x0000383838380038ULL, 0x0000a4a4a4a400a4ULL, 0x0000282828280028ULL,
0x00007b7b7b7b007bULL, 0x0000c9c9c9c900c9ULL, 0x0000c1c1c1c100c1ULL,
0x0000e3e3e3e300e3ULL, 0x0000f4f4f4f400f4ULL, 0x0000c7c7c7c700c7ULL,
0x00009e9e9e9e009eULL,
};
__visible const u64 camellia_sp02220222[256] = {
0x00e0e0e000e0e0e0ULL, 0x0005050500050505ULL, 0x0058585800585858ULL,
0x00d9d9d900d9d9d9ULL, 0x0067676700676767ULL, 0x004e4e4e004e4e4eULL,
0x0081818100818181ULL, 0x00cbcbcb00cbcbcbULL, 0x00c9c9c900c9c9c9ULL,
0x000b0b0b000b0b0bULL, 0x00aeaeae00aeaeaeULL, 0x006a6a6a006a6a6aULL,
0x00d5d5d500d5d5d5ULL, 0x0018181800181818ULL, 0x005d5d5d005d5d5dULL,
0x0082828200828282ULL, 0x0046464600464646ULL, 0x00dfdfdf00dfdfdfULL,
0x00d6d6d600d6d6d6ULL, 0x0027272700272727ULL, 0x008a8a8a008a8a8aULL,
0x0032323200323232ULL, 0x004b4b4b004b4b4bULL, 0x0042424200424242ULL,
0x00dbdbdb00dbdbdbULL, 0x001c1c1c001c1c1cULL, 0x009e9e9e009e9e9eULL,
0x009c9c9c009c9c9cULL, 0x003a3a3a003a3a3aULL, 0x00cacaca00cacacaULL,
0x0025252500252525ULL, 0x007b7b7b007b7b7bULL, 0x000d0d0d000d0d0dULL,
0x0071717100717171ULL, 0x005f5f5f005f5f5fULL, 0x001f1f1f001f1f1fULL,
0x00f8f8f800f8f8f8ULL, 0x00d7d7d700d7d7d7ULL, 0x003e3e3e003e3e3eULL,
0x009d9d9d009d9d9dULL, 0x007c7c7c007c7c7cULL, 0x0060606000606060ULL,
0x00b9b9b900b9b9b9ULL, 0x00bebebe00bebebeULL, 0x00bcbcbc00bcbcbcULL,
0x008b8b8b008b8b8bULL, 0x0016161600161616ULL, 0x0034343400343434ULL,
0x004d4d4d004d4d4dULL, 0x00c3c3c300c3c3c3ULL, 0x0072727200727272ULL,
0x0095959500959595ULL, 0x00ababab00abababULL, 0x008e8e8e008e8e8eULL,
0x00bababa00bababaULL, 0x007a7a7a007a7a7aULL, 0x00b3b3b300b3b3b3ULL,
0x0002020200020202ULL, 0x00b4b4b400b4b4b4ULL, 0x00adadad00adadadULL,
0x00a2a2a200a2a2a2ULL, 0x00acacac00acacacULL, 0x00d8d8d800d8d8d8ULL,
0x009a9a9a009a9a9aULL, 0x0017171700171717ULL, 0x001a1a1a001a1a1aULL,
0x0035353500353535ULL, 0x00cccccc00ccccccULL, 0x00f7f7f700f7f7f7ULL,
0x0099999900999999ULL, 0x0061616100616161ULL, 0x005a5a5a005a5a5aULL,
0x00e8e8e800e8e8e8ULL, 0x0024242400242424ULL, 0x0056565600565656ULL,
0x0040404000404040ULL, 0x00e1e1e100e1e1e1ULL, 0x0063636300636363ULL,
0x0009090900090909ULL, 0x0033333300333333ULL, 0x00bfbfbf00bfbfbfULL,
0x0098989800989898ULL, 0x0097979700979797ULL, 0x0085858500858585ULL,
0x0068686800686868ULL, 0x00fcfcfc00fcfcfcULL, 0x00ececec00ecececULL,
0x000a0a0a000a0a0aULL, 0x00dadada00dadadaULL, 0x006f6f6f006f6f6fULL,
0x0053535300535353ULL, 0x0062626200626262ULL, 0x00a3a3a300a3a3a3ULL,
0x002e2e2e002e2e2eULL, 0x0008080800080808ULL, 0x00afafaf00afafafULL,
0x0028282800282828ULL, 0x00b0b0b000b0b0b0ULL, 0x0074747400747474ULL,
0x00c2c2c200c2c2c2ULL, 0x00bdbdbd00bdbdbdULL, 0x0036363600363636ULL,
0x0022222200222222ULL, 0x0038383800383838ULL, 0x0064646400646464ULL,
0x001e1e1e001e1e1eULL, 0x0039393900393939ULL, 0x002c2c2c002c2c2cULL,
0x00a6a6a600a6a6a6ULL, 0x0030303000303030ULL, 0x00e5e5e500e5e5e5ULL,
0x0044444400444444ULL, 0x00fdfdfd00fdfdfdULL, 0x0088888800888888ULL,
0x009f9f9f009f9f9fULL, 0x0065656500656565ULL, 0x0087878700878787ULL,
0x006b6b6b006b6b6bULL, 0x00f4f4f400f4f4f4ULL, 0x0023232300232323ULL,
0x0048484800484848ULL, 0x0010101000101010ULL, 0x00d1d1d100d1d1d1ULL,
0x0051515100515151ULL, 0x00c0c0c000c0c0c0ULL, 0x00f9f9f900f9f9f9ULL,
0x00d2d2d200d2d2d2ULL, 0x00a0a0a000a0a0a0ULL, 0x0055555500555555ULL,
0x00a1a1a100a1a1a1ULL, 0x0041414100414141ULL, 0x00fafafa00fafafaULL,
0x0043434300434343ULL, 0x0013131300131313ULL, 0x00c4c4c400c4c4c4ULL,
0x002f2f2f002f2f2fULL, 0x00a8a8a800a8a8a8ULL, 0x00b6b6b600b6b6b6ULL,
0x003c3c3c003c3c3cULL, 0x002b2b2b002b2b2bULL, 0x00c1c1c100c1c1c1ULL,
0x00ffffff00ffffffULL, 0x00c8c8c800c8c8c8ULL, 0x00a5a5a500a5a5a5ULL,
0x0020202000202020ULL, 0x0089898900898989ULL, 0x0000000000000000ULL,
0x0090909000909090ULL, 0x0047474700474747ULL, 0x00efefef00efefefULL,
0x00eaeaea00eaeaeaULL, 0x00b7b7b700b7b7b7ULL, 0x0015151500151515ULL,
0x0006060600060606ULL, 0x00cdcdcd00cdcdcdULL, 0x00b5b5b500b5b5b5ULL,
0x0012121200121212ULL, 0x007e7e7e007e7e7eULL, 0x00bbbbbb00bbbbbbULL,
0x0029292900292929ULL, 0x000f0f0f000f0f0fULL, 0x00b8b8b800b8b8b8ULL,
0x0007070700070707ULL, 0x0004040400040404ULL, 0x009b9b9b009b9b9bULL,
0x0094949400949494ULL, 0x0021212100212121ULL, 0x0066666600666666ULL,
0x00e6e6e600e6e6e6ULL, 0x00cecece00cececeULL, 0x00ededed00edededULL,
0x00e7e7e700e7e7e7ULL, 0x003b3b3b003b3b3bULL, 0x00fefefe00fefefeULL,
0x007f7f7f007f7f7fULL, 0x00c5c5c500c5c5c5ULL, 0x00a4a4a400a4a4a4ULL,
0x0037373700373737ULL, 0x00b1b1b100b1b1b1ULL, 0x004c4c4c004c4c4cULL,
0x0091919100919191ULL, 0x006e6e6e006e6e6eULL, 0x008d8d8d008d8d8dULL,
0x0076767600767676ULL, 0x0003030300030303ULL, 0x002d2d2d002d2d2dULL,
0x00dedede00dededeULL, 0x0096969600969696ULL, 0x0026262600262626ULL,
0x007d7d7d007d7d7dULL, 0x00c6c6c600c6c6c6ULL, 0x005c5c5c005c5c5cULL,
0x00d3d3d300d3d3d3ULL, 0x00f2f2f200f2f2f2ULL, 0x004f4f4f004f4f4fULL,
0x0019191900191919ULL, 0x003f3f3f003f3f3fULL, 0x00dcdcdc00dcdcdcULL,
0x0079797900797979ULL, 0x001d1d1d001d1d1dULL, 0x0052525200525252ULL,
0x00ebebeb00ebebebULL, 0x00f3f3f300f3f3f3ULL, 0x006d6d6d006d6d6dULL,
0x005e5e5e005e5e5eULL, 0x00fbfbfb00fbfbfbULL, 0x0069696900696969ULL,
0x00b2b2b200b2b2b2ULL, 0x00f0f0f000f0f0f0ULL, 0x0031313100313131ULL,
0x000c0c0c000c0c0cULL, 0x00d4d4d400d4d4d4ULL, 0x00cfcfcf00cfcfcfULL,
0x008c8c8c008c8c8cULL, 0x00e2e2e200e2e2e2ULL, 0x0075757500757575ULL,
0x00a9a9a900a9a9a9ULL, 0x004a4a4a004a4a4aULL, 0x0057575700575757ULL,
0x0084848400848484ULL, 0x0011111100111111ULL, 0x0045454500454545ULL,
0x001b1b1b001b1b1bULL, 0x00f5f5f500f5f5f5ULL, 0x00e4e4e400e4e4e4ULL,
0x000e0e0e000e0e0eULL, 0x0073737300737373ULL, 0x00aaaaaa00aaaaaaULL,
0x00f1f1f100f1f1f1ULL, 0x00dddddd00ddddddULL, 0x0059595900595959ULL,
0x0014141400141414ULL, 0x006c6c6c006c6c6cULL, 0x0092929200929292ULL,
0x0054545400545454ULL, 0x00d0d0d000d0d0d0ULL, 0x0078787800787878ULL,
0x0070707000707070ULL, 0x00e3e3e300e3e3e3ULL, 0x0049494900494949ULL,
0x0080808000808080ULL, 0x0050505000505050ULL, 0x00a7a7a700a7a7a7ULL,
0x00f6f6f600f6f6f6ULL, 0x0077777700777777ULL, 0x0093939300939393ULL,
0x0086868600868686ULL, 0x0083838300838383ULL, 0x002a2a2a002a2a2aULL,
0x00c7c7c700c7c7c7ULL, 0x005b5b5b005b5b5bULL, 0x00e9e9e900e9e9e9ULL,
0x00eeeeee00eeeeeeULL, 0x008f8f8f008f8f8fULL, 0x0001010100010101ULL,
0x003d3d3d003d3d3dULL,
};
__visible const u64 camellia_sp30333033[256] = {
0x3800383838003838ULL, 0x4100414141004141ULL, 0x1600161616001616ULL,
0x7600767676007676ULL, 0xd900d9d9d900d9d9ULL, 0x9300939393009393ULL,
0x6000606060006060ULL, 0xf200f2f2f200f2f2ULL, 0x7200727272007272ULL,
0xc200c2c2c200c2c2ULL, 0xab00ababab00ababULL, 0x9a009a9a9a009a9aULL,
0x7500757575007575ULL, 0x0600060606000606ULL, 0x5700575757005757ULL,
0xa000a0a0a000a0a0ULL, 0x9100919191009191ULL, 0xf700f7f7f700f7f7ULL,
0xb500b5b5b500b5b5ULL, 0xc900c9c9c900c9c9ULL, 0xa200a2a2a200a2a2ULL,
0x8c008c8c8c008c8cULL, 0xd200d2d2d200d2d2ULL, 0x9000909090009090ULL,
0xf600f6f6f600f6f6ULL, 0x0700070707000707ULL, 0xa700a7a7a700a7a7ULL,
0x2700272727002727ULL, 0x8e008e8e8e008e8eULL, 0xb200b2b2b200b2b2ULL,
0x4900494949004949ULL, 0xde00dedede00dedeULL, 0x4300434343004343ULL,
0x5c005c5c5c005c5cULL, 0xd700d7d7d700d7d7ULL, 0xc700c7c7c700c7c7ULL,
0x3e003e3e3e003e3eULL, 0xf500f5f5f500f5f5ULL, 0x8f008f8f8f008f8fULL,
0x6700676767006767ULL, 0x1f001f1f1f001f1fULL, 0x1800181818001818ULL,
0x6e006e6e6e006e6eULL, 0xaf00afafaf00afafULL, 0x2f002f2f2f002f2fULL,
0xe200e2e2e200e2e2ULL, 0x8500858585008585ULL, 0x0d000d0d0d000d0dULL,
0x5300535353005353ULL, 0xf000f0f0f000f0f0ULL, 0x9c009c9c9c009c9cULL,
0x6500656565006565ULL, 0xea00eaeaea00eaeaULL, 0xa300a3a3a300a3a3ULL,
0xae00aeaeae00aeaeULL, 0x9e009e9e9e009e9eULL, 0xec00ececec00ececULL,
0x8000808080008080ULL, 0x2d002d2d2d002d2dULL, 0x6b006b6b6b006b6bULL,
0xa800a8a8a800a8a8ULL, 0x2b002b2b2b002b2bULL, 0x3600363636003636ULL,
0xa600a6a6a600a6a6ULL, 0xc500c5c5c500c5c5ULL, 0x8600868686008686ULL,
0x4d004d4d4d004d4dULL, 0x3300333333003333ULL, 0xfd00fdfdfd00fdfdULL,
0x6600666666006666ULL, 0x5800585858005858ULL, 0x9600969696009696ULL,
0x3a003a3a3a003a3aULL, 0x0900090909000909ULL, 0x9500959595009595ULL,
0x1000101010001010ULL, 0x7800787878007878ULL, 0xd800d8d8d800d8d8ULL,
0x4200424242004242ULL, 0xcc00cccccc00ccccULL, 0xef00efefef00efefULL,
0x2600262626002626ULL, 0xe500e5e5e500e5e5ULL, 0x6100616161006161ULL,
0x1a001a1a1a001a1aULL, 0x3f003f3f3f003f3fULL, 0x3b003b3b3b003b3bULL,
0x8200828282008282ULL, 0xb600b6b6b600b6b6ULL, 0xdb00dbdbdb00dbdbULL,
0xd400d4d4d400d4d4ULL, 0x9800989898009898ULL, 0xe800e8e8e800e8e8ULL,
0x8b008b8b8b008b8bULL, 0x0200020202000202ULL, 0xeb00ebebeb00ebebULL,
0x0a000a0a0a000a0aULL, 0x2c002c2c2c002c2cULL, 0x1d001d1d1d001d1dULL,
0xb000b0b0b000b0b0ULL, 0x6f006f6f6f006f6fULL, 0x8d008d8d8d008d8dULL,
0x8800888888008888ULL, 0x0e000e0e0e000e0eULL, 0x1900191919001919ULL,
0x8700878787008787ULL, 0x4e004e4e4e004e4eULL, 0x0b000b0b0b000b0bULL,
0xa900a9a9a900a9a9ULL, 0x0c000c0c0c000c0cULL, 0x7900797979007979ULL,
0x1100111111001111ULL, 0x7f007f7f7f007f7fULL, 0x2200222222002222ULL,
0xe700e7e7e700e7e7ULL, 0x5900595959005959ULL, 0xe100e1e1e100e1e1ULL,
0xda00dadada00dadaULL, 0x3d003d3d3d003d3dULL, 0xc800c8c8c800c8c8ULL,
0x1200121212001212ULL, 0x0400040404000404ULL, 0x7400747474007474ULL,
0x5400545454005454ULL, 0x3000303030003030ULL, 0x7e007e7e7e007e7eULL,
0xb400b4b4b400b4b4ULL, 0x2800282828002828ULL, 0x5500555555005555ULL,
0x6800686868006868ULL, 0x5000505050005050ULL, 0xbe00bebebe00bebeULL,
0xd000d0d0d000d0d0ULL, 0xc400c4c4c400c4c4ULL, 0x3100313131003131ULL,
0xcb00cbcbcb00cbcbULL, 0x2a002a2a2a002a2aULL, 0xad00adadad00adadULL,
0x0f000f0f0f000f0fULL, 0xca00cacaca00cacaULL, 0x7000707070007070ULL,
0xff00ffffff00ffffULL, 0x3200323232003232ULL, 0x6900696969006969ULL,
0x0800080808000808ULL, 0x6200626262006262ULL, 0x0000000000000000ULL,
0x2400242424002424ULL, 0xd100d1d1d100d1d1ULL, 0xfb00fbfbfb00fbfbULL,
0xba00bababa00babaULL, 0xed00ededed00ededULL, 0x4500454545004545ULL,
0x8100818181008181ULL, 0x7300737373007373ULL, 0x6d006d6d6d006d6dULL,
0x8400848484008484ULL, 0x9f009f9f9f009f9fULL, 0xee00eeeeee00eeeeULL,
0x4a004a4a4a004a4aULL, 0xc300c3c3c300c3c3ULL, 0x2e002e2e2e002e2eULL,
0xc100c1c1c100c1c1ULL, 0x0100010101000101ULL, 0xe600e6e6e600e6e6ULL,
0x2500252525002525ULL, 0x4800484848004848ULL, 0x9900999999009999ULL,
0xb900b9b9b900b9b9ULL, 0xb300b3b3b300b3b3ULL, 0x7b007b7b7b007b7bULL,
0xf900f9f9f900f9f9ULL, 0xce00cecece00ceceULL, 0xbf00bfbfbf00bfbfULL,
0xdf00dfdfdf00dfdfULL, 0x7100717171007171ULL, 0x2900292929002929ULL,
0xcd00cdcdcd00cdcdULL, 0x6c006c6c6c006c6cULL, 0x1300131313001313ULL,
0x6400646464006464ULL, 0x9b009b9b9b009b9bULL, 0x6300636363006363ULL,
0x9d009d9d9d009d9dULL, 0xc000c0c0c000c0c0ULL, 0x4b004b4b4b004b4bULL,
0xb700b7b7b700b7b7ULL, 0xa500a5a5a500a5a5ULL, 0x8900898989008989ULL,
0x5f005f5f5f005f5fULL, 0xb100b1b1b100b1b1ULL, 0x1700171717001717ULL,
0xf400f4f4f400f4f4ULL, 0xbc00bcbcbc00bcbcULL, 0xd300d3d3d300d3d3ULL,
0x4600464646004646ULL, 0xcf00cfcfcf00cfcfULL, 0x3700373737003737ULL,
0x5e005e5e5e005e5eULL, 0x4700474747004747ULL, 0x9400949494009494ULL,
0xfa00fafafa00fafaULL, 0xfc00fcfcfc00fcfcULL, 0x5b005b5b5b005b5bULL,
0x9700979797009797ULL, 0xfe00fefefe00fefeULL, 0x5a005a5a5a005a5aULL,
0xac00acacac00acacULL, 0x3c003c3c3c003c3cULL, 0x4c004c4c4c004c4cULL,
0x0300030303000303ULL, 0x3500353535003535ULL, 0xf300f3f3f300f3f3ULL,
0x2300232323002323ULL, 0xb800b8b8b800b8b8ULL, 0x5d005d5d5d005d5dULL,
0x6a006a6a6a006a6aULL, 0x9200929292009292ULL, 0xd500d5d5d500d5d5ULL,
0x2100212121002121ULL, 0x4400444444004444ULL, 0x5100515151005151ULL,
0xc600c6c6c600c6c6ULL, 0x7d007d7d7d007d7dULL, 0x3900393939003939ULL,
0x8300838383008383ULL, 0xdc00dcdcdc00dcdcULL, 0xaa00aaaaaa00aaaaULL,
0x7c007c7c7c007c7cULL, 0x7700777777007777ULL, 0x5600565656005656ULL,
0x0500050505000505ULL, 0x1b001b1b1b001b1bULL, 0xa400a4a4a400a4a4ULL,
0x1500151515001515ULL, 0x3400343434003434ULL, 0x1e001e1e1e001e1eULL,
0x1c001c1c1c001c1cULL, 0xf800f8f8f800f8f8ULL, 0x5200525252005252ULL,
0x2000202020002020ULL, 0x1400141414001414ULL, 0xe900e9e9e900e9e9ULL,
0xbd00bdbdbd00bdbdULL, 0xdd00dddddd00ddddULL, 0xe400e4e4e400e4e4ULL,
0xa100a1a1a100a1a1ULL, 0xe000e0e0e000e0e0ULL, 0x8a008a8a8a008a8aULL,
0xf100f1f1f100f1f1ULL, 0xd600d6d6d600d6d6ULL, 0x7a007a7a7a007a7aULL,
0xbb00bbbbbb00bbbbULL, 0xe300e3e3e300e3e3ULL, 0x4000404040004040ULL,
0x4f004f4f4f004f4fULL,
};
__visible const u64 camellia_sp44044404[256] = {
0x7070007070700070ULL, 0x2c2c002c2c2c002cULL, 0xb3b300b3b3b300b3ULL,
0xc0c000c0c0c000c0ULL, 0xe4e400e4e4e400e4ULL, 0x5757005757570057ULL,
0xeaea00eaeaea00eaULL, 0xaeae00aeaeae00aeULL, 0x2323002323230023ULL,
0x6b6b006b6b6b006bULL, 0x4545004545450045ULL, 0xa5a500a5a5a500a5ULL,
0xeded00ededed00edULL, 0x4f4f004f4f4f004fULL, 0x1d1d001d1d1d001dULL,
0x9292009292920092ULL, 0x8686008686860086ULL, 0xafaf00afafaf00afULL,
0x7c7c007c7c7c007cULL, 0x1f1f001f1f1f001fULL, 0x3e3e003e3e3e003eULL,
0xdcdc00dcdcdc00dcULL, 0x5e5e005e5e5e005eULL, 0x0b0b000b0b0b000bULL,
0xa6a600a6a6a600a6ULL, 0x3939003939390039ULL, 0xd5d500d5d5d500d5ULL,
0x5d5d005d5d5d005dULL, 0xd9d900d9d9d900d9ULL, 0x5a5a005a5a5a005aULL,
0x5151005151510051ULL, 0x6c6c006c6c6c006cULL, 0x8b8b008b8b8b008bULL,
0x9a9a009a9a9a009aULL, 0xfbfb00fbfbfb00fbULL, 0xb0b000b0b0b000b0ULL,
0x7474007474740074ULL, 0x2b2b002b2b2b002bULL, 0xf0f000f0f0f000f0ULL,
0x8484008484840084ULL, 0xdfdf00dfdfdf00dfULL, 0xcbcb00cbcbcb00cbULL,
0x3434003434340034ULL, 0x7676007676760076ULL, 0x6d6d006d6d6d006dULL,
0xa9a900a9a9a900a9ULL, 0xd1d100d1d1d100d1ULL, 0x0404000404040004ULL,
0x1414001414140014ULL, 0x3a3a003a3a3a003aULL, 0xdede00dedede00deULL,
0x1111001111110011ULL, 0x3232003232320032ULL, 0x9c9c009c9c9c009cULL,
0x5353005353530053ULL, 0xf2f200f2f2f200f2ULL, 0xfefe00fefefe00feULL,
0xcfcf00cfcfcf00cfULL, 0xc3c300c3c3c300c3ULL, 0x7a7a007a7a7a007aULL,
0x2424002424240024ULL, 0xe8e800e8e8e800e8ULL, 0x6060006060600060ULL,
0x6969006969690069ULL, 0xaaaa00aaaaaa00aaULL, 0xa0a000a0a0a000a0ULL,
0xa1a100a1a1a100a1ULL, 0x6262006262620062ULL, 0x5454005454540054ULL,
0x1e1e001e1e1e001eULL, 0xe0e000e0e0e000e0ULL, 0x6464006464640064ULL,
0x1010001010100010ULL, 0x0000000000000000ULL, 0xa3a300a3a3a300a3ULL,
0x7575007575750075ULL, 0x8a8a008a8a8a008aULL, 0xe6e600e6e6e600e6ULL,
0x0909000909090009ULL, 0xdddd00dddddd00ddULL, 0x8787008787870087ULL,
0x8383008383830083ULL, 0xcdcd00cdcdcd00cdULL, 0x9090009090900090ULL,
0x7373007373730073ULL, 0xf6f600f6f6f600f6ULL, 0x9d9d009d9d9d009dULL,
0xbfbf00bfbfbf00bfULL, 0x5252005252520052ULL, 0xd8d800d8d8d800d8ULL,
0xc8c800c8c8c800c8ULL, 0xc6c600c6c6c600c6ULL, 0x8181008181810081ULL,
0x6f6f006f6f6f006fULL, 0x1313001313130013ULL, 0x6363006363630063ULL,
0xe9e900e9e9e900e9ULL, 0xa7a700a7a7a700a7ULL, 0x9f9f009f9f9f009fULL,
0xbcbc00bcbcbc00bcULL, 0x2929002929290029ULL, 0xf9f900f9f9f900f9ULL,
0x2f2f002f2f2f002fULL, 0xb4b400b4b4b400b4ULL, 0x7878007878780078ULL,
0x0606000606060006ULL, 0xe7e700e7e7e700e7ULL, 0x7171007171710071ULL,
0xd4d400d4d4d400d4ULL, 0xabab00ababab00abULL, 0x8888008888880088ULL,
0x8d8d008d8d8d008dULL, 0x7272007272720072ULL, 0xb9b900b9b9b900b9ULL,
0xf8f800f8f8f800f8ULL, 0xacac00acacac00acULL, 0x3636003636360036ULL,
0x2a2a002a2a2a002aULL, 0x3c3c003c3c3c003cULL, 0xf1f100f1f1f100f1ULL,
0x4040004040400040ULL, 0xd3d300d3d3d300d3ULL, 0xbbbb00bbbbbb00bbULL,
0x4343004343430043ULL, 0x1515001515150015ULL, 0xadad00adadad00adULL,
0x7777007777770077ULL, 0x8080008080800080ULL, 0x8282008282820082ULL,
0xecec00ececec00ecULL, 0x2727002727270027ULL, 0xe5e500e5e5e500e5ULL,
0x8585008585850085ULL, 0x3535003535350035ULL, 0x0c0c000c0c0c000cULL,
0x4141004141410041ULL, 0xefef00efefef00efULL, 0x9393009393930093ULL,
0x1919001919190019ULL, 0x2121002121210021ULL, 0x0e0e000e0e0e000eULL,
0x4e4e004e4e4e004eULL, 0x6565006565650065ULL, 0xbdbd00bdbdbd00bdULL,
0xb8b800b8b8b800b8ULL, 0x8f8f008f8f8f008fULL, 0xebeb00ebebeb00ebULL,
0xcece00cecece00ceULL, 0x3030003030300030ULL, 0x5f5f005f5f5f005fULL,
0xc5c500c5c5c500c5ULL, 0x1a1a001a1a1a001aULL, 0xe1e100e1e1e100e1ULL,
0xcaca00cacaca00caULL, 0x4747004747470047ULL, 0x3d3d003d3d3d003dULL,
0x0101000101010001ULL, 0xd6d600d6d6d600d6ULL, 0x5656005656560056ULL,
0x4d4d004d4d4d004dULL, 0x0d0d000d0d0d000dULL, 0x6666006666660066ULL,
0xcccc00cccccc00ccULL, 0x2d2d002d2d2d002dULL, 0x1212001212120012ULL,
0x2020002020200020ULL, 0xb1b100b1b1b100b1ULL, 0x9999009999990099ULL,
0x4c4c004c4c4c004cULL, 0xc2c200c2c2c200c2ULL, 0x7e7e007e7e7e007eULL,
0x0505000505050005ULL, 0xb7b700b7b7b700b7ULL, 0x3131003131310031ULL,
0x1717001717170017ULL, 0xd7d700d7d7d700d7ULL, 0x5858005858580058ULL,
0x6161006161610061ULL, 0x1b1b001b1b1b001bULL, 0x1c1c001c1c1c001cULL,
0x0f0f000f0f0f000fULL, 0x1616001616160016ULL, 0x1818001818180018ULL,
0x2222002222220022ULL, 0x4444004444440044ULL, 0xb2b200b2b2b200b2ULL,
0xb5b500b5b5b500b5ULL, 0x9191009191910091ULL, 0x0808000808080008ULL,
0xa8a800a8a8a800a8ULL, 0xfcfc00fcfcfc00fcULL, 0x5050005050500050ULL,
0xd0d000d0d0d000d0ULL, 0x7d7d007d7d7d007dULL, 0x8989008989890089ULL,
0x9797009797970097ULL, 0x5b5b005b5b5b005bULL, 0x9595009595950095ULL,
0xffff00ffffff00ffULL, 0xd2d200d2d2d200d2ULL, 0xc4c400c4c4c400c4ULL,
0x4848004848480048ULL, 0xf7f700f7f7f700f7ULL, 0xdbdb00dbdbdb00dbULL,
0x0303000303030003ULL, 0xdada00dadada00daULL, 0x3f3f003f3f3f003fULL,
0x9494009494940094ULL, 0x5c5c005c5c5c005cULL, 0x0202000202020002ULL,
0x4a4a004a4a4a004aULL, 0x3333003333330033ULL, 0x6767006767670067ULL,
0xf3f300f3f3f300f3ULL, 0x7f7f007f7f7f007fULL, 0xe2e200e2e2e200e2ULL,
0x9b9b009b9b9b009bULL, 0x2626002626260026ULL, 0x3737003737370037ULL,
0x3b3b003b3b3b003bULL, 0x9696009696960096ULL, 0x4b4b004b4b4b004bULL,
0xbebe00bebebe00beULL, 0x2e2e002e2e2e002eULL, 0x7979007979790079ULL,
0x8c8c008c8c8c008cULL, 0x6e6e006e6e6e006eULL, 0x8e8e008e8e8e008eULL,
0xf5f500f5f5f500f5ULL, 0xb6b600b6b6b600b6ULL, 0xfdfd00fdfdfd00fdULL,
0x5959005959590059ULL, 0x9898009898980098ULL, 0x6a6a006a6a6a006aULL,
0x4646004646460046ULL, 0xbaba00bababa00baULL, 0x2525002525250025ULL,
0x4242004242420042ULL, 0xa2a200a2a2a200a2ULL, 0xfafa00fafafa00faULL,
0x0707000707070007ULL, 0x5555005555550055ULL, 0xeeee00eeeeee00eeULL,
0x0a0a000a0a0a000aULL, 0x4949004949490049ULL, 0x6868006868680068ULL,
0x3838003838380038ULL, 0xa4a400a4a4a400a4ULL, 0x2828002828280028ULL,
0x7b7b007b7b7b007bULL, 0xc9c900c9c9c900c9ULL, 0xc1c100c1c1c100c1ULL,
0xe3e300e3e3e300e3ULL, 0xf4f400f4f4f400f4ULL, 0xc7c700c7c7c700c7ULL,
0x9e9e009e9e9e009eULL,
};
__visible const u64 camellia_sp11101110[256] = {
0x7070700070707000ULL, 0x8282820082828200ULL, 0x2c2c2c002c2c2c00ULL,
0xececec00ececec00ULL, 0xb3b3b300b3b3b300ULL, 0x2727270027272700ULL,
0xc0c0c000c0c0c000ULL, 0xe5e5e500e5e5e500ULL, 0xe4e4e400e4e4e400ULL,
0x8585850085858500ULL, 0x5757570057575700ULL, 0x3535350035353500ULL,
0xeaeaea00eaeaea00ULL, 0x0c0c0c000c0c0c00ULL, 0xaeaeae00aeaeae00ULL,
0x4141410041414100ULL, 0x2323230023232300ULL, 0xefefef00efefef00ULL,
0x6b6b6b006b6b6b00ULL, 0x9393930093939300ULL, 0x4545450045454500ULL,
0x1919190019191900ULL, 0xa5a5a500a5a5a500ULL, 0x2121210021212100ULL,
0xededed00ededed00ULL, 0x0e0e0e000e0e0e00ULL, 0x4f4f4f004f4f4f00ULL,
0x4e4e4e004e4e4e00ULL, 0x1d1d1d001d1d1d00ULL, 0x6565650065656500ULL,
0x9292920092929200ULL, 0xbdbdbd00bdbdbd00ULL, 0x8686860086868600ULL,
0xb8b8b800b8b8b800ULL, 0xafafaf00afafaf00ULL, 0x8f8f8f008f8f8f00ULL,
0x7c7c7c007c7c7c00ULL, 0xebebeb00ebebeb00ULL, 0x1f1f1f001f1f1f00ULL,
0xcecece00cecece00ULL, 0x3e3e3e003e3e3e00ULL, 0x3030300030303000ULL,
0xdcdcdc00dcdcdc00ULL, 0x5f5f5f005f5f5f00ULL, 0x5e5e5e005e5e5e00ULL,
0xc5c5c500c5c5c500ULL, 0x0b0b0b000b0b0b00ULL, 0x1a1a1a001a1a1a00ULL,
0xa6a6a600a6a6a600ULL, 0xe1e1e100e1e1e100ULL, 0x3939390039393900ULL,
0xcacaca00cacaca00ULL, 0xd5d5d500d5d5d500ULL, 0x4747470047474700ULL,
0x5d5d5d005d5d5d00ULL, 0x3d3d3d003d3d3d00ULL, 0xd9d9d900d9d9d900ULL,
0x0101010001010100ULL, 0x5a5a5a005a5a5a00ULL, 0xd6d6d600d6d6d600ULL,
0x5151510051515100ULL, 0x5656560056565600ULL, 0x6c6c6c006c6c6c00ULL,
0x4d4d4d004d4d4d00ULL, 0x8b8b8b008b8b8b00ULL, 0x0d0d0d000d0d0d00ULL,
0x9a9a9a009a9a9a00ULL, 0x6666660066666600ULL, 0xfbfbfb00fbfbfb00ULL,
0xcccccc00cccccc00ULL, 0xb0b0b000b0b0b000ULL, 0x2d2d2d002d2d2d00ULL,
0x7474740074747400ULL, 0x1212120012121200ULL, 0x2b2b2b002b2b2b00ULL,
0x2020200020202000ULL, 0xf0f0f000f0f0f000ULL, 0xb1b1b100b1b1b100ULL,
0x8484840084848400ULL, 0x9999990099999900ULL, 0xdfdfdf00dfdfdf00ULL,
0x4c4c4c004c4c4c00ULL, 0xcbcbcb00cbcbcb00ULL, 0xc2c2c200c2c2c200ULL,
0x3434340034343400ULL, 0x7e7e7e007e7e7e00ULL, 0x7676760076767600ULL,
0x0505050005050500ULL, 0x6d6d6d006d6d6d00ULL, 0xb7b7b700b7b7b700ULL,
0xa9a9a900a9a9a900ULL, 0x3131310031313100ULL, 0xd1d1d100d1d1d100ULL,
0x1717170017171700ULL, 0x0404040004040400ULL, 0xd7d7d700d7d7d700ULL,
0x1414140014141400ULL, 0x5858580058585800ULL, 0x3a3a3a003a3a3a00ULL,
0x6161610061616100ULL, 0xdedede00dedede00ULL, 0x1b1b1b001b1b1b00ULL,
0x1111110011111100ULL, 0x1c1c1c001c1c1c00ULL, 0x3232320032323200ULL,
0x0f0f0f000f0f0f00ULL, 0x9c9c9c009c9c9c00ULL, 0x1616160016161600ULL,
0x5353530053535300ULL, 0x1818180018181800ULL, 0xf2f2f200f2f2f200ULL,
0x2222220022222200ULL, 0xfefefe00fefefe00ULL, 0x4444440044444400ULL,
0xcfcfcf00cfcfcf00ULL, 0xb2b2b200b2b2b200ULL, 0xc3c3c300c3c3c300ULL,
0xb5b5b500b5b5b500ULL, 0x7a7a7a007a7a7a00ULL, 0x9191910091919100ULL,
0x2424240024242400ULL, 0x0808080008080800ULL, 0xe8e8e800e8e8e800ULL,
0xa8a8a800a8a8a800ULL, 0x6060600060606000ULL, 0xfcfcfc00fcfcfc00ULL,
0x6969690069696900ULL, 0x5050500050505000ULL, 0xaaaaaa00aaaaaa00ULL,
0xd0d0d000d0d0d000ULL, 0xa0a0a000a0a0a000ULL, 0x7d7d7d007d7d7d00ULL,
0xa1a1a100a1a1a100ULL, 0x8989890089898900ULL, 0x6262620062626200ULL,
0x9797970097979700ULL, 0x5454540054545400ULL, 0x5b5b5b005b5b5b00ULL,
0x1e1e1e001e1e1e00ULL, 0x9595950095959500ULL, 0xe0e0e000e0e0e000ULL,
0xffffff00ffffff00ULL, 0x6464640064646400ULL, 0xd2d2d200d2d2d200ULL,
0x1010100010101000ULL, 0xc4c4c400c4c4c400ULL, 0x0000000000000000ULL,
0x4848480048484800ULL, 0xa3a3a300a3a3a300ULL, 0xf7f7f700f7f7f700ULL,
0x7575750075757500ULL, 0xdbdbdb00dbdbdb00ULL, 0x8a8a8a008a8a8a00ULL,
0x0303030003030300ULL, 0xe6e6e600e6e6e600ULL, 0xdadada00dadada00ULL,
0x0909090009090900ULL, 0x3f3f3f003f3f3f00ULL, 0xdddddd00dddddd00ULL,
0x9494940094949400ULL, 0x8787870087878700ULL, 0x5c5c5c005c5c5c00ULL,
0x8383830083838300ULL, 0x0202020002020200ULL, 0xcdcdcd00cdcdcd00ULL,
0x4a4a4a004a4a4a00ULL, 0x9090900090909000ULL, 0x3333330033333300ULL,
0x7373730073737300ULL, 0x6767670067676700ULL, 0xf6f6f600f6f6f600ULL,
0xf3f3f300f3f3f300ULL, 0x9d9d9d009d9d9d00ULL, 0x7f7f7f007f7f7f00ULL,
0xbfbfbf00bfbfbf00ULL, 0xe2e2e200e2e2e200ULL, 0x5252520052525200ULL,
0x9b9b9b009b9b9b00ULL, 0xd8d8d800d8d8d800ULL, 0x2626260026262600ULL,
0xc8c8c800c8c8c800ULL, 0x3737370037373700ULL, 0xc6c6c600c6c6c600ULL,
0x3b3b3b003b3b3b00ULL, 0x8181810081818100ULL, 0x9696960096969600ULL,
0x6f6f6f006f6f6f00ULL, 0x4b4b4b004b4b4b00ULL, 0x1313130013131300ULL,
0xbebebe00bebebe00ULL, 0x6363630063636300ULL, 0x2e2e2e002e2e2e00ULL,
0xe9e9e900e9e9e900ULL, 0x7979790079797900ULL, 0xa7a7a700a7a7a700ULL,
0x8c8c8c008c8c8c00ULL, 0x9f9f9f009f9f9f00ULL, 0x6e6e6e006e6e6e00ULL,
0xbcbcbc00bcbcbc00ULL, 0x8e8e8e008e8e8e00ULL, 0x2929290029292900ULL,
0xf5f5f500f5f5f500ULL, 0xf9f9f900f9f9f900ULL, 0xb6b6b600b6b6b600ULL,
0x2f2f2f002f2f2f00ULL, 0xfdfdfd00fdfdfd00ULL, 0xb4b4b400b4b4b400ULL,
0x5959590059595900ULL, 0x7878780078787800ULL, 0x9898980098989800ULL,
0x0606060006060600ULL, 0x6a6a6a006a6a6a00ULL, 0xe7e7e700e7e7e700ULL,
0x4646460046464600ULL, 0x7171710071717100ULL, 0xbababa00bababa00ULL,
0xd4d4d400d4d4d400ULL, 0x2525250025252500ULL, 0xababab00ababab00ULL,
0x4242420042424200ULL, 0x8888880088888800ULL, 0xa2a2a200a2a2a200ULL,
0x8d8d8d008d8d8d00ULL, 0xfafafa00fafafa00ULL, 0x7272720072727200ULL,
0x0707070007070700ULL, 0xb9b9b900b9b9b900ULL, 0x5555550055555500ULL,
0xf8f8f800f8f8f800ULL, 0xeeeeee00eeeeee00ULL, 0xacacac00acacac00ULL,
0x0a0a0a000a0a0a00ULL, 0x3636360036363600ULL, 0x4949490049494900ULL,
0x2a2a2a002a2a2a00ULL, 0x6868680068686800ULL, 0x3c3c3c003c3c3c00ULL,
0x3838380038383800ULL, 0xf1f1f100f1f1f100ULL, 0xa4a4a400a4a4a400ULL,
0x4040400040404000ULL, 0x2828280028282800ULL, 0xd3d3d300d3d3d300ULL,
0x7b7b7b007b7b7b00ULL, 0xbbbbbb00bbbbbb00ULL, 0xc9c9c900c9c9c900ULL,
0x4343430043434300ULL, 0xc1c1c100c1c1c100ULL, 0x1515150015151500ULL,
0xe3e3e300e3e3e300ULL, 0xadadad00adadad00ULL, 0xf4f4f400f4f4f400ULL,
0x7777770077777700ULL, 0xc7c7c700c7c7c700ULL, 0x8080800080808000ULL,
0x9e9e9e009e9e9e00ULL,
};
/* key constants */
#define CAMELLIA_SIGMA1L (0xA09E667FL)
#define CAMELLIA_SIGMA1R (0x3BCC908BL)
#define CAMELLIA_SIGMA2L (0xB67AE858L)
#define CAMELLIA_SIGMA2R (0x4CAA73B2L)
#define CAMELLIA_SIGMA3L (0xC6EF372FL)
#define CAMELLIA_SIGMA3R (0xE94F82BEL)
#define CAMELLIA_SIGMA4L (0x54FF53A5L)
#define CAMELLIA_SIGMA4R (0xF1D36F1CL)
#define CAMELLIA_SIGMA5L (0x10E527FAL)
#define CAMELLIA_SIGMA5R (0xDE682D1DL)
#define CAMELLIA_SIGMA6L (0xB05688C2L)
#define CAMELLIA_SIGMA6R (0xB3E6C1FDL)
/* macros */
#define ROLDQ(l, r, bits) ({ \
u64 t = l; \
l = (l << bits) | (r >> (64 - bits)); \
r = (r << bits) | (t >> (64 - bits)); \
})
#define CAMELLIA_F(x, kl, kr, y) ({ \
u64 ii = x ^ (((u64)kl << 32) | kr); \
y = camellia_sp11101110[(uint8_t)ii]; \
y ^= camellia_sp44044404[(uint8_t)(ii >> 8)]; \
ii >>= 16; \
y ^= camellia_sp30333033[(uint8_t)ii]; \
y ^= camellia_sp02220222[(uint8_t)(ii >> 8)]; \
ii >>= 16; \
y ^= camellia_sp00444404[(uint8_t)ii]; \
y ^= camellia_sp03303033[(uint8_t)(ii >> 8)]; \
ii >>= 16; \
y ^= camellia_sp22000222[(uint8_t)ii]; \
y ^= camellia_sp10011110[(uint8_t)(ii >> 8)]; \
y = ror64(y, 32); \
})
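/*
 * Illustrative sketch (editorial, not part of the original file):
 * ROLDQ() rotates the 128-bit quantity (l || r) left by 'bits'
 * (0 < bits < 64) using only 64-bit operations. For example, with
 * bits == 8:
 *
 *	u64 l = 0x0011223344556677ULL;
 *	u64 r = 0x8899aabbccddeeffULL;
 *
 *	ROLDQ(l, r, 8);
 *	// l == 0x1122334455667788ULL (top byte of old r shifted in)
 *	// r == 0x99aabbccddeeff00ULL (top byte of old l shifted in)
 *
 * CAMELLIA_F() implements the Camellia F-function: the 64-bit input is
 * XORed with the round subkey, each of its eight bytes indexes one of
 * the combined S-box/P-layer tables above, and the eight lookups are
 * XORed together.
 */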
#define SET_SUBKEY_LR(INDEX, sRL) (subkey[(INDEX)] = ror64((sRL), 32))
static void camellia_setup_tail(u64 *subkey, u64 *subRL, int max)
{
u64 kw4, tt;
u32 dw, tl, tr;
/* absorb kw2 into the other subkeys */
/* round 2 */
subRL[3] ^= subRL[1];
/* round 4 */
subRL[5] ^= subRL[1];
/* round 6 */
subRL[7] ^= subRL[1];
subRL[1] ^= (subRL[1] & ~subRL[9]) << 32;
/* modified for FLinv(kl2) */
dw = (subRL[1] & subRL[9]) >> 32;
subRL[1] ^= rol32(dw, 1);
/* round 8 */
subRL[11] ^= subRL[1];
/* round 10 */
subRL[13] ^= subRL[1];
/* round 12 */
subRL[15] ^= subRL[1];
subRL[1] ^= (subRL[1] & ~subRL[17]) << 32;
/* modified for FLinv(kl4) */
dw = (subRL[1] & subRL[17]) >> 32;
subRL[1] ^= rol32(dw, 1);
/* round 14 */
subRL[19] ^= subRL[1];
/* round 16 */
subRL[21] ^= subRL[1];
/* round 18 */
subRL[23] ^= subRL[1];
if (max == 24) {
/* kw3 */
subRL[24] ^= subRL[1];
/* absorb kw4 into the other subkeys */
kw4 = subRL[25];
} else {
subRL[1] ^= (subRL[1] & ~subRL[25]) << 32;
/* modified for FLinv(kl6) */
dw = (subRL[1] & subRL[25]) >> 32;
subRL[1] ^= rol32(dw, 1);
/* round 20 */
subRL[27] ^= subRL[1];
/* round 22 */
subRL[29] ^= subRL[1];
/* round 24 */
subRL[31] ^= subRL[1];
/* kw3 */
subRL[32] ^= subRL[1];
/* absorb kw4 into the other subkeys */
kw4 = subRL[33];
/* round 23 */
subRL[30] ^= kw4;
/* round 21 */
subRL[28] ^= kw4;
/* round 19 */
subRL[26] ^= kw4;
kw4 ^= (kw4 & ~subRL[24]) << 32;
/* modified for FL(kl5) */
dw = (kw4 & subRL[24]) >> 32;
kw4 ^= rol32(dw, 1);
}
/* round 17 */
subRL[22] ^= kw4;
/* round 15 */
subRL[20] ^= kw4;
/* round 13 */
subRL[18] ^= kw4;
kw4 ^= (kw4 & ~subRL[16]) << 32;
/* modified for FL(kl3) */
dw = (kw4 & subRL[16]) >> 32;
kw4 ^= rol32(dw, 1);
/* round 11 */
subRL[14] ^= kw4;
/* round 9 */
subRL[12] ^= kw4;
/* round 7 */
subRL[10] ^= kw4;
kw4 ^= (kw4 & ~subRL[8]) << 32;
/* modified for FL(kl1) */
dw = (kw4 & subRL[8]) >> 32;
kw4 ^= rol32(dw, 1);
/* round 5 */
subRL[6] ^= kw4;
/* round 3 */
subRL[4] ^= kw4;
/* round 1 */
subRL[2] ^= kw4;
/* kw1 */
subRL[0] ^= kw4;
/* key XOR is end of F-function */
SET_SUBKEY_LR(0, subRL[0] ^ subRL[2]); /* kw1 */
SET_SUBKEY_LR(2, subRL[3]); /* round 1 */
SET_SUBKEY_LR(3, subRL[2] ^ subRL[4]); /* round 2 */
SET_SUBKEY_LR(4, subRL[3] ^ subRL[5]); /* round 3 */
SET_SUBKEY_LR(5, subRL[4] ^ subRL[6]); /* round 4 */
SET_SUBKEY_LR(6, subRL[5] ^ subRL[7]); /* round 5 */
tl = (subRL[10] >> 32) ^ (subRL[10] & ~subRL[8]);
dw = tl & (subRL[8] >> 32); /* FL(kl1) */
tr = subRL[10] ^ rol32(dw, 1);
tt = (tr | ((u64)tl << 32));
SET_SUBKEY_LR(7, subRL[6] ^ tt); /* round 6 */
SET_SUBKEY_LR(8, subRL[8]); /* FL(kl1) */
SET_SUBKEY_LR(9, subRL[9]); /* FLinv(kl2) */
tl = (subRL[7] >> 32) ^ (subRL[7] & ~subRL[9]);
dw = tl & (subRL[9] >> 32); /* FLinv(kl2) */
tr = subRL[7] ^ rol32(dw, 1);
tt = (tr | ((u64)tl << 32));
SET_SUBKEY_LR(10, subRL[11] ^ tt); /* round 7 */
SET_SUBKEY_LR(11, subRL[10] ^ subRL[12]); /* round 8 */
SET_SUBKEY_LR(12, subRL[11] ^ subRL[13]); /* round 9 */
SET_SUBKEY_LR(13, subRL[12] ^ subRL[14]); /* round 10 */
SET_SUBKEY_LR(14, subRL[13] ^ subRL[15]); /* round 11 */
tl = (subRL[18] >> 32) ^ (subRL[18] & ~subRL[16]);
dw = tl & (subRL[16] >> 32); /* FL(kl3) */
tr = subRL[18] ^ rol32(dw, 1);
tt = (tr | ((u64)tl << 32));
SET_SUBKEY_LR(15, subRL[14] ^ tt); /* round 12 */
SET_SUBKEY_LR(16, subRL[16]); /* FL(kl3) */
SET_SUBKEY_LR(17, subRL[17]); /* FLinv(kl4) */
tl = (subRL[15] >> 32) ^ (subRL[15] & ~subRL[17]);
dw = tl & (subRL[17] >> 32); /* FLinv(kl4) */
tr = subRL[15] ^ rol32(dw, 1);
tt = (tr | ((u64)tl << 32));
SET_SUBKEY_LR(18, subRL[19] ^ tt); /* round 13 */
SET_SUBKEY_LR(19, subRL[18] ^ subRL[20]); /* round 14 */
SET_SUBKEY_LR(20, subRL[19] ^ subRL[21]); /* round 15 */
SET_SUBKEY_LR(21, subRL[20] ^ subRL[22]); /* round 16 */
SET_SUBKEY_LR(22, subRL[21] ^ subRL[23]); /* round 17 */
if (max == 24) {
SET_SUBKEY_LR(23, subRL[22]); /* round 18 */
SET_SUBKEY_LR(24, subRL[24] ^ subRL[23]); /* kw3 */
} else {
tl = (subRL[26] >> 32) ^ (subRL[26] & ~subRL[24]);
dw = tl & (subRL[24] >> 32); /* FL(kl5) */
tr = subRL[26] ^ rol32(dw, 1);
tt = (tr | ((u64)tl << 32));
SET_SUBKEY_LR(23, subRL[22] ^ tt); /* round 18 */
SET_SUBKEY_LR(24, subRL[24]); /* FL(kl5) */
SET_SUBKEY_LR(25, subRL[25]); /* FLinv(kl6) */
tl = (subRL[23] >> 32) ^ (subRL[23] & ~subRL[25]);
dw = tl & (subRL[25] >> 32); /* FLinv(kl6) */
tr = subRL[23] ^ rol32(dw, 1);
tt = (tr | ((u64)tl << 32));
SET_SUBKEY_LR(26, subRL[27] ^ tt); /* round 19 */
SET_SUBKEY_LR(27, subRL[26] ^ subRL[28]); /* round 20 */
SET_SUBKEY_LR(28, subRL[27] ^ subRL[29]); /* round 21 */
SET_SUBKEY_LR(29, subRL[28] ^ subRL[30]); /* round 22 */
SET_SUBKEY_LR(30, subRL[29] ^ subRL[31]); /* round 23 */
SET_SUBKEY_LR(31, subRL[30]); /* round 24 */
SET_SUBKEY_LR(32, subRL[32] ^ subRL[31]); /* kw3 */
}
}
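/*
 * Editorial note (hedged): the "absorb" steps above fold the whitening
 * keys kw2/kw4 into the neighbouring round subkeys, so the per-round
 * code needs no separate pre-/post-whitening XORs; the FL/FLinv
 * adjustments compensate for pushing a key word through those
 * nonlinear layers. A tiny sketch of the compensation pattern for one
 * 64-bit key word k and FL-key f, matching the rol32() fixups above:
 *
 *	u32 dw = (u32)((k & f) >> 32);
 *	k ^= rol32(dw, 1);
 */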
static void camellia_setup128(const unsigned char *key, u64 *subkey)
{
u64 kl, kr, ww;
u64 subRL[26];
/* k == kl || kr (|| is concatenation) */
kl = get_unaligned_be64(key);
kr = get_unaligned_be64(key + 8);
/* generate KL dependent subkeys */
/* kw1 */
subRL[0] = kl;
/* kw2 */
subRL[1] = kr;
/* rotation left shift 15bit */
ROLDQ(kl, kr, 15);
/* k3 */
subRL[4] = kl;
/* k4 */
subRL[5] = kr;
/* rotation left shift 15+30bit */
ROLDQ(kl, kr, 30);
/* k7 */
subRL[10] = kl;
/* k8 */
subRL[11] = kr;
/* rotation left shift 15+30+15bit */
ROLDQ(kl, kr, 15);
/* k10 */
subRL[13] = kr;
/* rotation left shift 15+30+15+17 bit */
ROLDQ(kl, kr, 17);
/* kl3 */
subRL[16] = kl;
/* kl4 */
subRL[17] = kr;
/* rotation left shift 15+30+15+17+17 bit */
ROLDQ(kl, kr, 17);
/* k13 */
subRL[18] = kl;
/* k14 */
subRL[19] = kr;
/* rotation left shift 15+30+15+17+17+17 bit */
ROLDQ(kl, kr, 17);
/* k17 */
subRL[22] = kl;
/* k18 */
subRL[23] = kr;
/* generate KA */
kl = subRL[0];
kr = subRL[1];
CAMELLIA_F(kl, CAMELLIA_SIGMA1L, CAMELLIA_SIGMA1R, ww);
kr ^= ww;
CAMELLIA_F(kr, CAMELLIA_SIGMA2L, CAMELLIA_SIGMA2R, kl);
/* current status == (kll, klr, w0, w1) */
CAMELLIA_F(kl, CAMELLIA_SIGMA3L, CAMELLIA_SIGMA3R, kr);
kr ^= ww;
CAMELLIA_F(kr, CAMELLIA_SIGMA4L, CAMELLIA_SIGMA4R, ww);
kl ^= ww;
/* generate KA dependent subkeys */
/* k1, k2 */
subRL[2] = kl;
subRL[3] = kr;
ROLDQ(kl, kr, 15);
/* k5,k6 */
subRL[6] = kl;
subRL[7] = kr;
ROLDQ(kl, kr, 15);
/* kl1, kl2 */
subRL[8] = kl;
subRL[9] = kr;
ROLDQ(kl, kr, 15);
/* k9 */
subRL[12] = kl;
ROLDQ(kl, kr, 15);
/* k11, k12 */
subRL[14] = kl;
subRL[15] = kr;
ROLDQ(kl, kr, 34);
/* k15, k16 */
subRL[20] = kl;
subRL[21] = kr;
ROLDQ(kl, kr, 17);
/* kw3, kw4 */
subRL[24] = kl;
subRL[25] = kr;
camellia_setup_tail(subkey, subRL, 24);
}
static void camellia_setup256(const unsigned char *key, u64 *subkey)
{
u64 kl, kr; /* left half of key */
u64 krl, krr; /* right half of key */
u64 ww; /* temporary variables */
u64 subRL[34];
/* key = (kl || kr || krl || krr) (|| is concatenation) */
kl = get_unaligned_be64(key);
kr = get_unaligned_be64(key + 8);
krl = get_unaligned_be64(key + 16);
krr = get_unaligned_be64(key + 24);
/* generate KL dependent subkeys */
/* kw1 */
subRL[0] = kl;
/* kw2 */
subRL[1] = kr;
ROLDQ(kl, kr, 45);
/* k9 */
subRL[12] = kl;
/* k10 */
subRL[13] = kr;
ROLDQ(kl, kr, 15);
/* kl3 */
subRL[16] = kl;
/* kl4 */
subRL[17] = kr;
ROLDQ(kl, kr, 17);
/* k17 */
subRL[22] = kl;
/* k18 */
subRL[23] = kr;
ROLDQ(kl, kr, 34);
/* k23 */
subRL[30] = kl;
/* k24 */
subRL[31] = kr;
/* generate KR dependent subkeys */
ROLDQ(krl, krr, 15);
/* k3 */
subRL[4] = krl;
/* k4 */
subRL[5] = krr;
ROLDQ(krl, krr, 15);
/* kl1 */
subRL[8] = krl;
/* kl2 */
subRL[9] = krr;
ROLDQ(krl, krr, 30);
/* k13 */
subRL[18] = krl;
/* k14 */
subRL[19] = krr;
ROLDQ(krl, krr, 34);
/* k19 */
subRL[26] = krl;
/* k20 */
subRL[27] = krr;
ROLDQ(krl, krr, 34);
/* generate KA */
kl = subRL[0] ^ krl;
kr = subRL[1] ^ krr;
CAMELLIA_F(kl, CAMELLIA_SIGMA1L, CAMELLIA_SIGMA1R, ww);
kr ^= ww;
CAMELLIA_F(kr, CAMELLIA_SIGMA2L, CAMELLIA_SIGMA2R, kl);
kl ^= krl;
CAMELLIA_F(kl, CAMELLIA_SIGMA3L, CAMELLIA_SIGMA3R, kr);
kr ^= ww ^ krr;
CAMELLIA_F(kr, CAMELLIA_SIGMA4L, CAMELLIA_SIGMA4R, ww);
kl ^= ww;
/* generate KB */
krl ^= kl;
krr ^= kr;
CAMELLIA_F(krl, CAMELLIA_SIGMA5L, CAMELLIA_SIGMA5R, ww);
krr ^= ww;
CAMELLIA_F(krr, CAMELLIA_SIGMA6L, CAMELLIA_SIGMA6R, ww);
krl ^= ww;
/* generate KA dependent subkeys */
ROLDQ(kl, kr, 15);
/* k5 */
subRL[6] = kl;
/* k6 */
subRL[7] = kr;
ROLDQ(kl, kr, 30);
/* k11 */
subRL[14] = kl;
/* k12 */
subRL[15] = kr;
/* rotation left shift 32bit */
ROLDQ(kl, kr, 32);
/* kl5 */
subRL[24] = kl;
/* kl6 */
subRL[25] = kr;
/* rotation left shift 17 from k11,k12 -> k21,k22 */
ROLDQ(kl, kr, 17);
/* k21 */
subRL[28] = kl;
/* k22 */
subRL[29] = kr;
/* generate KB dependent subkeys */
/* k1 */
subRL[2] = krl;
/* k2 */
subRL[3] = krr;
ROLDQ(krl, krr, 30);
/* k7 */
subRL[10] = krl;
/* k8 */
subRL[11] = krr;
ROLDQ(krl, krr, 30);
/* k15 */
subRL[20] = krl;
/* k16 */
subRL[21] = krr;
ROLDQ(krl, krr, 51);
/* kw3 */
subRL[32] = krl;
/* kw4 */
subRL[33] = krr;
camellia_setup_tail(subkey, subRL, 32);
}
static void camellia_setup192(const unsigned char *key, u64 *subkey)
{
unsigned char kk[32];
u64 krl, krr;
memcpy(kk, key, 24);
memcpy((unsigned char *)&krl, key+16, 8);
krr = ~krl;
memcpy(kk+24, (unsigned char *)&krr, 8);
camellia_setup256(kk, subkey);
}
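/*
 * Note on camellia_setup192() (editorial, hedged): per the Camellia
 * specification, a 192-bit key is extended to 256 bits by setting the
 * low 64 bits of KR to the bitwise complement of its high 64 bits,
 * after which the 256-bit key schedule is reused unchanged. The raw
 * memcpy()/~ sequence is endian-neutral, since complementing a value
 * complements each of its bytes in place, e.g.:
 *
 *	~0x0123456789abcdefULL == 0xfedcba9876543210ULL
 */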
int __camellia_setkey(struct camellia_ctx *cctx, const unsigned char *key,
unsigned int key_len)
{
if (key_len != 16 && key_len != 24 && key_len != 32)
return -EINVAL;
cctx->key_length = key_len;
switch (key_len) {
case 16:
camellia_setup128(key, cctx->key_table);
break;
case 24:
camellia_setup192(key, cctx->key_table);
break;
case 32:
camellia_setup256(key, cctx->key_table);
break;
}
return 0;
}
EXPORT_SYMBOL_GPL(__camellia_setkey);
static int camellia_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int key_len)
{
return __camellia_setkey(crypto_tfm_ctx(tfm), key, key_len);
}
static int camellia_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
unsigned int key_len)
{
return camellia_setkey(&tfm->base, key, key_len);
}
void camellia_decrypt_cbc_2way(const void *ctx, u8 *dst, const u8 *src)
{
u8 buf[CAMELLIA_BLOCK_SIZE];
const u8 *iv = src;
if (dst == src)
iv = memcpy(buf, iv, sizeof(buf));
camellia_dec_blk_2way(ctx, dst, src);
crypto_xor(dst + CAMELLIA_BLOCK_SIZE, iv, CAMELLIA_BLOCK_SIZE);
}
EXPORT_SYMBOL_GPL(camellia_decrypt_cbc_2way);
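/*
 * CBC chaining sketch for camellia_decrypt_cbc_2way() (hedged; the
 * 2-way decrypt itself lives in the assembler). For ciphertext blocks
 * C1, C2 preceded by C0:
 *
 *	camellia_dec_blk_2way(): dst[0] = D(C1), dst[1] = D(C2)
 *	crypto_xor(dst + 16, .): dst[1] ^= C1, using the copy saved in
 *				 'buf' for in-place requests, where
 *				 D(C1) has already overwritten src
 *
 * The generic CBC walk code then completes dst[0] ^= C0.
 */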
static int ecb_encrypt(struct skcipher_request *req)
{
ECB_WALK_START(req, CAMELLIA_BLOCK_SIZE, -1);
ECB_BLOCK(2, camellia_enc_blk_2way);
ECB_BLOCK(1, camellia_enc_blk);
ECB_WALK_END();
}
static int ecb_decrypt(struct skcipher_request *req)
{
ECB_WALK_START(req, CAMELLIA_BLOCK_SIZE, -1);
ECB_BLOCK(2, camellia_dec_blk_2way);
ECB_BLOCK(1, camellia_dec_blk);
ECB_WALK_END();
}
static int cbc_encrypt(struct skcipher_request *req)
{
CBC_WALK_START(req, CAMELLIA_BLOCK_SIZE, -1);
CBC_ENC_BLOCK(camellia_enc_blk);
CBC_WALK_END();
}
static int cbc_decrypt(struct skcipher_request *req)
{
CBC_WALK_START(req, CAMELLIA_BLOCK_SIZE, -1);
CBC_DEC_BLOCK(2, camellia_decrypt_cbc_2way);
CBC_DEC_BLOCK(1, camellia_dec_blk);
CBC_WALK_END();
}
static struct crypto_alg camellia_cipher_alg = {
.cra_name = "camellia",
.cra_driver_name = "camellia-asm",
.cra_priority = 200,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = CAMELLIA_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct camellia_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
.cia_min_keysize = CAMELLIA_MIN_KEY_SIZE,
.cia_max_keysize = CAMELLIA_MAX_KEY_SIZE,
.cia_setkey = camellia_setkey,
.cia_encrypt = camellia_encrypt,
.cia_decrypt = camellia_decrypt
}
}
};
static struct skcipher_alg camellia_skcipher_algs[] = {
{
.base.cra_name = "ecb(camellia)",
.base.cra_driver_name = "ecb-camellia-asm",
.base.cra_priority = 300,
.base.cra_blocksize = CAMELLIA_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct camellia_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = CAMELLIA_MIN_KEY_SIZE,
.max_keysize = CAMELLIA_MAX_KEY_SIZE,
.setkey = camellia_setkey_skcipher,
.encrypt = ecb_encrypt,
.decrypt = ecb_decrypt,
}, {
.base.cra_name = "cbc(camellia)",
.base.cra_driver_name = "cbc-camellia-asm",
.base.cra_priority = 300,
.base.cra_blocksize = CAMELLIA_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct camellia_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = CAMELLIA_MIN_KEY_SIZE,
.max_keysize = CAMELLIA_MAX_KEY_SIZE,
.ivsize = CAMELLIA_BLOCK_SIZE,
.setkey = camellia_setkey_skcipher,
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
}
};
static bool is_blacklisted_cpu(void)
{
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
return false;
if (boot_cpu_data.x86 == 0x0f) {
/*
 * On Pentium 4, camellia-asm is slower than the original assembler
 * implementation because of the excessive use of 64-bit rotates and
 * left-shifts (which are really slow on P4) needed to store and
 * handle a 128-bit block in two 64-bit registers.
*/
return true;
}
return false;
}
static int force;
module_param(force, int, 0);
MODULE_PARM_DESC(force, "Force module load, ignore CPU blacklist");
static int __init camellia_init(void)
{
int err;
if (!force && is_blacklisted_cpu()) {
printk(KERN_INFO "camellia-x86_64: performance on this CPU would be suboptimal: disabling camellia-x86_64.\n");
return -ENODEV;
}
err = crypto_register_alg(&camellia_cipher_alg);
if (err)
return err;
err = crypto_register_skciphers(camellia_skcipher_algs,
ARRAY_SIZE(camellia_skcipher_algs));
if (err)
crypto_unregister_alg(&camellia_cipher_alg);
return err;
}
static void __exit camellia_fini(void)
{
crypto_unregister_alg(&camellia_cipher_alg);
crypto_unregister_skciphers(camellia_skcipher_algs,
ARRAY_SIZE(camellia_skcipher_algs));
}
module_init(camellia_init);
module_exit(camellia_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Camellia Cipher Algorithm, asm optimized");
MODULE_ALIAS_CRYPTO("camellia");
MODULE_ALIAS_CRYPTO("camellia-asm");
| linux-master | arch/x86/crypto/camellia_glue.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* The AEGIS-128 Authenticated-Encryption Algorithm
* Glue for AES-NI + SSE2 implementation
*
* Copyright (c) 2017-2018 Ondrej Mosnacek <[email protected]>
* Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
*/
#include <crypto/internal/aead.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <asm/fpu/api.h>
#include <asm/cpu_device_id.h>
#define AEGIS128_BLOCK_ALIGN 16
#define AEGIS128_BLOCK_SIZE 16
#define AEGIS128_NONCE_SIZE 16
#define AEGIS128_STATE_BLOCKS 5
#define AEGIS128_KEY_SIZE 16
#define AEGIS128_MIN_AUTH_SIZE 8
#define AEGIS128_MAX_AUTH_SIZE 16
asmlinkage void crypto_aegis128_aesni_init(void *state, void *key, void *iv);
asmlinkage void crypto_aegis128_aesni_ad(
void *state, unsigned int length, const void *data);
asmlinkage void crypto_aegis128_aesni_enc(
void *state, unsigned int length, const void *src, void *dst);
asmlinkage void crypto_aegis128_aesni_dec(
void *state, unsigned int length, const void *src, void *dst);
asmlinkage void crypto_aegis128_aesni_enc_tail(
void *state, unsigned int length, const void *src, void *dst);
asmlinkage void crypto_aegis128_aesni_dec_tail(
void *state, unsigned int length, const void *src, void *dst);
asmlinkage void crypto_aegis128_aesni_final(
void *state, void *tag_xor, unsigned int cryptlen,
unsigned int assoclen);
struct aegis_block {
u8 bytes[AEGIS128_BLOCK_SIZE] __aligned(AEGIS128_BLOCK_ALIGN);
};
struct aegis_state {
struct aegis_block blocks[AEGIS128_STATE_BLOCKS];
};
struct aegis_ctx {
struct aegis_block key;
};
struct aegis_crypt_ops {
int (*skcipher_walk_init)(struct skcipher_walk *walk,
struct aead_request *req, bool atomic);
void (*crypt_blocks)(void *state, unsigned int length, const void *src,
void *dst);
void (*crypt_tail)(void *state, unsigned int length, const void *src,
void *dst);
};
static void crypto_aegis128_aesni_process_ad(
struct aegis_state *state, struct scatterlist *sg_src,
unsigned int assoclen)
{
struct scatter_walk walk;
struct aegis_block buf;
unsigned int pos = 0;
scatterwalk_start(&walk, sg_src);
while (assoclen != 0) {
unsigned int size = scatterwalk_clamp(&walk, assoclen);
unsigned int left = size;
void *mapped = scatterwalk_map(&walk);
const u8 *src = (const u8 *)mapped;
if (pos + size >= AEGIS128_BLOCK_SIZE) {
if (pos > 0) {
unsigned int fill = AEGIS128_BLOCK_SIZE - pos;
memcpy(buf.bytes + pos, src, fill);
crypto_aegis128_aesni_ad(state,
AEGIS128_BLOCK_SIZE,
buf.bytes);
pos = 0;
left -= fill;
src += fill;
}
crypto_aegis128_aesni_ad(state, left, src);
src += left & ~(AEGIS128_BLOCK_SIZE - 1);
left &= AEGIS128_BLOCK_SIZE - 1;
}
memcpy(buf.bytes + pos, src, left);
pos += left;
assoclen -= size;
scatterwalk_unmap(mapped);
scatterwalk_advance(&walk, size);
scatterwalk_done(&walk, 0, assoclen);
}
if (pos > 0) {
memset(buf.bytes + pos, 0, AEGIS128_BLOCK_SIZE - pos);
crypto_aegis128_aesni_ad(state, AEGIS128_BLOCK_SIZE, buf.bytes);
}
}
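/*
 * Worked example of the associated-data buffering above (illustrative;
 * assume a 20-byte AD split 12 + 8 across two scatterlist entries):
 *
 *   entry 1 (12 bytes): pos + size < 16, so all 12 bytes are staged in
 *	'buf' and pos becomes 12;
 *   entry 2 (8 bytes):  'buf' is topped up with 4 bytes and flushed as
 *	one full 16-byte block, then the remaining 4 bytes are staged;
 *   after the loop:     the 4 staged bytes are zero-padded to a full
 *	block and absorbed, matching the AEGIS-128 zero-padding rule.
 */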
static void crypto_aegis128_aesni_process_crypt(
struct aegis_state *state, struct skcipher_walk *walk,
const struct aegis_crypt_ops *ops)
{
while (walk->nbytes >= AEGIS128_BLOCK_SIZE) {
ops->crypt_blocks(state,
round_down(walk->nbytes, AEGIS128_BLOCK_SIZE),
walk->src.virt.addr, walk->dst.virt.addr);
skcipher_walk_done(walk, walk->nbytes % AEGIS128_BLOCK_SIZE);
}
if (walk->nbytes) {
ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr,
walk->dst.virt.addr);
skcipher_walk_done(walk, 0);
}
}
static struct aegis_ctx *crypto_aegis128_aesni_ctx(struct crypto_aead *aead)
{
u8 *ctx = crypto_aead_ctx(aead);
ctx = PTR_ALIGN(ctx, __alignof__(struct aegis_ctx));
return (void *)ctx;
}
static int crypto_aegis128_aesni_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int keylen)
{
struct aegis_ctx *ctx = crypto_aegis128_aesni_ctx(aead);
if (keylen != AEGIS128_KEY_SIZE)
return -EINVAL;
memcpy(ctx->key.bytes, key, AEGIS128_KEY_SIZE);
return 0;
}
static int crypto_aegis128_aesni_setauthsize(struct crypto_aead *tfm,
unsigned int authsize)
{
if (authsize > AEGIS128_MAX_AUTH_SIZE)
return -EINVAL;
if (authsize < AEGIS128_MIN_AUTH_SIZE)
return -EINVAL;
return 0;
}
static void crypto_aegis128_aesni_crypt(struct aead_request *req,
struct aegis_block *tag_xor,
unsigned int cryptlen,
const struct aegis_crypt_ops *ops)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct aegis_ctx *ctx = crypto_aegis128_aesni_ctx(tfm);
struct skcipher_walk walk;
struct aegis_state state;
ops->skcipher_walk_init(&walk, req, true);
kernel_fpu_begin();
crypto_aegis128_aesni_init(&state, ctx->key.bytes, req->iv);
crypto_aegis128_aesni_process_ad(&state, req->src, req->assoclen);
crypto_aegis128_aesni_process_crypt(&state, &walk, ops);
crypto_aegis128_aesni_final(&state, tag_xor, req->assoclen, cryptlen);
kernel_fpu_end();
}
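/*
 * Note: the walk is initialized with atomic == true because everything
 * between kernel_fpu_begin() and kernel_fpu_end() runs with preemption
 * disabled and therefore must not sleep.
 */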
static int crypto_aegis128_aesni_encrypt(struct aead_request *req)
{
static const struct aegis_crypt_ops OPS = {
.skcipher_walk_init = skcipher_walk_aead_encrypt,
.crypt_blocks = crypto_aegis128_aesni_enc,
.crypt_tail = crypto_aegis128_aesni_enc_tail,
};
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct aegis_block tag = {};
unsigned int authsize = crypto_aead_authsize(tfm);
unsigned int cryptlen = req->cryptlen;
crypto_aegis128_aesni_crypt(req, &tag, cryptlen, &OPS);
scatterwalk_map_and_copy(tag.bytes, req->dst,
req->assoclen + cryptlen, authsize, 1);
return 0;
}
static int crypto_aegis128_aesni_decrypt(struct aead_request *req)
{
static const struct aegis_block zeros = {};
static const struct aegis_crypt_ops OPS = {
.skcipher_walk_init = skcipher_walk_aead_decrypt,
.crypt_blocks = crypto_aegis128_aesni_dec,
.crypt_tail = crypto_aegis128_aesni_dec_tail,
};
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct aegis_block tag;
unsigned int authsize = crypto_aead_authsize(tfm);
unsigned int cryptlen = req->cryptlen - authsize;
scatterwalk_map_and_copy(tag.bytes, req->src,
req->assoclen + cryptlen, authsize, 0);
crypto_aegis128_aesni_crypt(req, &tag, cryptlen, &OPS);
return crypto_memneq(tag.bytes, zeros.bytes, authsize) ? -EBADMSG : 0;
}
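/*
 * Tag-verification trick used above: the expected tag is passed to
 * crypto_aegis128_aesni_final() as 'tag_xor', which XORs the computed
 * tag into it. On success the result is all-zero, so comparing against
 * 'zeros' with crypto_memneq() both verifies the tag and keeps the
 * comparison constant-time.
 */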
static int crypto_aegis128_aesni_init_tfm(struct crypto_aead *aead)
{
return 0;
}
static void crypto_aegis128_aesni_exit_tfm(struct crypto_aead *aead)
{
}
static struct aead_alg crypto_aegis128_aesni_alg = {
.setkey = crypto_aegis128_aesni_setkey,
.setauthsize = crypto_aegis128_aesni_setauthsize,
.encrypt = crypto_aegis128_aesni_encrypt,
.decrypt = crypto_aegis128_aesni_decrypt,
.init = crypto_aegis128_aesni_init_tfm,
.exit = crypto_aegis128_aesni_exit_tfm,
.ivsize = AEGIS128_NONCE_SIZE,
.maxauthsize = AEGIS128_MAX_AUTH_SIZE,
.chunksize = AEGIS128_BLOCK_SIZE,
.base = {
.cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct aegis_ctx) +
__alignof__(struct aegis_ctx),
.cra_alignmask = 0,
.cra_priority = 400,
.cra_name = "__aegis128",
.cra_driver_name = "__aegis128-aesni",
.cra_module = THIS_MODULE,
}
};
static struct simd_aead_alg *simd_alg;
static int __init crypto_aegis128_aesni_module_init(void)
{
if (!boot_cpu_has(X86_FEATURE_XMM2) ||
!boot_cpu_has(X86_FEATURE_AES) ||
!cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
return -ENODEV;
return simd_register_aeads_compat(&crypto_aegis128_aesni_alg, 1,
&simd_alg);
}
static void __exit crypto_aegis128_aesni_module_exit(void)
{
simd_unregister_aeads(&crypto_aegis128_aesni_alg, 1, &simd_alg);
}
module_init(crypto_aegis128_aesni_module_init);
module_exit(crypto_aegis128_aesni_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ondrej Mosnacek <[email protected]>");
MODULE_DESCRIPTION("AEGIS-128 AEAD algorithm -- AESNI+SSE2 implementation");
MODULE_ALIAS_CRYPTO("aegis128");
MODULE_ALIAS_CRYPTO("aegis128-aesni");
| linux-master | arch/x86/crypto/aegis128-aesni-glue.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright (C) 2015-2019 Jason A. Donenfeld <[email protected]>. All Rights Reserved.
*/
#include <crypto/internal/blake2s.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <asm/cpufeature.h>
#include <asm/fpu/api.h>
#include <asm/processor.h>
#include <asm/simd.h>
asmlinkage void blake2s_compress_ssse3(struct blake2s_state *state,
const u8 *block, const size_t nblocks,
const u32 inc);
asmlinkage void blake2s_compress_avx512(struct blake2s_state *state,
const u8 *block, const size_t nblocks,
const u32 inc);
static __ro_after_init DEFINE_STATIC_KEY_FALSE(blake2s_use_ssse3);
static __ro_after_init DEFINE_STATIC_KEY_FALSE(blake2s_use_avx512);
void blake2s_compress(struct blake2s_state *state, const u8 *block,
size_t nblocks, const u32 inc)
{
/* SIMD disables preemption, so relax after processing each page. */
BUILD_BUG_ON(SZ_4K / BLAKE2S_BLOCK_SIZE < 8);
if (!static_branch_likely(&blake2s_use_ssse3) || !may_use_simd()) {
blake2s_compress_generic(state, block, nblocks, inc);
return;
}
do {
const size_t blocks = min_t(size_t, nblocks,
SZ_4K / BLAKE2S_BLOCK_SIZE);
kernel_fpu_begin();
if (IS_ENABLED(CONFIG_AS_AVX512) &&
static_branch_likely(&blake2s_use_avx512))
blake2s_compress_avx512(state, block, blocks, inc);
else
blake2s_compress_ssse3(state, block, blocks, inc);
kernel_fpu_end();
nblocks -= blocks;
block += blocks * BLAKE2S_BLOCK_SIZE;
} while (nblocks);
}
EXPORT_SYMBOL(blake2s_compress);
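/*
 * Caller sketch (hedged; this is the <crypto/blake2s.h> library
 * interface that eventually reaches blake2s_compress() above, shown
 * for illustration only):
 *
 *	#include <crypto/blake2s.h>
 *
 *	u8 digest[BLAKE2S_HASH_SIZE];
 *	static const u8 msg[] = "abc";
 *
 *	blake2s(digest, msg, NULL, sizeof(digest), sizeof(msg) - 1, 0);
 *	// digest now holds the unkeyed 32-byte BLAKE2s hash of "abc"
 */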
static int __init blake2s_mod_init(void)
{
if (boot_cpu_has(X86_FEATURE_SSSE3))
static_branch_enable(&blake2s_use_ssse3);
if (IS_ENABLED(CONFIG_AS_AVX512) &&
boot_cpu_has(X86_FEATURE_AVX) &&
boot_cpu_has(X86_FEATURE_AVX2) &&
boot_cpu_has(X86_FEATURE_AVX512F) &&
boot_cpu_has(X86_FEATURE_AVX512VL) &&
cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM |
XFEATURE_MASK_AVX512, NULL))
static_branch_enable(&blake2s_use_avx512);
return 0;
}
subsys_initcall(blake2s_mod_init);
| linux-master | arch/x86/crypto/blake2s-glue.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Glue Code for assembler optimized version of Blowfish
*
* Copyright (c) 2011 Jussi Kivilinna <[email protected]>
*
* CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
* Copyright (c) 2006 Herbert Xu <[email protected]>
*/
#include <crypto/algapi.h>
#include <crypto/blowfish.h>
#include <crypto/internal/skcipher.h>
#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include "ecb_cbc_helpers.h"
/* regular block cipher functions */
asmlinkage void blowfish_enc_blk(struct bf_ctx *ctx, u8 *dst, const u8 *src);
asmlinkage void blowfish_dec_blk(struct bf_ctx *ctx, u8 *dst, const u8 *src);
/* 4-way parallel cipher functions */
asmlinkage void blowfish_enc_blk_4way(struct bf_ctx *ctx, u8 *dst,
const u8 *src);
asmlinkage void __blowfish_dec_blk_4way(struct bf_ctx *ctx, u8 *dst,
const u8 *src, bool cbc);
static inline void blowfish_dec_ecb_4way(struct bf_ctx *ctx, u8 *dst,
const u8 *src)
{
return __blowfish_dec_blk_4way(ctx, dst, src, false);
}
static inline void blowfish_dec_cbc_4way(struct bf_ctx *ctx, u8 *dst,
const u8 *src)
{
return __blowfish_dec_blk_4way(ctx, dst, src, true);
}
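/*
 * Hedged note on the 'cbc' flag: judging by the two wrappers above,
 * the 4-way assembler either produces plain ECB output (cbc == false)
 * or additionally folds the CBC chaining XOR of the preceding
 * ciphertext blocks into blocks 2..4 (cbc == true); the first block's
 * XOR with the IV is left to the generic CBC walk code.
 */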
static void blowfish_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
blowfish_enc_blk(crypto_tfm_ctx(tfm), dst, src);
}
static void blowfish_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
blowfish_dec_blk(crypto_tfm_ctx(tfm), dst, src);
}
static int blowfish_setkey_skcipher(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen)
{
return blowfish_setkey(&tfm->base, key, keylen);
}
static int ecb_encrypt(struct skcipher_request *req)
{
ECB_WALK_START(req, BF_BLOCK_SIZE, -1);
ECB_BLOCK(4, blowfish_enc_blk_4way);
ECB_BLOCK(1, blowfish_enc_blk);
ECB_WALK_END();
}
static int ecb_decrypt(struct skcipher_request *req)
{
ECB_WALK_START(req, BF_BLOCK_SIZE, -1);
ECB_BLOCK(4, blowfish_dec_ecb_4way);
ECB_BLOCK(1, blowfish_dec_blk);
ECB_WALK_END();
}
static int cbc_encrypt(struct skcipher_request *req)
{
CBC_WALK_START(req, BF_BLOCK_SIZE, -1);
CBC_ENC_BLOCK(blowfish_enc_blk);
CBC_WALK_END();
}
static int cbc_decrypt(struct skcipher_request *req)
{
CBC_WALK_START(req, BF_BLOCK_SIZE, -1);
CBC_DEC_BLOCK(4, blowfish_dec_cbc_4way);
CBC_DEC_BLOCK(1, blowfish_dec_blk);
CBC_WALK_END();
}
static struct crypto_alg bf_cipher_alg = {
.cra_name = "blowfish",
.cra_driver_name = "blowfish-asm",
.cra_priority = 200,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = BF_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct bf_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
.cia_min_keysize = BF_MIN_KEY_SIZE,
.cia_max_keysize = BF_MAX_KEY_SIZE,
.cia_setkey = blowfish_setkey,
.cia_encrypt = blowfish_encrypt,
.cia_decrypt = blowfish_decrypt,
}
}
};
static struct skcipher_alg bf_skcipher_algs[] = {
{
.base.cra_name = "ecb(blowfish)",
.base.cra_driver_name = "ecb-blowfish-asm",
.base.cra_priority = 300,
.base.cra_blocksize = BF_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct bf_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = BF_MIN_KEY_SIZE,
.max_keysize = BF_MAX_KEY_SIZE,
.setkey = blowfish_setkey_skcipher,
.encrypt = ecb_encrypt,
.decrypt = ecb_decrypt,
}, {
.base.cra_name = "cbc(blowfish)",
.base.cra_driver_name = "cbc-blowfish-asm",
.base.cra_priority = 300,
.base.cra_blocksize = BF_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct bf_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = BF_MIN_KEY_SIZE,
.max_keysize = BF_MAX_KEY_SIZE,
.ivsize = BF_BLOCK_SIZE,
.setkey = blowfish_setkey_skcipher,
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
},
};
static bool is_blacklisted_cpu(void)
{
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
return false;
if (boot_cpu_data.x86 == 0x0f) {
/*
 * On Pentium 4, blowfish-x86_64 is slower than the generic C
 * implementation because of its use of 64-bit rotates (which are
 * really slow on P4). Therefore blacklist P4s.
*/
return true;
}
return false;
}
static int force;
module_param(force, int, 0);
MODULE_PARM_DESC(force, "Force module load, ignore CPU blacklist");
static int __init blowfish_init(void)
{
int err;
if (!force && is_blacklisted_cpu()) {
printk(KERN_INFO "blowfish-x86_64: performance on this CPU would be suboptimal: disabling blowfish-x86_64.\n");
return -ENODEV;
}
err = crypto_register_alg(&bf_cipher_alg);
if (err)
return err;
err = crypto_register_skciphers(bf_skcipher_algs,
ARRAY_SIZE(bf_skcipher_algs));
if (err)
crypto_unregister_alg(&bf_cipher_alg);
return err;
}
static void __exit blowfish_fini(void)
{
crypto_unregister_alg(&bf_cipher_alg);
crypto_unregister_skciphers(bf_skcipher_algs,
ARRAY_SIZE(bf_skcipher_algs));
}
module_init(blowfish_init);
module_exit(blowfish_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Blowfish Cipher Algorithm, asm optimized");
MODULE_ALIAS_CRYPTO("blowfish");
MODULE_ALIAS_CRYPTO("blowfish-asm");
| linux-master | arch/x86/crypto/blowfish_glue.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/range.h>
#include "bus_numa.h"
LIST_HEAD(pci_root_infos);
static struct pci_root_info *x86_find_pci_root_info(int bus)
{
struct pci_root_info *info;
list_for_each_entry(info, &pci_root_infos, list)
if (info->busn.start == bus)
return info;
return NULL;
}
int x86_pci_root_bus_node(int bus)
{
struct pci_root_info *info = x86_find_pci_root_info(bus);
if (!info)
return NUMA_NO_NODE;
return info->node;
}
void x86_pci_root_bus_resources(int bus, struct list_head *resources)
{
struct pci_root_info *info = x86_find_pci_root_info(bus);
struct pci_root_res *root_res;
struct resource_entry *window;
bool found = false;
if (!info)
goto default_resources;
printk(KERN_DEBUG "PCI: root bus %02x: hardware-probed resources\n",
bus);
/* already added by acpi ? */
resource_list_for_each_entry(window, resources)
if (window->res->flags & IORESOURCE_BUS) {
found = true;
break;
}
if (!found)
pci_add_resource(resources, &info->busn);
list_for_each_entry(root_res, &info->resources, list)
pci_add_resource(resources, &root_res->res);
return;
default_resources:
/*
* We don't have any host bridge aperture information from the
* "native host bridge drivers," e.g., amd_bus or broadcom_bus,
* so fall back to the defaults historically used by pci_create_bus().
*/
printk(KERN_DEBUG "PCI: root bus %02x: using default resources\n", bus);
pci_add_resource(resources, &ioport_resource);
pci_add_resource(resources, &iomem_resource);
}
struct pci_root_info __init *alloc_pci_root_info(int bus_min, int bus_max,
int node, int link)
{
struct pci_root_info *info;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return info;
sprintf(info->name, "PCI Bus #%02x", bus_min);
INIT_LIST_HEAD(&info->resources);
info->busn.name = info->name;
info->busn.start = bus_min;
info->busn.end = bus_max;
info->busn.flags = IORESOURCE_BUS;
info->node = node;
info->link = link;
list_add_tail(&info->list, &pci_root_infos);
return info;
}
void update_res(struct pci_root_info *info, resource_size_t start,
resource_size_t end, unsigned long flags, int merge)
{
struct resource *res;
struct pci_root_res *root_res;
if (start > end)
return;
if (start == RESOURCE_SIZE_MAX)
return;
if (!merge)
goto addit;
/* try to merge it with old one */
list_for_each_entry(root_res, &info->resources, list) {
resource_size_t final_start, final_end;
resource_size_t common_start, common_end;
res = &root_res->res;
if (res->flags != flags)
continue;
common_start = max(res->start, start);
common_end = min(res->end, end);
if (common_start > common_end + 1)
continue;
final_start = min(res->start, start);
final_end = max(res->end, end);
res->start = final_start;
res->end = final_end;
return;
}
addit:
/* need to add that */
root_res = kzalloc(sizeof(*root_res), GFP_KERNEL);
if (!root_res)
return;
res = &root_res->res;
res->name = info->name;
res->flags = flags;
res->start = start;
res->end = end;
list_add_tail(&root_res->list, &info->resources);
}
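/*
 * Worked example of the merge test above: an existing window
 * [0x1000, 0x1fff] and a new range [0x2000, 0x2fff] give
 * common_start == 0x2000 and common_end == 0x1fff. Because
 * "common_start > common_end + 1" is false, the ranges count as
 * touching and merge into [0x1000, 0x2fff]; only ranges separated by a
 * gap of at least one unit get a new entry.
 */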
| linux-master | arch/x86/pci/bus_numa.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Low-Level PCI Support for PC
*
* (c) 1999--2000 Martin Mares <[email protected]>
*/
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <asm/acpi.h>
#include <asm/segment.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/pci_x86.h>
#include <asm/setup.h>
#include <asm/irqdomain.h>
unsigned int pci_probe = PCI_PROBE_BIOS | PCI_PROBE_CONF1 | PCI_PROBE_CONF2 |
PCI_PROBE_MMCONF;
static int pci_bf_sort;
int pci_routeirq;
int noioapicquirk;
#ifdef CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS
int noioapicreroute = 0;
#else
int noioapicreroute = 1;
#endif
int pcibios_last_bus = -1;
unsigned long pirq_table_addr;
const struct pci_raw_ops *__read_mostly raw_pci_ops;
const struct pci_raw_ops *__read_mostly raw_pci_ext_ops;
int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
int reg, int len, u32 *val)
{
if (domain == 0 && reg < 256 && raw_pci_ops)
return raw_pci_ops->read(domain, bus, devfn, reg, len, val);
if (raw_pci_ext_ops)
return raw_pci_ext_ops->read(domain, bus, devfn, reg, len, val);
return -EINVAL;
}
int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn,
int reg, int len, u32 val)
{
if (domain == 0 && reg < 256 && raw_pci_ops)
return raw_pci_ops->write(domain, bus, devfn, reg, len, val);
if (raw_pci_ext_ops)
return raw_pci_ext_ops->write(domain, bus, devfn, reg, len, val);
return -EINVAL;
}
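/*
 * Dispatch rule illustrated: a read of the standard header (say, the
 * vendor ID at offset 0x00) in domain 0 is served by raw_pci_ops (the
 * legacy 0xCF8/0xCFC mechanism), while a read of an extended register
 * at, say, offset 0x100 can only be served by raw_pci_ext_ops
 * (ECAM/mmconfig), since the legacy mechanism addresses only 256 bytes
 * of config space per function.
 */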
static int pci_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value)
{
return raw_pci_read(pci_domain_nr(bus), bus->number,
devfn, where, size, value);
}
static int pci_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value)
{
return raw_pci_write(pci_domain_nr(bus), bus->number,
devfn, where, size, value);
}
struct pci_ops pci_root_ops = {
.read = pci_read,
.write = pci_write,
};
/*
* This interrupt-safe spinlock protects all accesses to PCI configuration
* space, except for the mmconfig (ECAM) based operations.
*/
DEFINE_RAW_SPINLOCK(pci_config_lock);
static int __init can_skip_ioresource_align(const struct dmi_system_id *d)
{
pci_probe |= PCI_CAN_SKIP_ISA_ALIGN;
printk(KERN_INFO "PCI: %s detected, can skip ISA alignment\n", d->ident);
return 0;
}
static const struct dmi_system_id can_skip_pciprobe_dmi_table[] __initconst = {
/*
* Systems where PCI IO resource ISA alignment can be skipped
* when the ISA enable bit in the bridge control is not set
*/
{
.callback = can_skip_ioresource_align,
.ident = "IBM System x3800",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
DMI_MATCH(DMI_PRODUCT_NAME, "x3800"),
},
},
{
.callback = can_skip_ioresource_align,
.ident = "IBM System x3850",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
DMI_MATCH(DMI_PRODUCT_NAME, "x3850"),
},
},
{
.callback = can_skip_ioresource_align,
.ident = "IBM System x3950",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
DMI_MATCH(DMI_PRODUCT_NAME, "x3950"),
},
},
{}
};
void __init dmi_check_skip_isa_align(void)
{
dmi_check_system(can_skip_pciprobe_dmi_table);
}
static void pcibios_fixup_device_resources(struct pci_dev *dev)
{
struct resource *rom_r = &dev->resource[PCI_ROM_RESOURCE];
struct resource *bar_r;
int bar;
if (pci_probe & PCI_NOASSIGN_BARS) {
/*
* If the BIOS did not assign the BAR, zero out the
* resource so the kernel doesn't attempt to assign
* it later on in pci_assign_unassigned_resources
*/
for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
bar_r = &dev->resource[bar];
if (bar_r->start == 0 && bar_r->end != 0) {
bar_r->flags = 0;
bar_r->end = 0;
}
}
}
if (pci_probe & PCI_NOASSIGN_ROMS) {
if (rom_r->parent)
return;
if (rom_r->start) {
/* we deal with BIOS assigned ROM later */
return;
}
rom_r->start = rom_r->end = rom_r->flags = 0;
}
}
/*
* Called after each bus is probed, but before its children
* are examined.
*/
void pcibios_fixup_bus(struct pci_bus *b)
{
struct pci_dev *dev;
pci_read_bridge_bases(b);
list_for_each_entry(dev, &b->devices, bus_list)
pcibios_fixup_device_resources(dev);
}
void pcibios_add_bus(struct pci_bus *bus)
{
acpi_pci_add_bus(bus);
}
void pcibios_remove_bus(struct pci_bus *bus)
{
acpi_pci_remove_bus(bus);
}
/*
* Only use DMI information to set this if nothing was passed
* on the kernel command line (which was parsed earlier).
*/
static int __init set_bf_sort(const struct dmi_system_id *d)
{
if (pci_bf_sort == pci_bf_sort_default) {
pci_bf_sort = pci_dmi_bf;
printk(KERN_INFO "PCI: %s detected, enabling pci=bfsort.\n", d->ident);
}
return 0;
}
static void __init read_dmi_type_b1(const struct dmi_header *dm,
void *private_data)
{
u8 *data = (u8 *)dm + 4;
if (dm->type != 0xB1)
return;
if ((((*(u32 *)data) >> 9) & 0x03) == 0x01)
set_bf_sort((const struct dmi_system_id *)private_data);
}
static int __init find_sort_method(const struct dmi_system_id *d)
{
dmi_walk(read_dmi_type_b1, (void *)d);
return 0;
}
/*
* Enable renumbering of PCI bus# ranges to reach all PCI busses (Cardbus)
*/
#ifdef __i386__
static int __init assign_all_busses(const struct dmi_system_id *d)
{
pci_probe |= PCI_ASSIGN_ALL_BUSSES;
printk(KERN_INFO "%s detected: enabling PCI bus# renumbering"
" (pci=assign-busses)\n", d->ident);
return 0;
}
#endif
static int __init set_scan_all(const struct dmi_system_id *d)
{
printk(KERN_INFO "PCI: %s detected, enabling pci=pcie_scan_all\n",
d->ident);
pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
return 0;
}
static const struct dmi_system_id pciprobe_dmi_table[] __initconst = {
#ifdef __i386__
/*
* Laptops which need pci=assign-busses to see Cardbus cards
*/
{
.callback = assign_all_busses,
.ident = "Samsung X20 Laptop",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Samsung Electronics"),
DMI_MATCH(DMI_PRODUCT_NAME, "SX20S"),
},
},
#endif /* __i386__ */
{
.callback = set_bf_sort,
.ident = "Dell PowerEdge 1950",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 1950"),
},
},
{
.callback = set_bf_sort,
.ident = "Dell PowerEdge 1955",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 1955"),
},
},
{
.callback = set_bf_sort,
.ident = "Dell PowerEdge 2900",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 2900"),
},
},
{
.callback = set_bf_sort,
.ident = "Dell PowerEdge 2950",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 2950"),
},
},
{
.callback = set_bf_sort,
.ident = "Dell PowerEdge R900",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R900"),
},
},
{
.callback = find_sort_method,
.ident = "Dell System",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
},
},
{
.callback = set_bf_sort,
.ident = "HP ProLiant BL20p G3",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL20p G3"),
},
},
{
.callback = set_bf_sort,
.ident = "HP ProLiant BL20p G4",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL20p G4"),
},
},
{
.callback = set_bf_sort,
.ident = "HP ProLiant BL30p G1",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL30p G1"),
},
},
{
.callback = set_bf_sort,
.ident = "HP ProLiant BL25p G1",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL25p G1"),
},
},
{
.callback = set_bf_sort,
.ident = "HP ProLiant BL35p G1",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL35p G1"),
},
},
{
.callback = set_bf_sort,
.ident = "HP ProLiant BL45p G1",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL45p G1"),
},
},
{
.callback = set_bf_sort,
.ident = "HP ProLiant BL45p G2",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL45p G2"),
},
},
{
.callback = set_bf_sort,
.ident = "HP ProLiant BL460c G1",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL460c G1"),
},
},
{
.callback = set_bf_sort,
.ident = "HP ProLiant BL465c G1",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL465c G1"),
},
},
{
.callback = set_bf_sort,
.ident = "HP ProLiant BL480c G1",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL480c G1"),
},
},
{
.callback = set_bf_sort,
.ident = "HP ProLiant BL685c G1",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL685c G1"),
},
},
{
.callback = set_bf_sort,
.ident = "HP ProLiant DL360",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL360"),
},
},
{
.callback = set_bf_sort,
.ident = "HP ProLiant DL380",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL380"),
},
},
#ifdef __i386__
{
.callback = assign_all_busses,
.ident = "Compaq EVO N800c",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
DMI_MATCH(DMI_PRODUCT_NAME, "EVO N800c"),
},
},
#endif
{
.callback = set_bf_sort,
.ident = "HP ProLiant DL385 G2",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL385 G2"),
},
},
{
.callback = set_bf_sort,
.ident = "HP ProLiant DL585 G2",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL585 G2"),
},
},
{
.callback = set_scan_all,
.ident = "Stratus/NEC ftServer",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Stratus"),
DMI_MATCH(DMI_PRODUCT_NAME, "ftServer"),
},
},
{
.callback = set_scan_all,
.ident = "Stratus/NEC ftServer",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "NEC"),
DMI_MATCH(DMI_PRODUCT_NAME, "Express5800/R32"),
},
},
{
.callback = set_scan_all,
.ident = "Stratus/NEC ftServer",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "NEC"),
DMI_MATCH(DMI_PRODUCT_NAME, "Express5800/R31"),
},
},
{}
};
void __init dmi_check_pciprobe(void)
{
dmi_check_system(pciprobe_dmi_table);
}
void pcibios_scan_root(int busnum)
{
struct pci_bus *bus;
struct pci_sysdata *sd;
LIST_HEAD(resources);
sd = kzalloc(sizeof(*sd), GFP_KERNEL);
if (!sd) {
printk(KERN_ERR "PCI: OOM, skipping PCI bus %02x\n", busnum);
return;
}
sd->node = x86_pci_root_bus_node(busnum);
x86_pci_root_bus_resources(busnum, &resources);
printk(KERN_DEBUG "PCI: Probing PCI hardware (bus %02x)\n", busnum);
bus = pci_scan_root_bus(NULL, busnum, &pci_root_ops, sd, &resources);
if (!bus) {
pci_free_resource_list(&resources);
kfree(sd);
return;
}
pci_bus_add_devices(bus);
}
void __init pcibios_set_cache_line_size(void)
{
struct cpuinfo_x86 *c = &boot_cpu_data;
/*
* Set PCI cacheline size to that of the CPU if the CPU has reported it.
 * (For older CPUs that don't support cpuid, we set it to 32 bytes.)
 * It's also good for 386/486s (which actually have 16),
 * as quite a few PCI devices do not support smaller values.
*/
if (c->x86_clflush_size > 0) {
pci_dfl_cache_line_size = c->x86_clflush_size >> 2;
printk(KERN_DEBUG "PCI: pci_cache_line_size set to %d bytes\n",
pci_dfl_cache_line_size << 2);
} else {
pci_dfl_cache_line_size = 32 >> 2;
printk(KERN_DEBUG "PCI: Unknown cacheline size. Setting to 32 bytes\n");
}
}
int __init pcibios_init(void)
{
if (!raw_pci_ops && !raw_pci_ext_ops) {
printk(KERN_WARNING "PCI: System does not support PCI\n");
return 0;
}
pcibios_set_cache_line_size();
pcibios_resource_survey();
if (pci_bf_sort >= pci_force_bf)
pci_sort_breadthfirst();
return 0;
}
char *__init pcibios_setup(char *str)
{
if (!strcmp(str, "off")) {
pci_probe = 0;
return NULL;
} else if (!strcmp(str, "bfsort")) {
pci_bf_sort = pci_force_bf;
return NULL;
} else if (!strcmp(str, "nobfsort")) {
pci_bf_sort = pci_force_nobf;
return NULL;
}
#ifdef CONFIG_PCI_BIOS
else if (!strcmp(str, "bios")) {
pci_probe = PCI_PROBE_BIOS;
return NULL;
} else if (!strcmp(str, "nobios")) {
pci_probe &= ~PCI_PROBE_BIOS;
return NULL;
} else if (!strcmp(str, "biosirq")) {
pci_probe |= PCI_BIOS_IRQ_SCAN;
return NULL;
} else if (!strncmp(str, "pirqaddr=", 9)) {
pirq_table_addr = simple_strtoul(str+9, NULL, 0);
return NULL;
}
#endif
#ifdef CONFIG_PCI_DIRECT
else if (!strcmp(str, "conf1")) {
pci_probe = PCI_PROBE_CONF1 | PCI_NO_CHECKS;
return NULL;
}
else if (!strcmp(str, "conf2")) {
pci_probe = PCI_PROBE_CONF2 | PCI_NO_CHECKS;
return NULL;
}
#endif
#ifdef CONFIG_PCI_MMCONFIG
else if (!strcmp(str, "nommconf")) {
pci_probe &= ~PCI_PROBE_MMCONF;
return NULL;
}
else if (!strcmp(str, "check_enable_amd_mmconf")) {
pci_probe |= PCI_CHECK_ENABLE_AMD_MMCONF;
return NULL;
}
#endif
else if (!strcmp(str, "noacpi")) {
acpi_noirq_set();
return NULL;
}
else if (!strcmp(str, "noearly")) {
pci_probe |= PCI_PROBE_NOEARLY;
return NULL;
}
else if (!strcmp(str, "usepirqmask")) {
pci_probe |= PCI_USE_PIRQ_MASK;
return NULL;
} else if (!strncmp(str, "irqmask=", 8)) {
pcibios_irq_mask = simple_strtol(str+8, NULL, 0);
return NULL;
} else if (!strncmp(str, "lastbus=", 8)) {
pcibios_last_bus = simple_strtol(str+8, NULL, 0);
return NULL;
} else if (!strcmp(str, "rom")) {
pci_probe |= PCI_ASSIGN_ROMS;
return NULL;
} else if (!strcmp(str, "norom")) {
pci_probe |= PCI_NOASSIGN_ROMS;
return NULL;
} else if (!strcmp(str, "nobar")) {
pci_probe |= PCI_NOASSIGN_BARS;
return NULL;
} else if (!strcmp(str, "assign-busses")) {
pci_probe |= PCI_ASSIGN_ALL_BUSSES;
return NULL;
} else if (!strcmp(str, "use_crs")) {
pci_probe |= PCI_USE__CRS;
return NULL;
} else if (!strcmp(str, "nocrs")) {
pci_probe |= PCI_ROOT_NO_CRS;
return NULL;
} else if (!strcmp(str, "use_e820")) {
pci_probe |= PCI_USE_E820;
add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
return NULL;
} else if (!strcmp(str, "no_e820")) {
pci_probe |= PCI_NO_E820;
add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
return NULL;
#ifdef CONFIG_PHYS_ADDR_T_64BIT
} else if (!strcmp(str, "big_root_window")) {
pci_probe |= PCI_BIG_ROOT_WINDOW;
return NULL;
#endif
} else if (!strcmp(str, "routeirq")) {
pci_routeirq = 1;
return NULL;
} else if (!strcmp(str, "skip_isa_align")) {
pci_probe |= PCI_CAN_SKIP_ISA_ALIGN;
return NULL;
} else if (!strcmp(str, "noioapicquirk")) {
noioapicquirk = 1;
return NULL;
} else if (!strcmp(str, "ioapicreroute")) {
if (noioapicreroute != -1)
noioapicreroute = 0;
return NULL;
} else if (!strcmp(str, "noioapicreroute")) {
if (noioapicreroute != -1)
noioapicreroute = 1;
return NULL;
}
return str;
}
unsigned int pcibios_assign_all_busses(void)
{
return (pci_probe & PCI_ASSIGN_ALL_BUSSES) ? 1 : 0;
}
static void set_dev_domain_options(struct pci_dev *pdev)
{
if (is_vmd(pdev->bus))
pdev->hotplug_user_indicators = 1;
}
int pcibios_device_add(struct pci_dev *dev)
{
struct pci_setup_rom *rom;
struct irq_domain *msidom;
struct setup_data *data;
u64 pa_data;
pa_data = boot_params.hdr.setup_data;
while (pa_data) {
data = memremap(pa_data, sizeof(*rom), MEMREMAP_WB);
if (!data)
return -ENOMEM;
if (data->type == SETUP_PCI) {
rom = (struct pci_setup_rom *)data;
if ((pci_domain_nr(dev->bus) == rom->segment) &&
(dev->bus->number == rom->bus) &&
(PCI_SLOT(dev->devfn) == rom->device) &&
(PCI_FUNC(dev->devfn) == rom->function) &&
(dev->vendor == rom->vendor) &&
(dev->device == rom->devid)) {
dev->rom = pa_data +
offsetof(struct pci_setup_rom, romdata);
dev->romlen = rom->pcilen;
}
}
pa_data = data->next;
memunmap(data);
}
set_dev_domain_options(dev);
/*
* Setup the initial MSI domain of the device. If the underlying
* bus has a PCI/MSI irqdomain associated use the bus domain,
* otherwise set the default domain. This ensures that special irq
* domains e.g. VMD are preserved. The default ensures initial
* operation if irq remapping is not active. If irq remapping is
* active it will overwrite the domain pointer when the device is
* associated to a remapping domain.
*/
msidom = dev_get_msi_domain(&dev->bus->dev);
if (!msidom)
msidom = x86_pci_msi_default_domain;
dev_set_msi_domain(&dev->dev, msidom);
return 0;
}
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
int err;
if ((err = pci_enable_resources(dev, mask)) < 0)
return err;
if (!pci_dev_msi_enabled(dev))
return pcibios_enable_irq(dev);
return 0;
}
void pcibios_disable_device(struct pci_dev *dev)
{
if (!pci_dev_msi_enabled(dev) && pcibios_disable_irq)
pcibios_disable_irq(dev);
}
#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
void pcibios_release_device(struct pci_dev *dev)
{
if (atomic_dec_return(&dev->enable_cnt) >= 0)
pcibios_disable_device(dev);
}
#endif
int pci_ext_cfg_avail(void)
{
if (raw_pci_ext_ops)
return 1;
else
return 0;
}
#if IS_ENABLED(CONFIG_VMD)
struct pci_dev *pci_real_dma_dev(struct pci_dev *dev)
{
if (is_vmd(dev->bus))
return to_pci_sysdata(dev->bus)->vmd_dev;
return dev;
}
#endif
| linux-master | arch/x86/pci/common.c |
// SPDX-License-Identifier: GPL-2.0
/*
* BIOS32 and PCI BIOS handling.
*/
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <asm/pci_x86.h>
#include <asm/e820/types.h>
#include <asm/pci-functions.h>
#include <asm/set_memory.h>
/* BIOS32 signature: "_32_" */
#define BIOS32_SIGNATURE (('_' << 0) + ('3' << 8) + ('2' << 16) + ('_' << 24))
/* PCI signature: "PCI " */
#define PCI_SIGNATURE (('P' << 0) + ('C' << 8) + ('I' << 16) + (' ' << 24))
/* PCI service signature: "$PCI" */
#define PCI_SERVICE (('$' << 0) + ('P' << 8) + ('C' << 16) + ('I' << 24))
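/*
 * The signatures above are the ASCII strings read as little-endian 32-bit
 * words, e.g. BIOS32_SIGNATURE ("_32_") compares equal to a u32 loaded
 * from the bytes '_', '3', '2', '_' in memory, which is 0x5f32335f on x86.
 */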
/* PCI BIOS hardware mechanism flags */
#define PCIBIOS_HW_TYPE1 0x01
#define PCIBIOS_HW_TYPE2 0x02
#define PCIBIOS_HW_TYPE1_SPEC 0x10
#define PCIBIOS_HW_TYPE2_SPEC 0x20
int pcibios_enabled;
/* According to the BIOS specification at:
* http://members.datafast.net.au/dft0802/specs/bios21.pdf, we could
* restrict the x zone to some pages and make it ro. But this may be
* broken on some BIOSes and complex to handle with static_protections.
* We could make the 0xe0000-0x100000 range rox, but this can break
* some ISA mapping.
*
* So we leave an rw and x hole when pcibios is used. This shouldn't
* happen on modern systems with mmconfig, and if you don't want it
* you could disable pcibios...
*/
static inline void set_bios_x(void)
{
pcibios_enabled = 1;
set_memory_x(PAGE_OFFSET + BIOS_BEGIN, (BIOS_END - BIOS_BEGIN) >> PAGE_SHIFT);
if (__supported_pte_mask & _PAGE_NX)
printk(KERN_INFO "PCI: PCI BIOS area is rw and x. Use pci=nobios if you want it NX.\n");
}
/*
* This is the standard structure used to identify the entry point
* to the BIOS32 Service Directory, as documented in
* Standard BIOS 32-bit Service Directory Proposal
* Revision 0.4 May 24, 1993
* Phoenix Technologies Ltd.
* Norwood, MA
* and the PCI BIOS specification.
*/
union bios32 {
struct {
unsigned long signature; /* _32_ */
unsigned long entry; /* 32 bit physical address */
unsigned char revision; /* Revision level, 0 */
unsigned char length; /* Length in paragraphs should be 01 */
unsigned char checksum; /* All bytes must add up to zero */
unsigned char reserved[5]; /* Must be zero */
} fields;
char chars[16];
};
/*
* Physical address of the service directory. I don't know if we're
* allowed to have more than one of these or not, so just in case
* we'll make pcibios_present() take a memory start parameter and store
* the array there.
*/
static struct {
unsigned long address;
unsigned short segment;
} bios32_indirect __initdata = { 0, __KERNEL_CS };
/*
* Returns the entry point for the given service, NULL on error
*/
static unsigned long __init bios32_service(unsigned long service)
{
unsigned char return_code; /* %al */
unsigned long address; /* %ebx */
unsigned long length; /* %ecx */
unsigned long entry; /* %edx */
unsigned long flags;
local_irq_save(flags);
__asm__("lcall *(%%edi); cld"
: "=a" (return_code),
"=b" (address),
"=c" (length),
"=d" (entry)
: "0" (service),
"1" (0),
"D" (&bios32_indirect));
local_irq_restore(flags);
switch (return_code) {
case 0:
return address + entry;
case 0x80: /* Not present */
printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
return 0;
default: /* Shouldn't happen */
printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
service, return_code);
return 0;
}
}
static struct {
unsigned long address;
unsigned short segment;
} pci_indirect __ro_after_init = {
.address = 0,
.segment = __KERNEL_CS,
};
static int pci_bios_present __ro_after_init;
static int __init check_pcibios(void)
{
u32 signature, eax, ebx, ecx;
u8 status, major_ver, minor_ver, hw_mech;
unsigned long flags, pcibios_entry;
if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
pci_indirect.address = pcibios_entry + PAGE_OFFSET;
local_irq_save(flags);
__asm__(
"lcall *(%%edi); cld\n\t"
"jc 1f\n\t"
"xor %%ah, %%ah\n"
"1:"
: "=d" (signature),
"=a" (eax),
"=b" (ebx),
"=c" (ecx)
: "1" (PCIBIOS_PCI_BIOS_PRESENT),
"D" (&pci_indirect)
: "memory");
local_irq_restore(flags);
status = (eax >> 8) & 0xff;
hw_mech = eax & 0xff;
major_ver = (ebx >> 8) & 0xff;
minor_ver = ebx & 0xff;
if (pcibios_last_bus < 0)
pcibios_last_bus = ecx & 0xff;
DBG("PCI: BIOS probe returned s=%02x hw=%02x ver=%02x.%02x l=%02x\n",
status, hw_mech, major_ver, minor_ver, pcibios_last_bus);
if (status || signature != PCI_SIGNATURE) {
printk(KERN_ERR "PCI: BIOS BUG #%x[%08x] found\n",
status, signature);
return 0;
}
printk(KERN_INFO "PCI: PCI BIOS revision %x.%02x entry at 0x%lx, last bus=%d\n",
major_ver, minor_ver, pcibios_entry, pcibios_last_bus);
#ifdef CONFIG_PCI_DIRECT
if (!(hw_mech & PCIBIOS_HW_TYPE1))
pci_probe &= ~PCI_PROBE_CONF1;
if (!(hw_mech & PCIBIOS_HW_TYPE2))
pci_probe &= ~PCI_PROBE_CONF2;
#endif
return 1;
}
return 0;
}
static int pci_bios_read(unsigned int seg, unsigned int bus,
unsigned int devfn, int reg, int len, u32 *value)
{
unsigned long result = 0;
unsigned long flags;
unsigned long bx = (bus << 8) | devfn;
u16 number = 0, mask = 0;
WARN_ON(seg);
if (!value || (bus > 255) || (devfn > 255) || (reg > 255))
return -EINVAL;
raw_spin_lock_irqsave(&pci_config_lock, flags);
switch (len) {
case 1:
number = PCIBIOS_READ_CONFIG_BYTE;
mask = 0xff;
break;
case 2:
number = PCIBIOS_READ_CONFIG_WORD;
mask = 0xffff;
break;
case 4:
number = PCIBIOS_READ_CONFIG_DWORD;
break;
}
__asm__("lcall *(%%esi); cld\n\t"
"jc 1f\n\t"
"xor %%ah, %%ah\n"
"1:"
: "=c" (*value),
"=a" (result)
: "1" (number),
"b" (bx),
"D" ((long)reg),
"S" (&pci_indirect));
/*
* Zero-extend the result beyond 8 or 16 bits, do not trust the
* BIOS having done it:
*/
if (mask)
*value &= mask;
raw_spin_unlock_irqrestore(&pci_config_lock, flags);
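/* The PCI BIOS returns its status code in %ah, i.e. bits 15:8 of %eax. */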
return (int)((result & 0xff00) >> 8);
}
static int pci_bios_write(unsigned int seg, unsigned int bus,
unsigned int devfn, int reg, int len, u32 value)
{
unsigned long result = 0;
unsigned long flags;
unsigned long bx = (bus << 8) | devfn;
u16 number = 0;
WARN_ON(seg);
if ((bus > 255) || (devfn > 255) || (reg > 255))
return -EINVAL;
raw_spin_lock_irqsave(&pci_config_lock, flags);
switch (len) {
case 1:
number = PCIBIOS_WRITE_CONFIG_BYTE;
break;
case 2:
number = PCIBIOS_WRITE_CONFIG_WORD;
break;
case 4:
number = PCIBIOS_WRITE_CONFIG_DWORD;
break;
}
__asm__("lcall *(%%esi); cld\n\t"
"jc 1f\n\t"
"xor %%ah, %%ah\n"
"1:"
: "=a" (result)
: "0" (number),
"c" (value),
"b" (bx),
"D" ((long)reg),
"S" (&pci_indirect));
raw_spin_unlock_irqrestore(&pci_config_lock, flags);
return (int)((result & 0xff00) >> 8);
}
/*
* Function table for BIOS32 access
*/
static const struct pci_raw_ops pci_bios_access = {
.read = pci_bios_read,
.write = pci_bios_write
};
/*
* Try to find PCI BIOS.
*/
static const struct pci_raw_ops *__init pci_find_bios(void)
{
union bios32 *check;
unsigned char sum;
int i, length;
/*
* Follow the standard procedure for locating the BIOS32 Service
* directory by scanning the permissible address range from
* 0xe0000 through 0xfffff for a valid BIOS32 structure.
*/
for (check = (union bios32 *) __va(0xe0000);
check <= (union bios32 *) __va(0xffff0);
++check) {
long sig;
if (get_kernel_nofault(sig, &check->fields.signature))
continue;
if (check->fields.signature != BIOS32_SIGNATURE)
continue;
length = check->fields.length * 16;
if (!length)
continue;
sum = 0;
for (i = 0; i < length ; ++i)
sum += check->chars[i];
if (sum != 0)
continue;
if (check->fields.revision != 0) {
printk("PCI: unsupported BIOS32 revision %d at 0x%p\n",
check->fields.revision, check);
continue;
}
DBG("PCI: BIOS32 Service Directory structure at 0x%p\n", check);
if (check->fields.entry >= 0x100000) {
printk("PCI: BIOS32 entry (0x%p) in high memory, "
"cannot use.\n", check);
return NULL;
} else {
unsigned long bios32_entry = check->fields.entry;
DBG("PCI: BIOS32 Service Directory entry at 0x%lx\n",
bios32_entry);
bios32_indirect.address = bios32_entry + PAGE_OFFSET;
set_bios_x();
if (check_pcibios())
return &pci_bios_access;
}
break; /* Hopefully there cannot be more than one BIOS32... */
}
return NULL;
}
/*
* BIOS Functions for IRQ Routing
*/
struct irq_routing_options {
u16 size;
struct irq_info *table;
u16 segment;
} __attribute__((packed));
struct irq_routing_table *pcibios_get_irq_routing_table(void)
{
struct irq_routing_options opt;
struct irq_routing_table *rt = NULL;
int ret, map;
unsigned long page;
if (!pci_bios_present)
return NULL;
page = __get_free_page(GFP_KERNEL);
if (!page)
return NULL;
opt.table = (struct irq_info *) page;
opt.size = PAGE_SIZE;
opt.segment = __KERNEL_DS;
DBG("PCI: Fetching IRQ routing table... ");
__asm__("push %%es\n\t"
"push %%ds\n\t"
"pop %%es\n\t"
"lcall *(%%esi); cld\n\t"
"pop %%es\n\t"
"jc 1f\n\t"
"xor %%ah, %%ah\n"
"1:"
: "=a" (ret),
"=b" (map),
"=m" (opt)
: "0" (PCIBIOS_GET_ROUTING_OPTIONS),
"1" (0),
"D" ((long) &opt),
"S" (&pci_indirect),
"m" (opt)
: "memory");
DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
if (ret & 0xff00)
printk(KERN_ERR "PCI: Error %02x when fetching IRQ routing table.\n", (ret >> 8) & 0xff);
else if (opt.size) {
rt = kmalloc(sizeof(struct irq_routing_table) + opt.size, GFP_KERNEL);
if (rt) {
memset(rt, 0, sizeof(struct irq_routing_table));
rt->size = opt.size + sizeof(struct irq_routing_table);
rt->exclusive_irqs = map;
memcpy(rt->slots, (void *) page, opt.size);
printk(KERN_INFO "PCI: Using BIOS Interrupt Routing Table\n");
}
}
free_page(page);
return rt;
}
EXPORT_SYMBOL(pcibios_get_irq_routing_table);
int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
{
int ret;
__asm__("lcall *(%%esi); cld\n\t"
"jc 1f\n\t"
"xor %%ah, %%ah\n"
"1:"
: "=a" (ret)
: "0" (PCIBIOS_SET_PCI_HW_INT),
"b" ((dev->bus->number << 8) | dev->devfn),
"c" ((irq << 8) | (pin + 10)),
"S" (&pci_indirect));
return !(ret & 0xff00);
}
EXPORT_SYMBOL(pcibios_set_irq_routing);
void __init pci_pcbios_init(void)
{
if ((pci_probe & PCI_PROBE_BIOS)
&& ((raw_pci_ops = pci_find_bios()))) {
pci_bios_present = 1;
}
}
| linux-master | arch/x86/pci/pcbios.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/range.h>
#include <asm/amd_nb.h>
#include <asm/pci_x86.h>
#include <asm/pci-direct.h>
#include "bus_numa.h"
#define AMD_NB_F0_NODE_ID 0x60
#define AMD_NB_F0_UNIT_ID 0x64
#define AMD_NB_F1_CONFIG_MAP_REG 0xe0
#define RANGE_NUM 16
#define AMD_NB_F1_CONFIG_MAP_RANGES 4
struct amd_hostbridge {
u32 bus;
u32 slot;
u32 device;
};
/*
* IMPORTANT NOTE:
* hb_probes[] and early_root_info_init() are in maintenance mode.
* They only support K8, Fam10h, Fam11h, and Fam15h_00h-0fh.
* Future processors will rely on information in ACPI.
*/
static struct amd_hostbridge hb_probes[] __initdata = {
{ 0, 0x18, 0x1100 }, /* K8 */
{ 0, 0x18, 0x1200 }, /* Family10h */
{ 0xff, 0, 0x1200 }, /* Family10h */
{ 0, 0x18, 0x1300 }, /* Family11h */
{ 0, 0x18, 0x1600 }, /* Family15h */
};
static struct pci_root_info __init *find_pci_root_info(int node, int link)
{
struct pci_root_info *info;
/* find the position */
list_for_each_entry(info, &pci_root_infos, list)
if (info->node == node && info->link == link)
return info;
return NULL;
}
static inline resource_size_t cap_resource(u64 val)
{
if (val > RESOURCE_SIZE_MAX)
return RESOURCE_SIZE_MAX;
return val;
}
/**
* early_root_info_init()
* called before pcibios_scan_root and pci_scan_bus
* fills the mp_bus_to_cpumask array according
* to the LDT Bus Number Registers found in the northbridge.
*/
static int __init early_root_info_init(void)
{
int i;
unsigned bus;
unsigned slot;
int node;
int link;
int def_node;
int def_link;
struct pci_root_info *info;
u32 reg;
u64 start;
u64 end;
struct range range[RANGE_NUM];
u64 val;
u32 address;
bool found;
struct resource fam10h_mmconf_res, *fam10h_mmconf;
u64 fam10h_mmconf_start;
u64 fam10h_mmconf_end;
if (!early_pci_allowed())
return -1;
found = false;
for (i = 0; i < ARRAY_SIZE(hb_probes); i++) {
u32 id;
u16 device;
u16 vendor;
bus = hb_probes[i].bus;
slot = hb_probes[i].slot;
id = read_pci_config(bus, slot, 0, PCI_VENDOR_ID);
vendor = id & 0xffff;
device = (id>>16) & 0xffff;
if (vendor != PCI_VENDOR_ID_AMD &&
vendor != PCI_VENDOR_ID_HYGON)
continue;
if (hb_probes[i].device == device) {
found = true;
break;
}
}
if (!found)
return 0;
/*
* We should learn topology and routing information from _PXM and
* _CRS methods in the ACPI namespace. We extract node numbers
* here to work around BIOSes that don't supply _PXM.
*/
for (i = 0; i < AMD_NB_F1_CONFIG_MAP_RANGES; i++) {
int min_bus;
int max_bus;
reg = read_pci_config(bus, slot, 1,
AMD_NB_F1_CONFIG_MAP_REG + (i << 2));
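/*
 * Field layout implied by the decoding below: bits [2:0] must read 3
 * (read and write enable) for a usable range, [6:4] node, [9:8] link,
 * [23:16] bus base, [31:24] bus limit.
 */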
/* Check if that register is enabled for bus range */
if ((reg & 7) != 3)
continue;
min_bus = (reg >> 16) & 0xff;
max_bus = (reg >> 24) & 0xff;
node = (reg >> 4) & 0x07;
link = (reg >> 8) & 0x03;
alloc_pci_root_info(min_bus, max_bus, node, link);
}
/*
* The following code extracts routing information for use on old
* systems where Linux doesn't automatically use host bridge _CRS
* methods (or when the user specifies "pci=nocrs").
*
* We only do this through Fam11h, because _CRS should be enough on
* newer systems.
*/
if (boot_cpu_data.x86 > 0x11)
return 0;
/* get the default node and link for left over res */
reg = read_pci_config(bus, slot, 0, AMD_NB_F0_NODE_ID);
def_node = (reg >> 8) & 0x07;
reg = read_pci_config(bus, slot, 0, AMD_NB_F0_UNIT_ID);
def_link = (reg >> 8) & 0x03;
memset(range, 0, sizeof(range));
add_range(range, RANGE_NUM, 0, 0, 0xffff + 1);
/* io port resource */
for (i = 0; i < 4; i++) {
reg = read_pci_config(bus, slot, 1, 0xc0 + (i << 3));
if (!(reg & 3))
continue;
start = reg & 0xfff000;
reg = read_pci_config(bus, slot, 1, 0xc4 + (i << 3));
node = reg & 0x07;
link = (reg >> 4) & 0x03;
end = (reg & 0xfff000) | 0xfff;
info = find_pci_root_info(node, link);
if (!info)
continue; /* not found */
printk(KERN_DEBUG "node %d link %d: io port [%llx, %llx]\n",
node, link, start, end);
/* the kernel only handles 16-bit I/O port addresses */
if (end > 0xffff)
end = 0xffff;
update_res(info, start, end, IORESOURCE_IO, 1);
subtract_range(range, RANGE_NUM, start, end + 1);
}
/* add left over io port range to def node/link, [0, 0xffff] */
/* find the position */
info = find_pci_root_info(def_node, def_link);
if (info) {
for (i = 0; i < RANGE_NUM; i++) {
if (!range[i].end)
continue;
update_res(info, range[i].start, range[i].end - 1,
IORESOURCE_IO, 1);
}
}
memset(range, 0, sizeof(range));
/* 0xfd00000000-0xffffffffff for HT */
end = cap_resource((0xfdULL<<32) - 1);
end++;
add_range(range, RANGE_NUM, 0, 0, end);
/* need to take out [0, TOM) for RAM */
address = MSR_K8_TOP_MEM1;
rdmsrl(address, val);
end = (val & 0xffffff800000ULL);
printk(KERN_INFO "TOM: %016llx aka %lldM\n", end, end>>20);
if (end < (1ULL<<32))
subtract_range(range, RANGE_NUM, 0, end);
/* get mmconfig */
fam10h_mmconf = amd_get_mmconfig_range(&fam10h_mmconf_res);
/* need to take out mmconf range */
if (fam10h_mmconf) {
printk(KERN_DEBUG "Fam 10h mmconf %pR\n", fam10h_mmconf);
fam10h_mmconf_start = fam10h_mmconf->start;
fam10h_mmconf_end = fam10h_mmconf->end;
subtract_range(range, RANGE_NUM, fam10h_mmconf_start,
fam10h_mmconf_end + 1);
} else {
fam10h_mmconf_start = 0;
fam10h_mmconf_end = 0;
}
/* mmio resource */
for (i = 0; i < 8; i++) {
reg = read_pci_config(bus, slot, 1, 0x80 + (i << 3));
if (!(reg & 3))
continue;
start = reg & 0xffffff00; /* bits 39:16 on 31:8 */
start <<= 8;
reg = read_pci_config(bus, slot, 1, 0x84 + (i << 3));
node = reg & 0x07;
link = (reg >> 4) & 0x03;
end = (reg & 0xffffff00);
end <<= 8;
end |= 0xffff;
info = find_pci_root_info(node, link);
if (!info)
continue;
printk(KERN_DEBUG "node %d link %d: mmio [%llx, %llx]",
node, link, start, end);
/*
* Some BIOS allocations overlap the fam10h mmconf range, so
* the start and end may need to be adjusted.
*/
if (fam10h_mmconf_end) {
int changed = 0;
u64 endx = 0;
if (start >= fam10h_mmconf_start &&
start <= fam10h_mmconf_end) {
start = fam10h_mmconf_end + 1;
changed = 1;
}
if (end >= fam10h_mmconf_start &&
end <= fam10h_mmconf_end) {
end = fam10h_mmconf_start - 1;
changed = 1;
}
if (start < fam10h_mmconf_start &&
end > fam10h_mmconf_end) {
/* we got a hole */
endx = fam10h_mmconf_start - 1;
update_res(info, start, endx, IORESOURCE_MEM, 0);
subtract_range(range, RANGE_NUM, start,
endx + 1);
printk(KERN_CONT " ==> [%llx, %llx]", start, endx);
start = fam10h_mmconf_end + 1;
changed = 1;
}
if (changed) {
if (start <= end) {
printk(KERN_CONT " %s [%llx, %llx]", endx ? "and" : "==>", start, end);
} else {
printk(KERN_CONT "%s\n", endx?"":" ==> none");
continue;
}
}
}
update_res(info, cap_resource(start), cap_resource(end),
IORESOURCE_MEM, 1);
subtract_range(range, RANGE_NUM, start, end + 1);
printk(KERN_CONT "\n");
}
/* need to take out [4G, TOM2) for RAM */
/* SYS_CFG */
address = MSR_AMD64_SYSCFG;
rdmsrl(address, val);
/* TOP_MEM2 is enabled? */
if (val & (1<<21)) {
/* TOP_MEM2 */
address = MSR_K8_TOP_MEM2;
rdmsrl(address, val);
end = (val & 0xffffff800000ULL);
printk(KERN_INFO "TOM2: %016llx aka %lldM\n", end, end>>20);
subtract_range(range, RANGE_NUM, 1ULL<<32, end);
}
/*
* Adding the left-over MMIO range to the default node/link is
* tricky, so just record the range from start_min up to 4G.
*/
info = find_pci_root_info(def_node, def_link);
if (info) {
for (i = 0; i < RANGE_NUM; i++) {
if (!range[i].end)
continue;
update_res(info, cap_resource(range[i].start),
cap_resource(range[i].end - 1),
IORESOURCE_MEM, 1);
}
}
list_for_each_entry(info, &pci_root_infos, list) {
int busnum;
struct pci_root_res *root_res;
busnum = info->busn.start;
printk(KERN_DEBUG "bus: %pR on node %x link %x\n",
&info->busn, info->node, info->link);
list_for_each_entry(root_res, &info->resources, list)
printk(KERN_DEBUG "bus: %02x %pR\n",
busnum, &root_res->res);
}
return 0;
}
#define ENABLE_CF8_EXT_CFG (1ULL << 46)
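/*
 * Bit 46 of MSR_AMD64_NB_CFG enables extended CF8/CFC configuration
 * space access. The same bit is visible in the high dword of the NB
 * config register at function 3, offset 0x8c, hence the >> 32 in
 * pci_enable_pci_io_ecs() below.
 */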
static int amd_bus_cpu_online(unsigned int cpu)
{
u64 reg;
rdmsrl(MSR_AMD64_NB_CFG, reg);
if (!(reg & ENABLE_CF8_EXT_CFG)) {
reg |= ENABLE_CF8_EXT_CFG;
wrmsrl(MSR_AMD64_NB_CFG, reg);
}
return 0;
}
static void __init pci_enable_pci_io_ecs(void)
{
#ifdef CONFIG_AMD_NB
unsigned int i, n;
for (n = i = 0; !n && amd_nb_bus_dev_ranges[i].dev_limit; ++i) {
u8 bus = amd_nb_bus_dev_ranges[i].bus;
u8 slot = amd_nb_bus_dev_ranges[i].dev_base;
u8 limit = amd_nb_bus_dev_ranges[i].dev_limit;
for (; slot < limit; ++slot) {
u32 val = read_pci_config(bus, slot, 3, 0);
if (!early_is_amd_nb(val))
continue;
val = read_pci_config(bus, slot, 3, 0x8c);
if (!(val & (ENABLE_CF8_EXT_CFG >> 32))) {
val |= ENABLE_CF8_EXT_CFG >> 32;
write_pci_config(bus, slot, 3, 0x8c, val);
}
++n;
}
}
#endif
}
static int __init pci_io_ecs_init(void)
{
int ret;
/* assume all CPUs from Fam10h onwards have IO ECS */
if (boot_cpu_data.x86 < 0x10)
return 0;
/* Try the PCI method first. */
if (early_pci_allowed())
pci_enable_pci_io_ecs();
ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "pci/amd_bus:online",
amd_bus_cpu_online, NULL);
WARN_ON(ret < 0);
pci_probe |= PCI_HAS_IO_ECS;
return 0;
}
static int __init amd_postcore_init(void)
{
if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
return 0;
early_root_info_init();
pci_io_ecs_init();
return 0;
}
postcore_initcall(amd_postcore_init);
| linux-master | arch/x86/pci/amd_bus.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Xen PCI - handle PCI (INTx) and MSI infrastructure calls for PV, HVM and
* initial domain support. We also handle the DSDT _PRT callbacks for GSIs
* used in HVM and initial domain mode (PV does not parse ACPI, so it has no
* concept of GSIs). Under PV we hook into the pcibios API for IRQs and
* 0xcf8 PCI configuration read/write.
*
* Author: Ryan Wilson <[email protected]>
* Konrad Rzeszutek Wilk <[email protected]>
* Stefano Stabellini <[email protected]>
*/
#include <linux/export.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <asm/io_apic.h>
#include <asm/pci_x86.h>
#include <asm/xen/hypervisor.h>
#include <xen/features.h>
#include <xen/events.h>
#include <xen/pci.h>
#include <asm/xen/pci.h>
#include <asm/xen/cpuid.h>
#include <asm/apic.h>
#include <asm/acpi.h>
#include <asm/i8259.h>
static int xen_pcifront_enable_irq(struct pci_dev *dev)
{
int rc;
int share = 1;
int pirq;
u8 gsi;
rc = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi);
if (rc < 0) {
dev_warn(&dev->dev, "Xen PCI: failed to read interrupt line: %d\n",
rc);
return rc;
}
/* In PV DomU the Xen PCI backend puts the PIRQ in the interrupt line. */
pirq = gsi;
if (gsi < nr_legacy_irqs())
share = 0;
rc = xen_bind_pirq_gsi_to_irq(gsi, pirq, share, "pcifront");
if (rc < 0) {
dev_warn(&dev->dev, "Xen PCI: failed to bind GSI%d (PIRQ%d) to IRQ: %d\n",
gsi, pirq, rc);
return rc;
}
dev->irq = rc;
dev_info(&dev->dev, "Xen PCI mapped GSI%d to IRQ%d\n", gsi, dev->irq);
return 0;
}
#ifdef CONFIG_ACPI
static int xen_register_pirq(u32 gsi, int triggering, bool set_pirq)
{
int rc, pirq = -1, irq;
struct physdev_map_pirq map_irq;
int shareable = 0;
char *name;
irq = xen_irq_from_gsi(gsi);
if (irq > 0)
return irq;
if (set_pirq)
pirq = gsi;
map_irq.domid = DOMID_SELF;
map_irq.type = MAP_PIRQ_TYPE_GSI;
map_irq.index = gsi;
map_irq.pirq = pirq;
rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
if (rc) {
printk(KERN_WARNING "xen map irq failed %d\n", rc);
return -1;
}
if (triggering == ACPI_EDGE_SENSITIVE) {
shareable = 0;
name = "ioapic-edge";
} else {
shareable = 1;
name = "ioapic-level";
}
irq = xen_bind_pirq_gsi_to_irq(gsi, map_irq.pirq, shareable, name);
if (irq < 0)
goto out;
printk(KERN_DEBUG "xen: --> pirq=%d -> irq=%d (gsi=%d)\n", map_irq.pirq, irq, gsi);
out:
return irq;
}
static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi,
int trigger, int polarity)
{
if (!xen_hvm_domain())
return -1;
return xen_register_pirq(gsi, trigger,
false /* no mapping of GSI to PIRQ */);
}
#ifdef CONFIG_XEN_PV_DOM0
static int xen_register_gsi(u32 gsi, int triggering, int polarity)
{
int rc, irq;
struct physdev_setup_gsi setup_gsi;
if (!xen_pv_domain())
return -1;
printk(KERN_DEBUG "xen: registering gsi %u triggering %d polarity %d\n",
gsi, triggering, polarity);
irq = xen_register_pirq(gsi, triggering, true);
setup_gsi.gsi = gsi;
setup_gsi.triggering = (triggering == ACPI_EDGE_SENSITIVE ? 0 : 1);
setup_gsi.polarity = (polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
rc = HYPERVISOR_physdev_op(PHYSDEVOP_setup_gsi, &setup_gsi);
if (rc == -EEXIST)
printk(KERN_INFO "Already setup the GSI :%d\n", gsi);
else if (rc) {
printk(KERN_ERR "Failed to setup GSI :%d, err_code:%d\n",
gsi, rc);
}
return irq;
}
static int acpi_register_gsi_xen(struct device *dev, u32 gsi,
int trigger, int polarity)
{
return xen_register_gsi(gsi, trigger, polarity);
}
#endif
#endif
#if defined(CONFIG_PCI_MSI)
#include <linux/msi.h>
struct xen_pci_frontend_ops *xen_pci_frontend;
EXPORT_SYMBOL_GPL(xen_pci_frontend);
struct xen_msi_ops {
int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
void (*teardown_msi_irqs)(struct pci_dev *dev);
};
static struct xen_msi_ops xen_msi_ops __ro_after_init;
static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
int irq, ret, i;
struct msi_desc *msidesc;
int *v;
if (type == PCI_CAP_ID_MSI && nvec > 1)
return 1;
v = kcalloc(max(1, nvec), sizeof(int), GFP_KERNEL);
if (!v)
return -ENOMEM;
if (type == PCI_CAP_ID_MSIX)
ret = xen_pci_frontend_enable_msix(dev, v, nvec);
else
ret = xen_pci_frontend_enable_msi(dev, v);
if (ret)
goto error;
i = 0;
msi_for_each_desc(msidesc, &dev->dev, MSI_DESC_NOTASSOCIATED) {
irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i],
(type == PCI_CAP_ID_MSI) ? nvec : 1,
(type == PCI_CAP_ID_MSIX) ?
"pcifront-msi-x" :
"pcifront-msi",
DOMID_SELF);
if (irq < 0) {
ret = irq;
goto free;
}
i++;
}
kfree(v);
return msi_device_populate_sysfs(&dev->dev);
error:
if (ret == -ENOSYS)
dev_err(&dev->dev, "Xen PCI frontend has not registered MSI/MSI-X support!\n");
else if (ret)
dev_err(&dev->dev, "Xen PCI frontend error: %d!\n", ret);
free:
kfree(v);
return ret;
}
static void xen_msi_compose_msg(struct pci_dev *pdev, unsigned int pirq,
struct msi_msg *msg)
{
/*
* We set vector == 0 to tell the hypervisor we don't care about
* it, but we want a pirq setup instead. We use the dest_id fields
* to pass the pirq that we want.
*/
memset(msg, 0, sizeof(*msg));
msg->address_hi = X86_MSI_BASE_ADDRESS_HIGH;
msg->arch_addr_hi.destid_8_31 = pirq >> 8;
msg->arch_addr_lo.destid_0_7 = pirq & 0xFF;
msg->arch_addr_lo.base_address = X86_MSI_BASE_ADDRESS_LOW;
msg->arch_data.delivery_mode = APIC_DELIVERY_MODE_EXTINT;
}
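/*
 * For example, pirq 0x123 ends up encoded with destid_8_31 = 0x1 and
 * destid_0_7 = 0x23, so the hypervisor can recover the full pirq from
 * the destination ID fields.
 */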
static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
int irq, pirq;
struct msi_desc *msidesc;
struct msi_msg msg;
if (type == PCI_CAP_ID_MSI && nvec > 1)
return 1;
msi_for_each_desc(msidesc, &dev->dev, MSI_DESC_NOTASSOCIATED) {
pirq = xen_allocate_pirq_msi(dev, msidesc);
if (pirq < 0) {
irq = -ENODEV;
goto error;
}
xen_msi_compose_msg(dev, pirq, &msg);
__pci_write_msi_msg(msidesc, &msg);
dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq);
irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq,
(type == PCI_CAP_ID_MSI) ? nvec : 1,
(type == PCI_CAP_ID_MSIX) ?
"msi-x" : "msi",
DOMID_SELF);
if (irq < 0)
goto error;
dev_dbg(&dev->dev,
"xen: msi --> pirq=%d --> irq=%d\n", pirq, irq);
}
return msi_device_populate_sysfs(&dev->dev);
error:
dev_err(&dev->dev, "Failed to create MSI%s! ret=%d!\n",
type == PCI_CAP_ID_MSI ? "" : "-X", irq);
return irq;
}
#ifdef CONFIG_XEN_PV_DOM0
static bool __read_mostly pci_seg_supported = true;
static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
int ret = 0;
struct msi_desc *msidesc;
msi_for_each_desc(msidesc, &dev->dev, MSI_DESC_NOTASSOCIATED) {
struct physdev_map_pirq map_irq;
domid_t domid;
domid = ret = xen_find_device_domain_owner(dev);
/* N.B. Casting int's -ENODEV to uint16_t results in 0xFFED,
* hence check ret value for < 0. */
if (ret < 0)
domid = DOMID_SELF;
memset(&map_irq, 0, sizeof(map_irq));
map_irq.domid = domid;
map_irq.type = MAP_PIRQ_TYPE_MSI_SEG;
map_irq.index = -1;
map_irq.pirq = -1;
map_irq.bus = dev->bus->number |
(pci_domain_nr(dev->bus) << 16);
map_irq.devfn = dev->devfn;
if (type == PCI_CAP_ID_MSI && nvec > 1) {
map_irq.type = MAP_PIRQ_TYPE_MULTI_MSI;
map_irq.entry_nr = nvec;
} else if (type == PCI_CAP_ID_MSIX) {
int pos;
unsigned long flags;
u32 table_offset, bir;
pos = dev->msix_cap;
pci_read_config_dword(dev, pos + PCI_MSIX_TABLE,
&table_offset);
bir = (u8)(table_offset & PCI_MSIX_TABLE_BIR);
flags = pci_resource_flags(dev, bir);
if (!flags || (flags & IORESOURCE_UNSET))
return -EINVAL;
map_irq.table_base = pci_resource_start(dev, bir);
map_irq.entry_nr = msidesc->msi_index;
}
ret = -EINVAL;
if (pci_seg_supported)
ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq,
&map_irq);
if (type == PCI_CAP_ID_MSI && nvec > 1 && ret) {
/*
* If MAP_PIRQ_TYPE_MULTI_MSI is not available
* there's nothing else we can do in this case.
* Just set ret > 0 so driver can retry with
* single MSI.
*/
ret = 1;
goto out;
}
if (ret == -EINVAL && !pci_domain_nr(dev->bus)) {
map_irq.type = MAP_PIRQ_TYPE_MSI;
map_irq.index = -1;
map_irq.pirq = -1;
map_irq.bus = dev->bus->number;
ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq,
&map_irq);
if (ret != -EINVAL)
pci_seg_supported = false;
}
if (ret) {
dev_warn(&dev->dev, "xen map irq failed %d for %d domain\n",
ret, domid);
goto out;
}
ret = xen_bind_pirq_msi_to_irq(dev, msidesc, map_irq.pirq,
(type == PCI_CAP_ID_MSI) ? nvec : 1,
(type == PCI_CAP_ID_MSIX) ? "msi-x" : "msi",
domid);
if (ret < 0)
goto out;
}
ret = msi_device_populate_sysfs(&dev->dev);
out:
return ret;
}
bool xen_initdom_restore_msi(struct pci_dev *dev)
{
int ret = 0;
if (!xen_initial_domain())
return true;
if (pci_seg_supported) {
struct physdev_pci_device restore_ext;
restore_ext.seg = pci_domain_nr(dev->bus);
restore_ext.bus = dev->bus->number;
restore_ext.devfn = dev->devfn;
ret = HYPERVISOR_physdev_op(PHYSDEVOP_restore_msi_ext,
&restore_ext);
if (ret == -ENOSYS)
pci_seg_supported = false;
WARN(ret && ret != -ENOSYS, "restore_msi_ext -> %d\n", ret);
}
if (!pci_seg_supported) {
struct physdev_restore_msi restore;
restore.bus = dev->bus->number;
restore.devfn = dev->devfn;
ret = HYPERVISOR_physdev_op(PHYSDEVOP_restore_msi, &restore);
WARN(ret && ret != -ENOSYS, "restore_msi -> %d\n", ret);
}
return false;
}
#else /* CONFIG_XEN_PV_DOM0 */
#define xen_initdom_setup_msi_irqs NULL
#endif /* !CONFIG_XEN_PV_DOM0 */
static void xen_teardown_msi_irqs(struct pci_dev *dev)
{
struct msi_desc *msidesc;
int i;
msi_for_each_desc(msidesc, &dev->dev, MSI_DESC_ASSOCIATED) {
for (i = 0; i < msidesc->nvec_used; i++)
xen_destroy_irq(msidesc->irq + i);
msidesc->irq = 0;
}
msi_device_destroy_sysfs(&dev->dev);
}
static void xen_pv_teardown_msi_irqs(struct pci_dev *dev)
{
if (dev->msix_enabled)
xen_pci_frontend_disable_msix(dev);
else
xen_pci_frontend_disable_msi(dev);
xen_teardown_msi_irqs(dev);
}
static int xen_msi_domain_alloc_irqs(struct irq_domain *domain,
struct device *dev, int nvec)
{
int type;
if (WARN_ON_ONCE(!dev_is_pci(dev)))
return -EINVAL;
type = to_pci_dev(dev)->msix_enabled ? PCI_CAP_ID_MSIX : PCI_CAP_ID_MSI;
return xen_msi_ops.setup_msi_irqs(to_pci_dev(dev), nvec, type);
}
static void xen_msi_domain_free_irqs(struct irq_domain *domain,
struct device *dev)
{
if (WARN_ON_ONCE(!dev_is_pci(dev)))
return;
xen_msi_ops.teardown_msi_irqs(to_pci_dev(dev));
}
static struct msi_domain_ops xen_pci_msi_domain_ops = {
.domain_alloc_irqs = xen_msi_domain_alloc_irqs,
.domain_free_irqs = xen_msi_domain_free_irqs,
};
static struct msi_domain_info xen_pci_msi_domain_info = {
.flags = MSI_FLAG_PCI_MSIX | MSI_FLAG_FREE_MSI_DESCS | MSI_FLAG_DEV_SYSFS,
.ops = &xen_pci_msi_domain_ops,
};
/*
* This irq domain is a blatant violation of the irq domain design, but
* disentangling XEN into real irq domains is not a job for mere mortals with
* limited XENology. But it's the least dangerous way for a mere mortal to
* get rid of the arch_*_msi_irqs() hackery in order to store the irq
* domain pointer in struct device. This irq domain wrappery allows us to do
* that without breaking XEN terminally.
*/
static __init struct irq_domain *xen_create_pci_msi_domain(void)
{
struct irq_domain *d = NULL;
struct fwnode_handle *fn;
fn = irq_domain_alloc_named_fwnode("XEN-MSI");
if (fn)
d = msi_create_irq_domain(fn, &xen_pci_msi_domain_info, NULL);
/* FIXME: No idea how to survive if this fails */
BUG_ON(!d);
return d;
}
static __init void xen_setup_pci_msi(void)
{
if (xen_pv_domain()) {
if (xen_initial_domain())
xen_msi_ops.setup_msi_irqs = xen_initdom_setup_msi_irqs;
else
xen_msi_ops.setup_msi_irqs = xen_setup_msi_irqs;
xen_msi_ops.teardown_msi_irqs = xen_pv_teardown_msi_irqs;
} else if (xen_hvm_domain()) {
xen_msi_ops.setup_msi_irqs = xen_hvm_setup_msi_irqs;
xen_msi_ops.teardown_msi_irqs = xen_teardown_msi_irqs;
} else {
WARN_ON_ONCE(1);
return;
}
/*
* Override the PCI/MSI irq domain init function. No point
* in allocating the native domain when it would never be used.
*/
x86_init.irqs.create_pci_msi_domain = xen_create_pci_msi_domain;
/*
* With XEN PIRQ/Eventchannels in use PCI/MSI[-X] masking is solely
* controlled by the hypervisor.
*/
pci_msi_ignore_mask = 1;
}
#else /* CONFIG_PCI_MSI */
static inline void xen_setup_pci_msi(void) { }
#endif /* CONFIG_PCI_MSI */
int __init pci_xen_init(void)
{
if (!xen_pv_domain() || xen_initial_domain())
return -ENODEV;
printk(KERN_INFO "PCI: setting up Xen PCI frontend stub\n");
pcibios_set_cache_line_size();
pcibios_enable_irq = xen_pcifront_enable_irq;
pcibios_disable_irq = NULL;
/* Keep ACPI out of the picture */
acpi_noirq_set();
xen_setup_pci_msi();
return 0;
}
#ifdef CONFIG_PCI_MSI
static void __init xen_hvm_msi_init(void)
{
if (!apic_is_disabled) {
/*
* If hardware supports (x2)APIC virtualization (as indicated
* by hypervisor's leaf 4) then we don't need to use pirqs/
* event channels for MSI handling and instead use regular
* APIC processing
*/
uint32_t eax = cpuid_eax(xen_cpuid_base() + 4);
if (((eax & XEN_HVM_CPUID_X2APIC_VIRT) && x2apic_mode) ||
((eax & XEN_HVM_CPUID_APIC_ACCESS_VIRT) && boot_cpu_has(X86_FEATURE_APIC)))
return;
}
xen_setup_pci_msi();
}
#endif
int __init pci_xen_hvm_init(void)
{
if (!xen_have_vector_callback || !xen_feature(XENFEAT_hvm_pirqs))
return 0;
#ifdef CONFIG_ACPI
/*
* We don't want to change the actual ACPI delivery model,
* just how GSIs get registered.
*/
__acpi_register_gsi = acpi_register_gsi_xen_hvm;
__acpi_unregister_gsi = NULL;
#endif
#ifdef CONFIG_PCI_MSI
/*
* We need to wait until after x2apic is initialized
* before we can set MSI IRQ ops.
*/
x86_platform.apic_post_init = xen_hvm_msi_init;
#endif
return 0;
}
#ifdef CONFIG_XEN_PV_DOM0
int __init pci_xen_initial_domain(void)
{
int irq;
xen_setup_pci_msi();
__acpi_register_gsi = acpi_register_gsi_xen;
__acpi_unregister_gsi = NULL;
/*
* Pre-allocate the legacy IRQs. Use NR_IRQS_LEGACY here
* because we don't have a PIC and thus nr_legacy_irqs() is zero.
*/
for (irq = 0; irq < NR_IRQS_LEGACY; irq++) {
int trigger, polarity;
if (acpi_get_override_irq(irq, &trigger, &polarity) == -1)
continue;
xen_register_pirq(irq,
trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE,
true /* Map GSI to PIRQ */);
}
if (!nr_ioapics) {
for (irq = 0; irq < nr_legacy_irqs(); irq++)
xen_bind_pirq_gsi_to_irq(irq, irq, 0, "xt-pic");
}
return 0;
}
#endif
| linux-master | arch/x86/pci/xen.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Low-Level PCI Support for PC -- Routing of Interrupts
*
* (c) 1999--2000 Martin Mares <[email protected]>
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dmi.h>
#include <linux/io.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <asm/io_apic.h>
#include <linux/irq.h>
#include <linux/acpi.h>
#include <asm/i8259.h>
#include <asm/pc-conf-reg.h>
#include <asm/pci_x86.h>
#define PIRQ_SIGNATURE (('$' << 0) + ('P' << 8) + ('I' << 16) + ('R' << 24))
#define PIRQ_VERSION 0x0100
#define IRT_SIGNATURE (('$' << 0) + ('I' << 8) + ('R' << 16) + ('T' << 24))
static int broken_hp_bios_irq9;
static int acer_tm360_irqrouting;
static struct irq_routing_table *pirq_table;
static int pirq_enable_irq(struct pci_dev *dev);
static void pirq_disable_irq(struct pci_dev *dev);
/*
* Never use: 0, 1, 2 (timer, keyboard, and cascade)
* Avoid using: 13, 14 and 15 (FP error and IDE).
* Penalize: 3, 4, 6, 7, 12 (known ISA uses: serial, floppy, parallel and mouse)
*/
unsigned int pcibios_irq_mask = 0xfff8;
static int pirq_penalty[16] = {
1000000, 1000000, 1000000, 1000, 1000, 0, 1000, 1000,
0, 0, 0, 0, 1000, 100000, 100000, 100000
};
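/*
 * pirq_penalty[] is indexed by ISA IRQ number; the higher the value,
 * the less attractive the IRQ is when picking a route, matching the
 * never/avoid/penalize classes listed above.
 */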
struct irq_router {
char *name;
u16 vendor, device;
int (*get)(struct pci_dev *router, struct pci_dev *dev, int pirq);
int (*set)(struct pci_dev *router, struct pci_dev *dev, int pirq,
int new);
int (*lvl)(struct pci_dev *router, struct pci_dev *dev, int pirq,
int irq);
};
struct irq_router_handler {
u16 vendor;
int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
};
int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
void (*pcibios_disable_irq)(struct pci_dev *dev) = pirq_disable_irq;
/*
* Check passed address for the PCI IRQ Routing Table signature
* and perform checksum verification.
*/
static inline struct irq_routing_table *pirq_check_routing_table(u8 *addr,
u8 *limit)
{
struct irq_routing_table *rt;
int i;
u8 sum;
rt = (struct irq_routing_table *)addr;
if (rt->signature != PIRQ_SIGNATURE ||
rt->version != PIRQ_VERSION ||
rt->size % 16 ||
rt->size < sizeof(struct irq_routing_table) ||
(limit && rt->size > limit - addr))
return NULL;
sum = 0;
for (i = 0; i < rt->size; i++)
sum += addr[i];
if (!sum) {
DBG(KERN_DEBUG "PCI: Interrupt Routing Table found at 0x%lx\n",
__pa(rt));
return rt;
}
return NULL;
}
/*
* Handle the $IRT PCI IRQ Routing Table format used by AMI for its BCP
* (BIOS Configuration Program) external tool meant for tweaking BIOS
* structures without the need to rebuild it from sources. The $IRT
* format has been invented by AMI before Microsoft has come up with its
* $PIR format and a $IRT table is therefore there in some systems that
* lack a $PIR table.
*
* It uses the same PCI BIOS 2.1 format for interrupt routing entries
* themselves but has a different simpler header prepended instead,
* occupying 8 bytes, where a `$IRT' signature is followed by one byte
* specifying the total number of interrupt routing entries allocated in
* the table, then one byte specifying the actual number of entries used
* (which the BCP tool can take advantage of when modifying the table),
* and finally a 16-bit word giving the IRQs devoted exclusively to PCI.
* Unlike with the $PIR table there is no alignment guarantee.
*
* Given the similarity of the two formats the $IRT one is trivial to
* convert to the $PIR one, which we do here, except that obviously we
* have no information as to the router device to use, but we can handle
* it by matching PCI device IDs actually seen on the bus against ones
* that our individual routers recognise.
*
* Reportedly there is another $IRT table format where a 16-bit word
* follows the header instead that points to interrupt routing entries
* in a $PIR table provided elsewhere. In that case this code will not
* be reached though as the $PIR table will have been chosen instead.
*/
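/*
 * $IRT header layout as described above (no alignment guarantee):
 *
 *	offset 0:	"$IRT" signature (4 bytes)
 *	offset 4:	number of entries allocated (1 byte)
 *	offset 5:	number of entries used (1 byte)
 *	offset 6:	IRQs exclusively devoted to PCI (2 bytes)
 *	offset 8:	PCI BIOS 2.1 interrupt routing entries
 */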
static inline struct irq_routing_table *pirq_convert_irt_table(u8 *addr,
u8 *limit)
{
struct irt_routing_table *ir;
struct irq_routing_table *rt;
u16 size;
u8 sum;
int i;
ir = (struct irt_routing_table *)addr;
if (ir->signature != IRT_SIGNATURE || !ir->used || ir->size < ir->used)
return NULL;
size = struct_size(ir, slots, ir->used);
if (size > limit - addr)
return NULL;
DBG(KERN_DEBUG "PCI: $IRT Interrupt Routing Table found at 0x%lx\n",
__pa(ir));
size = struct_size(rt, slots, ir->used);
rt = kzalloc(size, GFP_KERNEL);
if (!rt)
return NULL;
rt->signature = PIRQ_SIGNATURE;
rt->version = PIRQ_VERSION;
rt->size = size;
rt->exclusive_irqs = ir->exclusive_irqs;
for (i = 0; i < ir->used; i++)
rt->slots[i] = ir->slots[i];
addr = (u8 *)rt;
sum = 0;
for (i = 0; i < size; i++)
sum += addr[i];
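/*
 * Store the two's complement of the byte sum so that the bytes of the
 * generated $PIR table sum to zero, as the checksum rule in
 * pirq_check_routing_table() expects.
 */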
rt->checksum = -sum;
return rt;
}
/*
* Search 0xf0000 -- 0xfffff for the PCI IRQ Routing Table.
*/
static struct irq_routing_table * __init pirq_find_routing_table(void)
{
u8 * const bios_start = (u8 *)__va(0xf0000);
u8 * const bios_end = (u8 *)__va(0x100000);
u8 *addr;
struct irq_routing_table *rt;
if (pirq_table_addr) {
rt = pirq_check_routing_table((u8 *)__va(pirq_table_addr),
NULL);
if (rt)
return rt;
printk(KERN_WARNING "PCI: PIRQ table NOT found at pirqaddr\n");
}
for (addr = bios_start;
addr < bios_end - sizeof(struct irq_routing_table);
addr += 16) {
rt = pirq_check_routing_table(addr, bios_end);
if (rt)
return rt;
}
for (addr = bios_start;
addr < bios_end - sizeof(struct irt_routing_table);
addr++) {
rt = pirq_convert_irt_table(addr, bios_end);
if (rt)
return rt;
}
return NULL;
}
/*
* If we have an IRQ routing table, use it to search for peer host
* bridges. It's a gross hack, but since there are no other known
* ways to get a list of buses, we have to go this way.
*/
static void __init pirq_peer_trick(void)
{
struct irq_routing_table *rt = pirq_table;
u8 busmap[256];
int i;
struct irq_info *e;
memset(busmap, 0, sizeof(busmap));
for (i = 0; i < (rt->size - sizeof(struct irq_routing_table)) / sizeof(struct irq_info); i++) {
e = &rt->slots[i];
#ifdef DEBUG
{
int j;
DBG(KERN_DEBUG "%02x:%02x.%x slot=%02x",
e->bus, e->devfn / 8, e->devfn % 8, e->slot);
for (j = 0; j < 4; j++)
DBG(" %d:%02x/%04x", j, e->irq[j].link, e->irq[j].bitmap);
DBG("\n");
}
#endif
busmap[e->bus] = 1;
}
for (i = 1; i < 256; i++) {
if (!busmap[i] || pci_find_bus(0, i))
continue;
pcibios_scan_root(i);
}
pcibios_last_bus = -1;
}
/*
* Code for querying and setting of IRQ routes on various interrupt routers.
* PIC Edge/Level Control Registers (ELCR) 0x4d0 & 0x4d1.
*/
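/*
 * For example, for IRQ 9 the routine below uses port PIC_ELCR1 + 1
 * (the second ELCR register) and mask 1 << 1, setting bit 1 there to
 * mark the line as level-triggered.
 */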
void elcr_set_level_irq(unsigned int irq)
{
unsigned char mask = 1 << (irq & 7);
unsigned int port = PIC_ELCR1 + (irq >> 3);
unsigned char val;
static u16 elcr_irq_mask;
if (irq >= 16 || (1 << irq) & elcr_irq_mask)
return;
elcr_irq_mask |= (1 << irq);
printk(KERN_DEBUG "PCI: setting IRQ %u as level-triggered\n", irq);
val = inb(port);
if (!(val & mask)) {
DBG(KERN_DEBUG " -> edge");
outb(val | mask, port);
}
}
/*
* PIRQ routing for the M1487 ISA Bus Controller (IBC) ASIC used
* with the ALi FinALi 486 chipset. The IBC is not decoded in the
* PCI configuration space, so we identify it by the accompanying
* M1489 Cache-Memory PCI Controller (CMP) ASIC.
*
* There are four 4-bit mappings provided, spread across two PCI
* INTx Routing Table Mapping Registers, available in the port I/O
* space accessible indirectly via the index/data register pair at
* 0x22/0x23, located at indices 0x42 and 0x43 for the INT1/INT2
* and INT3/INT4 lines respectively. The INT1/INT3 and INT2/INT4
* lines are mapped in the low and the high 4-bit nibble of the
* corresponding register as follows:
*
* 0000 : Disabled
* 0001 : IRQ9
* 0010 : IRQ3
* 0011 : IRQ10
* 0100 : IRQ4
* 0101 : IRQ5
* 0110 : IRQ7
* 0111 : IRQ6
* 1000 : Reserved
* 1001 : IRQ11
* 1010 : Reserved
* 1011 : IRQ12
* 1100 : Reserved
* 1101 : IRQ14
* 1110 : Reserved
* 1111 : IRQ15
*
* In addition to the usual ELCR register pair there is a separate
* PCI INTx Sensitivity Register at index 0x44 in the same port I/O
* space, whose bits 3:0 select the trigger mode for INT[4:1] lines
* respectively. Any bit set to 1 causes interrupts coming on the
* corresponding line to be passed to ISA as edge-triggered and
* otherwise they are passed as level-triggered. Manufacturer's
* documentation says this register has to be set consistently with
* the relevant ELCR register.
*
* Accesses to the port I/O space concerned here need to be unlocked
* by writing the value of 0xc5 to the Lock Register at index 0x03
* beforehand. Any other value written to said register prevents
* further accesses from reaching the register file, except for the
* Lock Register being written with 0xc5 again.
*
* References:
*
* "M1489/M1487: 486 PCI Chip Set", Version 1.2, Acer Laboratories
* Inc., July 1997
*/
#define PC_CONF_FINALI_LOCK 0x03u
#define PC_CONF_FINALI_PCI_INTX_RT1 0x42u
#define PC_CONF_FINALI_PCI_INTX_RT2 0x43u
#define PC_CONF_FINALI_PCI_INTX_SENS 0x44u
#define PC_CONF_FINALI_LOCK_KEY 0xc5u
static u8 read_pc_conf_nybble(u8 base, u8 index)
{
u8 reg = base + (index >> 1);
u8 x;
x = pc_conf_get(reg);
return index & 1 ? x >> 4 : x & 0xf;
}
static void write_pc_conf_nybble(u8 base, u8 index, u8 val)
{
u8 reg = base + (index >> 1);
u8 x;
x = pc_conf_get(reg);
x = index & 1 ? (x & 0x0f) | (val << 4) : (x & 0xf0) | val;
pc_conf_set(reg, x);
}
/*
* FinALi pirq rules are as follows:
*
* - bit 0 selects between INTx Routing Table Mapping Registers,
*
* - bit 3 selects the nibble within the INTx Routing Table Mapping Register,
*
* - bits 7:4 map to bits 3:0 of the PCI INTx Sensitivity Register.
*/
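/*
 * For example, pirq 0x09 (bit 0 set, bit 3 set) selects index
 * ((pirq & 1) << 1 | (pirq & 8) >> 3) == 3, i.e. the high nibble of
 * PC_CONF_FINALI_PCI_INTX_RT2 in the helpers below.
 */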
static int pirq_finali_get(struct pci_dev *router, struct pci_dev *dev,
int pirq)
{
static const u8 irqmap[16] = {
0, 9, 3, 10, 4, 5, 7, 6, 0, 11, 0, 12, 0, 14, 0, 15
};
unsigned long flags;
u8 index;
u8 x;
index = (pirq & 1) << 1 | (pirq & 8) >> 3;
raw_spin_lock_irqsave(&pc_conf_lock, flags);
pc_conf_set(PC_CONF_FINALI_LOCK, PC_CONF_FINALI_LOCK_KEY);
x = irqmap[read_pc_conf_nybble(PC_CONF_FINALI_PCI_INTX_RT1, index)];
pc_conf_set(PC_CONF_FINALI_LOCK, 0);
raw_spin_unlock_irqrestore(&pc_conf_lock, flags);
return x;
}
static int pirq_finali_set(struct pci_dev *router, struct pci_dev *dev,
int pirq, int irq)
{
static const u8 irqmap[16] = {
0, 0, 0, 2, 4, 5, 7, 6, 0, 1, 3, 9, 11, 0, 13, 15
};
u8 val = irqmap[irq];
unsigned long flags;
u8 index;
if (!val)
return 0;
index = (pirq & 1) << 1 | (pirq & 8) >> 3;
raw_spin_lock_irqsave(&pc_conf_lock, flags);
pc_conf_set(PC_CONF_FINALI_LOCK, PC_CONF_FINALI_LOCK_KEY);
write_pc_conf_nybble(PC_CONF_FINALI_PCI_INTX_RT1, index, val);
pc_conf_set(PC_CONF_FINALI_LOCK, 0);
raw_spin_unlock_irqrestore(&pc_conf_lock, flags);
return 1;
}
static int pirq_finali_lvl(struct pci_dev *router, struct pci_dev *dev,
int pirq, int irq)
{
u8 mask = ~((pirq & 0xf0u) >> 4);
unsigned long flags;
u8 trig;
elcr_set_level_irq(irq);
raw_spin_lock_irqsave(&pc_conf_lock, flags);
pc_conf_set(PC_CONF_FINALI_LOCK, PC_CONF_FINALI_LOCK_KEY);
trig = pc_conf_get(PC_CONF_FINALI_PCI_INTX_SENS);
trig &= mask;
pc_conf_set(PC_CONF_FINALI_PCI_INTX_SENS, trig);
pc_conf_set(PC_CONF_FINALI_LOCK, 0);
raw_spin_unlock_irqrestore(&pc_conf_lock, flags);
return 1;
}
/*
* Common IRQ routing practice: nibbles in config space,
* offset by some magic constant.
*/
static unsigned int read_config_nybble(struct pci_dev *router, unsigned offset, unsigned nr)
{
u8 x;
unsigned reg = offset + (nr >> 1);
pci_read_config_byte(router, reg, &x);
return (nr & 1) ? (x >> 4) : (x & 0xf);
}
static void write_config_nybble(struct pci_dev *router, unsigned offset,
unsigned nr, unsigned int val)
{
u8 x;
unsigned reg = offset + (nr >> 1);
pci_read_config_byte(router, reg, &x);
x = (nr & 1) ? ((x & 0x0f) | (val << 4)) : ((x & 0xf0) | val);
pci_write_config_byte(router, reg, x);
}
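/*
 * For example, read_config_nybble(router, 0x55, 5) reads config byte
 * 0x55 + (5 >> 1) = 0x57 and returns its high nibble, since nr is odd.
 */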
/*
* ALI pirq entries are damn ugly, and completely undocumented.
* This has been figured out from pirq tables, and it's not a pretty
* picture.
*/
static int pirq_ali_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
{
static const unsigned char irqmap[16] = { 0, 9, 3, 10, 4, 5, 7, 6, 1, 11, 0, 12, 0, 14, 0, 15 };
WARN_ON_ONCE(pirq > 16);
return irqmap[read_config_nybble(router, 0x48, pirq-1)];
}
static int pirq_ali_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
{
static const unsigned char irqmap[16] = { 0, 8, 0, 2, 4, 5, 7, 6, 0, 1, 3, 9, 11, 0, 13, 15 };
unsigned int val = irqmap[irq];
WARN_ON_ONCE(pirq > 16);
if (val) {
write_config_nybble(router, 0x48, pirq-1, val);
return 1;
}
return 0;
}
/*
* PIRQ routing for the 82374EB/82374SB EISA System Component (ESC)
* ASIC used with the Intel 82420 and 82430 PCIsets. The ESC is not
* decoded in the PCI configuration space, so we identify it by the
* accompanying 82375EB/82375SB PCI-EISA Bridge (PCEB) ASIC.
*
* There are four PIRQ Route Control registers, available in the
* port I/O space accessible indirectly via the index/data register
* pair at 0x22/0x23, located at indices 0x60/0x61/0x62/0x63 for the
* PIRQ0/1/2/3# lines respectively. The semantics is the same as
* with the PIIX router.
*
* Accesses to the port I/O space concerned here need to be unlocked
* by writing the value of 0x0f to the ESC ID Register at index 0x02
* beforehand. Any other value written to said register prevents
* further accesses from reaching the register file, except for the
* ESC ID Register being written with 0x0f again.
*
* References:
*
* "82374EB/82374SB EISA System Component (ESC)", Intel Corporation,
* Order Number: 290476-004, March 1996
*
* "82375EB/82375SB PCI-EISA Bridge (PCEB)", Intel Corporation, Order
* Number: 290477-004, March 1996
*/
#define PC_CONF_I82374_ESC_ID 0x02u
#define PC_CONF_I82374_PIRQ_ROUTE_CONTROL 0x60u
#define PC_CONF_I82374_ESC_ID_KEY 0x0fu
static int pirq_esc_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
{
unsigned long flags;
int reg;
u8 x;
reg = pirq;
if (reg >= 1 && reg <= 4)
reg += PC_CONF_I82374_PIRQ_ROUTE_CONTROL - 1;
raw_spin_lock_irqsave(&pc_conf_lock, flags);
pc_conf_set(PC_CONF_I82374_ESC_ID, PC_CONF_I82374_ESC_ID_KEY);
x = pc_conf_get(reg);
pc_conf_set(PC_CONF_I82374_ESC_ID, 0);
raw_spin_unlock_irqrestore(&pc_conf_lock, flags);
return (x < 16) ? x : 0;
}
static int pirq_esc_set(struct pci_dev *router, struct pci_dev *dev, int pirq,
int irq)
{
unsigned long flags;
int reg;
reg = pirq;
if (reg >= 1 && reg <= 4)
reg += PC_CONF_I82374_PIRQ_ROUTE_CONTROL - 1;
raw_spin_lock_irqsave(&pc_conf_lock, flags);
pc_conf_set(PC_CONF_I82374_ESC_ID, PC_CONF_I82374_ESC_ID_KEY);
pc_conf_set(reg, irq);
pc_conf_set(PC_CONF_I82374_ESC_ID, 0);
raw_spin_unlock_irqrestore(&pc_conf_lock, flags);
return 1;
}
/*
* The Intel PIIX4 pirq rules are fairly simple: "pirq" is
* just a pointer to the config space.
*/
static int pirq_piix_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
{
u8 x;
pci_read_config_byte(router, pirq, &x);
return (x < 16) ? x : 0;
}
static int pirq_piix_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
{
pci_write_config_byte(router, pirq, irq);
return 1;
}
/*
* PIRQ routing for the 82426EX ISA Bridge (IB) ASIC used with the
* Intel 82420EX PCIset.
*
* There are only two PIRQ Route Control registers, available in the
* combined 82425EX/82426EX PCI configuration space, at 0x66 and 0x67
* for the PIRQ0# and PIRQ1# lines respectively. The semantics is
* the same as with the PIIX router.
*
* References:
*
* "82420EX PCIset Data Sheet, 82425EX PCI System Controller (PSC)
* and 82426EX ISA Bridge (IB)", Intel Corporation, Order Number:
* 290488-004, December 1995
*/
#define PCI_I82426EX_PIRQ_ROUTE_CONTROL 0x66u
static int pirq_ib_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
{
int reg;
u8 x;
reg = pirq;
if (reg >= 1 && reg <= 2)
reg += PCI_I82426EX_PIRQ_ROUTE_CONTROL - 1;
pci_read_config_byte(router, reg, &x);
return (x < 16) ? x : 0;
}
static int pirq_ib_set(struct pci_dev *router, struct pci_dev *dev, int pirq,
int irq)
{
int reg;
reg = pirq;
if (reg >= 1 && reg <= 2)
reg += PCI_I82426EX_PIRQ_ROUTE_CONTROL - 1;
pci_write_config_byte(router, reg, irq);
return 1;
}
/*
* The VIA pirq rules are nibble-based, like ALI,
* but without the ugly irq number munging.
* However, PIRQD is in the upper instead of lower 4 bits.
*/
static int pirq_via_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
{
return read_config_nybble(router, 0x55, pirq == 4 ? 5 : pirq);
}
static int pirq_via_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
{
write_config_nybble(router, 0x55, pirq == 4 ? 5 : pirq, irq);
return 1;
}
/*
* The VIA pirq rules are nibble-based, like ALI,
* but without the ugly irq number munging.
* However, for the 82C586, the nibble map is different.
*/
static int pirq_via586_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
{
static const unsigned int pirqmap[5] = { 3, 2, 5, 1, 1 };
WARN_ON_ONCE(pirq > 5);
return read_config_nybble(router, 0x55, pirqmap[pirq-1]);
}
static int pirq_via586_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
{
static const unsigned int pirqmap[5] = { 3, 2, 5, 1, 1 };
WARN_ON_ONCE(pirq > 5);
write_config_nybble(router, 0x55, pirqmap[pirq-1], irq);
return 1;
}
/*
* ITE 8330G pirq rules are nibble-based
* FIXME: pirqmap may be { 1, 0, 3, 2 },
* 2+3 are both mapped to irq 9 on my system
*/
static int pirq_ite_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
{
static const unsigned char pirqmap[4] = { 1, 0, 2, 3 };
WARN_ON_ONCE(pirq > 4);
return read_config_nybble(router, 0x43, pirqmap[pirq-1]);
}
static int pirq_ite_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
{
static const unsigned char pirqmap[4] = { 1, 0, 2, 3 };
WARN_ON_ONCE(pirq > 4);
write_config_nybble(router, 0x43, pirqmap[pirq-1], irq);
return 1;
}
/*
* OPTI: high four bits are the nibble pointer..
* I wonder what the low bits do?
*/
static int pirq_opti_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
{
return read_config_nybble(router, 0xb8, pirq >> 4);
}
static int pirq_opti_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
{
write_config_nybble(router, 0xb8, pirq >> 4, irq);
return 1;
}
/*
* Cyrix: nibble offset 0x5C
* 0x5C bits 7:4 is INTB bits 3:0 is INTA
* 0x5D bits 7:4 is INTD bits 3:0 is INTC
*/
static int pirq_cyrix_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
{
return read_config_nybble(router, 0x5C, (pirq-1)^1);
}
static int pirq_cyrix_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
{
write_config_nybble(router, 0x5C, (pirq-1)^1, irq);
return 1;
}
/*
* PIRQ routing for the SiS85C497 AT Bus Controller & Megacell (ATM)
* ISA bridge used with the SiS 85C496/497 486 Green PC VESA/ISA/PCI
* Chipset.
*
* There are four PCI INTx#-to-IRQ Link registers provided in the
* SiS85C497 part of the peculiar combined 85C496/497 configuration
* space decoded by the SiS85C496 PCI & CPU Memory Controller (PCM)
* host bridge, at 0xc0/0xc1/0xc2/0xc3 respectively for the PCI INT
* A/B/C/D lines. Bit 7 enables the respective link if set and bits
* 3:0 select the 8259A IRQ line as follows:
*
* 0000 : Reserved
* 0001 : Reserved
* 0010 : Reserved
* 0011 : IRQ3
* 0100 : IRQ4
* 0101 : IRQ5
* 0110 : IRQ6
* 0111 : IRQ7
* 1000 : Reserved
* 1001 : IRQ9
* 1010 : IRQ10
* 1011 : IRQ11
* 1100 : IRQ12
* 1101 : Reserved
* 1110 : IRQ14
* 1111 : IRQ15
*
* We avoid using a reserved value for disabled links, hence the
* choice of IRQ15 for that case.
*
* References:
*
* "486 Green PC VESA/ISA/PCI Chipset, SiS 85C496/497", Rev 3.0,
* Silicon Integrated Systems Corp., July 1995
*/
#define PCI_SIS497_INTA_TO_IRQ_LINK 0xc0u
#define PIRQ_SIS497_IRQ_MASK 0x0fu
#define PIRQ_SIS497_IRQ_ENABLE 0x80u
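/*
 * For example, a link register value of 0x8b means the link is enabled
 * and routed to IRQ11; on disable the set helper below writes the IRQ15
 * pattern (0x0f) with the enable bit clear, per the note above about
 * avoiding reserved values.
 */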
static int pirq_sis497_get(struct pci_dev *router, struct pci_dev *dev,
int pirq)
{
int reg;
u8 x;
reg = pirq;
if (reg >= 1 && reg <= 4)
reg += PCI_SIS497_INTA_TO_IRQ_LINK - 1;
pci_read_config_byte(router, reg, &x);
return (x & PIRQ_SIS497_IRQ_ENABLE) ? (x & PIRQ_SIS497_IRQ_MASK) : 0;
}
static int pirq_sis497_set(struct pci_dev *router, struct pci_dev *dev,
int pirq, int irq)
{
int reg;
u8 x;
reg = pirq;
if (reg >= 1 && reg <= 4)
reg += PCI_SIS497_INTA_TO_IRQ_LINK - 1;
pci_read_config_byte(router, reg, &x);
x &= ~(PIRQ_SIS497_IRQ_MASK | PIRQ_SIS497_IRQ_ENABLE);
x |= irq ? (PIRQ_SIS497_IRQ_ENABLE | irq) : PIRQ_SIS497_IRQ_MASK;
pci_write_config_byte(router, reg, x);
return 1;
}
/*
* PIRQ routing for SiS 85C503 router used in several SiS chipsets.
* We have to deal with the following issues here:
* - vendors have different ideas about the meaning of link values
* - some onboard devices (integrated in the chipset) have special
* links and are thus routed differently (i.e. not via PCI INTA-INTD)
* - different revision of the router have a different layout for
* the routing registers, particularly for the onchip devices
*
 * For all routing registers the common layout is one byte per
 * routable link, defined as:
* bit 7 IRQ mapping enabled (0) or disabled (1)
* bits [6:4] reserved (sometimes used for onchip devices)
* bits [3:0] IRQ to map to
* allowed: 3-7, 9-12, 14-15
* reserved: 0, 1, 2, 8, 13
*
* The config-space registers located at 0x41/0x42/0x43/0x44 are
* always used to route the normal PCI INT A/B/C/D respectively.
 * Apparently there are systems implementing the PCI routing table
 * with link values 0x01-0x04 and others with 0x41-0x44 for PCI INTA..D.
* We try our best to handle both link mappings.
*
* Currently (2003-05-21) it appears most SiS chipsets follow the
* definition of routing registers from the SiS-5595 southbridge.
* According to the SiS 5595 datasheets the revision id's of the
* router (ISA-bridge) should be 0x01 or 0xb0.
*
* Furthermore we've also seen lspci dumps with revision 0x00 and 0xb1.
* Looks like these are used in a number of SiS 5xx/6xx/7xx chipsets.
* They seem to work with the current routing code. However there is
* some concern because of the two USB-OHCI HCs (original SiS 5595
* had only one). YMMV.
*
* Onchip routing for router rev-id 0x01/0xb0 and probably 0x00/0xb1:
*
* 0x61: IDEIRQ:
* bits [6:5] must be written 01
* bit 4 channel-select primary (0), secondary (1)
*
* 0x62: USBIRQ:
* bit 6 OHCI function disabled (0), enabled (1)
*
* 0x6a: ACPI/SCI IRQ: bits 4-6 reserved
*
* 0x7e: Data Acq. Module IRQ - bits 4-6 reserved
*
* We support USBIRQ (in addition to INTA-INTD) and keep the
* IDE, ACPI and DAQ routing untouched as set by the BIOS.
*
* Currently the only reported exception is the new SiS 65x chipset
* which includes the SiS 69x southbridge. Here we have the 85C503
* router revision 0x04 and there are changes in the register layout
* mostly related to the different USB HCs with USB 2.0 support.
*
 * Onchip routing for router rev-id 0x04 (trial-and-error observation)
*
* 0x60/0x61/0x62/0x63: 1xEHCI and 3xOHCI (companion) USB-HCs
* bit 6-4 are probably unused, not like 5595
*/
#define PIRQ_SIS503_IRQ_MASK 0x0f
#define PIRQ_SIS503_IRQ_DISABLE 0x80
#define PIRQ_SIS503_USB_ENABLE 0x40
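/*
 * Worked example (illustrative only): with link value 0x02 (or 0x42)
 * for INTB, routing to IRQ 5 clears PIRQ_SIS503_IRQ_DISABLE and writes
 * 5 into the low nibble of register 0x42; disabling the link writes
 * PIRQ_SIS503_IRQ_DISABLE instead.
 */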
static int pirq_sis503_get(struct pci_dev *router, struct pci_dev *dev,
int pirq)
{
u8 x;
int reg;
reg = pirq;
if (reg >= 0x01 && reg <= 0x04)
reg += 0x40;
pci_read_config_byte(router, reg, &x);
return (x & PIRQ_SIS503_IRQ_DISABLE) ? 0 : (x & PIRQ_SIS503_IRQ_MASK);
}
static int pirq_sis503_set(struct pci_dev *router, struct pci_dev *dev,
int pirq, int irq)
{
u8 x;
int reg;
reg = pirq;
if (reg >= 0x01 && reg <= 0x04)
reg += 0x40;
pci_read_config_byte(router, reg, &x);
x &= ~(PIRQ_SIS503_IRQ_MASK | PIRQ_SIS503_IRQ_DISABLE);
x |= irq ? irq : PIRQ_SIS503_IRQ_DISABLE;
pci_write_config_byte(router, reg, x);
return 1;
}
/*
* VLSI: nibble offset 0x74 - educated guess due to routing table and
* config space of VLSI 82C534 PCI-bridge/router (1004:0102)
* Tested on HP OmniBook 800 covering PIRQ 1, 2, 4, 8 for onboard
* devices, PIRQ 3 for non-pci(!) soundchip and (untested) PIRQ 6
* for the busbridge to the docking station.
*/
static int pirq_vlsi_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
{
WARN_ON_ONCE(pirq >= 9);
if (pirq > 8) {
dev_info(&dev->dev, "VLSI router PIRQ escape (%d)\n", pirq);
return 0;
}
return read_config_nybble(router, 0x74, pirq-1);
}
static int pirq_vlsi_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
{
WARN_ON_ONCE(pirq >= 9);
if (pirq > 8) {
dev_info(&dev->dev, "VLSI router PIRQ escape (%d)\n", pirq);
return 0;
}
write_config_nybble(router, 0x74, pirq-1, irq);
return 1;
}
/*
* ServerWorks: PCI interrupts mapped to system IRQ lines through Index
* and Redirect I/O registers (0x0c00 and 0x0c01). The Index register
* format is (PCIIRQ## | 0x10), e.g.: PCIIRQ10=0x1a. The Redirect
* register is a straight binary coding of desired PIC IRQ (low nibble).
*
* The 'link' value in the PIRQ table is already in the correct format
* for the Index register. There are some special index values:
* 0x00 for ACPI (SCI), 0x01 for USB, 0x02 for IDE0, 0x04 for IDE1,
* and 0x03 for SMBus.
*/
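/*
 * For instance (illustrative only), routing PCIIRQ5 (index 0x15) to
 * PIC IRQ 9 is simply:
 *
 *	outb(0x15, 0xc00);
 *	outb(9, 0xc01);
 */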
static int pirq_serverworks_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
{
outb(pirq, 0xc00);
return inb(0xc01) & 0xf;
}
static int pirq_serverworks_set(struct pci_dev *router, struct pci_dev *dev,
int pirq, int irq)
{
outb(pirq, 0xc00);
outb(irq, 0xc01);
return 1;
}
/* Support for AMD756 PCI IRQ Routing
* Jhon H. Caicedo <[email protected]>
* Jun/21/2001 0.2.0 Release, fixed to use "nybble" functions... (jhcaiced)
* Jun/19/2001 Alpha Release 0.1.0 (jhcaiced)
* The AMD756 pirq rules are nibble-based
* offset 0x56 0-3 PIRQA 4-7 PIRQB
* offset 0x57 0-3 PIRQC 4-7 PIRQD
*/
static int pirq_amd756_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
{
u8 irq;
irq = 0;
if (pirq <= 4)
irq = read_config_nybble(router, 0x56, pirq - 1);
dev_info(&dev->dev,
"AMD756: dev [%04x:%04x], router PIRQ %d get IRQ %d\n",
dev->vendor, dev->device, pirq, irq);
return irq;
}
static int pirq_amd756_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
{
dev_info(&dev->dev,
"AMD756: dev [%04x:%04x], router PIRQ %d set IRQ %d\n",
dev->vendor, dev->device, pirq, irq);
if (pirq <= 4)
write_config_nybble(router, 0x56, pirq - 1, irq);
return 1;
}
/*
* PicoPower PT86C523
*/
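/*
 * Two PIRQ links share one internal register: index port 0x24 selects
 * register 0x10 + (pirq - 1) / 2 and data port 0x26 then holds two IRQ
 * nibbles, odd-numbered links in the low nibble and even-numbered ones
 * in the high nibble (derived from the accessors below).
 */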
static int pirq_pico_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
{
outb(0x10 + ((pirq - 1) >> 1), 0x24);
return ((pirq - 1) & 1) ? (inb(0x26) >> 4) : (inb(0x26) & 0xf);
}
static int pirq_pico_set(struct pci_dev *router, struct pci_dev *dev, int pirq,
int irq)
{
unsigned int x;
outb(0x10 + ((pirq - 1) >> 1), 0x24);
x = inb(0x26);
x = ((pirq - 1) & 1) ? ((x & 0x0f) | (irq << 4)) : ((x & 0xf0) | (irq));
outb(x, 0x26);
return 1;
}
#ifdef CONFIG_PCI_BIOS
static int pirq_bios_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
{
struct pci_dev *bridge;
int pin = pci_get_interrupt_pin(dev, &bridge);
return pcibios_set_irq_routing(bridge, pin - 1, irq);
}
#endif
static __init int intel_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
{
static struct pci_device_id __initdata pirq_440gx[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_2) },
{ },
};
/* 440GX has a proprietary PIRQ router -- don't use it */
if (pci_dev_present(pirq_440gx))
return 0;
switch (device) {
case PCI_DEVICE_ID_INTEL_82375:
r->name = "PCEB/ESC";
r->get = pirq_esc_get;
r->set = pirq_esc_set;
return 1;
case PCI_DEVICE_ID_INTEL_82371FB_0:
case PCI_DEVICE_ID_INTEL_82371SB_0:
case PCI_DEVICE_ID_INTEL_82371AB_0:
case PCI_DEVICE_ID_INTEL_82371MX:
case PCI_DEVICE_ID_INTEL_82443MX_0:
case PCI_DEVICE_ID_INTEL_82801AA_0:
case PCI_DEVICE_ID_INTEL_82801AB_0:
case PCI_DEVICE_ID_INTEL_82801BA_0:
case PCI_DEVICE_ID_INTEL_82801BA_10:
case PCI_DEVICE_ID_INTEL_82801CA_0:
case PCI_DEVICE_ID_INTEL_82801CA_12:
case PCI_DEVICE_ID_INTEL_82801DB_0:
case PCI_DEVICE_ID_INTEL_82801E_0:
case PCI_DEVICE_ID_INTEL_82801EB_0:
case PCI_DEVICE_ID_INTEL_ESB_1:
case PCI_DEVICE_ID_INTEL_ICH6_0:
case PCI_DEVICE_ID_INTEL_ICH6_1:
case PCI_DEVICE_ID_INTEL_ICH7_0:
case PCI_DEVICE_ID_INTEL_ICH7_1:
case PCI_DEVICE_ID_INTEL_ICH7_30:
case PCI_DEVICE_ID_INTEL_ICH7_31:
case PCI_DEVICE_ID_INTEL_TGP_LPC:
case PCI_DEVICE_ID_INTEL_ESB2_0:
case PCI_DEVICE_ID_INTEL_ICH8_0:
case PCI_DEVICE_ID_INTEL_ICH8_1:
case PCI_DEVICE_ID_INTEL_ICH8_2:
case PCI_DEVICE_ID_INTEL_ICH8_3:
case PCI_DEVICE_ID_INTEL_ICH8_4:
case PCI_DEVICE_ID_INTEL_ICH9_0:
case PCI_DEVICE_ID_INTEL_ICH9_1:
case PCI_DEVICE_ID_INTEL_ICH9_2:
case PCI_DEVICE_ID_INTEL_ICH9_3:
case PCI_DEVICE_ID_INTEL_ICH9_4:
case PCI_DEVICE_ID_INTEL_ICH9_5:
case PCI_DEVICE_ID_INTEL_EP80579_0:
case PCI_DEVICE_ID_INTEL_ICH10_0:
case PCI_DEVICE_ID_INTEL_ICH10_1:
case PCI_DEVICE_ID_INTEL_ICH10_2:
case PCI_DEVICE_ID_INTEL_ICH10_3:
case PCI_DEVICE_ID_INTEL_PATSBURG_LPC_0:
case PCI_DEVICE_ID_INTEL_PATSBURG_LPC_1:
r->name = "PIIX/ICH";
r->get = pirq_piix_get;
r->set = pirq_piix_set;
return 1;
case PCI_DEVICE_ID_INTEL_82425:
r->name = "PSC/IB";
r->get = pirq_ib_get;
r->set = pirq_ib_set;
return 1;
}
if ((device >= PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MIN &&
device <= PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MAX)
|| (device >= PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MIN &&
device <= PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MAX)
|| (device >= PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MIN &&
device <= PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MAX)
|| (device >= PCI_DEVICE_ID_INTEL_PANTHERPOINT_LPC_MIN &&
device <= PCI_DEVICE_ID_INTEL_PANTHERPOINT_LPC_MAX)) {
r->name = "PIIX/ICH";
r->get = pirq_piix_get;
r->set = pirq_piix_set;
return 1;
}
return 0;
}
static __init int via_router_probe(struct irq_router *r,
struct pci_dev *router, u16 device)
{
/* FIXME: We should move some of the quirk fixup stuff here */
/*
* workarounds for some buggy BIOSes
*/
if (device == PCI_DEVICE_ID_VIA_82C586_0) {
switch (router->device) {
case PCI_DEVICE_ID_VIA_82C686:
/*
* Asus k7m bios wrongly reports 82C686A
* as 586-compatible
*/
device = PCI_DEVICE_ID_VIA_82C686;
break;
case PCI_DEVICE_ID_VIA_8235:
			/*
* Asus a7v-x bios wrongly reports 8235
* as 586-compatible
*/
device = PCI_DEVICE_ID_VIA_8235;
break;
case PCI_DEVICE_ID_VIA_8237:
			/*
* Asus a7v600 bios wrongly reports 8237
* as 586-compatible
*/
device = PCI_DEVICE_ID_VIA_8237;
break;
}
}
switch (device) {
case PCI_DEVICE_ID_VIA_82C586_0:
r->name = "VIA";
r->get = pirq_via586_get;
r->set = pirq_via586_set;
return 1;
case PCI_DEVICE_ID_VIA_82C596:
case PCI_DEVICE_ID_VIA_82C686:
case PCI_DEVICE_ID_VIA_8231:
case PCI_DEVICE_ID_VIA_8233A:
case PCI_DEVICE_ID_VIA_8235:
case PCI_DEVICE_ID_VIA_8237:
/* FIXME: add new ones for 8233/5 */
r->name = "VIA";
r->get = pirq_via_get;
r->set = pirq_via_set;
return 1;
}
return 0;
}
static __init int vlsi_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
{
switch (device) {
case PCI_DEVICE_ID_VLSI_82C534:
r->name = "VLSI 82C534";
r->get = pirq_vlsi_get;
r->set = pirq_vlsi_set;
return 1;
}
return 0;
}
static __init int serverworks_router_probe(struct irq_router *r,
struct pci_dev *router, u16 device)
{
switch (device) {
case PCI_DEVICE_ID_SERVERWORKS_OSB4:
case PCI_DEVICE_ID_SERVERWORKS_CSB5:
r->name = "ServerWorks";
r->get = pirq_serverworks_get;
r->set = pirq_serverworks_set;
return 1;
}
return 0;
}
static __init int sis_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
{
switch (device) {
case PCI_DEVICE_ID_SI_496:
r->name = "SiS85C497";
r->get = pirq_sis497_get;
r->set = pirq_sis497_set;
return 1;
case PCI_DEVICE_ID_SI_503:
r->name = "SiS85C503";
r->get = pirq_sis503_get;
r->set = pirq_sis503_set;
return 1;
}
return 0;
}
static __init int cyrix_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
{
switch (device) {
case PCI_DEVICE_ID_CYRIX_5520:
r->name = "NatSemi";
r->get = pirq_cyrix_get;
r->set = pirq_cyrix_set;
return 1;
}
return 0;
}
static __init int opti_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
{
switch (device) {
case PCI_DEVICE_ID_OPTI_82C700:
r->name = "OPTI";
r->get = pirq_opti_get;
r->set = pirq_opti_set;
return 1;
}
return 0;
}
static __init int ite_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
{
switch (device) {
case PCI_DEVICE_ID_ITE_IT8330G_0:
r->name = "ITE";
r->get = pirq_ite_get;
r->set = pirq_ite_set;
return 1;
}
return 0;
}
static __init int ali_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
{
switch (device) {
case PCI_DEVICE_ID_AL_M1489:
r->name = "FinALi";
r->get = pirq_finali_get;
r->set = pirq_finali_set;
r->lvl = pirq_finali_lvl;
return 1;
case PCI_DEVICE_ID_AL_M1533:
case PCI_DEVICE_ID_AL_M1563:
r->name = "ALI";
r->get = pirq_ali_get;
r->set = pirq_ali_set;
return 1;
}
return 0;
}
static __init int amd_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
{
switch (device) {
case PCI_DEVICE_ID_AMD_VIPER_740B:
r->name = "AMD756";
break;
case PCI_DEVICE_ID_AMD_VIPER_7413:
r->name = "AMD766";
break;
case PCI_DEVICE_ID_AMD_VIPER_7443:
r->name = "AMD768";
break;
default:
return 0;
}
r->get = pirq_amd756_get;
r->set = pirq_amd756_set;
return 1;
}
static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
{
switch (device) {
case PCI_DEVICE_ID_PICOPOWER_PT86C523:
r->name = "PicoPower PT86C523";
r->get = pirq_pico_get;
r->set = pirq_pico_set;
return 1;
case PCI_DEVICE_ID_PICOPOWER_PT86C523BBP:
r->name = "PicoPower PT86C523 rev. BB+";
r->get = pirq_pico_get;
r->set = pirq_pico_set;
return 1;
}
return 0;
}
static __initdata struct irq_router_handler pirq_routers[] = {
{ PCI_VENDOR_ID_INTEL, intel_router_probe },
{ PCI_VENDOR_ID_AL, ali_router_probe },
{ PCI_VENDOR_ID_ITE, ite_router_probe },
{ PCI_VENDOR_ID_VIA, via_router_probe },
{ PCI_VENDOR_ID_OPTI, opti_router_probe },
{ PCI_VENDOR_ID_SI, sis_router_probe },
{ PCI_VENDOR_ID_CYRIX, cyrix_router_probe },
{ PCI_VENDOR_ID_VLSI, vlsi_router_probe },
{ PCI_VENDOR_ID_SERVERWORKS, serverworks_router_probe },
{ PCI_VENDOR_ID_AMD, amd_router_probe },
{ PCI_VENDOR_ID_PICOPOWER, pico_router_probe },
/* Someone with docs needs to add the ATI Radeon IGP */
{ 0, NULL }
};
static struct irq_router pirq_router;
static struct pci_dev *pirq_router_dev;
/*
* FIXME: should we have an option to say "generic for
* chipset" ?
*/
static bool __init pirq_try_router(struct irq_router *r,
struct irq_routing_table *rt,
struct pci_dev *dev)
{
struct irq_router_handler *h;
DBG(KERN_DEBUG "PCI: Trying IRQ router for [%04x:%04x]\n",
dev->vendor, dev->device);
for (h = pirq_routers; h->vendor; h++) {
/* First look for a router match */
if (rt->rtr_vendor == h->vendor &&
h->probe(r, dev, rt->rtr_device))
return true;
/* Fall back to a device match */
if (dev->vendor == h->vendor &&
h->probe(r, dev, dev->device))
return true;
}
return false;
}
static void __init pirq_find_router(struct irq_router *r)
{
struct irq_routing_table *rt = pirq_table;
struct pci_dev *dev;
#ifdef CONFIG_PCI_BIOS
if (!rt->signature) {
printk(KERN_INFO "PCI: Using BIOS for IRQ routing\n");
r->set = pirq_bios_set;
r->name = "BIOS";
return;
}
#endif
/* Default unless a driver reloads it */
r->name = "default";
r->get = NULL;
r->set = NULL;
DBG(KERN_DEBUG "PCI: Attempting to find IRQ router for [%04x:%04x]\n",
rt->rtr_vendor, rt->rtr_device);
/* Use any vendor:device provided by the routing table or try all. */
if (rt->rtr_vendor) {
dev = pci_get_domain_bus_and_slot(0, rt->rtr_bus,
rt->rtr_devfn);
if (dev && pirq_try_router(r, rt, dev))
pirq_router_dev = dev;
} else {
dev = NULL;
for_each_pci_dev(dev) {
if (pirq_try_router(r, rt, dev)) {
pirq_router_dev = dev;
break;
}
}
}
if (pirq_router_dev)
dev_info(&pirq_router_dev->dev, "%s IRQ router [%04x:%04x]\n",
pirq_router.name,
pirq_router_dev->vendor, pirq_router_dev->device);
else
DBG(KERN_DEBUG "PCI: Interrupt router not found at "
"%02x:%02x\n", rt->rtr_bus, rt->rtr_devfn);
/* The device remains referenced for the kernel lifetime */
}
/*
* We're supposed to match on the PCI device only and not the function,
* but some BIOSes build their tables with the PCI function included
* for motherboard devices, so if a complete match is found, then give
* it precedence over a slot match.
*/
static struct irq_info *pirq_get_dev_info(struct pci_dev *dev)
{
struct irq_routing_table *rt = pirq_table;
int entries = (rt->size - sizeof(struct irq_routing_table)) /
sizeof(struct irq_info);
struct irq_info *slotinfo = NULL;
struct irq_info *info;
for (info = rt->slots; entries--; info++)
if (info->bus == dev->bus->number) {
if (info->devfn == dev->devfn)
return info;
if (!slotinfo &&
PCI_SLOT(info->devfn) == PCI_SLOT(dev->devfn))
slotinfo = info;
}
return slotinfo;
}
/*
* Buses behind bridges are typically not listed in the PIRQ routing table.
* Do the usual dance then and walk the tree of bridges up adjusting the
* pin number accordingly on the way until the originating root bus device
* has been reached and then use its routing information.
*/
static struct irq_info *pirq_get_info(struct pci_dev *dev, u8 *pin)
{
struct pci_dev *temp_dev = dev;
struct irq_info *info;
u8 temp_pin = *pin;
u8 dpin = temp_pin;
info = pirq_get_dev_info(dev);
while (!info && temp_dev->bus->parent) {
struct pci_dev *bridge = temp_dev->bus->self;
temp_pin = pci_swizzle_interrupt_pin(temp_dev, temp_pin);
info = pirq_get_dev_info(bridge);
if (info)
dev_warn(&dev->dev,
"using bridge %s INT %c to get INT %c\n",
pci_name(bridge),
'A' + temp_pin - 1, 'A' + dpin - 1);
temp_dev = bridge;
}
*pin = temp_pin;
return info;
}
static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
{
struct irq_info *info;
int i, pirq, newirq;
u8 dpin, pin;
int irq = 0;
u32 mask;
struct irq_router *r = &pirq_router;
struct pci_dev *dev2 = NULL;
char *msg = NULL;
/* Find IRQ pin */
pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &dpin);
if (!dpin) {
dev_dbg(&dev->dev, "no interrupt pin\n");
return 0;
}
if (io_apic_assign_pci_irqs)
return 0;
/* Find IRQ routing entry */
if (!pirq_table)
return 0;
pin = dpin;
info = pirq_get_info(dev, &pin);
if (!info) {
dev_dbg(&dev->dev, "PCI INT %c not found in routing table\n",
'A' + dpin - 1);
return 0;
}
pirq = info->irq[pin - 1].link;
mask = info->irq[pin - 1].bitmap;
if (!pirq) {
dev_dbg(&dev->dev, "PCI INT %c not routed\n", 'A' + dpin - 1);
return 0;
}
dev_dbg(&dev->dev, "PCI INT %c -> PIRQ %02x, mask %04x, excl %04x",
'A' + dpin - 1, pirq, mask, pirq_table->exclusive_irqs);
mask &= pcibios_irq_mask;
/* Work around broken HP Pavilion Notebooks which assign USB to
IRQ 9 even though it is actually wired to IRQ 11 */
if (broken_hp_bios_irq9 && pirq == 0x59 && dev->irq == 9) {
dev->irq = 11;
pci_write_config_byte(dev, PCI_INTERRUPT_LINE, 11);
r->set(pirq_router_dev, dev, pirq, 11);
}
/* same for Acer Travelmate 360, but with CB and irq 11 -> 10 */
if (acer_tm360_irqrouting && dev->irq == 11 &&
dev->vendor == PCI_VENDOR_ID_O2) {
pirq = 0x68;
mask = 0x400;
dev->irq = r->get(pirq_router_dev, dev, pirq);
pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
}
/*
* Find the best IRQ to assign: use the one
* reported by the device if possible.
*/
newirq = dev->irq;
if (newirq && !((1 << newirq) & mask)) {
if (pci_probe & PCI_USE_PIRQ_MASK)
newirq = 0;
else
dev_warn(&dev->dev, "IRQ %d doesn't match PIRQ mask "
"%#x; try pci=usepirqmask\n", newirq, mask);
}
if (!newirq && assign) {
for (i = 0; i < 16; i++) {
if (!(mask & (1 << i)))
continue;
if (pirq_penalty[i] < pirq_penalty[newirq] &&
can_request_irq(i, IRQF_SHARED))
newirq = i;
}
}
dev_dbg(&dev->dev, "PCI INT %c -> newirq %d", 'A' + dpin - 1, newirq);
/* Check if it is hardcoded */
if ((pirq & 0xf0) == 0xf0) {
irq = pirq & 0xf;
msg = "hardcoded";
	} else if (r->get && (irq = r->get(pirq_router_dev, dev, pirq)) &&
		   ((!(pci_probe & PCI_USE_PIRQ_MASK)) || ((1 << irq) & mask))) {
msg = "found";
if (r->lvl)
r->lvl(pirq_router_dev, dev, pirq, irq);
else
elcr_set_level_irq(irq);
} else if (newirq && r->set &&
(dev->class >> 8) != PCI_CLASS_DISPLAY_VGA) {
if (r->set(pirq_router_dev, dev, pirq, newirq)) {
if (r->lvl)
r->lvl(pirq_router_dev, dev, pirq, newirq);
else
elcr_set_level_irq(newirq);
msg = "assigned";
irq = newirq;
}
}
if (!irq) {
if (newirq && mask == (1 << newirq)) {
msg = "guessed";
irq = newirq;
} else {
dev_dbg(&dev->dev, "can't route interrupt\n");
return 0;
}
}
dev_info(&dev->dev, "%s PCI INT %c -> IRQ %d\n",
msg, 'A' + dpin - 1, irq);
/* Update IRQ for all devices with the same pirq value */
for_each_pci_dev(dev2) {
pci_read_config_byte(dev2, PCI_INTERRUPT_PIN, &dpin);
if (!dpin)
continue;
pin = dpin;
info = pirq_get_info(dev2, &pin);
if (!info)
continue;
if (info->irq[pin - 1].link == pirq) {
/*
* We refuse to override the dev->irq
* information. Give a warning!
*/
			if (dev2->irq && dev2->irq != irq &&
			    (!(pci_probe & PCI_USE_PIRQ_MASK) ||
			     ((1 << dev2->irq) & mask))) {
#ifndef CONFIG_PCI_MSI
dev_info(&dev2->dev, "IRQ routing conflict: "
"have IRQ %d, want IRQ %d\n",
dev2->irq, irq);
#endif
continue;
}
dev2->irq = irq;
pirq_penalty[irq]++;
if (dev != dev2)
dev_info(&dev->dev, "sharing IRQ %d with %s\n",
irq, pci_name(dev2));
}
}
return 1;
}
void __init pcibios_fixup_irqs(void)
{
struct pci_dev *dev = NULL;
u8 pin;
DBG(KERN_DEBUG "PCI: IRQ fixup\n");
for_each_pci_dev(dev) {
/*
		 * If the BIOS has set an out-of-range IRQ number, just
		 * ignore it. Also keep track of which IRQs are
		 * already in use.
*/
if (dev->irq >= 16) {
dev_dbg(&dev->dev, "ignoring bogus IRQ %d\n", dev->irq);
dev->irq = 0;
}
/*
* If the IRQ is already assigned to a PCI device,
* ignore its ISA use penalty
*/
if (pirq_penalty[dev->irq] >= 100 &&
pirq_penalty[dev->irq] < 100000)
pirq_penalty[dev->irq] = 0;
pirq_penalty[dev->irq]++;
}
if (io_apic_assign_pci_irqs)
return;
dev = NULL;
for_each_pci_dev(dev) {
pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
if (!pin)
continue;
/*
* Still no IRQ? Try to lookup one...
*/
if (!dev->irq)
pcibios_lookup_irq(dev, 0);
}
}
/*
* Work around broken HP Pavilion Notebooks which assign USB to
* IRQ 9 even though it is actually wired to IRQ 11
*/
static int __init fix_broken_hp_bios_irq9(const struct dmi_system_id *d)
{
if (!broken_hp_bios_irq9) {
broken_hp_bios_irq9 = 1;
printk(KERN_INFO "%s detected - fixing broken IRQ routing\n",
d->ident);
}
return 0;
}
/*
* Work around broken Acer TravelMate 360 Notebooks which assign
* Cardbus to IRQ 11 even though it is actually wired to IRQ 10
*/
static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
{
if (!acer_tm360_irqrouting) {
acer_tm360_irqrouting = 1;
printk(KERN_INFO "%s detected - fixing broken IRQ routing\n",
d->ident);
}
return 0;
}
static const struct dmi_system_id pciirq_dmi_table[] __initconst = {
{
.callback = fix_broken_hp_bios_irq9,
.ident = "HP Pavilion N5400 Series Laptop",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_BIOS_VERSION, "GE.M1.03"),
DMI_MATCH(DMI_PRODUCT_VERSION,
"HP Pavilion Notebook Model GE"),
DMI_MATCH(DMI_BOARD_VERSION, "OmniBook N32N-736"),
},
},
{
.callback = fix_acer_tm360_irqrouting,
.ident = "Acer TravelMate 36x Laptop",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"),
},
},
{ }
};
void __init pcibios_irq_init(void)
{
struct irq_routing_table *rtable = NULL;
DBG(KERN_DEBUG "PCI: IRQ init\n");
if (raw_pci_ops == NULL)
return;
dmi_check_system(pciirq_dmi_table);
pirq_table = pirq_find_routing_table();
#ifdef CONFIG_PCI_BIOS
if (!pirq_table && (pci_probe & PCI_BIOS_IRQ_SCAN)) {
pirq_table = pcibios_get_irq_routing_table();
rtable = pirq_table;
}
#endif
if (pirq_table) {
pirq_peer_trick();
pirq_find_router(&pirq_router);
if (pirq_table->exclusive_irqs) {
int i;
for (i = 0; i < 16; i++)
if (!(pirq_table->exclusive_irqs & (1 << i)))
pirq_penalty[i] += 100;
}
/*
* If we're using the I/O APIC, avoid using the PCI IRQ
* routing table
*/
if (io_apic_assign_pci_irqs) {
kfree(rtable);
pirq_table = NULL;
}
}
x86_init.pci.fixup_irqs();
if (io_apic_assign_pci_irqs && pci_routeirq) {
struct pci_dev *dev = NULL;
/*
* PCI IRQ routing is set up by pci_enable_device(), but we
* also do it here in case there are still broken drivers that
* don't use pci_enable_device().
*/
printk(KERN_INFO "PCI: Routing PCI interrupts for all devices because \"pci=routeirq\" specified\n");
for_each_pci_dev(dev)
pirq_enable_irq(dev);
}
}
static void pirq_penalize_isa_irq(int irq, int active)
{
/*
* If any ISAPnP device reports an IRQ in its list of possible
* IRQ's, we try to avoid assigning it to PCI devices.
*/
if (irq < 16) {
if (active)
pirq_penalty[irq] += 1000;
else
pirq_penalty[irq] += 100;
}
}
void pcibios_penalize_isa_irq(int irq, int active)
{
#ifdef CONFIG_ACPI
if (!acpi_noirq)
acpi_penalize_isa_irq(irq, active);
else
#endif
pirq_penalize_isa_irq(irq, active);
}
static int pirq_enable_irq(struct pci_dev *dev)
{
u8 pin = 0;
pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
if (pin && !pcibios_lookup_irq(dev, 1)) {
char *msg = "";
if (!io_apic_assign_pci_irqs && dev->irq)
return 0;
if (io_apic_assign_pci_irqs) {
#ifdef CONFIG_X86_IO_APIC
struct pci_dev *temp_dev;
int irq;
if (dev->irq_managed && dev->irq > 0)
return 0;
irq = IO_APIC_get_PCI_irq_vector(dev->bus->number,
PCI_SLOT(dev->devfn), pin - 1);
/*
* Busses behind bridges are typically not listed in the MP-table.
* In this case we have to look up the IRQ based on the parent bus,
* parent slot, and pin number. The SMP code detects such bridged
* busses itself so we should get into this branch reliably.
*/
temp_dev = dev;
while (irq < 0 && dev->bus->parent) { /* go back to the bridge */
struct pci_dev *bridge = dev->bus->self;
pin = pci_swizzle_interrupt_pin(dev, pin);
irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number,
PCI_SLOT(bridge->devfn),
pin - 1);
if (irq >= 0)
dev_warn(&dev->dev, "using bridge %s "
"INT %c to get IRQ %d\n",
pci_name(bridge), 'A' + pin - 1,
irq);
dev = bridge;
}
dev = temp_dev;
if (irq >= 0) {
dev->irq_managed = 1;
dev->irq = irq;
dev_info(&dev->dev, "PCI->APIC IRQ transform: "
"INT %c -> IRQ %d\n", 'A' + pin - 1, irq);
return 0;
} else
msg = "; probably buggy MP table";
#endif
} else if (pci_probe & PCI_BIOS_IRQ_SCAN)
msg = "";
else
msg = "; please try using pci=biosirq";
/*
		 * With IDE legacy devices the IRQ lookup failure is not
		 * a problem.
*/
if (dev->class >> 8 == PCI_CLASS_STORAGE_IDE &&
!(dev->class & 0x5))
return 0;
dev_warn(&dev->dev, "can't find IRQ for PCI INT %c%s\n",
'A' + pin - 1, msg);
}
return 0;
}
bool mp_should_keep_irq(struct device *dev)
{
if (dev->power.is_prepared)
return true;
#ifdef CONFIG_PM
if (dev->power.runtime_status == RPM_SUSPENDING)
return true;
#endif
return false;
}
static void pirq_disable_irq(struct pci_dev *dev)
{
if (io_apic_assign_pci_irqs && !mp_should_keep_irq(&dev->dev) &&
dev->irq_managed && dev->irq) {
mp_unmap_irq(dev->irq);
dev->irq = 0;
dev->irq_managed = 0;
}
}
| linux-master | arch/x86/pci/irq.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Intel MID PCI support
* Copyright (c) 2008 Intel Corporation
* Jesse Barnes <[email protected]>
*
* Moorestown has an interesting PCI implementation:
* - configuration space is memory mapped (as defined by MCFG)
* - Lincroft devices also have a real, type 1 configuration space
* - Early Lincroft silicon has a type 1 access bug that will cause
* a hang if non-existent devices are accessed
* - some devices have the "fixed BAR" capability, which means
* they can't be relocated or modified; check for that during
* BAR sizing
*
* So, we use the MCFG space for all reads and writes, but also send
* Lincroft writes to type 1 space. But only read/write if the device
* actually exists, otherwise return all 1s for reads and bit bucket
* the writes.
*/
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/dmi.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/smp.h>
#include <asm/cpu_device_id.h>
#include <asm/segment.h>
#include <asm/pci_x86.h>
#include <asm/hw_irq.h>
#include <asm/io_apic.h>
#include <asm/intel-family.h>
#include <asm/intel-mid.h>
#include <asm/acpi.h>
#define PCIE_CAP_OFFSET 0x100
/* Quirks for the listed devices */
#define PCI_DEVICE_ID_INTEL_MRFLD_MMC 0x1190
#define PCI_DEVICE_ID_INTEL_MRFLD_HSU 0x1191
/* Fixed BAR fields */
#define PCIE_VNDR_CAP_ID_FIXED_BAR 0x00 /* Fixed BAR (TBD) */
#define PCI_FIXED_BAR_0_SIZE 0x04
#define PCI_FIXED_BAR_1_SIZE 0x08
#define PCI_FIXED_BAR_2_SIZE 0x0c
#define PCI_FIXED_BAR_3_SIZE 0x10
#define PCI_FIXED_BAR_4_SIZE 0x14
#define PCI_FIXED_BAR_5_SIZE 0x1c
static int pci_soc_mode;
/**
* fixed_bar_cap - return the offset of the fixed BAR cap if found
* @bus: PCI bus
* @devfn: device in question
*
* Look for the fixed BAR cap on @bus and @devfn, returning its offset
* if found or 0 otherwise.
*/
static int fixed_bar_cap(struct pci_bus *bus, unsigned int devfn)
{
int pos;
u32 pcie_cap = 0, cap_data;
pos = PCIE_CAP_OFFSET;
if (!raw_pci_ext_ops)
return 0;
while (pos) {
if (raw_pci_ext_ops->read(pci_domain_nr(bus), bus->number,
devfn, pos, 4, &pcie_cap))
return 0;
if (PCI_EXT_CAP_ID(pcie_cap) == 0x0000 ||
PCI_EXT_CAP_ID(pcie_cap) == 0xffff)
break;
if (PCI_EXT_CAP_ID(pcie_cap) == PCI_EXT_CAP_ID_VNDR) {
raw_pci_ext_ops->read(pci_domain_nr(bus), bus->number,
devfn, pos + 4, 4, &cap_data);
if ((cap_data & 0xffff) == PCIE_VNDR_CAP_ID_FIXED_BAR)
return pos;
}
pos = PCI_EXT_CAP_NEXT(pcie_cap);
}
return 0;
}
static int pci_device_update_fixed(struct pci_bus *bus, unsigned int devfn,
int reg, int len, u32 val, int offset)
{
u32 size;
unsigned int domain, busnum;
int bar = (reg - PCI_BASE_ADDRESS_0) >> 2;
domain = pci_domain_nr(bus);
busnum = bus->number;
if (val == ~0 && len == 4) {
unsigned long decode;
raw_pci_ext_ops->read(domain, busnum, devfn,
offset + 8 + (bar * 4), 4, &size);
/* Turn the size into a decode pattern for the sizing code */
if (size) {
decode = size - 1;
decode |= decode >> 1;
decode |= decode >> 2;
decode |= decode >> 4;
decode |= decode >> 8;
decode |= decode >> 16;
decode++;
decode = ~(decode - 1);
} else {
decode = 0;
}
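		/*
		 * Example (illustrative values): a fixed size of 0x1000
		 * yields decode = 0xfffff000, while a non-power-of-two
		 * size such as 0x1800 rounds up to a 0x2000 window
		 * (decode = 0xffffe000).
		 */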
/*
* If val is all ones, the core code is trying to size the reg,
* so update the mmconfig space with the real size.
*
* Note: this assumes the fixed size we got is a power of two.
*/
return raw_pci_ext_ops->write(domain, busnum, devfn, reg, 4,
decode);
}
/* This is some other kind of BAR write, so just do it. */
return raw_pci_ext_ops->write(domain, busnum, devfn, reg, len, val);
}
/**
* type1_access_ok - check whether to use type 1
* @bus: bus number
* @devfn: device & function in question
* @reg: configuration register offset
*
* If the bus is on a Lincroft chip and it exists, or is not on a Lincroft at
 * all, then we can go ahead with any reads & writes. If it's on a Lincroft,
* but doesn't exist, avoid the access altogether to keep the chip from
* hanging.
*/
static bool type1_access_ok(unsigned int bus, unsigned int devfn, int reg)
{
/*
	 * This is a workaround for the A0 LNC bug where the PCI status
	 * register does not have the new CAP bit set; it cannot be written
	 * by software either.
	 *
	 * The PCI header type in real LNC indicates a single-function
	 * device; this would prevent probing other devices behind the same
	 * function in the PCI shim. Therefore, use the header type in the
	 * shim instead.
*/
if (reg >= 0x100 || reg == PCI_STATUS || reg == PCI_HEADER_TYPE)
return false;
if (bus == 0 && (devfn == PCI_DEVFN(2, 0)
|| devfn == PCI_DEVFN(0, 0)
|| devfn == PCI_DEVFN(3, 0)))
return true;
return false; /* Langwell on others */
}
static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 *value)
{
if (type1_access_ok(bus->number, devfn, where))
return pci_direct_conf1.read(pci_domain_nr(bus), bus->number,
devfn, where, size, value);
return raw_pci_ext_ops->read(pci_domain_nr(bus), bus->number,
devfn, where, size, value);
}
static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 value)
{
int offset;
/*
	 * On MRST there is no PCI ROM BAR; a subsequent read from the ROM
	 * BAR would return 0 and then be ignored.
*/
if (where == PCI_ROM_ADDRESS)
return 0;
/*
* Devices with fixed BARs need special handling:
* - BAR sizing code will save, write ~0, read size, restore
* - so writes to fixed BARs need special handling
* - other writes to fixed BAR devices should go through mmconfig
*/
offset = fixed_bar_cap(bus, devfn);
if (offset &&
(where >= PCI_BASE_ADDRESS_0 && where <= PCI_BASE_ADDRESS_5)) {
return pci_device_update_fixed(bus, devfn, where, size, value,
offset);
}
/*
* On Moorestown update both real & mmconfig space
* Note: early Lincroft silicon can't handle type 1 accesses to
* non-existent devices, so just eat the write in that case.
*/
if (type1_access_ok(bus->number, devfn, where))
return pci_direct_conf1.write(pci_domain_nr(bus), bus->number,
devfn, where, size, value);
return raw_pci_ext_ops->write(pci_domain_nr(bus), bus->number, devfn,
where, size, value);
}
static const struct x86_cpu_id intel_mid_cpu_ids[] = {
X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_MID, NULL),
{}
};
static int intel_mid_pci_irq_enable(struct pci_dev *dev)
{
const struct x86_cpu_id *id;
struct irq_alloc_info info;
bool polarity_low;
u16 model = 0;
int ret;
u8 gsi;
if (dev->irq_managed && dev->irq > 0)
return 0;
ret = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi);
if (ret < 0) {
dev_warn(&dev->dev, "Failed to read interrupt line: %d\n", ret);
return ret;
}
id = x86_match_cpu(intel_mid_cpu_ids);
if (id)
model = id->model;
switch (model) {
case INTEL_FAM6_ATOM_SILVERMONT_MID:
polarity_low = false;
/* Special treatment for IRQ0 */
if (gsi == 0) {
/*
* Skip HS UART common registers device since it has
* IRQ0 assigned and not used by the kernel.
*/
if (dev->device == PCI_DEVICE_ID_INTEL_MRFLD_HSU)
return -EBUSY;
/*
* TNG has IRQ0 assigned to eMMC controller. But there
* are also other devices with bogus PCI configuration
* that have IRQ0 assigned. This check ensures that
* eMMC gets it. The rest of devices still could be
* enabled without interrupt line being allocated.
*/
if (dev->device != PCI_DEVICE_ID_INTEL_MRFLD_MMC)
return 0;
}
break;
default:
polarity_low = true;
break;
}
ioapic_set_alloc_attr(&info, dev_to_node(&dev->dev), 1, polarity_low);
/*
	 * MRST only has an IOAPIC; the PCI IRQ lines are mapped 1:1 to
	 * IOAPIC RTE entries, so we just enable the RTE for the device.
*/
ret = mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC, &info);
if (ret < 0)
return ret;
dev->irq = ret;
dev->irq_managed = 1;
return 0;
}
static void intel_mid_pci_irq_disable(struct pci_dev *dev)
{
if (!mp_should_keep_irq(&dev->dev) && dev->irq_managed &&
dev->irq > 0) {
mp_unmap_irq(dev->irq);
dev->irq_managed = 0;
}
}
static const struct pci_ops intel_mid_pci_ops __initconst = {
.read = pci_read,
.write = pci_write,
};
/**
* intel_mid_pci_init - installs intel_mid_pci_ops
*
* Moorestown has an interesting PCI implementation (see above).
* Called when the early platform detection installs it.
*/
int __init intel_mid_pci_init(void)
{
pr_info("Intel MID platform detected, using MID PCI ops\n");
pci_mmcfg_late_init();
pcibios_enable_irq = intel_mid_pci_irq_enable;
pcibios_disable_irq = intel_mid_pci_irq_disable;
pci_root_ops = intel_mid_pci_ops;
pci_soc_mode = 1;
/* Continue with standard init */
acpi_noirq_set();
return 1;
}
/*
 * Langwell devices are not true PCI devices; they are not subject to the
 * 10 ms D3-to-D0 delay required by the PCI spec.
*/
static void pci_d3delay_fixup(struct pci_dev *dev)
{
/*
* PCI fixups are effectively decided compile time. If we have a dual
* SoC/non-SoC kernel we don't want to mangle d3 on non-SoC devices.
*/
if (!pci_soc_mode)
return;
/*
	 * True PCI devices in Lincroft should allow type 1 access; the rest
	 * are Langwell fake PCI devices.
*/
if (type1_access_ok(dev->bus->number, dev->devfn, PCI_DEVICE_ID))
return;
dev->d3hot_delay = 0;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_d3delay_fixup);
static void mid_power_off_one_device(struct pci_dev *dev)
{
u16 pmcsr;
/*
	 * Update the current state first, otherwise the PCI core enforces
	 * PCI_D0 in pci_set_power_state() for devices whose status was
	 * PCI_UNKNOWN.
*/
pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
dev->current_state = (pci_power_t __force)(pmcsr & PCI_PM_CTRL_STATE_MASK);
pci_set_power_state(dev, PCI_D3hot);
}
static void mid_power_off_devices(struct pci_dev *dev)
{
int id;
if (!pci_soc_mode)
return;
id = intel_mid_pwr_get_lss_id(dev);
if (id < 0)
return;
/*
* This sets only PMCSR bits. The actual power off will happen in
* arch/x86/platform/intel-mid/pwr.c.
*/
mid_power_off_one_device(dev);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, mid_power_off_devices);
/*
* Langwell devices reside at fixed offsets, don't try to move them.
*/
static void pci_fixed_bar_fixup(struct pci_dev *dev)
{
unsigned long offset;
u32 size;
int i;
if (!pci_soc_mode)
return;
/* Must have extended configuration space */
if (dev->cfg_size < PCIE_CAP_OFFSET + 4)
return;
/* Fixup the BAR sizes for fixed BAR devices and make them unmoveable */
offset = fixed_bar_cap(dev->bus, dev->devfn);
if (!offset || PCI_DEVFN(2, 0) == dev->devfn ||
PCI_DEVFN(2, 2) == dev->devfn)
return;
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
pci_read_config_dword(dev, offset + 8 + (i * 4), &size);
dev->resource[i].end = dev->resource[i].start + size - 1;
dev->resource[i].flags |= IORESOURCE_PCI_FIXED;
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_fixed_bar_fixup);
| linux-master | arch/x86/pci/intel_mid_pci.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Low-Level PCI Access for i386 machines
*
* Copyright 1993, 1994 Drew Eckhardt
* Visionary Computing
* (Unix and Linux consulting and custom programming)
* [email protected]
* +1 (303) 786-7975
*
* Drew's work was sponsored by:
* iX Multiuser Multitasking Magazine
* Hannover, Germany
* [email protected]
*
* Copyright 1997--2000 Martin Mares <[email protected]>
*
* For more information, please consult the following manuals (look at
* http://www.pcisig.com/ for how to get them):
*
* PCI BIOS Specification
* PCI Local Bus Specification
* PCI to PCI Bridge Specification
* PCI System Design Guide
*
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/errno.h>
#include <linux/memblock.h>
#include <asm/memtype.h>
#include <asm/e820/api.h>
#include <asm/pci_x86.h>
#include <asm/io_apic.h>
/*
* This list of dynamic mappings is for temporarily maintaining
* original BIOS BAR addresses for possible reinstatement.
*/
struct pcibios_fwaddrmap {
struct list_head list;
struct pci_dev *dev;
resource_size_t fw_addr[DEVICE_COUNT_RESOURCE];
};
static LIST_HEAD(pcibios_fwaddrmappings);
static DEFINE_SPINLOCK(pcibios_fwaddrmap_lock);
static bool pcibios_fw_addr_done;
/* Must be called with 'pcibios_fwaddrmap_lock' lock held. */
static struct pcibios_fwaddrmap *pcibios_fwaddrmap_lookup(struct pci_dev *dev)
{
struct pcibios_fwaddrmap *map;
lockdep_assert_held(&pcibios_fwaddrmap_lock);
list_for_each_entry(map, &pcibios_fwaddrmappings, list)
if (map->dev == dev)
return map;
return NULL;
}
static void
pcibios_save_fw_addr(struct pci_dev *dev, int idx, resource_size_t fw_addr)
{
unsigned long flags;
struct pcibios_fwaddrmap *map;
if (pcibios_fw_addr_done)
return;
spin_lock_irqsave(&pcibios_fwaddrmap_lock, flags);
map = pcibios_fwaddrmap_lookup(dev);
if (!map) {
spin_unlock_irqrestore(&pcibios_fwaddrmap_lock, flags);
map = kzalloc(sizeof(*map), GFP_KERNEL);
if (!map)
return;
map->dev = pci_dev_get(dev);
map->fw_addr[idx] = fw_addr;
INIT_LIST_HEAD(&map->list);
spin_lock_irqsave(&pcibios_fwaddrmap_lock, flags);
list_add_tail(&map->list, &pcibios_fwaddrmappings);
} else
map->fw_addr[idx] = fw_addr;
spin_unlock_irqrestore(&pcibios_fwaddrmap_lock, flags);
}
resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx)
{
unsigned long flags;
struct pcibios_fwaddrmap *map;
resource_size_t fw_addr = 0;
if (pcibios_fw_addr_done)
return 0;
spin_lock_irqsave(&pcibios_fwaddrmap_lock, flags);
map = pcibios_fwaddrmap_lookup(dev);
if (map)
fw_addr = map->fw_addr[idx];
spin_unlock_irqrestore(&pcibios_fwaddrmap_lock, flags);
return fw_addr;
}
static void __init pcibios_fw_addr_list_del(void)
{
unsigned long flags;
struct pcibios_fwaddrmap *entry, *next;
spin_lock_irqsave(&pcibios_fwaddrmap_lock, flags);
list_for_each_entry_safe(entry, next, &pcibios_fwaddrmappings, list) {
list_del(&entry->list);
pci_dev_put(entry->dev);
kfree(entry);
}
spin_unlock_irqrestore(&pcibios_fwaddrmap_lock, flags);
pcibios_fw_addr_done = true;
}
static int skip_isa_ioresource_align(struct pci_dev *dev)
{
if ((pci_probe & PCI_CAN_SKIP_ISA_ALIGN) &&
!(dev->bus->bridge_ctl & PCI_BRIDGE_CTL_ISA))
return 1;
return 0;
}
/*
* We need to avoid collisions with `mirrored' VGA ports
* and other strange ISA hardware, so we always want the
* addresses to be allocated in the 0x000-0x0ff region
* modulo 0x400.
*
* Why? Because some silly external IO cards only decode
* the low 10 bits of the IO address. The 0x00-0xff region
* is reserved for motherboard devices that decode all 16
* bits, so it's ok to allocate at, say, 0x2800-0x28ff,
* but we want to try to avoid allocating at 0x2900-0x2bff
 * which might be mirrored at 0x0100-0x03ff.
*/
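/*
 * For example (illustrative values): a request starting at 0x2900 has
 * bits in 0x300 set, so it is rounded up to (0x2900 + 0x3ff) & ~0x3ff
 * = 0x2c00, which is 0x000 modulo 0x400 and therefore safe from 10-bit
 * ISA decoders.
 */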
resource_size_t
pcibios_align_resource(void *data, const struct resource *res,
resource_size_t size, resource_size_t align)
{
struct pci_dev *dev = data;
resource_size_t start = res->start;
if (res->flags & IORESOURCE_IO) {
if (skip_isa_ioresource_align(dev))
return start;
if (start & 0x300)
start = (start + 0x3ff) & ~0x3ff;
} else if (res->flags & IORESOURCE_MEM) {
/* The low 1MB range is reserved for ISA cards */
if (start < BIOS_END)
start = BIOS_END;
}
return start;
}
EXPORT_SYMBOL(pcibios_align_resource);
/*
* Handle resources of PCI devices. If the world were perfect, we could
* just allocate all the resource regions and do nothing more. It isn't.
* On the other hand, we cannot just re-allocate all devices, as it would
* require us to know lots of host bridge internals. So we attempt to
* keep as much of the original configuration as possible, but tweak it
* when it's found to be wrong.
*
* Known BIOS problems we have to work around:
* - I/O or memory regions not configured
* - regions configured, but not enabled in the command register
* - bogus I/O addresses above 64K used
* - expansion ROMs left enabled (this may sound harmless, but given
* the fact the PCI specs explicitly allow address decoders to be
* shared between expansion ROMs and other resource regions, it's
* at least dangerous)
* - bad resource sizes or overlaps with other regions
*
* Our solution:
* (1) Allocate resources for all buses behind PCI-to-PCI bridges.
* This gives us fixed barriers on where we can allocate.
* (2) Allocate resources for all enabled devices. If there is
* a collision, just mark the resource as unallocated. Also
* disable expansion ROMs during this step.
* (3) Try to allocate resources for disabled devices. If the
* resources were assigned correctly, everything goes well,
* if they weren't, they won't disturb allocation of other
* resources.
* (4) Assign new addresses to resources which were either
* not configured at all or misconfigured. If explicitly
* requested by the user, configure expansion ROM address
* as well.
*/
static void pcibios_allocate_bridge_resources(struct pci_dev *dev)
{
int idx;
struct resource *r;
for (idx = PCI_BRIDGE_RESOURCES; idx < PCI_NUM_RESOURCES; idx++) {
r = &dev->resource[idx];
if (!r->flags)
continue;
if (r->parent) /* Already allocated */
continue;
if (!r->start || pci_claim_bridge_resource(dev, idx) < 0) {
/*
* Something is wrong with the region.
* Invalidate the resource to prevent
* child resource allocations in this
* range.
*/
r->start = r->end = 0;
r->flags = 0;
}
}
}
static void pcibios_allocate_bus_resources(struct pci_bus *bus)
{
struct pci_bus *child;
/* Depth-First Search on bus tree */
if (bus->self)
pcibios_allocate_bridge_resources(bus->self);
list_for_each_entry(child, &bus->children, node)
pcibios_allocate_bus_resources(child);
}
struct pci_check_idx_range {
int start;
int end;
};
static void pcibios_allocate_dev_resources(struct pci_dev *dev, int pass)
{
int idx, disabled, i;
u16 command;
struct resource *r;
struct pci_check_idx_range idx_range[] = {
{ PCI_STD_RESOURCES, PCI_STD_RESOURCE_END },
#ifdef CONFIG_PCI_IOV
{ PCI_IOV_RESOURCES, PCI_IOV_RESOURCE_END },
#endif
};
pci_read_config_word(dev, PCI_COMMAND, &command);
for (i = 0; i < ARRAY_SIZE(idx_range); i++)
for (idx = idx_range[i].start; idx <= idx_range[i].end; idx++) {
r = &dev->resource[idx];
if (r->parent) /* Already allocated */
continue;
if (!r->start) /* Address not assigned at all */
continue;
if (r->flags & IORESOURCE_IO)
disabled = !(command & PCI_COMMAND_IO);
else
disabled = !(command & PCI_COMMAND_MEMORY);
if (pass == disabled) {
dev_dbg(&dev->dev,
"BAR %d: reserving %pr (d=%d, p=%d)\n",
idx, r, disabled, pass);
if (pci_claim_resource(dev, idx) < 0) {
if (r->flags & IORESOURCE_PCI_FIXED) {
dev_info(&dev->dev, "BAR %d %pR is immovable\n",
idx, r);
} else {
/* We'll assign a new address later */
pcibios_save_fw_addr(dev,
idx, r->start);
r->end -= r->start;
r->start = 0;
}
}
}
}
if (!pass) {
r = &dev->resource[PCI_ROM_RESOURCE];
if (r->flags & IORESOURCE_ROM_ENABLE) {
/* Turn the ROM off, leave the resource region,
* but keep it unregistered. */
u32 reg;
dev_dbg(&dev->dev, "disabling ROM %pR\n", r);
r->flags &= ~IORESOURCE_ROM_ENABLE;
			pci_read_config_dword(dev, dev->rom_base_reg, &reg);
pci_write_config_dword(dev, dev->rom_base_reg,
reg & ~PCI_ROM_ADDRESS_ENABLE);
}
}
}
static void pcibios_allocate_resources(struct pci_bus *bus, int pass)
{
struct pci_dev *dev;
struct pci_bus *child;
list_for_each_entry(dev, &bus->devices, bus_list) {
pcibios_allocate_dev_resources(dev, pass);
child = dev->subordinate;
if (child)
pcibios_allocate_resources(child, pass);
}
}
static void pcibios_allocate_dev_rom_resource(struct pci_dev *dev)
{
struct resource *r;
/*
* Try to use BIOS settings for ROMs, otherwise let
* pci_assign_unassigned_resources() allocate the new
* addresses.
*/
r = &dev->resource[PCI_ROM_RESOURCE];
if (!r->flags || !r->start)
return;
if (r->parent) /* Already allocated */
return;
if (pci_claim_resource(dev, PCI_ROM_RESOURCE) < 0) {
r->end -= r->start;
r->start = 0;
}
}
static void pcibios_allocate_rom_resources(struct pci_bus *bus)
{
struct pci_dev *dev;
struct pci_bus *child;
list_for_each_entry(dev, &bus->devices, bus_list) {
pcibios_allocate_dev_rom_resource(dev);
child = dev->subordinate;
if (child)
pcibios_allocate_rom_resources(child);
}
}
static int __init pcibios_assign_resources(void)
{
struct pci_bus *bus;
if (!(pci_probe & PCI_ASSIGN_ROMS))
list_for_each_entry(bus, &pci_root_buses, node)
pcibios_allocate_rom_resources(bus);
pci_assign_unassigned_resources();
pcibios_fw_addr_list_del();
return 0;
}
/*
* This is an fs_initcall (one below subsys_initcall) in order to reserve
* resources properly.
*/
fs_initcall(pcibios_assign_resources);
void pcibios_resource_survey_bus(struct pci_bus *bus)
{
dev_printk(KERN_DEBUG, &bus->dev, "Allocating resources\n");
pcibios_allocate_bus_resources(bus);
pcibios_allocate_resources(bus, 0);
pcibios_allocate_resources(bus, 1);
if (!(pci_probe & PCI_ASSIGN_ROMS))
pcibios_allocate_rom_resources(bus);
}
void __init pcibios_resource_survey(void)
{
struct pci_bus *bus;
DBG("PCI: Allocating resources\n");
list_for_each_entry(bus, &pci_root_buses, node)
pcibios_allocate_bus_resources(bus);
list_for_each_entry(bus, &pci_root_buses, node)
pcibios_allocate_resources(bus, 0);
list_for_each_entry(bus, &pci_root_buses, node)
pcibios_allocate_resources(bus, 1);
e820__reserve_resources_late();
/*
* Insert the IO APIC resources after PCI initialization has
* occurred to handle IO APICS that are mapped in on a BAR in
* PCI space, but before trying to assign unassigned pci res.
*/
ioapic_insert_resources();
}
| linux-master | arch/x86/pci/i386.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/pci.h>
#include <linux/init.h>
#include <asm/pci_x86.h>
#include <asm/x86_init.h>
#include <asm/irqdomain.h>
/* arch_initcall ordering is too unpredictable, so call the initializers
   in the right sequence from here. */
static __init int pci_arch_init(void)
{
int type, pcbios = 1;
type = pci_direct_probe();
if (!(pci_probe & PCI_PROBE_NOEARLY))
pci_mmcfg_early_init();
if (x86_init.pci.arch_init)
pcbios = x86_init.pci.arch_init();
/*
* Must happen after x86_init.pci.arch_init(). Xen sets up the
* x86_init.irqs.create_pci_msi_domain there.
*/
x86_create_pci_msi_domain();
if (!pcbios)
return 0;
pci_pcbios_init();
/*
	 * Don't check for raw_pci_ops here because we want pcbios as the
	 * last fallback, yet it needs to run first to set pcibios_last_bus
	 * in case legacy PCI probing is used; otherwise detecting peer
	 * busses fails.
*/
pci_direct_init(type);
if (!raw_pci_ops && !raw_pci_ext_ops)
printk(KERN_ERR
"PCI: Fatal: No config space access function found\n");
dmi_check_pciprobe();
dmi_check_skip_isa_align();
return 0;
}
arch_initcall(pci_arch_init);
| linux-master | arch/x86/pci/init.c |
// SPDX-License-Identifier: GPL-2.0
/*
* mmconfig.c - Low-level direct PCI config space access via MMCONFIG
*
* This is an 64bit optimized version that always keeps the full mmconfig
* space mapped. This allows lockless config space operation.
*/
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/bitmap.h>
#include <linux/rcupdate.h>
#include <asm/e820/api.h>
#include <asm/pci_x86.h>
#define PREFIX "PCI: "
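/*
 * The per-function offset within a region is
 * PCI_MMCFG_BUS_OFFSET(bus) | (devfn << 12), i.e. 1M per bus plus 4K
 * per function; e.g. bus 1, devfn 0x10 lands at 0x110000 (illustrative
 * values only).
 */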
static char __iomem *pci_dev_base(unsigned int seg, unsigned int bus, unsigned int devfn)
{
struct pci_mmcfg_region *cfg = pci_mmconfig_lookup(seg, bus);
if (cfg && cfg->virt)
return cfg->virt + (PCI_MMCFG_BUS_OFFSET(bus) | (devfn << 12));
return NULL;
}
static int pci_mmcfg_read(unsigned int seg, unsigned int bus,
unsigned int devfn, int reg, int len, u32 *value)
{
char __iomem *addr;
/* Why do we have this when nobody checks it. How about a BUG()!? -AK */
if (unlikely((bus > 255) || (devfn > 255) || (reg > 4095))) {
err: *value = -1;
return -EINVAL;
}
rcu_read_lock();
addr = pci_dev_base(seg, bus, devfn);
if (!addr) {
rcu_read_unlock();
goto err;
}
switch (len) {
case 1:
*value = mmio_config_readb(addr + reg);
break;
case 2:
*value = mmio_config_readw(addr + reg);
break;
case 4:
*value = mmio_config_readl(addr + reg);
break;
}
rcu_read_unlock();
return 0;
}
static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
unsigned int devfn, int reg, int len, u32 value)
{
char __iomem *addr;
/* Why do we have this when nobody checks it. How about a BUG()!? -AK */
if (unlikely((bus > 255) || (devfn > 255) || (reg > 4095)))
return -EINVAL;
rcu_read_lock();
addr = pci_dev_base(seg, bus, devfn);
if (!addr) {
rcu_read_unlock();
return -EINVAL;
}
switch (len) {
case 1:
mmio_config_writeb(addr + reg, value);
break;
case 2:
mmio_config_writew(addr + reg, value);
break;
case 4:
mmio_config_writel(addr + reg, value);
break;
}
rcu_read_unlock();
return 0;
}
const struct pci_raw_ops pci_mmcfg = {
.read = pci_mmcfg_read,
.write = pci_mmcfg_write,
};
static void __iomem *mcfg_ioremap(struct pci_mmcfg_region *cfg)
{
void __iomem *addr;
u64 start, size;
int num_buses;
start = cfg->address + PCI_MMCFG_BUS_OFFSET(cfg->start_bus);
num_buses = cfg->end_bus - cfg->start_bus + 1;
size = PCI_MMCFG_BUS_OFFSET(num_buses);
addr = ioremap(start, size);
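	/*
	 * Bias the returned pointer by start_bus so pci_dev_base() can
	 * index it with the absolute bus number rather than one relative
	 * to the start of the region.
	 */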
if (addr)
addr -= PCI_MMCFG_BUS_OFFSET(cfg->start_bus);
return addr;
}
int __init pci_mmcfg_arch_init(void)
{
struct pci_mmcfg_region *cfg;
list_for_each_entry(cfg, &pci_mmcfg_list, list)
if (pci_mmcfg_arch_map(cfg)) {
pci_mmcfg_arch_free();
return 0;
}
raw_pci_ext_ops = &pci_mmcfg;
return 1;
}
void __init pci_mmcfg_arch_free(void)
{
struct pci_mmcfg_region *cfg;
list_for_each_entry(cfg, &pci_mmcfg_list, list)
pci_mmcfg_arch_unmap(cfg);
}
int pci_mmcfg_arch_map(struct pci_mmcfg_region *cfg)
{
cfg->virt = mcfg_ioremap(cfg);
if (!cfg->virt) {
pr_err(PREFIX "can't map MMCONFIG at %pR\n", &cfg->res);
return -ENOMEM;
}
return 0;
}
void pci_mmcfg_arch_unmap(struct pci_mmcfg_region *cfg)
{
if (cfg && cfg->virt) {
iounmap(cfg->virt + PCI_MMCFG_BUS_OFFSET(cfg->start_bus));
cfg->virt = NULL;
}
}
| linux-master | arch/x86/pci/mmconfig_64.c |
// SPDX-License-Identifier: GPL-2.0
/*
* direct.c - Low-level direct PCI config space access
*/
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/dmi.h>
#include <asm/pci_x86.h>
/*
* Functions for accessing PCI base (first 256 bytes) and extended
* (4096 bytes per PCI function) configuration space with type 1
* accesses.
*/
#define PCI_CONF1_ADDRESS(bus, devfn, reg) \
(0x80000000 | ((reg & 0xF00) << 16) | (bus << 16) \
| (devfn << 8) | (reg & 0xFC))
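/*
 * Example (illustrative values): bus 0, device 3, function 0 (devfn
 * 0x18), register 0x40 encodes as 0x80000000 | (0x18 << 8) | 0x40 =
 * 0x80001840; an extended register such as 0x140 additionally sets the
 * high bits, giving 0x81001840.
 */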
static int pci_conf1_read(unsigned int seg, unsigned int bus,
unsigned int devfn, int reg, int len, u32 *value)
{
unsigned long flags;
if (seg || (bus > 255) || (devfn > 255) || (reg > 4095)) {
*value = -1;
return -EINVAL;
}
raw_spin_lock_irqsave(&pci_config_lock, flags);
outl(PCI_CONF1_ADDRESS(bus, devfn, reg), 0xCF8);
switch (len) {
case 1:
*value = inb(0xCFC + (reg & 3));
break;
case 2:
*value = inw(0xCFC + (reg & 2));
break;
case 4:
*value = inl(0xCFC);
break;
}
raw_spin_unlock_irqrestore(&pci_config_lock, flags);
return 0;
}
static int pci_conf1_write(unsigned int seg, unsigned int bus,
unsigned int devfn, int reg, int len, u32 value)
{
unsigned long flags;
if (seg || (bus > 255) || (devfn > 255) || (reg > 4095))
return -EINVAL;
raw_spin_lock_irqsave(&pci_config_lock, flags);
outl(PCI_CONF1_ADDRESS(bus, devfn, reg), 0xCF8);
switch (len) {
case 1:
outb((u8)value, 0xCFC + (reg & 3));
break;
case 2:
outw((u16)value, 0xCFC + (reg & 2));
break;
case 4:
outl((u32)value, 0xCFC);
break;
}
raw_spin_unlock_irqrestore(&pci_config_lock, flags);
return 0;
}
#undef PCI_CONF1_ADDRESS
const struct pci_raw_ops pci_direct_conf1 = {
.read = pci_conf1_read,
.write = pci_conf1_write,
};
/*
* Functions for accessing PCI configuration space with type 2 accesses
*/
#define PCI_CONF2_ADDRESS(dev, reg) (u16)(0xC000 | (dev << 8) | reg)
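/*
 * Example (illustrative values): device 3, register 0x40 yields the
 * I/O address 0xC000 | (3 << 8) | 0x40 = 0xC340, once the function and
 * bus have been latched through ports 0xCF8/0xCFA.
 */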
static int pci_conf2_read(unsigned int seg, unsigned int bus,
unsigned int devfn, int reg, int len, u32 *value)
{
unsigned long flags;
int dev, fn;
WARN_ON(seg);
if ((bus > 255) || (devfn > 255) || (reg > 255)) {
*value = -1;
return -EINVAL;
}
dev = PCI_SLOT(devfn);
fn = PCI_FUNC(devfn);
if (dev & 0x10)
return PCIBIOS_DEVICE_NOT_FOUND;
raw_spin_lock_irqsave(&pci_config_lock, flags);
outb((u8)(0xF0 | (fn << 1)), 0xCF8);
outb((u8)bus, 0xCFA);
switch (len) {
case 1:
*value = inb(PCI_CONF2_ADDRESS(dev, reg));
break;
case 2:
*value = inw(PCI_CONF2_ADDRESS(dev, reg));
break;
case 4:
*value = inl(PCI_CONF2_ADDRESS(dev, reg));
break;
}
outb(0, 0xCF8);
raw_spin_unlock_irqrestore(&pci_config_lock, flags);
return 0;
}
static int pci_conf2_write(unsigned int seg, unsigned int bus,
unsigned int devfn, int reg, int len, u32 value)
{
unsigned long flags;
int dev, fn;
WARN_ON(seg);
if ((bus > 255) || (devfn > 255) || (reg > 255))
return -EINVAL;
dev = PCI_SLOT(devfn);
fn = PCI_FUNC(devfn);
if (dev & 0x10)
return PCIBIOS_DEVICE_NOT_FOUND;
raw_spin_lock_irqsave(&pci_config_lock, flags);
outb((u8)(0xF0 | (fn << 1)), 0xCF8);
outb((u8)bus, 0xCFA);
switch (len) {
case 1:
outb((u8)value, PCI_CONF2_ADDRESS(dev, reg));
break;
case 2:
outw((u16)value, PCI_CONF2_ADDRESS(dev, reg));
break;
case 4:
outl((u32)value, PCI_CONF2_ADDRESS(dev, reg));
break;
}
outb(0, 0xCF8);
raw_spin_unlock_irqrestore(&pci_config_lock, flags);
return 0;
}
#undef PCI_CONF2_ADDRESS
static const struct pci_raw_ops pci_direct_conf2 = {
.read = pci_conf2_read,
.write = pci_conf2_write,
};
/*
* Before we decide to use direct hardware access mechanisms, we try to do some
* trivial checks to ensure it at least _seems_ to be working -- we just test
* whether bus 00 contains a host bridge (this is similar to checking
* techniques used in XFree86, but ours should be more reliable since we
* attempt to make use of direct access hints provided by the PCI BIOS).
*
* This should be close to trivial, but it isn't, because there are buggy
* chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID.
*/
static int __init pci_sanity_check(const struct pci_raw_ops *o)
{
u32 x = 0;
int devfn;
if (pci_probe & PCI_NO_CHECKS)
return 1;
/* Assume Type 1 works for newer systems.
This handles machines that don't have anything on PCI Bus 0. */
if (dmi_get_bios_year() >= 2001)
return 1;
for (devfn = 0; devfn < 0x100; devfn++) {
if (o->read(0, 0, devfn, PCI_CLASS_DEVICE, 2, &x))
continue;
if (x == PCI_CLASS_BRIDGE_HOST || x == PCI_CLASS_DISPLAY_VGA)
return 1;
if (o->read(0, 0, devfn, PCI_VENDOR_ID, 2, &x))
continue;
if (x == PCI_VENDOR_ID_INTEL || x == PCI_VENDOR_ID_COMPAQ)
return 1;
}
DBG(KERN_WARNING "PCI: Sanity check failed\n");
return 0;
}
static int __init pci_check_type1(void)
{
unsigned long flags;
unsigned int tmp;
int works = 0;
local_irq_save(flags);
outb(0x01, 0xCFB);
tmp = inl(0xCF8);
outl(0x80000000, 0xCF8);
if (inl(0xCF8) == 0x80000000 && pci_sanity_check(&pci_direct_conf1)) {
works = 1;
}
outl(tmp, 0xCF8);
local_irq_restore(flags);
return works;
}
static int __init pci_check_type2(void)
{
unsigned long flags;
int works = 0;
local_irq_save(flags);
outb(0x00, 0xCFB);
outb(0x00, 0xCF8);
outb(0x00, 0xCFA);
if (inb(0xCF8) == 0x00 && inb(0xCFA) == 0x00 &&
pci_sanity_check(&pci_direct_conf2)) {
works = 1;
}
local_irq_restore(flags);
return works;
}
void __init pci_direct_init(int type)
{
if (type == 0)
return;
printk(KERN_INFO "PCI: Using configuration type %d for base access\n",
type);
if (type == 1) {
raw_pci_ops = &pci_direct_conf1;
if (raw_pci_ext_ops)
return;
if (!(pci_probe & PCI_HAS_IO_ECS))
return;
printk(KERN_INFO "PCI: Using configuration type 1 "
"for extended access\n");
raw_pci_ext_ops = &pci_direct_conf1;
return;
}
raw_pci_ops = &pci_direct_conf2;
}
int __init pci_direct_probe(void)
{
if ((pci_probe & PCI_PROBE_CONF1) == 0)
goto type2;
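/*
 * Mechanism #1 owns I/O ports 0xCF8-0xCFF: the dword-sized
 * CONFIG_ADDRESS register at 0xCF8 and CONFIG_DATA at 0xCFC.
 */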
if (!request_region(0xCF8, 8, "PCI conf1"))
goto type2;
if (pci_check_type1()) {
raw_pci_ops = &pci_direct_conf1;
port_cf9_safe = true;
return 1;
}
release_region(0xCF8, 8);
type2:
if ((pci_probe & PCI_PROBE_CONF2) == 0)
return 0;
if (!request_region(0xCF8, 4, "PCI conf2"))
return 0;
if (!request_region(0xC000, 0x1000, "PCI conf2"))
goto fail2;
if (pci_check_type2()) {
raw_pci_ops = &pci_direct_conf2;
port_cf9_safe = true;
return 2;
}
release_region(0xC000, 0x1000);
fail2:
release_region(0xCF8, 4);
return 0;
}
| linux-master | arch/x86/pci/direct.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright(c) 2010 Intel Corporation. All rights reserved.
*
* Contact Information:
* Intel Corporation
* 2200 Mission College Blvd.
* Santa Clara, CA 97052
*
* This provides access methods for PCI registers that misbehave on
* the CE4100. Each register can be assigned a private init, read and
* write routine. The exception to this is the bridge device. The
* bridge device is the only device on bus zero (0) that requires any
* fixup, so it is a special case at the moment.
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <asm/ce4100.h>
#include <asm/pci_x86.h>
struct sim_reg {
u32 value;
u32 mask;
};
struct sim_dev_reg {
int dev_func;
int reg;
void (*init)(struct sim_dev_reg *reg);
void (*read)(struct sim_dev_reg *reg, u32 *value);
void (*write)(struct sim_dev_reg *reg, u32 value);
struct sim_reg sim_reg;
};
struct sim_reg_op {
void (*init)(struct sim_dev_reg *reg);
void (*read)(struct sim_dev_reg *reg, u32 *value);
void (*write)(struct sim_dev_reg *reg, u32 value);
};
#define MB (1024 * 1024)
#define KB (1024)
#define SIZE_TO_MASK(size) (~(size - 1))
#define DEFINE_REG(device, func, offset, size, init_op, read_op, write_op)\
{ PCI_DEVFN(device, func), offset, init_op, read_op, write_op,\
{0, SIZE_TO_MASK(size)} },
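/*
 * Example: DEFINE_REG(2, 0, 0x10, (16*MB), ...) sets the write mask
 * to SIZE_TO_MASK(16*MB) == 0xff000000. reg_write() below only lets
 * masked bits change, so a size probe (writing ~0 to the BAR) reads
 * back 0xff000000 plus the old low bits, which the PCI core decodes
 * as a 16 MB region.
 */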
/*
* All read/write functions are called with pci_config_lock held.
*/
static void reg_init(struct sim_dev_reg *reg)
{
pci_direct_conf1.read(0, 1, reg->dev_func, reg->reg, 4,
&reg->sim_reg.value);
}
static void reg_read(struct sim_dev_reg *reg, u32 *value)
{
*value = reg->sim_reg.value;
}
static void reg_write(struct sim_dev_reg *reg, u32 value)
{
reg->sim_reg.value = (value & reg->sim_reg.mask) |
(reg->sim_reg.value & ~reg->sim_reg.mask);
}
static void sata_reg_init(struct sim_dev_reg *reg)
{
pci_direct_conf1.read(0, 1, PCI_DEVFN(14, 0), 0x10, 4,
&reg->sim_reg.value);
reg->sim_reg.value += 0x400;
}
static void ehci_reg_read(struct sim_dev_reg *reg, u32 *value)
{
reg_read(reg, value);
if (*value != reg->sim_reg.mask)
*value |= 0x100;
}
static void sata_revid_init(struct sim_dev_reg *reg)
{
reg->sim_reg.value = 0x01060100;
reg->sim_reg.mask = 0;
}
static void sata_revid_read(struct sim_dev_reg *reg, u32 *value)
{
reg_read(reg, value);
}
static void reg_noirq_read(struct sim_dev_reg *reg, u32 *value)
{
/* force interrupt pin value to 0 */
*value = reg->sim_reg.value & 0xfff00ff;
}
static struct sim_dev_reg bus1_fixups[] = {
DEFINE_REG(2, 0, 0x10, (16*MB), reg_init, reg_read, reg_write)
DEFINE_REG(2, 0, 0x14, (256), reg_init, reg_read, reg_write)
DEFINE_REG(2, 1, 0x10, (64*KB), reg_init, reg_read, reg_write)
DEFINE_REG(3, 0, 0x10, (64*KB), reg_init, reg_read, reg_write)
DEFINE_REG(4, 0, 0x10, (128*KB), reg_init, reg_read, reg_write)
DEFINE_REG(4, 1, 0x10, (128*KB), reg_init, reg_read, reg_write)
DEFINE_REG(6, 0, 0x10, (512*KB), reg_init, reg_read, reg_write)
DEFINE_REG(6, 1, 0x10, (512*KB), reg_init, reg_read, reg_write)
DEFINE_REG(6, 2, 0x10, (64*KB), reg_init, reg_read, reg_write)
DEFINE_REG(8, 0, 0x10, (1*MB), reg_init, reg_read, reg_write)
DEFINE_REG(8, 1, 0x10, (64*KB), reg_init, reg_read, reg_write)
DEFINE_REG(8, 2, 0x10, (64*KB), reg_init, reg_read, reg_write)
DEFINE_REG(9, 0, 0x10 , (1*MB), reg_init, reg_read, reg_write)
DEFINE_REG(9, 0, 0x14, (64*KB), reg_init, reg_read, reg_write)
DEFINE_REG(10, 0, 0x10, (256), reg_init, reg_read, reg_write)
DEFINE_REG(10, 0, 0x14, (256*MB), reg_init, reg_read, reg_write)
DEFINE_REG(11, 0, 0x10, (256), reg_init, reg_read, reg_write)
DEFINE_REG(11, 0, 0x14, (256), reg_init, reg_read, reg_write)
DEFINE_REG(11, 1, 0x10, (256), reg_init, reg_read, reg_write)
DEFINE_REG(11, 2, 0x10, (256), reg_init, reg_read, reg_write)
DEFINE_REG(11, 2, 0x14, (256), reg_init, reg_read, reg_write)
DEFINE_REG(11, 2, 0x18, (256), reg_init, reg_read, reg_write)
DEFINE_REG(11, 3, 0x10, (256), reg_init, reg_read, reg_write)
DEFINE_REG(11, 3, 0x14, (256), reg_init, reg_read, reg_write)
DEFINE_REG(11, 4, 0x10, (256), reg_init, reg_read, reg_write)
DEFINE_REG(11, 5, 0x10, (64*KB), reg_init, reg_read, reg_write)
DEFINE_REG(11, 6, 0x10, (256), reg_init, reg_read, reg_write)
DEFINE_REG(11, 7, 0x10, (64*KB), reg_init, reg_read, reg_write)
DEFINE_REG(11, 7, 0x3c, 256, reg_init, reg_noirq_read, reg_write)
DEFINE_REG(12, 0, 0x10, (128*KB), reg_init, reg_read, reg_write)
DEFINE_REG(12, 0, 0x14, (256), reg_init, reg_read, reg_write)
DEFINE_REG(12, 1, 0x10, (1024), reg_init, reg_read, reg_write)
DEFINE_REG(13, 0, 0x10, (32*KB), reg_init, ehci_reg_read, reg_write)
DEFINE_REG(13, 1, 0x10, (32*KB), reg_init, ehci_reg_read, reg_write)
DEFINE_REG(14, 0, 0x8, 0, sata_revid_init, sata_revid_read, 0)
DEFINE_REG(14, 0, 0x10, 0, reg_init, reg_read, reg_write)
DEFINE_REG(14, 0, 0x14, 0, reg_init, reg_read, reg_write)
DEFINE_REG(14, 0, 0x18, 0, reg_init, reg_read, reg_write)
DEFINE_REG(14, 0, 0x1C, 0, reg_init, reg_read, reg_write)
DEFINE_REG(14, 0, 0x20, 0, reg_init, reg_read, reg_write)
DEFINE_REG(14, 0, 0x24, (0x200), sata_reg_init, reg_read, reg_write)
DEFINE_REG(15, 0, 0x10, (64*KB), reg_init, reg_read, reg_write)
DEFINE_REG(15, 0, 0x14, (64*KB), reg_init, reg_read, reg_write)
DEFINE_REG(16, 0, 0x10, (64*KB), reg_init, reg_read, reg_write)
DEFINE_REG(16, 0, 0x14, (64*MB), reg_init, reg_read, reg_write)
DEFINE_REG(16, 0, 0x18, (64*MB), reg_init, reg_read, reg_write)
DEFINE_REG(16, 0, 0x3c, 256, reg_init, reg_noirq_read, reg_write)
DEFINE_REG(17, 0, 0x10, (128*KB), reg_init, reg_read, reg_write)
DEFINE_REG(18, 0, 0x10, (1*KB), reg_init, reg_read, reg_write)
DEFINE_REG(18, 0, 0x3c, 256, reg_init, reg_noirq_read, reg_write)
};
static void __init init_sim_regs(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(bus1_fixups); i++) {
if (bus1_fixups[i].init)
bus1_fixups[i].init(&bus1_fixups[i]);
}
}
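/*
 * The simulated registers are stored as dwords; extract_bytes()
 * narrows a dword to the requested access width. Example: a 1-byte
 * read of reg 0x0d shifts the dword right by 8 and masks with 0xff.
 */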
static inline void extract_bytes(u32 *value, int reg, int len)
{
uint32_t mask;
*value >>= ((reg & 3) * 8);
mask = 0xFFFFFFFF >> ((4 - len) * 8);
*value &= mask;
}
static int bridge_read(unsigned int devfn, int reg, int len, u32 *value)
{
u32 av_bridge_base, av_bridge_limit;
int retval = 0;
switch (reg) {
/* Make BARs appear to not request any memory. */
case PCI_BASE_ADDRESS_0:
case PCI_BASE_ADDRESS_0 + 1:
case PCI_BASE_ADDRESS_0 + 2:
case PCI_BASE_ADDRESS_0 + 3:
*value = 0;
break;
/* The subordinate bus number register is hardwired
* to zero and read-only, so simulate it here.
*/
case PCI_PRIMARY_BUS:
if (len == 4)
*value = 0x00010100;
break;
case PCI_SUBORDINATE_BUS:
*value = 1;
break;
case PCI_MEMORY_BASE:
case PCI_MEMORY_LIMIT:
/* Get the A/V bridge base address. */
pci_direct_conf1.read(0, 0, devfn,
PCI_BASE_ADDRESS_0, 4, &av_bridge_base);
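/*
 * Bridge window base/limit registers carry address bits 31:20
 * in register bits 15:4, hence the shift-by-16 and 0xFFF0
 * masking below; the simulated window spans 512 MB.
 */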
av_bridge_limit = av_bridge_base + (512*MB - 1);
av_bridge_limit >>= 16;
av_bridge_limit &= 0xFFF0;
av_bridge_base >>= 16;
av_bridge_base &= 0xFFF0;
if (reg == PCI_MEMORY_LIMIT)
*value = av_bridge_limit;
else if (len == 2)
*value = av_bridge_base;
else
*value = (av_bridge_limit << 16) | av_bridge_base;
break;
/* Make the prefetchable memory limit smaller than the prefetchable
* memory base, so the bridge does not claim prefetchable memory space.
*/
case PCI_PREF_MEMORY_BASE:
*value = 0xFFF0;
break;
case PCI_PREF_MEMORY_LIMIT:
*value = 0x0;
break;
/* Make the I/O limit smaller than the I/O base, so the bridge does not claim I/O space. */
case PCI_IO_BASE:
*value = 0xF0;
break;
case PCI_IO_LIMIT:
*value = 0;
break;
default:
retval = 1;
}
return retval;
}
static int ce4100_bus1_read(unsigned int devfn, int reg, int len, u32 *value)
{
unsigned long flags;
int i;
for (i = 0; i < ARRAY_SIZE(bus1_fixups); i++) {
if (bus1_fixups[i].dev_func == devfn &&
bus1_fixups[i].reg == (reg & ~3) &&
bus1_fixups[i].read) {
raw_spin_lock_irqsave(&pci_config_lock, flags);
bus1_fixups[i].read(&(bus1_fixups[i]), value);
raw_spin_unlock_irqrestore(&pci_config_lock, flags);
extract_bytes(value, reg, len);
return 0;
}
}
return -1;
}
static int ce4100_conf_read(unsigned int seg, unsigned int bus,
unsigned int devfn, int reg, int len, u32 *value)
{
WARN_ON(seg);
if (bus == 1 && !ce4100_bus1_read(devfn, reg, len, value))
return 0;
if (bus == 0 && (PCI_DEVFN(1, 0) == devfn) &&
!bridge_read(devfn, reg, len, value))
return 0;
return pci_direct_conf1.read(seg, bus, devfn, reg, len, value);
}
static int ce4100_bus1_write(unsigned int devfn, int reg, int len, u32 value)
{
unsigned long flags;
int i;
for (i = 0; i < ARRAY_SIZE(bus1_fixups); i++) {
if (bus1_fixups[i].dev_func == devfn &&
bus1_fixups[i].reg == (reg & ~3) &&
bus1_fixups[i].write) {
raw_spin_lock_irqsave(&pci_config_lock, flags);
bus1_fixups[i].write(&(bus1_fixups[i]), value);
raw_spin_unlock_irqrestore(&pci_config_lock, flags);
return 0;
}
}
return -1;
}
static int ce4100_conf_write(unsigned int seg, unsigned int bus,
unsigned int devfn, int reg, int len, u32 value)
{
WARN_ON(seg);
if (bus == 1 && !ce4100_bus1_write(devfn, reg, len, value))
return 0;
/* Discard writes to A/V bridge BAR. */
if (bus == 0 && PCI_DEVFN(1, 0) == devfn &&
((reg & ~3) == PCI_BASE_ADDRESS_0))
return 0;
return pci_direct_conf1.write(seg, bus, devfn, reg, len, value);
}
static const struct pci_raw_ops ce4100_pci_conf = {
.read = ce4100_conf_read,
.write = ce4100_conf_write,
};
int __init ce4100_pci_init(void)
{
init_sim_regs();
raw_pci_ops = &ce4100_pci_conf;
/* Indicate to the caller that it should invoke pci_legacy_init() */
return 1;
}
| linux-master | arch/x86/pci/ce4100.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Exceptions for specific devices. Usually work-arounds for fatal design flaws.
*/
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/pci.h>
#include <linux/vgaarb.h>
#include <asm/amd_nb.h>
#include <asm/hpet.h>
#include <asm/pci_x86.h>
static void pci_fixup_i450nx(struct pci_dev *d)
{
/*
* i450NX -- Find and scan all secondary buses on all PXB's.
*/
int pxb, reg;
u8 busno, suba, subb;
dev_warn(&d->dev, "Searching for i450NX host bridges\n");
reg = 0xd0;
for(pxb = 0; pxb < 2; pxb++) {
pci_read_config_byte(d, reg++, &busno);
pci_read_config_byte(d, reg++, &suba);
pci_read_config_byte(d, reg++, &subb);
dev_dbg(&d->dev, "i450NX PXB %d: %02x/%02x/%02x\n", pxb, busno,
suba, subb);
if (busno)
pcibios_scan_root(busno); /* Bus A */
if (suba < subb)
pcibios_scan_root(suba+1); /* Bus B */
}
pcibios_last_bus = -1;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82451NX, pci_fixup_i450nx);
static void pci_fixup_i450gx(struct pci_dev *d)
{
/*
* i450GX and i450KX -- Find and scan all secondary buses.
* (called separately for each PCI bridge found)
*/
u8 busno;
pci_read_config_byte(d, 0x4a, &busno);
dev_info(&d->dev, "i440KX/GX host bridge; secondary bus %02x\n", busno);
pcibios_scan_root(busno);
pcibios_last_bus = -1;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454GX, pci_fixup_i450gx);
static void pci_fixup_umc_ide(struct pci_dev *d)
{
/*
* The UM8886BF IDE controller sets its region type bits incorrectly,
* so the regions look like memory even though they are I/O.
*/
int i;
dev_warn(&d->dev, "Fixing base address flags\n");
for(i = 0; i < 4; i++)
d->resource[i].flags |= PCI_BASE_ADDRESS_SPACE_IO;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_UMC, PCI_DEVICE_ID_UMC_UM8886BF, pci_fixup_umc_ide);
static void pci_fixup_latency(struct pci_dev *d)
{
/*
* SiS 5597 and 5598 chipsets require latency timer set to
* at most 32 to avoid lockups.
*/
dev_dbg(&d->dev, "Setting max latency to 32\n");
pcibios_max_latency = 32;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5597, pci_fixup_latency);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5598, pci_fixup_latency);
static void pci_fixup_piix4_acpi(struct pci_dev *d)
{
/*
* PIIX4 ACPI device: hardwired IRQ9
*/
d->irq = 9;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3, pci_fixup_piix4_acpi);
/*
* Works around problems with the memory write queue timer in
* certain VIA Northbridges. This bugfix is per VIA's specifications,
* except for the KL133/KM133: clearing bit 5 on those Northbridges seems
* to trigger a bug in its integrated ProSavage video card, which
* causes screen corruption. We only clear bits 6 and 7 for that chipset,
* until VIA can provide us with definitive information on why screen
* corruption occurs, and what exactly those bits do.
*
* VIA 8363,8622,8361 Northbridges:
* - bits 5, 6, 7 at offset 0x55 need to be turned off
* VIA 8367 (KT266x) Northbridges:
* - bits 5, 6, 7 at offset 0x95 need to be turned off
* VIA 8363 rev 0x81/0x84 (KL133/KM133) Northbridges:
* - bits 6, 7 at offset 0x55 need to be turned off
*/
#define VIA_8363_KL133_REVISION_ID 0x81
#define VIA_8363_KM133_REVISION_ID 0x84
static void pci_fixup_via_northbridge_bug(struct pci_dev *d)
{
u8 v;
int where = 0x55;
int mask = 0x1f; /* clear bits 5, 6, 7 by default */
if (d->device == PCI_DEVICE_ID_VIA_8367_0) {
/* Fix PCI bus latency issues caused by a northbridge BIOS
error: buggy KT266x BIOSes force the NB latency to zero. */
pci_write_config_byte(d, PCI_LATENCY_TIMER, 0);
where = 0x95; /* the memory write queue timer register is
different for the KT266x's: 0x95 not 0x55 */
} else if (d->device == PCI_DEVICE_ID_VIA_8363_0 &&
(d->revision == VIA_8363_KL133_REVISION_ID ||
d->revision == VIA_8363_KM133_REVISION_ID)) {
mask = 0x3f; /* clear only bits 6 and 7; clearing bit 5
causes screen corruption on the KL133/KM133 */
}
pci_read_config_byte(d, where, &v);
if (v & ~mask) {
dev_warn(&d->dev, "Disabling VIA memory write queue (PCI ID %04x, rev %02x): [%02x] %02x & %02x -> %02x\n", \
d->device, d->revision, where, v, mask, v & mask);
v &= mask;
pci_write_config_byte(d, where, v);
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8363_0, pci_fixup_via_northbridge_bug);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8622, pci_fixup_via_northbridge_bug);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, pci_fixup_via_northbridge_bug);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8367_0, pci_fixup_via_northbridge_bug);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8363_0, pci_fixup_via_northbridge_bug);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8622, pci_fixup_via_northbridge_bug);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, pci_fixup_via_northbridge_bug);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8367_0, pci_fixup_via_northbridge_bug);
/*
* For some reason Intel decided that certain parts of their
* 815, 845 and some other chipsets must look like PCI-to-PCI bridges
* while they are obviously not. The 82801 family (AA, AB, BAM/CAM,
* BA/CA/DB and E) PCI bridges are actually HUB-to-PCI ones, according
* to Intel terminology. These devices forward all addresses from the
* system to the PCI bus no matter what their window settings are, so they
* are "transparent" (subtractive decoding) from the programmer's point of view.
*/
static void pci_fixup_transparent_bridge(struct pci_dev *dev)
{
if ((dev->device & 0xff00) == 0x2400)
dev->transparent = 1;
}
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
PCI_CLASS_BRIDGE_PCI, 8, pci_fixup_transparent_bridge);
/*
* Fixup for C1 Halt Disconnect problem on nForce2 systems.
*
* From information provided by "Allen Martin" <[email protected]>:
*
* A hang is caused when the CPU generates a very fast CONNECT/HALT cycle
* sequence. Workaround is to set the SYSTEM_IDLE_TIMEOUT to 80 ns.
* This allows the state-machine and timer to return to a proper state within
* 80 ns of the CONNECT and probe appearing together. Since the CPU will not
* issue another HALT within 80 ns of the initial HALT, the failure condition
* is avoided.
*/
static void pci_fixup_nforce2(struct pci_dev *dev)
{
u32 val;
/*
* Chip Old value New value
* C17 0x1F0FFF01 0x1F01FF01
* C18D 0x9F0FFF01 0x9F01FF01
*
* Northbridge chip version may be determined by
* reading the PCI revision ID (0xC1 or greater is C18D).
*/
pci_read_config_dword(dev, 0x6c, &val);
/*
* Apply fixup if needed, but don't touch disconnect state
*/
if ((val & 0x00FF0000) != 0x00010000) {
dev_warn(&dev->dev, "nForce2 C1 Halt Disconnect fixup\n");
pci_write_config_dword(dev, 0x6c, (val & 0xFF00FFFF) | 0x00010000);
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2, pci_fixup_nforce2);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2, pci_fixup_nforce2);
/* Max PCI Express root ports */
#define MAX_PCIEROOT 6
static int quirk_aspm_offset[MAX_PCIEROOT << 3];
#define GET_INDEX(a, b) ((((a) - PCI_DEVICE_ID_INTEL_MCH_PA) << 3) + ((b) & 7))
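/*
 * GET_INDEX() yields (root port index * 8) + function: the device
 * ID offset from PCI_DEVICE_ID_INTEL_MCH_PA selects one of the six
 * root ports, and the low three devfn bits select the function of
 * the attached device.
 */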
static int quirk_pcie_aspm_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value)
{
return raw_pci_read(pci_domain_nr(bus), bus->number,
devfn, where, size, value);
}
/*
* Replace the original pci bus ops for write with a new one that will filter
* the request to ensure ASPM cannot be enabled.
*/
static int quirk_pcie_aspm_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value)
{
u8 offset;
offset = quirk_aspm_offset[GET_INDEX(bus->self->device, devfn)];
if ((offset) && (where == offset))
value = value & ~PCI_EXP_LNKCTL_ASPMC;
return raw_pci_write(pci_domain_nr(bus), bus->number,
devfn, where, size, value);
}
static struct pci_ops quirk_pcie_aspm_ops = {
.read = quirk_pcie_aspm_read,
.write = quirk_pcie_aspm_write,
};
/*
* Prevents PCI Express ASPM (Active State Power Management) from being enabled.
*
* Save the register offset, where the ASPM control bits are located,
* for each PCI Express device that is in the device list of
* the root port in an array for fast indexing. Replace the bus ops
* with the modified one.
*/
static void pcie_rootport_aspm_quirk(struct pci_dev *pdev)
{
int i;
struct pci_bus *pbus;
struct pci_dev *dev;
if ((pbus = pdev->subordinate) == NULL)
return;
/*
* Check if the DID of pdev matches one of the six root ports. This
* check is needed in the case this function is called directly by the
* hot-plug driver.
*/
if ((pdev->device < PCI_DEVICE_ID_INTEL_MCH_PA) ||
(pdev->device > PCI_DEVICE_ID_INTEL_MCH_PC1))
return;
if (list_empty(&pbus->devices)) {
/*
* If no device is attached to the root port at power-up or
* after hot-remove, the pbus->devices is empty and this code
* will set the offsets to zero and the bus ops to parent's bus
* ops, which is unmodified.
*/
for (i = GET_INDEX(pdev->device, 0); i <= GET_INDEX(pdev->device, 7); ++i)
quirk_aspm_offset[i] = 0;
pci_bus_set_ops(pbus, pbus->parent->ops);
} else {
/*
* If devices are attached to the root port at power-up or
* after hot-add, the code loops through the device list of
* each root port to save the register offsets and replace the
* bus ops.
*/
list_for_each_entry(dev, &pbus->devices, bus_list)
/* There are 0 to 8 devices attached to this bus */
quirk_aspm_offset[GET_INDEX(pdev->device, dev->devfn)] =
dev->pcie_cap + PCI_EXP_LNKCTL;
pci_bus_set_ops(pbus, &quirk_pcie_aspm_ops);
dev_info(&pbus->dev, "writes to ASPM control bits will be ignored\n");
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PA, pcie_rootport_aspm_quirk);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PA1, pcie_rootport_aspm_quirk);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PB, pcie_rootport_aspm_quirk);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PB1, pcie_rootport_aspm_quirk);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PC, pcie_rootport_aspm_quirk);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PC1, pcie_rootport_aspm_quirk);
/*
* Fixup to mark the boot video device selected by the BIOS before it changes
*
* From information provided by "Jon Smirl" <[email protected]>
*
* The standard boot ROM sequence for an x86 machine uses the BIOS
* to select an initial video card for boot display. This boot video
* card will have its BIOS copied to 0xC0000 in system RAM.
* IORESOURCE_ROM_SHADOW is used to associate the boot video
* card with this copy. On laptops this copy has to be used since
* the main ROM may be compressed or combined with another image.
* See pci_map_rom() for use of this flag. Before marking the device
* with IORESOURCE_ROM_SHADOW check if a vga_default_device is already set
* by either arch code or vga-arbitration; if so only apply the fixup to this
* already-determined primary video card.
*/
static void pci_fixup_video(struct pci_dev *pdev)
{
struct pci_dev *bridge;
struct pci_bus *bus;
u16 config;
struct resource *res;
/* Is VGA routed to us? */
bus = pdev->bus;
while (bus) {
bridge = bus->self;
/*
* From information provided by
* "David Miller" <[email protected]>
* The bridge control register is valid for PCI header
* type BRIDGE, or CARDBUS. Host to PCI controllers use
* PCI header type NORMAL.
*/
if (bridge && (pci_is_bridge(bridge))) {
pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
&config);
if (!(config & PCI_BRIDGE_CTL_VGA))
return;
}
bus = bus->parent;
}
if (!vga_default_device() || pdev == vga_default_device()) {
pci_read_config_word(pdev, PCI_COMMAND, &config);
if (config & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
res = &pdev->resource[PCI_ROM_RESOURCE];
pci_disable_rom(pdev);
if (res->parent)
release_resource(res);
res->start = 0xC0000;
res->end = res->start + 0x20000 - 1;
res->flags = IORESOURCE_MEM | IORESOURCE_ROM_SHADOW |
IORESOURCE_PCI_FIXED;
dev_info(&pdev->dev, "Video device with shadowed ROM at %pR\n",
res);
}
}
}
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_DISPLAY_VGA, 8, pci_fixup_video);
static const struct dmi_system_id msi_k8t_dmi_table[] = {
{
.ident = "MSI-K8T-Neo2Fir",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "MSI"),
DMI_MATCH(DMI_PRODUCT_NAME, "MS-6702E"),
},
},
{}
};
/*
* The AMD-Athlon64 board MSI "K8T Neo2-FIR" disables the onboard sound
* card if a PCI-soundcard is added.
*
* The BIOS only gives options "DISABLED" and "AUTO". This code sets
* the corresponding register-value to enable the soundcard.
*
* The soundcard is only enabled if the mainboard is identified
* via DMI-tables and the soundcard is detected to be off.
*/
static void pci_fixup_msi_k8t_onboard_sound(struct pci_dev *dev)
{
unsigned char val;
if (!dmi_check_system(msi_k8t_dmi_table))
return; /* only applies to MSI K8T Neo2-FIR */
pci_read_config_byte(dev, 0x50, &val);
if (val & 0x40) {
pci_write_config_byte(dev, 0x50, val & (~0x40));
/* verify the change for status output */
pci_read_config_byte(dev, 0x50, &val);
if (val & 0x40)
dev_info(&dev->dev, "Detected MSI K8T Neo2-FIR; "
"can't enable onboard soundcard!\n");
else
dev_info(&dev->dev, "Detected MSI K8T Neo2-FIR; "
"enabled onboard soundcard\n");
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237,
pci_fixup_msi_k8t_onboard_sound);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237,
pci_fixup_msi_k8t_onboard_sound);
/*
* Some Toshiba laptops need extra code to enable their TI TSB43AB22/A.
*
* We pretend to bring them out of full D3 state, and restore the proper
* IRQ, PCI cache line size, and BARs, otherwise the device won't function
* properly. In some cases, the device will generate an interrupt on
* the wrong IRQ line, causing any devices sharing the line it's
* *supposed* to use to be disabled by the kernel's IRQ debug code.
*/
static u16 toshiba_line_size;
static const struct dmi_system_id toshiba_ohci1394_dmi_table[] = {
{
.ident = "Toshiba PS5 based laptop",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_VERSION, "PS5"),
},
},
{
.ident = "Toshiba PSM4 based laptop",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_VERSION, "PSM4"),
},
},
{
.ident = "Toshiba A40 based laptop",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_VERSION, "PSA40U"),
},
},
{ }
};
static void pci_pre_fixup_toshiba_ohci1394(struct pci_dev *dev)
{
if (!dmi_check_system(toshiba_ohci1394_dmi_table))
return; /* only applies to certain Toshibas (so far) */
dev->current_state = PCI_D3cold;
pci_read_config_word(dev, PCI_CACHE_LINE_SIZE, &toshiba_line_size);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TI, 0x8032,
pci_pre_fixup_toshiba_ohci1394);
static void pci_post_fixup_toshiba_ohci1394(struct pci_dev *dev)
{
if (!dmi_check_system(toshiba_ohci1394_dmi_table))
return; /* only applies to certain Toshibas (so far) */
/* Restore config space on Toshiba laptops */
pci_write_config_word(dev, PCI_CACHE_LINE_SIZE, toshiba_line_size);
pci_read_config_byte(dev, PCI_INTERRUPT_LINE, (u8 *)&dev->irq);
pci_write_config_dword(dev, PCI_BASE_ADDRESS_0,
pci_resource_start(dev, 0));
pci_write_config_dword(dev, PCI_BASE_ADDRESS_1,
pci_resource_start(dev, 1));
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_TI, 0x8032,
pci_post_fixup_toshiba_ohci1394);
/*
* Prevent the BIOS trapping accesses to the Cyrix CS5530A video device
* configuration space.
*/
static void pci_early_fixup_cyrix_5530(struct pci_dev *dev)
{
u8 r;
/* clear 'F4 Video Configuration Trap' bit */
pci_read_config_byte(dev, 0x42, &r);
r &= 0xfd;
pci_write_config_byte(dev, 0x42, r);
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY,
pci_early_fixup_cyrix_5530);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY,
pci_early_fixup_cyrix_5530);
/*
* Siemens Nixdorf AG FSC Multiprocessor Interrupt Controller:
* prevent update of the BAR0, which doesn't look like a normal BAR.
*/
static void pci_siemens_interrupt_controller(struct pci_dev *dev)
{
dev->resource[0].flags |= IORESOURCE_PCI_FIXED;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SIEMENS, 0x0015,
pci_siemens_interrupt_controller);
/*
* SB600: Disable BAR1 on device 14.0 to keep HPET resources from
* confusing the PCI engine:
*/
static void sb600_disable_hpet_bar(struct pci_dev *dev)
{
u8 val;
/*
* The SB600 and SB700 both share the same device
* ID, but the PM register 0x55 does something different
* for the SB700, so make sure we are dealing with the
* SB600 before touching the bit:
*/
pci_read_config_byte(dev, 0x08, &val);
if (val < 0x2F) {
outb(0x55, 0xCD6);
val = inb(0xCD7);
/* Set bit 7 in PM register 0x55 */
outb(0x55, 0xCD6);
outb(val | 0x80, 0xCD7);
}
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_ATI, 0x4385, sb600_disable_hpet_bar);
#ifdef CONFIG_HPET_TIMER
static void sb600_hpet_quirk(struct pci_dev *dev)
{
struct resource *r = &dev->resource[1];
if (r->flags & IORESOURCE_MEM && r->start == hpet_address) {
r->flags |= IORESOURCE_PCI_FIXED;
dev_info(&dev->dev, "reg 0x14 contains HPET; making it immovable\n");
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, 0x4385, sb600_hpet_quirk);
#endif
/*
* Twinhead H12Y needs us to block out a region, otherwise we map devices
* there and any access kills the box.
*
* See: https://bugzilla.kernel.org/show_bug.cgi?id=10231
*
* Match off the LPC and svid/sdid (older kernels lose the bridge subvendor)
*/
static void twinhead_reserve_killing_zone(struct pci_dev *dev)
{
if (dev->subsystem_vendor == 0x14FF && dev->subsystem_device == 0xA003) {
pr_info("Reserving memory on Twinhead H12Y\n");
request_mem_region(0xFFB00000, 0x100000, "twinhead");
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x27B9, twinhead_reserve_killing_zone);
/*
* Device [8086:2fc0]
* Erratum HSE43
* CONFIG_TDP_NOMINAL CSR Implemented at Incorrect Offset
* https://www.intel.com/content/www/us/en/processors/xeon/xeon-e5-v3-spec-update.html
*
* Devices [8086:6f60,6fa0,6fc0]
* Erratum BDF2
* PCI BARs in the Home Agent Will Return Non-Zero Values During Enumeration
* https://www.intel.com/content/www/us/en/processors/xeon/xeon-e5-v4-spec-update.html
*/
static void pci_invalid_bar(struct pci_dev *dev)
{
dev->non_compliant_bars = 1;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6f60, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa1ec, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa1ed, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa26c, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa26d, pci_invalid_bar);
/*
* Device [1022:7808]
* 23. USB Wake on Connect/Disconnect with Low Speed Devices
* https://support.amd.com/TechDocs/46837.pdf
* Appendix A2
* https://support.amd.com/TechDocs/42413.pdf
*/
static void pci_fixup_amd_ehci_pme(struct pci_dev *dev)
{
dev_info(&dev->dev, "PME# does not work under D3, disabling it\n");
dev->pme_support &= ~((PCI_PM_CAP_PME_D3hot | PCI_PM_CAP_PME_D3cold)
>> PCI_PM_CAP_PME_SHIFT);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x7808, pci_fixup_amd_ehci_pme);
/*
* Device [1022:7914]
* When in D0, PME# doesn't get asserted when plugging USB 2.0 device.
*/
static void pci_fixup_amd_fch_xhci_pme(struct pci_dev *dev)
{
dev_info(&dev->dev, "PME# does not work under D0, disabling it\n");
dev->pme_support &= ~(PCI_PM_CAP_PME_D0 >> PCI_PM_CAP_PME_SHIFT);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x7914, pci_fixup_amd_fch_xhci_pme);
/*
* Apple MacBook Pro: Avoid [mem 0x7fa00000-0x7fbfffff]
*
* Using the [mem 0x7fa00000-0x7fbfffff] region, e.g., by assigning it to
* the 00:1c.0 Root Port, causes a conflict with [io 0x1804], which is used
* for soft poweroff and suspend-to-RAM.
*
* As far as we know, this is related to the address space, not to the Root
* Port itself. Attaching the quirk to the Root Port is a convenience, but
* it could probably also be a standalone DMI quirk.
*
* https://bugzilla.kernel.org/show_bug.cgi?id=103211
*/
static void quirk_apple_mbp_poweroff(struct pci_dev *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res;
if ((!dmi_match(DMI_PRODUCT_NAME, "MacBookPro11,4") &&
!dmi_match(DMI_PRODUCT_NAME, "MacBookPro11,5")) ||
pdev->bus->number != 0 || pdev->devfn != PCI_DEVFN(0x1c, 0))
return;
res = request_mem_region(0x7fa00000, 0x200000,
"MacBook Pro poweroff workaround");
if (res)
dev_info(dev, "claimed %s %pR\n", res->name, res);
else
dev_info(dev, "can't work around MacBook Pro poweroff issue\n");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x8c10, quirk_apple_mbp_poweroff);
/*
* VMD-enabled root ports will change the source ID for all messages
* to the VMD device. Rather than doing device matching with the source
* ID, the AER driver should traverse the child device tree, reading
* AER registers to find the faulting device.
*/
static void quirk_no_aersid(struct pci_dev *pdev)
{
/* VMD Domain */
if (is_vmd(pdev->bus) && pci_is_root_bus(pdev->bus))
pdev->bus->bus_flags |= PCI_BUS_FLAGS_NO_AERSID;
}
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
PCI_CLASS_BRIDGE_PCI, 8, quirk_no_aersid);
static void quirk_intel_th_dnv(struct pci_dev *dev)
{
struct resource *r = &dev->resource[4];
/*
* Denverton reports 2k of RTIT_BAR (intel_th resource 4), which
* appears to be 4 MB in reality.
*/
if (r->end == r->start + 0x7ff) {
r->start = 0;
r->end = 0x3fffff;
r->flags |= IORESOURCE_UNSET;
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x19e1, quirk_intel_th_dnv);
#ifdef CONFIG_PHYS_ADDR_T_64BIT
#define AMD_141b_MMIO_BASE(x) (0x80 + (x) * 0x8)
#define AMD_141b_MMIO_BASE_RE_MASK BIT(0)
#define AMD_141b_MMIO_BASE_WE_MASK BIT(1)
#define AMD_141b_MMIO_BASE_MMIOBASE_MASK GENMASK(31,8)
#define AMD_141b_MMIO_LIMIT(x) (0x84 + (x) * 0x8)
#define AMD_141b_MMIO_LIMIT_MMIOLIMIT_MASK GENMASK(31,8)
#define AMD_141b_MMIO_HIGH(x) (0x180 + (x) * 0x4)
#define AMD_141b_MMIO_HIGH_MMIOBASE_MASK GENMASK(7,0)
#define AMD_141b_MMIO_HIGH_MMIOLIMIT_SHIFT 16
#define AMD_141b_MMIO_HIGH_MMIOLIMIT_MASK GENMASK(23,16)
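/*
 * Register layout as encoded below: the base register carries
 * address bits 39:16 in bits 31:8 plus read/write enables in bits
 * 1:0, the limit register carries bits 39:16 of the window's last
 * byte, and the high register holds bits 47:40 of the base (7:0)
 * and of the limit (23:16).
 */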
/*
* The PCI Firmware Spec, rev 3.2, notes that ACPI should optionally allow
* configuring host bridge windows using the _PRS and _SRS methods.
*
* But this is rarely implemented, so we manually enable a large 64-bit MMIO
* window for PCIe devices on AMD Family 15h (Models 00h-1fh, 30h-3fh,
* 60h-7fh) Processors here.
*/
static void pci_amd_enable_64bit_bar(struct pci_dev *dev)
{
static const char *name = "PCI Bus 0000:00";
struct resource *res, *conflict;
u32 base, limit, high;
struct pci_dev *other;
unsigned int i;
if (!(pci_probe & PCI_BIG_ROOT_WINDOW))
return;
/* Check that we are the only device of that type */
other = pci_get_device(dev->vendor, dev->device, NULL);
if (other != dev ||
(other = pci_get_device(dev->vendor, dev->device, other))) {
/* This is a multi-socket system, don't touch it for now */
pci_dev_put(other);
return;
}
for (i = 0; i < 8; i++) {
pci_read_config_dword(dev, AMD_141b_MMIO_BASE(i), &base);
pci_read_config_dword(dev, AMD_141b_MMIO_HIGH(i), &high);
/* Is this slot free? */
if (!(base & (AMD_141b_MMIO_BASE_RE_MASK |
AMD_141b_MMIO_BASE_WE_MASK)))
break;
base >>= 8;
base |= high << 24;
/* Abort if a slot already configures a 64bit BAR. */
if (base > 0x10000)
return;
}
if (i == 8)
return;
res = kzalloc(sizeof(*res), GFP_KERNEL);
if (!res)
return;
/*
* Allocate a 256GB window directly below the 0xfd00000000 hardware
* limit (see AMD Family 15h Models 30h-3Fh BKDG, sec 2.4.6).
*/
res->name = name;
res->flags = IORESOURCE_PREFETCH | IORESOURCE_MEM |
IORESOURCE_MEM_64 | IORESOURCE_WINDOW;
res->start = 0xbd00000000ull;
res->end = 0xfd00000000ull - 1;
conflict = request_resource_conflict(&iomem_resource, res);
if (conflict) {
kfree(res);
if (conflict->name != name)
return;
/* We are resuming from suspend; just reenable the window */
res = conflict;
} else {
dev_info(&dev->dev, "adding root bus resource %pR (tainting kernel)\n",
res);
add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
pci_bus_add_resource(dev->bus, res, 0);
}
base = ((res->start >> 8) & AMD_141b_MMIO_BASE_MMIOBASE_MASK) |
AMD_141b_MMIO_BASE_RE_MASK | AMD_141b_MMIO_BASE_WE_MASK;
limit = ((res->end + 1) >> 8) & AMD_141b_MMIO_LIMIT_MMIOLIMIT_MASK;
high = ((res->start >> 40) & AMD_141b_MMIO_HIGH_MMIOBASE_MASK) |
((((res->end + 1) >> 40) << AMD_141b_MMIO_HIGH_MMIOLIMIT_SHIFT)
& AMD_141b_MMIO_HIGH_MMIOLIMIT_MASK);
pci_write_config_dword(dev, AMD_141b_MMIO_HIGH(i), high);
pci_write_config_dword(dev, AMD_141b_MMIO_LIMIT(i), limit);
pci_write_config_dword(dev, AMD_141b_MMIO_BASE(i), base);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1401, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x141b, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1571, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15b1, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1601, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1401, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x141b, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1571, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x15b1, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1601, pci_amd_enable_64bit_bar);
#define RS690_LOWER_TOP_OF_DRAM2 0x30
#define RS690_LOWER_TOP_OF_DRAM2_VALID 0x1
#define RS690_UPPER_TOP_OF_DRAM2 0x31
#define RS690_HTIU_NB_INDEX 0xA8
#define RS690_HTIU_NB_INDEX_WR_ENABLE 0x100
#define RS690_HTIU_NB_DATA 0xAC
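/*
 * TOP_OF_DRAM2 sits behind an index/data pair: write the register
 * index to HTIU_NB_INDEX (OR'ing in WR_ENABLE for writes), then
 * transfer the value through HTIU_NB_DATA, as done below.
 */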
/*
* Some BIOS implementations support RAM above 4GB, but do not configure the
* PCI host to respond to bus master accesses for these addresses. These
* implementations set the TOP_OF_DRAM_SLOT1 register correctly, so PCI DMA
* works as expected for addresses below 4GB.
*
* Reference: "AMD RS690 ASIC Family Register Reference Guide" (pg. 2-57)
* https://www.amd.com/system/files/TechDocs/43372_rs690_rrg_3.00o.pdf
*/
static void rs690_fix_64bit_dma(struct pci_dev *pdev)
{
u32 val = 0;
phys_addr_t top_of_dram = __pa(high_memory - 1) + 1;
if (top_of_dram <= (1ULL << 32))
return;
pci_write_config_dword(pdev, RS690_HTIU_NB_INDEX,
RS690_LOWER_TOP_OF_DRAM2);
pci_read_config_dword(pdev, RS690_HTIU_NB_DATA, &val);
if (val)
return;
pci_info(pdev, "Adjusting top of DRAM to %pa for 64-bit DMA support\n", &top_of_dram);
pci_write_config_dword(pdev, RS690_HTIU_NB_INDEX,
RS690_UPPER_TOP_OF_DRAM2 | RS690_HTIU_NB_INDEX_WR_ENABLE);
pci_write_config_dword(pdev, RS690_HTIU_NB_DATA, top_of_dram >> 32);
pci_write_config_dword(pdev, RS690_HTIU_NB_INDEX,
RS690_LOWER_TOP_OF_DRAM2 | RS690_HTIU_NB_INDEX_WR_ENABLE);
pci_write_config_dword(pdev, RS690_HTIU_NB_DATA,
top_of_dram | RS690_LOWER_TOP_OF_DRAM2_VALID);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7910, rs690_fix_64bit_dma);
#endif
#ifdef CONFIG_AMD_NB
#define AMD_15B8_RCC_DEV2_EPF0_STRAP2 0x10136008
#define AMD_15B8_RCC_DEV2_EPF0_STRAP2_NO_SOFT_RESET_DEV2_F0_MASK 0x00000080L
static void quirk_clear_strap_no_soft_reset_dev2_f0(struct pci_dev *dev)
{
u32 data;
if (!amd_smn_read(0, AMD_15B8_RCC_DEV2_EPF0_STRAP2, &data)) {
data &= ~AMD_15B8_RCC_DEV2_EPF0_STRAP2_NO_SOFT_RESET_DEV2_F0_MASK;
if (amd_smn_write(0, AMD_15B8_RCC_DEV2_EPF0_STRAP2, data))
pci_err(dev, "Failed to write data 0x%x\n", data);
} else {
pci_err(dev, "Failed to read data\n");
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15b8, quirk_clear_strap_no_soft_reset_dev2_f0);
#endif
/*
* When returning from D3cold to D0, firmware on some Google Coral and Reef
* family Chromebooks with Intel Apollo Lake SoC clobbers the headers of
* both the L1 PM Substates capability and the previous capability for the
* "Celeron N3350/Pentium N4200/Atom E3900 Series PCI Express Port B #1".
*
* Save those values at enumeration-time and restore them at resume.
*/
static u16 prev_cap, l1ss_cap;
static u32 prev_header, l1ss_header;
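/*
 * A single set of statics suffices here on the assumption that only
 * one affected root port exists per system: the save fixup runs once
 * at enumeration, the restore fixup at every resume.
 */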
static void chromeos_save_apl_pci_l1ss_capability(struct pci_dev *dev)
{
int pos = PCI_CFG_SPACE_SIZE, prev = 0;
u32 header, pheader = 0;
while (pos) {
pci_read_config_dword(dev, pos, &header);
if (PCI_EXT_CAP_ID(header) == PCI_EXT_CAP_ID_L1SS) {
prev_cap = prev;
prev_header = pheader;
l1ss_cap = pos;
l1ss_header = header;
return;
}
prev = pos;
pheader = header;
pos = PCI_EXT_CAP_NEXT(header);
}
}
static void chromeos_fixup_apl_pci_l1ss_capability(struct pci_dev *dev)
{
u32 header;
if (!prev_cap || !prev_header || !l1ss_cap || !l1ss_header)
return;
/* Fixup the header of L1SS Capability if missing */
pci_read_config_dword(dev, l1ss_cap, &header);
if (header != l1ss_header) {
pci_write_config_dword(dev, l1ss_cap, l1ss_header);
pci_info(dev, "restore L1SS Capability header (was %#010x now %#010x)\n",
header, l1ss_header);
}
/* Fixup the link to L1SS Capability if missing */
pci_read_config_dword(dev, prev_cap, &header);
if (header != prev_header) {
pci_write_config_dword(dev, prev_cap, prev_header);
pci_info(dev, "restore previous Capability header (was %#010x now %#010x)\n",
header, prev_header);
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x5ad6, chromeos_save_apl_pci_l1ss_capability);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x5ad6, chromeos_fixup_apl_pci_l1ss_capability);
| linux-master | arch/x86/pci/fixup.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* legacy.c - traditional, old school PCI bus probing
*/
#include <linux/init.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <asm/jailhouse_para.h>
#include <asm/pci_x86.h>
/*
* Discover remaining PCI buses in case there are peer host bridges.
* We use the number of the last PCI bus provided by the PCI BIOS.
*/
static void pcibios_fixup_peer_bridges(void)
{
int n;
if (pcibios_last_bus <= 0 || pcibios_last_bus > 0xff)
return;
DBG("PCI: Peer bridge fixup\n");
for (n=0; n <= pcibios_last_bus; n++)
pcibios_scan_specific_bus(n);
}
int __init pci_legacy_init(void)
{
if (!raw_pci_ops)
return 1;
pr_info("PCI: Probing PCI hardware\n");
pcibios_scan_root(0);
return 0;
}
void pcibios_scan_specific_bus(int busn)
{
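/*
 * Bare metal only needs function 0 of each slot probed (stride 8);
 * a Jailhouse guest may expose devices at arbitrary functions, so
 * there every devfn is probed (stride 1).
 */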
int stride = jailhouse_paravirt() ? 1 : 8;
int devfn;
u32 l;
if (pci_find_bus(0, busn))
return;
for (devfn = 0; devfn < 256; devfn += stride) {
if (!raw_pci_read(0, busn, devfn, PCI_VENDOR_ID, 2, &l) &&
l != 0x0000 && l != 0xffff) {
DBG("Found device at %02x:%02x [%04x]\n", busn, devfn, l);
pr_info("PCI: Discovered peer bus %02x\n", busn);
pcibios_scan_root(busn);
return;
}
}
}
EXPORT_SYMBOL_GPL(pcibios_scan_specific_bus);
static int __init pci_subsys_init(void)
{
/*
* The init function returns a non-zero value when
* pci_legacy_init should be invoked.
*/
if (x86_init.pci.init()) {
if (pci_legacy_init()) {
pr_info("PCI: System does not support PCI\n");
return -ENODEV;
}
}
pcibios_fixup_peer_bridges();
x86_init.pci.init_irq();
pcibios_init();
return 0;
}
subsys_initcall(pci_subsys_init);
| linux-master | arch/x86/pci/legacy.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Read address ranges from a Broadcom CNB20LE Host Bridge
*
* Copyright (c) 2010 Ira W. Snyder <[email protected]>
*/
#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <asm/pci_x86.h>
#include <asm/pci-direct.h>
#include "bus_numa.h"
static void __init cnb20le_res(u8 bus, u8 slot, u8 func)
{
struct pci_root_info *info;
struct pci_root_res *root_res;
struct resource res;
u16 word1, word2;
u8 fbus, lbus;
/* read the PCI bus numbers */
fbus = read_pci_config_byte(bus, slot, func, 0x44);
lbus = read_pci_config_byte(bus, slot, func, 0x45);
info = alloc_pci_root_info(fbus, lbus, 0, 0);
/*
* Add the legacy IDE ports on bus 0
*
* These do not exist anywhere in the bridge registers, AFAICT. I do
* not have the datasheet, so this is the best I can do.
*/
if (fbus == 0) {
update_res(info, 0x01f0, 0x01f7, IORESOURCE_IO, 0);
update_res(info, 0x03f6, 0x03f6, IORESOURCE_IO, 0);
update_res(info, 0x0170, 0x0177, IORESOURCE_IO, 0);
update_res(info, 0x0376, 0x0376, IORESOURCE_IO, 0);
update_res(info, 0xffa0, 0xffaf, IORESOURCE_IO, 0);
}
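/*
 * The two memory window register pairs below hold address bits 31:16
 * (a window spans word1 << 16 through (word2 << 16) | 0xffff), while
 * the I/O pair holds port numbers directly; word1 == word2 is
 * treated as a disabled window.
 */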
/* read the non-prefetchable memory window */
word1 = read_pci_config_16(bus, slot, func, 0xc0);
word2 = read_pci_config_16(bus, slot, func, 0xc2);
if (word1 != word2) {
res.start = ((resource_size_t) word1 << 16) | 0x0000;
res.end = ((resource_size_t) word2 << 16) | 0xffff;
res.flags = IORESOURCE_MEM;
update_res(info, res.start, res.end, res.flags, 0);
}
/* read the prefetchable memory window */
word1 = read_pci_config_16(bus, slot, func, 0xc4);
word2 = read_pci_config_16(bus, slot, func, 0xc6);
if (word1 != word2) {
res.start = ((resource_size_t) word1 << 16) | 0x0000;
res.end = ((resource_size_t) word2 << 16) | 0xffff;
res.flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;
update_res(info, res.start, res.end, res.flags, 0);
}
/* read the IO port window */
word1 = read_pci_config_16(bus, slot, func, 0xd0);
word2 = read_pci_config_16(bus, slot, func, 0xd2);
if (word1 != word2) {
res.start = word1;
res.end = word2;
res.flags = IORESOURCE_IO;
update_res(info, res.start, res.end, res.flags, 0);
}
/* print information about this host bridge */
res.start = fbus;
res.end = lbus;
res.flags = IORESOURCE_BUS;
printk(KERN_INFO "CNB20LE PCI Host Bridge (domain 0000 %pR)\n", &res);
list_for_each_entry(root_res, &info->resources, list)
printk(KERN_INFO "host bridge window %pR\n", &root_res->res);
}
static int __init broadcom_postcore_init(void)
{
u8 bus = 0, slot = 0;
u32 id;
u16 vendor, device;
#ifdef CONFIG_ACPI
/*
* We should get host bridge information from ACPI unless the BIOS
* doesn't support it.
*/
if (!acpi_disabled && acpi_os_get_root_pointer())
return 0;
#endif
id = read_pci_config(bus, slot, 0, PCI_VENDOR_ID);
vendor = id & 0xffff;
device = (id >> 16) & 0xffff;
if (vendor == PCI_VENDOR_ID_SERVERWORKS &&
device == PCI_DEVICE_ID_SERVERWORKS_LE) {
cnb20le_res(bus, slot, 0);
cnb20le_res(bus, slot, 1);
}
return 0;
}
postcore_initcall(broadcom_postcore_init);
| linux-master | arch/x86/pci/broadcom_bus.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Low-level PCI config space access for OLPC systems who lack the VSA
* PCI virtualization software.
*
* Copyright © 2006 Advanced Micro Devices, Inc.
*
* The AMD Geode chipset (ie: GX2 processor, cs5536 I/O companion device)
* has some I/O functions (display, southbridge, sound, USB HCIs, etc)
* that more or less behave like PCI devices, but the hardware doesn't
* directly implement the PCI configuration space headers. AMD provides
* "VSA" (Virtual System Architecture) software that emulates PCI config
* space for these devices by trapping I/O accesses to the PCI config registers
* (CF8/CFC) and running some code in System Management Mode interrupt state.
* On the OLPC platform, we don't want to use that VSA code because
* (a) it slows down suspend/resume, and (b) recompiling it requires special
* compilers that are hard to get. So instead of letting the complex VSA
* code simulate the PCI config registers for the on-chip devices, we
* just simulate them the easy way, by inserting the code into the
* pci_write_config and pci_read_config path. Most of the config registers
* are read-only anyway, so the bulk of the simulation is just table lookup.
*/
#include <linux/pci.h>
#include <linux/init.h>
#include <asm/olpc.h>
#include <asm/geode.h>
#include <asm/pci_x86.h>
/*
* In the tables below, the first two lines (8 longwords) are the
* size masks that are used when the higher level PCI code determines
* the size of the region by writing ~0 to a base address register
* and reading back the result.
*
* The following lines are the values that are read during normal
* PCI config access cycles, i.e. not after just having written
* ~0 to a base address register.
*/
static const uint32_t lxnb_hdr[] = { /* dev 1 function 0 - devfn = 8 */
0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0,
0x281022, 0x2200005, 0x6000021, 0x80f808, /* AMD Vendor ID */
0x0, 0x0, 0x0, 0x0, /* No virtual registers, hence no BAR */
0x0, 0x0, 0x0, 0x28100b,
0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0,
};
static const uint32_t gxnb_hdr[] = { /* dev 1 function 0 - devfn = 8 */
0xfffffffd, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0,
0x28100b, 0x2200005, 0x6000021, 0x80f808, /* NSC Vendor ID */
0xac1d, 0x0, 0x0, 0x0, /* I/O BAR - base of virtual registers */
0x0, 0x0, 0x0, 0x28100b,
0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0,
};
static const uint32_t lxfb_hdr[] = { /* dev 1 function 1 - devfn = 9 */
0xff000008, 0xffffc000, 0xffffc000, 0xffffc000,
0xffffc000, 0x0, 0x0, 0x0,
0x20811022, 0x2200003, 0x3000000, 0x0, /* AMD Vendor ID */
0xfd000000, 0xfe000000, 0xfe004000, 0xfe008000, /* FB, GP, VG, DF */
0xfe00c000, 0x0, 0x0, 0x30100b, /* VIP */
0x0, 0x0, 0x0, 0x10e, /* INTA, IRQ14 for graphics accel */
0x0, 0x0, 0x0, 0x0,
0x3d0, 0x3c0, 0xa0000, 0x0, /* VG IO, VG IO, EGA FB, MONO FB */
0x0, 0x0, 0x0, 0x0,
};
static const uint32_t gxfb_hdr[] = { /* dev 1 function 1 - devfn = 9 */
0xff800008, 0xffffc000, 0xffffc000, 0xffffc000,
0x0, 0x0, 0x0, 0x0,
0x30100b, 0x2200003, 0x3000000, 0x0, /* NSC Vendor ID */
0xfd000000, 0xfe000000, 0xfe004000, 0xfe008000, /* FB, GP, VG, DF */
0x0, 0x0, 0x0, 0x30100b,
0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0,
0x3d0, 0x3c0, 0xa0000, 0x0, /* VG IO, VG IO, EGA FB, MONO FB */
0x0, 0x0, 0x0, 0x0,
};
static const uint32_t aes_hdr[] = { /* dev 1 function 2 - devfn = 0xa */
0xffffc000, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0,
0x20821022, 0x2a00006, 0x10100000, 0x8, /* AMD Vendor ID */
0xfe010000, 0x0, 0x0, 0x0, /* AES registers */
0x0, 0x0, 0x0, 0x20821022,
0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0,
};
static const uint32_t isa_hdr[] = { /* dev f function 0 - devfn = 78 */
0xfffffff9, 0xffffff01, 0xffffffc1, 0xffffffe1,
0xffffff81, 0xffffffc1, 0x0, 0x0,
0x20901022, 0x2a00049, 0x6010003, 0x802000,
0x18b1, 0x1001, 0x1801, 0x1881, /* SMB-8 GPIO-256 MFGPT-64 IRQ-32 */
0x1401, 0x1841, 0x0, 0x20901022, /* PMS-128 ACPI-64 */
0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0xaa5b, /* IRQ steering */
0x0, 0x0, 0x0, 0x0,
};
static const uint32_t ac97_hdr[] = { /* dev f function 3 - devfn = 7b */
0xffffff81, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0,
0x20931022, 0x2a00041, 0x4010001, 0x0,
0x1481, 0x0, 0x0, 0x0, /* I/O BAR-128 */
0x0, 0x0, 0x0, 0x20931022,
0x0, 0x0, 0x0, 0x205, /* IntB, IRQ5 */
0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0,
};
static const uint32_t ohci_hdr[] = { /* dev f function 4 - devfn = 7c */
0xfffff000, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0,
0x20941022, 0x2300006, 0xc031002, 0x0,
0xfe01a000, 0x0, 0x0, 0x0, /* MEMBAR-1000 */
0x0, 0x0, 0x0, 0x20941022,
0x0, 0x40, 0x0, 0x40a, /* CapPtr INT-D, IRQA */
0xc8020001, 0x0, 0x0, 0x0, /* Capabilities - 40 is R/O,
44 is mask 8103 (power control) */
0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0,
};
static const uint32_t ehci_hdr[] = { /* dev f function 4 - devfn = 7d */
0xfffff000, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0,
0x20951022, 0x2300006, 0xc032002, 0x0,
0xfe01b000, 0x0, 0x0, 0x0, /* MEMBAR-1000 */
0x0, 0x0, 0x0, 0x20951022,
0x0, 0x40, 0x0, 0x40a, /* CapPtr INT-D, IRQA */
0xc8020001, 0x0, 0x0, 0x0, /* Capabilities - 40 is R/O, 44 is
mask 8103 (power control) */
#if 0
0x1, 0x40080000, 0x0, 0x0, /* EECP - see EHCI spec section 2.1.7 */
#endif
0x01000001, 0x0, 0x0, 0x0, /* EECP - see EHCI spec section 2.1.7 */
0x2020, 0x0, 0x0, 0x0, /* (EHCI page 8) 60 SBRN (R/O),
61 FLADJ (R/W), PORTWAKECAP */
};
static uint32_t ff_loc = ~0;
static uint32_t zero_loc;
static int bar_probing; /* Set after a write of ~0 to a BAR */
static int is_lx;
#define NB_SLOT 0x1 /* Northbridge - GX chip - Device 1 */
#define SB_SLOT 0xf /* Southbridge - CS5536 chip - Device F */
static int is_simulated(unsigned int bus, unsigned int devfn)
{
return (!bus && ((PCI_SLOT(devfn) == NB_SLOT) ||
(PCI_SLOT(devfn) == SB_SLOT)));
}
static uint32_t *hdr_addr(const uint32_t *hdr, int reg)
{
uint32_t addr;
/*
* This is a little bit tricky. The header maps consist of
* 0x20 bytes of size masks, followed by 0x70 bytes of header data.
* In the normal case, when not probing a BAR's size, we want
* to access the header data, so we add 0x20 to the reg offset,
* thus skipping the size mask area.
* In the BAR probing case, we want to access the size mask for
* the BAR, so we subtract 0x10 (the config header offset for
* BAR0), and don't skip the size mask area.
*/
addr = (uint32_t)hdr + reg + (bar_probing ? -0x10 : 0x20);
bar_probing = 0;
return (uint32_t *)addr;
}
static int pci_olpc_read(unsigned int seg, unsigned int bus,
unsigned int devfn, int reg, int len, uint32_t *value)
{
uint32_t *addr;
WARN_ON(seg);
/* Use the hardware mechanism for non-simulated devices */
if (!is_simulated(bus, devfn))
return pci_direct_conf1.read(seg, bus, devfn, reg, len, value);
/*
* No device has config registers past 0x70, so we save table space
* by not storing entries for the nonexistent registers
*/
if (reg >= 0x70)
addr = &zero_loc;
else {
switch (devfn) {
case 0x8:
addr = hdr_addr(is_lx ? lxnb_hdr : gxnb_hdr, reg);
break;
case 0x9:
addr = hdr_addr(is_lx ? lxfb_hdr : gxfb_hdr, reg);
break;
case 0xa:
addr = is_lx ? hdr_addr(aes_hdr, reg) : &ff_loc;
break;
case 0x78:
addr = hdr_addr(isa_hdr, reg);
break;
case 0x7b:
addr = hdr_addr(ac97_hdr, reg);
break;
case 0x7c:
addr = hdr_addr(ohci_hdr, reg);
break;
case 0x7d:
addr = hdr_addr(ehci_hdr, reg);
break;
default:
addr = &ff_loc;
break;
}
}
switch (len) {
case 1:
*value = *(uint8_t *)addr;
break;
case 2:
*value = *(uint16_t *)addr;
break;
case 4:
*value = *addr;
break;
default:
BUG();
}
return 0;
}
static int pci_olpc_write(unsigned int seg, unsigned int bus,
unsigned int devfn, int reg, int len, uint32_t value)
{
WARN_ON(seg);
/* Use the hardware mechanism for non-simulated devices */
if (!is_simulated(bus, devfn))
return pci_direct_conf1.write(seg, bus, devfn, reg, len, value);
/* XXX we may want to extend this to simulate EHCI power management */
/*
* Mostly we just discard writes, but if the write is a size probe
* (i.e. writing ~0 to a BAR), we remember it and arrange to return
* the appropriate size mask on the next read. This is cheating
* to some extent, because it depends on the fact that the next
* access after such a write will always be a read to the same BAR.
*/
if ((reg >= 0x10) && (reg < 0x2c)) {
/* write is to a BAR */
if (value == ~0)
bar_probing = 1;
} else {
/*
* No warning on writes to ROM BAR, CMD, LATENCY_TIMER,
* CACHE_LINE_SIZE, or PM registers.
*/
if ((reg != PCI_ROM_ADDRESS) && (reg != PCI_COMMAND) &&
(reg != PCI_LATENCY_TIMER) &&
(reg != PCI_CACHE_LINE_SIZE) && (reg != 0x44))
printk(KERN_WARNING "OLPC PCI: Config write to devfn"
" %x reg %x value %x\n", devfn, reg, value);
}
return 0;
}
static const struct pci_raw_ops pci_olpc_conf = {
.read = pci_olpc_read,
.write = pci_olpc_write,
};
int __init pci_olpc_init(void)
{
printk(KERN_INFO "PCI: Using configuration type OLPC XO-1\n");
raw_pci_ops = &pci_olpc_conf;
is_lx = is_geode_lx();
return 0;
}
| linux-master | arch/x86/pci/olpc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Numascale NumaConnect-specific PCI code
*
* Copyright (C) 2012 Numascale AS. All rights reserved.
*
* Send feedback to <[email protected]>
*
* PCI accessor functions derived from mmconfig_64.c
*
*/
#include <linux/pci.h>
#include <asm/pci_x86.h>
#include <asm/numachip/numachip.h>
static u8 limit __read_mostly;
static inline char __iomem *pci_dev_base(unsigned int seg, unsigned int bus, unsigned int devfn)
{
struct pci_mmcfg_region *cfg = pci_mmconfig_lookup(seg, bus);
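/* Standard ECAM layout: bus << 20 | devfn << 12 | register offset */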
if (cfg && cfg->virt)
return cfg->virt + (PCI_MMCFG_BUS_OFFSET(bus) | (devfn << 12));
return NULL;
}
static int pci_mmcfg_read_numachip(unsigned int seg, unsigned int bus,
unsigned int devfn, int reg, int len, u32 *value)
{
char __iomem *addr;
/* Why do we have this when nobody checks it. How about a BUG()!? -AK */
if (unlikely((bus > 255) || (devfn > 255) || (reg > 4095))) {
err:
*value = -1;
return -EINVAL;
}
/* Ensure AMD Northbridges don't decode reads to other devices */
if (unlikely(bus == 0 && devfn >= limit)) {
*value = -1;
return 0;
}
rcu_read_lock();
addr = pci_dev_base(seg, bus, devfn);
if (!addr) {
rcu_read_unlock();
goto err;
}
switch (len) {
case 1:
*value = mmio_config_readb(addr + reg);
break;
case 2:
*value = mmio_config_readw(addr + reg);
break;
case 4:
*value = mmio_config_readl(addr + reg);
break;
}
rcu_read_unlock();
return 0;
}
static int pci_mmcfg_write_numachip(unsigned int seg, unsigned int bus,
unsigned int devfn, int reg, int len, u32 value)
{
char __iomem *addr;
/* Why do we have this when nobody checks it. How about a BUG()!? -AK */
if (unlikely((bus > 255) || (devfn > 255) || (reg > 4095)))
return -EINVAL;
/* Ensure AMD Northbridges don't decode writes to other devices */
if (unlikely(bus == 0 && devfn >= limit))
return 0;
rcu_read_lock();
addr = pci_dev_base(seg, bus, devfn);
if (!addr) {
rcu_read_unlock();
return -EINVAL;
}
switch (len) {
case 1:
mmio_config_writeb(addr + reg, value);
break;
case 2:
mmio_config_writew(addr + reg, value);
break;
case 4:
mmio_config_writel(addr + reg, value);
break;
}
rcu_read_unlock();
return 0;
}
static const struct pci_raw_ops pci_mmcfg_numachip = {
.read = pci_mmcfg_read_numachip,
.write = pci_mmcfg_write_numachip,
};
int __init pci_numachip_init(void)
{
int ret = 0;
u32 val;
/* For remote I/O, restrict bus 0 access to the actual number of AMD
Northbridges, which start at device number 0x18 */
ret = raw_pci_read(0, 0, PCI_DEVFN(0x18, 0), 0x60, sizeof(val), &val);
if (ret)
goto out;
/* HyperTransport fabric size in bits 6:4 */
limit = PCI_DEVFN(0x18 + ((val >> 4) & 7) + 1, 0);
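/*
 * Example: a two-node fabric reports 1 in bits 6:4, so limit becomes
 * PCI_DEVFN(0x1a, 0) and any bus 0 devfn at or above it is rejected.
 */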
/* Use NumaChip PCI accessors for non-extended and extended access */
raw_pci_ops = raw_pci_ext_ops = &pci_mmcfg_numachip;
out:
return ret;
}
| linux-master | arch/x86/pci/numachip.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/pci.h>
#include <asm/pci-direct.h>
#include <asm/io.h>
#include <asm/pci_x86.h>
/* Direct PCI access. This is used for PCI accesses in early boot before
the PCI subsystem works. */
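/*
 * Mechanism #1 CONFIG_ADDRESS encoding, as used below: bit 31
 * enable, bits 23:16 bus, 15:11 device, 10:8 function, 7:2 the
 * dword-aligned register. Example: bus 0, device 3, function 0,
 * vendor ID: outl(0x80001800, 0xcf8); then inl(0xcfc) & 0xffff.
 */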
u32 read_pci_config(u8 bus, u8 slot, u8 func, u8 offset)
{
u32 v;
outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
v = inl(0xcfc);
return v;
}
u8 read_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset)
{
u8 v;
outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
v = inb(0xcfc + (offset&3));
return v;
}
u16 read_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset)
{
u16 v;
outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
v = inw(0xcfc + (offset&2));
return v;
}
void write_pci_config(u8 bus, u8 slot, u8 func, u8 offset,
u32 val)
{
outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
outl(val, 0xcfc);
}
void write_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset, u8 val)
{
outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
outb(val, 0xcfc + (offset&3));
}
void write_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset, u16 val)
{
outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
outw(val, 0xcfc + (offset&2));
}
int early_pci_allowed(void)
{
return (pci_probe & (PCI_PROBE_CONF1|PCI_PROBE_NOEARLY)) ==
PCI_PROBE_CONF1;
}
| linux-master | arch/x86/pci/early.c |
// SPDX-License-Identifier: GPL-2.0
/*
* mmconfig-shared.c - Low-level direct PCI config space access via
* MMCONFIG - common code between i386 and x86-64.
*
* This code does:
* - known chipset handling
* - ACPI decoding and validation
*
* Per-architecture code takes care of the mappings and accesses
* themselves.
*/
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <asm/e820/api.h>
#include <asm/pci_x86.h>
#include <asm/acpi.h>
#define PREFIX "PCI: "
/* Indicate if the mmcfg resources have been placed into the resource table. */
static bool pci_mmcfg_running_state;
static bool pci_mmcfg_arch_init_failed;
static DEFINE_MUTEX(pci_mmcfg_lock);
#define pci_mmcfg_lock_held() lock_is_held(&(pci_mmcfg_lock).dep_map)
LIST_HEAD(pci_mmcfg_list);
static void __init pci_mmconfig_remove(struct pci_mmcfg_region *cfg)
{
if (cfg->res.parent)
release_resource(&cfg->res);
list_del(&cfg->list);
kfree(cfg);
}
static void __init free_all_mmcfg(void)
{
struct pci_mmcfg_region *cfg, *tmp;
pci_mmcfg_arch_free();
list_for_each_entry_safe(cfg, tmp, &pci_mmcfg_list, list)
pci_mmconfig_remove(cfg);
}
static void list_add_sorted(struct pci_mmcfg_region *new)
{
struct pci_mmcfg_region *cfg;
/* keep list sorted by segment and starting bus number */
list_for_each_entry_rcu(cfg, &pci_mmcfg_list, list, pci_mmcfg_lock_held()) {
if (cfg->segment > new->segment ||
(cfg->segment == new->segment &&
cfg->start_bus >= new->start_bus)) {
list_add_tail_rcu(&new->list, &cfg->list);
return;
}
}
list_add_tail_rcu(&new->list, &pci_mmcfg_list);
}
static struct pci_mmcfg_region *pci_mmconfig_alloc(int segment, int start,
int end, u64 addr)
{
struct pci_mmcfg_region *new;
struct resource *res;
if (addr == 0)
return NULL;
new = kzalloc(sizeof(*new), GFP_KERNEL);
if (!new)
return NULL;
new->address = addr;
new->segment = segment;
new->start_bus = start;
new->end_bus = end;
res = &new->res;
res->start = addr + PCI_MMCFG_BUS_OFFSET(start);
res->end = addr + PCI_MMCFG_BUS_OFFSET(end + 1) - 1;
res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
snprintf(new->name, PCI_MMCFG_RESOURCE_NAME_LEN,
"PCI MMCONFIG %04x [bus %02x-%02x]", segment, start, end);
res->name = new->name;
return new;
}
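/*
 * The resource spans 1 MiB of configuration space per bus, since
 * PCI_MMCFG_BUS_OFFSET() is a 20-bit shift. Worked example
 * (illustration only): for buses 0x00-0xff the window runs from
 * addr to addr + 256 MiB - 1.
 */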
struct pci_mmcfg_region *__init pci_mmconfig_add(int segment, int start,
int end, u64 addr)
{
struct pci_mmcfg_region *new;
new = pci_mmconfig_alloc(segment, start, end, addr);
if (new) {
mutex_lock(&pci_mmcfg_lock);
list_add_sorted(new);
mutex_unlock(&pci_mmcfg_lock);
pr_info(PREFIX
"MMCONFIG for domain %04x [bus %02x-%02x] at %pR "
"(base %#lx)\n",
segment, start, end, &new->res, (unsigned long)addr);
}
return new;
}
struct pci_mmcfg_region *pci_mmconfig_lookup(int segment, int bus)
{
struct pci_mmcfg_region *cfg;
list_for_each_entry_rcu(cfg, &pci_mmcfg_list, list, pci_mmcfg_lock_held())
if (cfg->segment == segment &&
cfg->start_bus <= bus && bus <= cfg->end_bus)
return cfg;
return NULL;
}
static const char *__init pci_mmcfg_e7520(void)
{
u32 win;
raw_pci_ops->read(0, 0, PCI_DEVFN(0, 0), 0xce, 2, &win);
win = win & 0xf000;
if (win == 0x0000 || win == 0xf000)
return NULL;
if (pci_mmconfig_add(0, 0, 255, win << 16) == NULL)
return NULL;
return "Intel Corporation E7520 Memory Controller Hub";
}
static const char *__init pci_mmcfg_intel_945(void)
{
u32 pciexbar, mask = 0, len = 0;
raw_pci_ops->read(0, 0, PCI_DEVFN(0, 0), 0x48, 4, &pciexbar);
/* Enable bit */
if (!(pciexbar & 1))
return NULL;
/* Size bits */
switch ((pciexbar >> 1) & 3) {
case 0:
mask = 0xf0000000U;
len = 0x10000000U;
break;
case 1:
mask = 0xf8000000U;
len = 0x08000000U;
break;
case 2:
mask = 0xfc000000U;
len = 0x04000000U;
break;
default:
return NULL;
}
/* Errata #2, things break when not aligned on a 256Mb boundary */
/* Can only happen in 64M/128M mode */
if ((pciexbar & mask) & 0x0fffffffU)
return NULL;
/* Don't hit the APIC registers and their friends */
if ((pciexbar & mask) >= 0xf0000000U)
return NULL;
if (pci_mmconfig_add(0, 0, (len >> 20) - 1, pciexbar & mask) == NULL)
return NULL;
return "Intel Corporation 945G/GZ/P/PL Express Memory Controller Hub";
}
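/*
 * A worked PCIEXBAR decode (illustration only, not a real register
 * dump): a raw value of 0xe0000001 has the enable bit set and size
 * field 0, giving a 256 MiB window (len 0x10000000) at 0xe0000000;
 * that base is 256 MiB aligned and below 0xf0000000, so buses
 * 0x00-0xff get registered.
 */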
static const char *__init pci_mmcfg_amd_fam10h(void)
{
u32 low, high, address;
u64 base, msr;
int i;
unsigned segnbits = 0, busnbits, end_bus;
if (!(pci_probe & PCI_CHECK_ENABLE_AMD_MMCONF))
return NULL;
address = MSR_FAM10H_MMIO_CONF_BASE;
if (rdmsr_safe(address, &low, &high))
return NULL;
msr = high;
msr <<= 32;
msr |= low;
/* MMCONFIG is not enabled */
if (!(msr & FAM10H_MMIO_CONF_ENABLE))
return NULL;
base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);
busnbits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
FAM10H_MMIO_CONF_BUSRANGE_MASK;
/*
 * A bus-range field of 0 would cover bus 0 only, which is of no
 * use here, so skip it.
 */
if (!busnbits)
return NULL;
if (busnbits > 8) {
segnbits = busnbits - 8;
busnbits = 8;
}
end_bus = (1 << busnbits) - 1;
for (i = 0; i < (1 << segnbits); i++)
if (pci_mmconfig_add(i, 0, end_bus,
base + (1<<28) * i) == NULL) {
free_all_mmcfg();
return NULL;
}
return "AMD Family 10h NB";
}
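/*
 * Worked MSR decode (illustration only): a bus-range field of 9
 * means 2^9 buses, more than one segment's worth, so it is split as
 * segnbits = 1, busnbits = 8: two segments of buses 0x00-0xff each,
 * the second mapped 256 MiB (1 << 28) above the first.
 */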
static bool __initdata mcp55_checked;
static const char *__init pci_mmcfg_nvidia_mcp55(void)
{
int bus;
int mcp55_mmconf_found = 0;
static const u32 extcfg_regnum __initconst = 0x90;
static const u32 extcfg_regsize __initconst = 4;
static const u32 extcfg_enable_mask __initconst = 1 << 31;
static const u32 extcfg_start_mask __initconst = 0xff << 16;
static const int extcfg_start_shift __initconst = 16;
static const u32 extcfg_size_mask __initconst = 0x3 << 28;
static const int extcfg_size_shift __initconst = 28;
static const int extcfg_sizebus[] __initconst = {
0x100, 0x80, 0x40, 0x20
};
static const u32 extcfg_base_mask[] __initconst = {
0x7ff8, 0x7ffc, 0x7ffe, 0x7fff
};
static const int extcfg_base_lshift __initconst = 25;
/*
 * Check whether the AMD Fam10h probe has already taken over.
 */
if (!acpi_disabled || !list_empty(&pci_mmcfg_list) || mcp55_checked)
return NULL;
mcp55_checked = true;
for (bus = 0; bus < 256; bus++) {
u64 base;
u32 l, extcfg;
u16 vendor, device;
int start, size_index, end;
raw_pci_ops->read(0, bus, PCI_DEVFN(0, 0), 0, 4, &l);
vendor = l & 0xffff;
device = (l >> 16) & 0xffff;
if (PCI_VENDOR_ID_NVIDIA != vendor || 0x0369 != device)
continue;
raw_pci_ops->read(0, bus, PCI_DEVFN(0, 0), extcfg_regnum,
extcfg_regsize, &extcfg);
if (!(extcfg & extcfg_enable_mask))
continue;
size_index = (extcfg & extcfg_size_mask) >> extcfg_size_shift;
base = extcfg & extcfg_base_mask[size_index];
/* base could be above 4G */
base <<= extcfg_base_lshift;
start = (extcfg & extcfg_start_mask) >> extcfg_start_shift;
end = start + extcfg_sizebus[size_index] - 1;
if (pci_mmconfig_add(0, start, end, base) == NULL)
continue;
mcp55_mmconf_found++;
}
if (!mcp55_mmconf_found)
return NULL;
return "nVidia MCP55";
}
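/*
 * In the register decoded above, bit 31 enables the window, bits
 * 23:16 give the starting bus, bits 29:28 select one of four sizes
 * (0x100, 0x80, 0x40 or 0x20 buses) and the remaining base bits,
 * shifted left by 25, form the physical base, which can therefore
 * land above 4G (hence the u64).
 */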
struct pci_mmcfg_hostbridge_probe {
u32 bus;
u32 devfn;
u32 vendor;
u32 device;
const char *(*probe)(void);
};
static const struct pci_mmcfg_hostbridge_probe pci_mmcfg_probes[] __initconst = {
{ 0, PCI_DEVFN(0, 0), PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_E7520_MCH, pci_mmcfg_e7520 },
{ 0, PCI_DEVFN(0, 0), PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_82945G_HB, pci_mmcfg_intel_945 },
{ 0, PCI_DEVFN(0x18, 0), PCI_VENDOR_ID_AMD,
0x1200, pci_mmcfg_amd_fam10h },
{ 0xff, PCI_DEVFN(0, 0), PCI_VENDOR_ID_AMD,
0x1200, pci_mmcfg_amd_fam10h },
{ 0, PCI_DEVFN(0, 0), PCI_VENDOR_ID_NVIDIA,
0x0369, pci_mmcfg_nvidia_mcp55 },
};
static void __init pci_mmcfg_check_end_bus_number(void)
{
struct pci_mmcfg_region *cfg, *cfgx;
/* Fixup overlaps */
list_for_each_entry(cfg, &pci_mmcfg_list, list) {
if (cfg->end_bus < cfg->start_bus)
cfg->end_bus = 255;
/* Don't access the list head! */
if (cfg->list.next == &pci_mmcfg_list)
break;
cfgx = list_entry(cfg->list.next, typeof(*cfg), list);
if (cfg->end_bus >= cfgx->start_bus)
cfg->end_bus = cfgx->start_bus - 1;
}
}
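/*
 * Worked overlap fix (illustration only): with same-segment entries
 * sorted as [bus 00-ff] followed by [bus 40-7f], the first entry's
 * end_bus gets clipped to 0x3f so the ranges no longer overlap; an
 * end_bus below start_bus is first widened to 255.
 */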
static int __init pci_mmcfg_check_hostbridge(void)
{
u32 l;
u32 bus, devfn;
u16 vendor, device;
int i;
const char *name;
if (!raw_pci_ops)
return 0;
free_all_mmcfg();
for (i = 0; i < ARRAY_SIZE(pci_mmcfg_probes); i++) {
bus = pci_mmcfg_probes[i].bus;
devfn = pci_mmcfg_probes[i].devfn;
raw_pci_ops->read(0, bus, devfn, 0, 4, &l);
vendor = l & 0xffff;
device = (l >> 16) & 0xffff;
name = NULL;
if (pci_mmcfg_probes[i].vendor == vendor &&
pci_mmcfg_probes[i].device == device)
name = pci_mmcfg_probes[i].probe();
if (name)
pr_info(PREFIX "%s with MMCONFIG support\n", name);
}
/* Some firmware reports a bogus end_bus_number; fix it up */
pci_mmcfg_check_end_bus_number();
return !list_empty(&pci_mmcfg_list);
}
static acpi_status check_mcfg_resource(struct acpi_resource *res, void *data)
{
struct resource *mcfg_res = data;
struct acpi_resource_address64 address;
acpi_status status;
if (res->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32) {
struct acpi_resource_fixed_memory32 *fixmem32 =
&res->data.fixed_memory32;
if (!fixmem32)
return AE_OK;
if ((mcfg_res->start >= fixmem32->address) &&
(mcfg_res->end < (fixmem32->address +
fixmem32->address_length))) {
mcfg_res->flags = 1;
return AE_CTRL_TERMINATE;
}
}
if ((res->type != ACPI_RESOURCE_TYPE_ADDRESS32) &&
(res->type != ACPI_RESOURCE_TYPE_ADDRESS64))
return AE_OK;
status = acpi_resource_to_address64(res, &address);
if (ACPI_FAILURE(status) ||
(address.address.address_length <= 0) ||
(address.resource_type != ACPI_MEMORY_RANGE))
return AE_OK;
if ((mcfg_res->start >= address.address.minimum) &&
(mcfg_res->end < (address.address.minimum + address.address.address_length))) {
mcfg_res->flags = 1;
return AE_CTRL_TERMINATE;
}
return AE_OK;
}
static acpi_status find_mboard_resource(acpi_handle handle, u32 lvl,
void *context, void **rv)
{
struct resource *mcfg_res = context;
acpi_walk_resources(handle, METHOD_NAME__CRS,
check_mcfg_resource, context);
if (mcfg_res->flags)
return AE_CTRL_TERMINATE;
return AE_OK;
}
static bool is_acpi_reserved(u64 start, u64 end, enum e820_type not_used)
{
struct resource mcfg_res;
mcfg_res.start = start;
mcfg_res.end = end - 1;
mcfg_res.flags = 0;
acpi_get_devices("PNP0C01", find_mboard_resource, &mcfg_res, NULL);
if (!mcfg_res.flags)
acpi_get_devices("PNP0C02", find_mboard_resource, &mcfg_res,
NULL);
return mcfg_res.flags;
}
static bool is_efi_mmio(u64 start, u64 end, enum e820_type not_used)
{
#ifdef CONFIG_EFI
efi_memory_desc_t *md;
u64 size, mmio_start, mmio_end;
for_each_efi_memory_desc(md) {
if (md->type == EFI_MEMORY_MAPPED_IO) {
size = md->num_pages << EFI_PAGE_SHIFT;
mmio_start = md->phys_addr;
mmio_end = mmio_start + size;
/*
* N.B. Caller supplies (start, start + size),
* so to match, mmio_end is the first address
* *past* the EFI_MEMORY_MAPPED_IO area.
*/
if (mmio_start <= start && end <= mmio_end)
return true;
}
}
#endif
return false;
}
typedef bool (*check_reserved_t)(u64 start, u64 end, enum e820_type type);
static bool __ref is_mmconf_reserved(check_reserved_t is_reserved,
struct pci_mmcfg_region *cfg,
struct device *dev, const char *method)
{
u64 addr = cfg->res.start;
u64 size = resource_size(&cfg->res);
u64 old_size = size;
int num_buses;
while (!is_reserved(addr, addr + size, E820_TYPE_RESERVED)) {
size >>= 1;
if (size < (16UL<<20))
break;
}
if (size < (16UL<<20) && size != old_size)
return false;
if (dev)
dev_info(dev, "MMCONFIG at %pR reserved as %s\n",
&cfg->res, method);
else
pr_info(PREFIX "MMCONFIG at %pR reserved as %s\n",
&cfg->res, method);
if (old_size != size) {
/* update end_bus */
cfg->end_bus = cfg->start_bus + ((size>>20) - 1);
num_buses = cfg->end_bus - cfg->start_bus + 1;
cfg->res.end = cfg->res.start +
PCI_MMCFG_BUS_OFFSET(num_buses) - 1;
snprintf(cfg->name, PCI_MMCFG_RESOURCE_NAME_LEN,
"PCI MMCONFIG %04x [bus %02x-%02x]",
cfg->segment, cfg->start_bus, cfg->end_bus);
if (dev)
dev_info(dev,
"MMCONFIG "
"at %pR (base %#lx) (size reduced!)\n",
&cfg->res, (unsigned long) cfg->address);
else
pr_info(PREFIX
"MMCONFIG for %04x [bus%02x-%02x] "
"at %pR (base %#lx) (size reduced!)\n",
cfg->segment, cfg->start_bus, cfg->end_bus,
&cfg->res, (unsigned long) cfg->address);
}
return true;
}
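/*
 * The halving loop above degrades gracefully. Worked example
 * (illustration only): if a 256 MiB window is not fully reserved
 * but its first 128 MiB are, the window is halved once, end_bus
 * drops from start_bus + 255 to start_bus + 127, and the resource
 * and its name are rewritten to match the reduced size.
 */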
static bool __ref
pci_mmcfg_check_reserved(struct device *dev, struct pci_mmcfg_region *cfg, int early)
{
if (!early && !acpi_disabled) {
if (is_mmconf_reserved(is_acpi_reserved, cfg, dev,
"ACPI motherboard resource"))
return true;
if (dev)
dev_info(dev, FW_INFO
"MMCONFIG at %pR not reserved in "
"ACPI motherboard resources\n",
&cfg->res);
else
pr_info(FW_INFO PREFIX
"MMCONFIG at %pR not reserved in "
"ACPI motherboard resources\n",
&cfg->res);
if (is_mmconf_reserved(is_efi_mmio, cfg, dev,
"EfiMemoryMappedIO"))
return true;
}
/*
* e820__mapped_all() is marked as __init.
* All entries from ACPI MCFG table have been checked at boot time.
* For MCFG information constructed from hotpluggable host bridge's
* _CBA method, just assume it's reserved.
*/
if (pci_mmcfg_running_state)
return true;
/*
 * Don't try this check unless configuration type 1 access is
 * available; what about type 2?
 */
if (raw_pci_ops)
return is_mmconf_reserved(e820__mapped_all, cfg, dev,
"E820 entry");
return false;
}
static void __init pci_mmcfg_reject_broken(int early)
{
struct pci_mmcfg_region *cfg;
list_for_each_entry(cfg, &pci_mmcfg_list, list) {
if (pci_mmcfg_check_reserved(NULL, cfg, early) == 0) {
pr_info(PREFIX "not using MMCONFIG\n");
free_all_mmcfg();
return;
}
}
}
static int __init acpi_mcfg_check_entry(struct acpi_table_mcfg *mcfg,
struct acpi_mcfg_allocation *cfg)
{
if (cfg->address < 0xFFFFFFFF)
return 0;
if (!strncmp(mcfg->header.oem_id, "SGI", 3))
return 0;
if ((mcfg->header.revision >= 1) && (dmi_get_bios_year() >= 2010))
return 0;
pr_err(PREFIX "MCFG region for %04x [bus %02x-%02x] at %#llx "
"is above 4GB, ignored\n", cfg->pci_segment,
cfg->start_bus_number, cfg->end_bus_number, cfg->address);
return -EINVAL;
}
static int __init pci_parse_mcfg(struct acpi_table_header *header)
{
struct acpi_table_mcfg *mcfg;
struct acpi_mcfg_allocation *cfg_table, *cfg;
unsigned long i;
int entries;
if (!header)
return -EINVAL;
mcfg = (struct acpi_table_mcfg *)header;
/* How many config structures do we have? */
free_all_mmcfg();
entries = 0;
i = header->length - sizeof(struct acpi_table_mcfg);
while (i >= sizeof(struct acpi_mcfg_allocation)) {
entries++;
i -= sizeof(struct acpi_mcfg_allocation);
}
if (entries == 0) {
pr_err(PREFIX "MMCONFIG has no entries\n");
return -ENODEV;
}
cfg_table = (struct acpi_mcfg_allocation *) &mcfg[1];
for (i = 0; i < entries; i++) {
cfg = &cfg_table[i];
if (acpi_mcfg_check_entry(mcfg, cfg)) {
free_all_mmcfg();
return -ENODEV;
}
if (pci_mmconfig_add(cfg->pci_segment, cfg->start_bus_number,
cfg->end_bus_number, cfg->address) == NULL) {
pr_warn(PREFIX "no memory for MCFG entries\n");
free_all_mmcfg();
return -ENOMEM;
}
}
return 0;
}
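/*
 * Entry-count arithmetic, as a worked example (not from a real
 * table, and assuming the usual ACPICA layouts): struct
 * acpi_table_mcfg is the 36-byte ACPI header plus 8 reserved bytes,
 * and each struct acpi_mcfg_allocation is 16 bytes, so an MCFG with
 * header->length == 60 carries (60 - 44) / 16 = 1 allocation.
 */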
#ifdef CONFIG_ACPI_APEI
extern int (*arch_apei_filter_addr)(int (*func)(__u64 start, __u64 size,
void *data), void *data);
static int pci_mmcfg_for_each_region(int (*func)(__u64 start, __u64 size,
void *data), void *data)
{
struct pci_mmcfg_region *cfg;
int rc;
if (list_empty(&pci_mmcfg_list))
return 0;
list_for_each_entry(cfg, &pci_mmcfg_list, list) {
rc = func(cfg->res.start, resource_size(&cfg->res), data);
if (rc)
return rc;
}
return 0;
}
#define set_apei_filter() (arch_apei_filter_addr = pci_mmcfg_for_each_region)
#else
#define set_apei_filter()
#endif
static void __init __pci_mmcfg_init(int early)
{
pci_mmcfg_reject_broken(early);
if (list_empty(&pci_mmcfg_list))
return;
if (pcibios_last_bus < 0) {
const struct pci_mmcfg_region *cfg;
list_for_each_entry(cfg, &pci_mmcfg_list, list) {
if (cfg->segment)
break;
pcibios_last_bus = cfg->end_bus;
}
}
if (pci_mmcfg_arch_init())
pci_probe = (pci_probe & ~PCI_PROBE_MASK) | PCI_PROBE_MMCONF;
else {
free_all_mmcfg();
pci_mmcfg_arch_init_failed = true;
}
}
static int __initdata known_bridge;
void __init pci_mmcfg_early_init(void)
{
if (pci_probe & PCI_PROBE_MMCONF) {
if (pci_mmcfg_check_hostbridge())
known_bridge = 1;
else
acpi_table_parse(ACPI_SIG_MCFG, pci_parse_mcfg);
__pci_mmcfg_init(1);
set_apei_filter();
}
}
void __init pci_mmcfg_late_init(void)
{
/* MMCONFIG disabled */
if ((pci_probe & PCI_PROBE_MMCONF) == 0)
return;
if (known_bridge)
return;
/* MMCONFIG hasn't been enabled yet, try again */
if (pci_probe & PCI_PROBE_MASK & ~PCI_PROBE_MMCONF) {
acpi_table_parse(ACPI_SIG_MCFG, pci_parse_mcfg);
__pci_mmcfg_init(0);
}
}
static int __init pci_mmcfg_late_insert_resources(void)
{
struct pci_mmcfg_region *cfg;
pci_mmcfg_running_state = true;
/* If we are not using MMCONFIG, don't insert the resources. */
if ((pci_probe & PCI_PROBE_MMCONF) == 0)
return 1;
/*
* Attempt to insert the mmcfg resources but not with the busy flag
* marked so it won't cause request errors when __request_region is
* called.
*/
list_for_each_entry(cfg, &pci_mmcfg_list, list)
if (!cfg->res.parent)
insert_resource(&iomem_resource, &cfg->res);
return 0;
}
/*
* Perform MMCONFIG resource insertion after PCI initialization to allow for
* misprogrammed MCFG tables that state larger sizes but actually conflict
* with other system resources.
*/
late_initcall(pci_mmcfg_late_insert_resources);
/* Add MMCFG information for host bridges */
int pci_mmconfig_insert(struct device *dev, u16 seg, u8 start, u8 end,
phys_addr_t addr)
{
int rc;
struct resource *tmp = NULL;
struct pci_mmcfg_region *cfg;
if (!(pci_probe & PCI_PROBE_MMCONF) || pci_mmcfg_arch_init_failed)
return -ENODEV;
if (start > end)
return -EINVAL;
mutex_lock(&pci_mmcfg_lock);
cfg = pci_mmconfig_lookup(seg, start);
if (cfg) {
if (cfg->end_bus < end)
dev_info(dev, FW_INFO
"MMCONFIG for "
"domain %04x [bus %02x-%02x] "
"only partially covers this bridge\n",
cfg->segment, cfg->start_bus, cfg->end_bus);
mutex_unlock(&pci_mmcfg_lock);
return -EEXIST;
}
if (!addr) {
mutex_unlock(&pci_mmcfg_lock);
return -EINVAL;
}
rc = -EBUSY;
cfg = pci_mmconfig_alloc(seg, start, end, addr);
if (cfg == NULL) {
dev_warn(dev, "fail to add MMCONFIG (out of memory)\n");
rc = -ENOMEM;
} else if (!pci_mmcfg_check_reserved(dev, cfg, 0)) {
dev_warn(dev, FW_BUG "MMCONFIG %pR isn't reserved\n",
&cfg->res);
} else {
/* Insert resource if it's not in boot stage */
if (pci_mmcfg_running_state)
tmp = insert_resource_conflict(&iomem_resource,
&cfg->res);
if (tmp) {
dev_warn(dev,
"MMCONFIG %pR conflicts with "
"%s %pR\n",
&cfg->res, tmp->name, tmp);
} else if (pci_mmcfg_arch_map(cfg)) {
dev_warn(dev, "fail to map MMCONFIG %pR.\n",
&cfg->res);
} else {
list_add_sorted(cfg);
dev_info(dev, "MMCONFIG at %pR (base %#lx)\n",
&cfg->res, (unsigned long)addr);
cfg = NULL;
rc = 0;
}
}
if (cfg) {
if (cfg->res.parent)
release_resource(&cfg->res);
kfree(cfg);
}
mutex_unlock(&pci_mmcfg_lock);
return rc;
}
/* Delete MMCFG information for host bridges */
int pci_mmconfig_delete(u16 seg, u8 start, u8 end)
{
struct pci_mmcfg_region *cfg;
mutex_lock(&pci_mmcfg_lock);
list_for_each_entry_rcu(cfg, &pci_mmcfg_list, list)
if (cfg->segment == seg && cfg->start_bus == start &&
cfg->end_bus == end) {
list_del_rcu(&cfg->list);
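/*
 * Readers traverse pci_mmcfg_list under RCU (see
 * pci_mmconfig_lookup()), so wait for all of them to leave
 * their read-side critical sections before unmapping and
 * freeing the entry below.
 */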
synchronize_rcu();
pci_mmcfg_arch_unmap(cfg);
if (cfg->res.parent)
release_resource(&cfg->res);
mutex_unlock(&pci_mmcfg_lock);
kfree(cfg);
return 0;
}
mutex_unlock(&pci_mmcfg_lock);
return -ENOENT;
}
| linux-master | arch/x86/pci/mmconfig-shared.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* DMA translation between STA2x11 AMBA memory mapping and the x86 memory mapping
*
* ST Microelectronics ConneXt (STA2X11/STA2X10)
*
* Copyright (c) 2010-2011 Wind River Systems, Inc.
*/
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/dma-map-ops.h>
#include <linux/swiotlb.h>
#include <asm/iommu.h>
#define STA2X11_SWIOTLB_SIZE (4*1024*1024)
/*
* We build a list of bus numbers that are under the ConneXt. The
 * main bridge hosts 4 buses, which are the 4 endpoints, in order.
*/
#define STA2X11_NR_EP 4 /* 0..3 included */
#define STA2X11_NR_FUNCS 8 /* 0..7 included */
#define STA2X11_AMBA_SIZE (512 << 20)
struct sta2x11_ahb_regs { /* saved during suspend */
u32 base, pexlbase, pexhbase, crw;
};
struct sta2x11_mapping {
int is_suspended;
struct sta2x11_ahb_regs regs[STA2X11_NR_FUNCS];
};
struct sta2x11_instance {
struct list_head list;
int bus0;
struct sta2x11_mapping map[STA2X11_NR_EP];
};
static LIST_HEAD(sta2x11_instance_list);
/* At probe time, record new instances of this bridge (likely one only) */
static void sta2x11_new_instance(struct pci_dev *pdev)
{
struct sta2x11_instance *instance;
instance = kzalloc(sizeof(*instance), GFP_ATOMIC);
if (!instance)
return;
/* This has a subordinate bridge, with 4 more-subordinate ones */
instance->bus0 = pdev->subordinate->number + 1;
if (list_empty(&sta2x11_instance_list)) {
int size = STA2X11_SWIOTLB_SIZE;
/* First instance: register your own swiotlb area */
dev_info(&pdev->dev, "Using SWIOTLB (size %i)\n", size);
if (swiotlb_init_late(size, GFP_DMA, NULL))
dev_emerg(&pdev->dev, "init swiotlb failed\n");
}
list_add(&instance->list, &sta2x11_instance_list);
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_STMICRO, 0xcc17, sta2x11_new_instance);
/*
 * Utility functions used by the rest of this file
 */
static struct sta2x11_instance *sta2x11_pdev_to_instance(struct pci_dev *pdev)
{
struct sta2x11_instance *instance;
int ep;
list_for_each_entry(instance, &sta2x11_instance_list, list) {
ep = pdev->bus->number - instance->bus0;
if (ep >= 0 && ep < STA2X11_NR_EP)
return instance;
}
return NULL;
}
static int sta2x11_pdev_to_ep(struct pci_dev *pdev)
{
struct sta2x11_instance *instance;
instance = sta2x11_pdev_to_instance(pdev);
if (!instance)
return -1;
return pdev->bus->number - instance->bus0;
}
/* This is exported, as some devices need to access the MFD registers */
struct sta2x11_instance *sta2x11_get_instance(struct pci_dev *pdev)
{
return sta2x11_pdev_to_instance(pdev);
}
EXPORT_SYMBOL(sta2x11_get_instance);
/* At setup time, we use our own ops if the device is a ConneXt one */
static void sta2x11_setup_pdev(struct pci_dev *pdev)
{
struct sta2x11_instance *instance = sta2x11_pdev_to_instance(pdev);
if (!instance) /* either a sta2x11 bridge or another ST device */
return;
/* We must enable all devices as master, for audio DMA to work */
pci_set_master(pdev);
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_STMICRO, PCI_ANY_ID, sta2x11_setup_pdev);
/*
* At boot we must set up the mappings for the pcie-to-amba bridge.
* It involves device access, and the same happens at suspend/resume time
*/
#define AHB_MAPB 0xCA4
#define AHB_CRW(i) (AHB_MAPB + 0 + (i) * 0x10)
#define AHB_CRW_SZMASK 0xfffffc00UL
#define AHB_CRW_ENABLE (1 << 0)
#define AHB_CRW_WTYPE_MEM (2 << 1)
#define AHB_CRW_ROE (1UL << 3) /* Relax Order Ena */
#define AHB_CRW_NSE (1UL << 4) /* No Snoop Enable */
#define AHB_BASE(i) (AHB_MAPB + 4 + (i) * 0x10)
#define AHB_PEXLBASE(i) (AHB_MAPB + 8 + (i) * 0x10)
#define AHB_PEXHBASE(i) (AHB_MAPB + 12 + (i) * 0x10)
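/*
 * Register layout, spelled out (illustration only): window 0 uses
 * CRW at 0xca4, BASE at 0xca8, PEXLBASE at 0xcac and PEXHBASE at
 * 0xcb0; window 1 starts 0x10 bytes later with CRW at 0xcb4, and
 * so on for all STA2X11_NR_FUNCS windows.
 */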
/* At probe time, enable mapping for each endpoint, using the pdev */
static void sta2x11_map_ep(struct pci_dev *pdev)
{
struct sta2x11_instance *instance = sta2x11_pdev_to_instance(pdev);
struct device *dev = &pdev->dev;
u32 amba_base, max_amba_addr;
int i, ret;
if (!instance)
return;
pci_read_config_dword(pdev, AHB_BASE(0), &amba_base);
max_amba_addr = amba_base + STA2X11_AMBA_SIZE - 1;
ret = dma_direct_set_offset(dev, 0, amba_base, STA2X11_AMBA_SIZE);
if (ret)
dev_err(dev, "sta2x11: could not set DMA offset\n");
dev->bus_dma_limit = max_amba_addr;
dma_set_mask_and_coherent(&pdev->dev, max_amba_addr);
/* Configure AHB mapping */
pci_write_config_dword(pdev, AHB_PEXLBASE(0), 0);
pci_write_config_dword(pdev, AHB_PEXHBASE(0), 0);
pci_write_config_dword(pdev, AHB_CRW(0), STA2X11_AMBA_SIZE |
AHB_CRW_WTYPE_MEM | AHB_CRW_ENABLE);
/* Disable all the other windows */
for (i = 1; i < STA2X11_NR_FUNCS; i++)
pci_write_config_dword(pdev, AHB_CRW(i), 0);
dev_info(&pdev->dev,
"sta2x11: Map EP %i: AMBA address %#8x-%#8x\n",
sta2x11_pdev_to_ep(pdev), amba_base, max_amba_addr);
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_STMICRO, PCI_ANY_ID, sta2x11_map_ep);
#ifdef CONFIG_PM /* Some register values must be saved and restored */
static struct sta2x11_mapping *sta2x11_pdev_to_mapping(struct pci_dev *pdev)
{
struct sta2x11_instance *instance;
int ep;
instance = sta2x11_pdev_to_instance(pdev);
if (!instance)
return NULL;
ep = sta2x11_pdev_to_ep(pdev);
return instance->map + ep;
}
static void suspend_mapping(struct pci_dev *pdev)
{
struct sta2x11_mapping *map = sta2x11_pdev_to_mapping(pdev);
int i;
if (!map)
return;
if (map->is_suspended)
return;
map->is_suspended = 1;
/* Save all window configs */
for (i = 0; i < STA2X11_NR_FUNCS; i++) {
struct sta2x11_ahb_regs *regs = map->regs + i;
pci_read_config_dword(pdev, AHB_BASE(i), ®s->base);
pci_read_config_dword(pdev, AHB_PEXLBASE(i), ®s->pexlbase);
pci_read_config_dword(pdev, AHB_PEXHBASE(i), ®s->pexhbase);
pci_read_config_dword(pdev, AHB_CRW(i), ®s->crw);
}
}
DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_STMICRO, PCI_ANY_ID, suspend_mapping);
static void resume_mapping(struct pci_dev *pdev)
{
struct sta2x11_mapping *map = sta2x11_pdev_to_mapping(pdev);
int i;
if (!map)
return;
if (!map->is_suspended)
goto out;
map->is_suspended = 0;
/* Restore all window configs */
for (i = 0; i < STA2X11_NR_FUNCS; i++) {
struct sta2x11_ahb_regs *regs = map->regs + i;
pci_write_config_dword(pdev, AHB_BASE(i), regs->base);
pci_write_config_dword(pdev, AHB_PEXLBASE(i), regs->pexlbase);
pci_write_config_dword(pdev, AHB_PEXHBASE(i), regs->pexhbase);
pci_write_config_dword(pdev, AHB_CRW(i), regs->crw);
}
out:
pci_set_master(pdev); /* Like at boot, enable master on all devices */
}
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_STMICRO, PCI_ANY_ID, resume_mapping);
#endif /* CONFIG_PM */
| linux-master | arch/x86/pci/sta2x11-fixup.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2004 Matthew Wilcox <[email protected]>
* Copyright (C) 2004 Intel Corp.
*/
/*
* mmconfig.c - Low-level direct PCI config space access via MMCONFIG
*/
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <asm/e820/api.h>
#include <asm/pci_x86.h>
/* Assume systems with more buses have correct MCFG */
#define mmcfg_virt_addr ((void __iomem *) fix_to_virt(FIX_PCIE_MCFG))
/* The base address of the last MMCONFIG device accessed */
static u32 mmcfg_last_accessed_device;
static int mmcfg_last_accessed_cpu;
/*
* Functions for accessing PCI configuration space with MMCONFIG accesses
*/
static u32 get_base_addr(unsigned int seg, int bus, unsigned devfn)
{
struct pci_mmcfg_region *cfg = pci_mmconfig_lookup(seg, bus);
if (cfg)
return cfg->address;
return 0;
}
/*
* This is always called under pci_config_lock
*/
static void pci_exp_set_dev_base(unsigned int base, int bus, int devfn)
{
u32 dev_base = base | PCI_MMCFG_BUS_OFFSET(bus) | (devfn << 12);
int cpu = smp_processor_id();
if (dev_base != mmcfg_last_accessed_device ||
cpu != mmcfg_last_accessed_cpu) {
mmcfg_last_accessed_device = dev_base;
mmcfg_last_accessed_cpu = cpu;
set_fixmap_nocache(FIX_PCIE_MCFG, dev_base);
}
}
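/*
 * Only one 4K fixmap slot (FIX_PCIE_MCFG) is available here, so the
 * mapping is recycled: it is re-pointed whenever a different
 * device's config page, or the same page from a different CPU, is
 * accessed, and the (device, cpu) pair is cached to skip redundant
 * set_fixmap_nocache() calls.
 */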
static int pci_mmcfg_read(unsigned int seg, unsigned int bus,
unsigned int devfn, int reg, int len, u32 *value)
{
unsigned long flags;
u32 base;
if ((bus > 255) || (devfn > 255) || (reg > 4095)) {
err: *value = -1;
return -EINVAL;
}
rcu_read_lock();
base = get_base_addr(seg, bus, devfn);
if (!base) {
rcu_read_unlock();
goto err;
}
raw_spin_lock_irqsave(&pci_config_lock, flags);
pci_exp_set_dev_base(base, bus, devfn);
switch (len) {
case 1:
*value = mmio_config_readb(mmcfg_virt_addr + reg);
break;
case 2:
*value = mmio_config_readw(mmcfg_virt_addr + reg);
break;
case 4:
*value = mmio_config_readl(mmcfg_virt_addr + reg);
break;
}
raw_spin_unlock_irqrestore(&pci_config_lock, flags);
rcu_read_unlock();
return 0;
}
static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
unsigned int devfn, int reg, int len, u32 value)
{
unsigned long flags;
u32 base;
if ((bus > 255) || (devfn > 255) || (reg > 4095))
return -EINVAL;
rcu_read_lock();
base = get_base_addr(seg, bus, devfn);
if (!base) {
rcu_read_unlock();
return -EINVAL;
}
raw_spin_lock_irqsave(&pci_config_lock, flags);
pci_exp_set_dev_base(base, bus, devfn);
switch (len) {
case 1:
mmio_config_writeb(mmcfg_virt_addr + reg, value);
break;
case 2:
mmio_config_writew(mmcfg_virt_addr + reg, value);
break;
case 4:
mmio_config_writel(mmcfg_virt_addr + reg, value);
break;
}
raw_spin_unlock_irqrestore(&pci_config_lock, flags);
rcu_read_unlock();
return 0;
}
const struct pci_raw_ops pci_mmcfg = {
.read = pci_mmcfg_read,
.write = pci_mmcfg_write,
};
int __init pci_mmcfg_arch_init(void)
{
printk(KERN_INFO "PCI: Using MMCONFIG for extended config space\n");
raw_pci_ext_ops = &pci_mmcfg;
return 1;
}
void __init pci_mmcfg_arch_free(void)
{
}
int pci_mmcfg_arch_map(struct pci_mmcfg_region *cfg)
{
return 0;
}
void pci_mmcfg_arch_unmap(struct pci_mmcfg_region *cfg)
{
unsigned long flags;
/* Invalidate the cached mmcfg map entry. */
raw_spin_lock_irqsave(&pci_config_lock, flags);
mmcfg_last_accessed_device = 0;
raw_spin_unlock_irqrestore(&pci_config_lock, flags);
}
| linux-master | arch/x86/pci/mmconfig_32.c |
// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) "PCI: " fmt
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/pci-acpi.h>
#include <asm/numa.h>
#include <asm/pci_x86.h>
struct pci_root_info {
struct acpi_pci_root_info common;
struct pci_sysdata sd;
#ifdef CONFIG_PCI_MMCONFIG
bool mcfg_added;
u8 start_bus;
u8 end_bus;
#endif
};
bool pci_use_e820 = true;
static bool pci_use_crs = true;
static bool pci_ignore_seg;
static int __init set_use_crs(const struct dmi_system_id *id)
{
pci_use_crs = true;
return 0;
}
static int __init set_nouse_crs(const struct dmi_system_id *id)
{
pci_use_crs = false;
return 0;
}
static int __init set_ignore_seg(const struct dmi_system_id *id)
{
pr_info("%s detected: ignoring ACPI _SEG\n", id->ident);
pci_ignore_seg = true;
return 0;
}
static int __init set_no_e820(const struct dmi_system_id *id)
{
pr_info("%s detected: not clipping E820 regions from _CRS\n",
id->ident);
pci_use_e820 = false;
return 0;
}
static const struct dmi_system_id pci_crs_quirks[] __initconst = {
/* http://bugzilla.kernel.org/show_bug.cgi?id=14183 */
{
.callback = set_use_crs,
.ident = "IBM System x3800",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
DMI_MATCH(DMI_PRODUCT_NAME, "x3800"),
},
},
/* https://bugzilla.kernel.org/show_bug.cgi?id=16007 */
/* 2006 AMD HT/VIA system with two host bridges */
{
.callback = set_use_crs,
.ident = "ASRock ALiveSATA2-GLAN",
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "ALiveSATA2-GLAN"),
},
},
/* https://bugzilla.kernel.org/show_bug.cgi?id=30552 */
/* 2006 AMD HT/VIA system with two host bridges */
{
.callback = set_use_crs,
.ident = "ASUS M2V-MX SE",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
DMI_MATCH(DMI_BOARD_NAME, "M2V-MX SE"),
DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
},
},
/* https://bugzilla.kernel.org/show_bug.cgi?id=42619 */
{
.callback = set_use_crs,
.ident = "MSI MS-7253",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
DMI_MATCH(DMI_BOARD_NAME, "MS-7253"),
DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
},
},
/* https://bugs.launchpad.net/ubuntu/+source/alsa-driver/+bug/931368 */
/* https://bugs.launchpad.net/ubuntu/+source/alsa-driver/+bug/1033299 */
{
.callback = set_use_crs,
.ident = "Foxconn K8M890-8237A",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Foxconn"),
DMI_MATCH(DMI_BOARD_NAME, "K8M890-8237A"),
DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
},
},
/* Now for the blacklist... */
/* https://bugzilla.redhat.com/show_bug.cgi?id=769657 */
{
.callback = set_nouse_crs,
.ident = "Dell Studio 1557",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1557"),
DMI_MATCH(DMI_BIOS_VERSION, "A09"),
},
},
/* https://bugzilla.redhat.com/show_bug.cgi?id=769657 */
{
.callback = set_nouse_crs,
.ident = "Thinkpad SL510",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_BOARD_NAME, "2847DFG"),
DMI_MATCH(DMI_BIOS_VERSION, "6JET85WW (1.43 )"),
},
},
/* https://bugzilla.kernel.org/show_bug.cgi?id=42606 */
{
.callback = set_nouse_crs,
.ident = "Supermicro X8DTH",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
DMI_MATCH(DMI_PRODUCT_NAME, "X8DTH-i/6/iF/6F"),
DMI_MATCH(DMI_BIOS_VERSION, "2.0a"),
},
},
/* https://bugzilla.kernel.org/show_bug.cgi?id=15362 */
{
.callback = set_ignore_seg,
.ident = "HP xw9300",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP xw9300 Workstation"),
},
},
/*
* Many Lenovo models with "IIL" in their DMI_PRODUCT_VERSION have
* an E820 reserved region that covers the entire 32-bit host
* bridge memory window from _CRS. Using the E820 region to clip
* _CRS means no space is available for hot-added or uninitialized
* PCI devices. This typically breaks I2C controllers for touchpads
* and hot-added Thunderbolt devices. See the commit log for
* models known to require this quirk and related bug reports.
*/
{
.callback = set_no_e820,
.ident = "Lenovo *IIL* product version",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "IIL"),
},
},
/*
* The Acer Spin 5 (SP513-54N) has the same E820 reservation covering
* the entire _CRS 32-bit window issue as the Lenovo *IIL* models.
* See https://bugs.launchpad.net/bugs/1884232
*/
{
.callback = set_no_e820,
.ident = "Acer Spin 5 (SP513-54N)",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_NAME, "Spin SP513-54N"),
},
},
/*
* Clevo X170KM-G barebones have the same E820 reservation covering
* the entire _CRS 32-bit window issue as the Lenovo *IIL* models.
* See https://bugzilla.kernel.org/show_bug.cgi?id=214259
*/
{
.callback = set_no_e820,
.ident = "Clevo X170KM-G Barebone",
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "X170KM-G"),
},
},
{}
};
void __init pci_acpi_crs_quirks(void)
{
int year = dmi_get_bios_year();
if (year >= 0 && year < 2008 && iomem_resource.end <= 0xffffffff)
pci_use_crs = false;
/*
* Some firmware includes unusable space (host bridge registers,
* hidden PCI device BARs, etc) in PCI host bridge _CRS. This is a
* firmware defect, and 4dc2287c1805 ("x86: avoid E820 regions when
* allocating address space") has clipped out the unusable space in
* the past.
*
* But other firmware supplies E820 reserved regions that cover
* entire _CRS windows, so clipping throws away the entire window,
* leaving none for hot-added or uninitialized devices. These E820
* entries are probably *not* a firmware defect, so disable the
* clipping by default for post-2022 machines.
*
* We already have quirks to disable clipping for pre-2023
* machines, and we'll likely need quirks to *enable* clipping for
* post-2022 machines that incorrectly include unusable space in
* _CRS.
*/
if (year >= 2023)
pci_use_e820 = false;
dmi_check_system(pci_crs_quirks);
/*
* If the user specifies "pci=use_crs" or "pci=nocrs" explicitly, that
* takes precedence over anything we figured out above.
*/
if (pci_probe & PCI_ROOT_NO_CRS)
pci_use_crs = false;
else if (pci_probe & PCI_USE__CRS)
pci_use_crs = true;
pr_info("%s host bridge windows from ACPI; if necessary, use \"pci=%s\" and report a bug\n",
pci_use_crs ? "Using" : "Ignoring",
pci_use_crs ? "nocrs" : "use_crs");
/* "pci=use_e820"/"pci=no_e820" on the kernel cmdline takes precedence */
if (pci_probe & PCI_NO_E820)
pci_use_e820 = false;
else if (pci_probe & PCI_USE_E820)
pci_use_e820 = true;
pr_info("%s E820 reservations for host bridge windows\n",
pci_use_e820 ? "Using" : "Ignoring");
if (pci_probe & (PCI_NO_E820 | PCI_USE_E820))
pr_info("Please notify [email protected] so future kernels can do this automatically\n");
}
#ifdef CONFIG_PCI_MMCONFIG
static int check_segment(u16 seg, struct device *dev, char *estr)
{
if (seg) {
dev_err(dev, "%s can't access configuration space under this host bridge\n",
estr);
return -EIO;
}
/*
 * Failure to add MMCFG information is not fatal; we just can't
 * access the extended configuration space of devices under this
 * host bridge.
 */
dev_warn(dev, "%s can't access extended configuration space under this bridge\n",
estr);
return 0;
}
static int setup_mcfg_map(struct acpi_pci_root_info *ci)
{
int result, seg;
struct pci_root_info *info;
struct acpi_pci_root *root = ci->root;
struct device *dev = &ci->bridge->dev;
info = container_of(ci, struct pci_root_info, common);
info->start_bus = (u8)root->secondary.start;
info->end_bus = (u8)root->secondary.end;
info->mcfg_added = false;
seg = info->sd.domain;
/* return success if MMCFG is not in use */
if (raw_pci_ext_ops && raw_pci_ext_ops != &pci_mmcfg)
return 0;
if (!(pci_probe & PCI_PROBE_MMCONF))
return check_segment(seg, dev, "MMCONFIG is disabled,");
result = pci_mmconfig_insert(dev, seg, info->start_bus, info->end_bus,
root->mcfg_addr);
if (result == 0) {
/* enable MMCFG if it hasn't been enabled yet */
if (raw_pci_ext_ops == NULL)
raw_pci_ext_ops = &pci_mmcfg;
info->mcfg_added = true;
} else if (result != -EEXIST)
return check_segment(seg, dev,
"fail to add MMCONFIG information,");
return 0;
}
static void teardown_mcfg_map(struct acpi_pci_root_info *ci)
{
struct pci_root_info *info;
info = container_of(ci, struct pci_root_info, common);
if (info->mcfg_added) {
pci_mmconfig_delete(info->sd.domain,
info->start_bus, info->end_bus);
info->mcfg_added = false;
}
}
#else
static int setup_mcfg_map(struct acpi_pci_root_info *ci)
{
return 0;
}
static void teardown_mcfg_map(struct acpi_pci_root_info *ci)
{
}
#endif
static int pci_acpi_root_get_node(struct acpi_pci_root *root)
{
int busnum = root->secondary.start;
struct acpi_device *device = root->device;
int node = acpi_get_node(device->handle);
if (node == NUMA_NO_NODE) {
node = x86_pci_root_bus_node(busnum);
if (node != 0 && node != NUMA_NO_NODE)
dev_info(&device->dev, FW_BUG "no _PXM; falling back to node %d from hardware (may be inconsistent with ACPI node numbers)\n",
node);
}
if (node != NUMA_NO_NODE && !node_online(node))
node = NUMA_NO_NODE;
return node;
}
static int pci_acpi_root_init_info(struct acpi_pci_root_info *ci)
{
return setup_mcfg_map(ci);
}
static void pci_acpi_root_release_info(struct acpi_pci_root_info *ci)
{
teardown_mcfg_map(ci);
kfree(container_of(ci, struct pci_root_info, common));
}
/*
* An IO port or MMIO resource assigned to a PCI host bridge may be
* consumed by the host bridge itself or available to its child
* bus/devices. The ACPI specification defines a bit (Producer/Consumer)
* to tell whether the resource is consumed by the host bridge itself,
* but firmware hasn't used that bit consistently, so we can't rely on it.
*
* On x86 and IA64 platforms, all IO port and MMIO resources are assumed
* to be available to child bus/devices except one special case:
* IO port [0xCF8-0xCFF] is consumed by the host bridge itself
* to access PCI configuration space.
*
 * So explicitly filter out the PCI CFG I/O ports [0xCF8-0xCFF].
*/
static bool resource_is_pcicfg_ioport(struct resource *res)
{
return (res->flags & IORESOURCE_IO) &&
res->start == 0xCF8 && res->end == 0xCFF;
}
static int pci_acpi_root_prepare_resources(struct acpi_pci_root_info *ci)
{
struct acpi_device *device = ci->bridge;
int busnum = ci->root->secondary.start;
struct resource_entry *entry, *tmp;
int status;
status = acpi_pci_probe_root_resources(ci);
if (pci_use_crs) {
resource_list_for_each_entry_safe(entry, tmp, &ci->resources)
if (resource_is_pcicfg_ioport(entry->res))
resource_list_destroy_entry(entry);
return status;
}
resource_list_for_each_entry_safe(entry, tmp, &ci->resources) {
dev_printk(KERN_DEBUG, &device->dev,
"host bridge window %pR (ignored)\n", entry->res);
resource_list_destroy_entry(entry);
}
x86_pci_root_bus_resources(busnum, &ci->resources);
return 0;
}
static struct acpi_pci_root_ops acpi_pci_root_ops = {
.pci_ops = &pci_root_ops,
.init_info = pci_acpi_root_init_info,
.release_info = pci_acpi_root_release_info,
.prepare_resources = pci_acpi_root_prepare_resources,
};
struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
{
int domain = root->segment;
int busnum = root->secondary.start;
int node = pci_acpi_root_get_node(root);
struct pci_bus *bus;
if (pci_ignore_seg)
root->segment = domain = 0;
if (domain && !pci_domains_supported) {
pr_warn("pci_bus %04x:%02x: ignored (multiple domains not supported)\n",
domain, busnum);
return NULL;
}
bus = pci_find_bus(domain, busnum);
if (bus) {
/*
* If the desired bus has been scanned already, replace
* its bus->sysdata.
*/
struct pci_sysdata sd = {
.domain = domain,
.node = node,
.companion = root->device
};
memcpy(bus->sysdata, &sd, sizeof(sd));
} else {
struct pci_root_info *info;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
dev_err(&root->device->dev,
"pci_bus %04x:%02x: ignored (out of memory)\n",
domain, busnum);
else {
info->sd.domain = domain;
info->sd.node = node;
info->sd.companion = root->device;
bus = acpi_pci_root_create(root, &acpi_pci_root_ops,
&info->common, &info->sd);
}
}
/*
 * After the PCIe bus has been walked and all devices discovered,
 * configure any settings of the fabric that might be necessary.
 */
if (bus) {
struct pci_bus *child;
list_for_each_entry(child, &bus->children, node)
pcie_bus_configure_settings(child);
}
return bus;
}
int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
{
/*
* We pass NULL as parent to pci_create_root_bus(), so if it is not NULL
* here, pci_create_root_bus() has been called by someone else and
* sysdata is likely to be different from what we expect. Let it go in
* that case.
*/
if (!bridge->dev.parent) {
struct pci_sysdata *sd = bridge->bus->sysdata;
ACPI_COMPANION_SET(&bridge->dev, sd->companion);
}
return 0;
}
int __init pci_acpi_init(void)
{
struct pci_dev *dev = NULL;
if (acpi_noirq)
return -ENODEV;
pr_info("Using ACPI for IRQ routing\n");
acpi_irq_penalty_init();
pcibios_enable_irq = acpi_pci_irq_enable;
pcibios_disable_irq = acpi_pci_irq_disable;
x86_init.pci.init_irq = x86_init_noop;
if (pci_routeirq) {
/*
* PCI IRQ routing is set up by pci_enable_device(), but we
* also do it here in case there are still broken drivers that
* don't use pci_enable_device().
*/
pr_info("Routing PCI interrupts for all devices because \"pci=routeirq\" specified\n");
for_each_pci_dev(dev)
acpi_pci_irq_enable(dev);
}
return 0;
}
| linux-master | arch/x86/pci/acpi.c |
/*
* Copyright (C) 2007 Antonino Daplas <[email protected]>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*
*/
#include <linux/fb.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/vgaarb.h>
#include <asm/fb.h>
void fb_pgprotect(struct file *file, struct vm_area_struct *vma, unsigned long off)
{
unsigned long prot;
prot = pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK;
if (boot_cpu_data.x86 > 3)
pgprot_val(vma->vm_page_prot) =
prot | cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS);
}
EXPORT_SYMBOL(fb_pgprotect);
int fb_is_primary_device(struct fb_info *info)
{
struct device *device = info->device;
struct pci_dev *pci_dev;
if (!device || !dev_is_pci(device))
return 0;
pci_dev = to_pci_dev(device);
if (pci_dev == vga_default_device())
return 1;
return 0;
}
EXPORT_SYMBOL(fb_is_primary_device);
MODULE_LICENSE("GPL");
| linux-master | arch/x86/video/fbdev.c |
// SPDX-License-Identifier: GPL-2.0
/*---------------------------------------------------------------------------+
| reg_mul.c |
| |
| Multiply one FPU_REG by another, put the result in a destination FPU_REG. |
| |
| Copyright (C) 1992,1993,1997 |
| W. Metzenthen, 22 Parker St, Ormond, Vic 3163, Australia |
| E-mail [email protected] |
| |
| Returns the tag of the result if no exceptions or errors occurred. |
| |
+---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------+
| The destination may be any FPU_REG, including one of the source FPU_REGs. |
+---------------------------------------------------------------------------*/
#include "fpu_emu.h"
#include "exception.h"
#include "reg_constant.h"
#include "fpu_system.h"
/*
 * Multiply two registers to give a register result.
 * The sources are st(deststnr) and (b,tagb,signb).
 * The destination is st(deststnr).
 */
/* This routine must be called with non-empty source registers */
int FPU_mul(FPU_REG const *b, u_char tagb, int deststnr, int control_w)
{
FPU_REG *a = &st(deststnr);
FPU_REG *dest = a;
u_char taga = FPU_gettagi(deststnr);
u_char saved_sign = getsign(dest);
u_char sign = (getsign(a) ^ getsign(b));
int tag;
if (!(taga | tagb)) {
/* Both regs Valid, this should be the most common case. */
tag = FPU_u_mul(a, b, dest, control_w, sign,
		exponent(a) + exponent(b));
if (tag < 0) {
setsign(dest, saved_sign);
return tag;
}
FPU_settagi(deststnr, tag);
return tag;
}
if (taga == TAG_Special)
taga = FPU_Special(a);
if (tagb == TAG_Special)
tagb = FPU_Special(b);
if (((taga == TAG_Valid) && (tagb == TW_Denormal))
|| ((taga == TW_Denormal) && (tagb == TAG_Valid))
|| ((taga == TW_Denormal) && (tagb == TW_Denormal))) {
FPU_REG x, y;
if (denormal_operand() < 0)
return FPU_Exception;
FPU_to_exp16(a, &x);
FPU_to_exp16(b, &y);
tag = FPU_u_mul(&x, &y, dest, control_w, sign,
exponent16(&x) + exponent16(&y));
if (tag < 0) {
setsign(dest, saved_sign);
return tag;
}
FPU_settagi(deststnr, tag);
return tag;
} else if ((taga <= TW_Denormal) && (tagb <= TW_Denormal)) {
if (((tagb == TW_Denormal) || (taga == TW_Denormal))
&& (denormal_operand() < 0))
return FPU_Exception;
/* Must have either both arguments == zero, or one valid and the
   other zero. The result is therefore zero. */
FPU_copy_to_regi(&CONST_Z, TAG_Zero, deststnr);
/* The 80486 book says that the answer is +0, but a real 80486
   behaves this way. IEEE-754 apparently says it should be this
   way. */
setsign(dest, sign);
return TAG_Zero;
}
/* Must have infinities, NaNs, etc */
else if ((taga == TW_NaN) || (tagb == TW_NaN)) {
return real_2op_NaN(b, tagb, deststnr, &st(0));
} else if (((taga == TW_Infinity) && (tagb == TAG_Zero))
|| ((tagb == TW_Infinity) && (taga == TAG_Zero))) {
return arith_invalid(deststnr); /* Zero*Infinity is invalid */
} else if (((taga == TW_Denormal) || (tagb == TW_Denormal))
&& (denormal_operand() < 0)) {
return FPU_Exception;
} else if (taga == TW_Infinity) {
FPU_copy_to_regi(a, TAG_Special, deststnr);
setsign(dest, sign);
return TAG_Special;
} else if (tagb == TW_Infinity) {
FPU_copy_to_regi(b, TAG_Special, deststnr);
setsign(dest, sign);
return TAG_Special;
}
#ifdef PARANOID
else {
EXCEPTION(EX_INTERNAL | 0x102);
return FPU_Exception;
}
#endif /* PARANOID */
return 0;
}
| linux-master | arch/x86/math-emu/reg_mul.c |
// SPDX-License-Identifier: GPL-2.0
/*---------------------------------------------------------------------------+
| reg_ld_str.c |
| |
| All of the functions which transfer data between user memory and FPU_REGs.|
| |
| Copyright (C) 1992,1993,1994,1996,1997 |
| W. Metzenthen, 22 Parker St, Ormond, Vic 3163, Australia |
| E-mail [email protected] |
| |
| |
+---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------+
| Note: |
| The file contains code which accesses user memory. |
| Emulator static data may change when user memory is accessed, due to |
| other processes using the emulator while swapping is in progress. |
+---------------------------------------------------------------------------*/
#include "fpu_emu.h"
#include <linux/uaccess.h>
#include "fpu_system.h"
#include "exception.h"
#include "reg_constant.h"
#include "control_w.h"
#include "status_w.h"
#define DOUBLE_Emax 1023 /* largest valid exponent */
#define DOUBLE_Ebias 1023
#define DOUBLE_Emin (-1022) /* smallest valid exponent */
#define SINGLE_Emax 127 /* largest valid exponent */
#define SINGLE_Ebias 127
#define SINGLE_Emin (-126) /* smallest valid exponent */
static u_char normalize_no_excep(FPU_REG *r, int exp, int sign)
{
u_char tag;
setexponent16(r, exp);
tag = FPU_normalize_nuo(r);
stdexp(r);
if (sign)
setnegative(r);
return tag;
}
int FPU_tagof(FPU_REG *ptr)
{
int exp;
exp = exponent16(ptr) & 0x7fff;
if (exp == 0) {
if (!(ptr->sigh | ptr->sigl)) {
return TAG_Zero;
}
/* The number is a de-normal or pseudodenormal. */
return TAG_Special;
}
if (exp == 0x7fff) {
/* Is an Infinity, a NaN, or an unsupported data type. */
return TAG_Special;
}
if (!(ptr->sigh & 0x80000000)) {
/* Unsupported data type. */
/* Valid numbers have the ms bit set to 1. */
/* Unnormal. */
return TAG_Special;
}
return TAG_Valid;
}
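/*
 * Summary of the classification above: zero exponent with zero
 * significand is TAG_Zero; zero exponent with a non-zero significand
 * (denormal or pseudodenormal), an all-ones exponent (infinity, NaN
 * or unsupported) and a finite number missing the explicit integer
 * bit (an unnormal) are all TAG_Special; only a normalized finite
 * number with bit 63 set is TAG_Valid.
 */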
/* Get a long double from user memory */
int FPU_load_extended(long double __user *s, int stnr)
{
FPU_REG *sti_ptr = &st(stnr);
RE_ENTRANT_CHECK_OFF;
FPU_access_ok(s, 10);
FPU_copy_from_user(sti_ptr, s, 10);
RE_ENTRANT_CHECK_ON;
return FPU_tagof(sti_ptr);
}
/* Get a double from user memory */
int FPU_load_double(double __user *dfloat, FPU_REG *loaded_data)
{
int exp, tag, negative;
unsigned m64, l64;
RE_ENTRANT_CHECK_OFF;
FPU_access_ok(dfloat, 8);
FPU_get_user(m64, 1 + (unsigned long __user *)dfloat);
FPU_get_user(l64, (unsigned long __user *)dfloat);
RE_ENTRANT_CHECK_ON;
negative = (m64 & 0x80000000) ? SIGN_Negative : SIGN_Positive;
exp = ((m64 & 0x7ff00000) >> 20) - DOUBLE_Ebias + EXTENDED_Ebias;
m64 &= 0xfffff;
if (exp > DOUBLE_Emax + EXTENDED_Ebias) {
/* Infinity or NaN */
if ((m64 == 0) && (l64 == 0)) {
/* +- infinity */
loaded_data->sigh = 0x80000000;
loaded_data->sigl = 0x00000000;
exp = EXP_Infinity + EXTENDED_Ebias;
tag = TAG_Special;
} else {
/* Must be a signaling or quiet NaN */
exp = EXP_NaN + EXTENDED_Ebias;
loaded_data->sigh = (m64 << 11) | 0x80000000;
loaded_data->sigh |= l64 >> 21;
loaded_data->sigl = l64 << 11;
tag = TAG_Special; /* The calling function must look for NaNs */
}
} else if (exp < DOUBLE_Emin + EXTENDED_Ebias) {
/* Zero or de-normal */
if ((m64 == 0) && (l64 == 0)) {
/* Zero */
reg_copy(&CONST_Z, loaded_data);
exp = 0;
tag = TAG_Zero;
} else {
/* De-normal */
loaded_data->sigh = m64 << 11;
loaded_data->sigh |= l64 >> 21;
loaded_data->sigl = l64 << 11;
return normalize_no_excep(loaded_data, DOUBLE_Emin,
negative)
| (denormal_operand() < 0 ? FPU_Exception : 0);
}
} else {
loaded_data->sigh = (m64 << 11) | 0x80000000;
loaded_data->sigh |= l64 >> 21;
loaded_data->sigl = l64 << 11;
tag = TAG_Valid;
}
setexponent16(loaded_data, exp | negative);
return tag;
}
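/*
 * The shifts above, as a worked example (illustration only): the
 * IEEE double 1.0 has exponent field 0x3ff and a zero mantissa, so
 * exp becomes 0x3ff - 1023 + 16383 = 0x3fff, sigh becomes
 * (0 << 11) | 0x80000000 (the extended format's explicit integer
 * bit) and sigl is 0.
 */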
/* Get a float from user memory */
int FPU_load_single(float __user *single, FPU_REG *loaded_data)
{
unsigned m32;
int exp, tag, negative;
RE_ENTRANT_CHECK_OFF;
FPU_access_ok(single, 4);
FPU_get_user(m32, (unsigned long __user *)single);
RE_ENTRANT_CHECK_ON;
negative = (m32 & 0x80000000) ? SIGN_Negative : SIGN_Positive;
if (!(m32 & 0x7fffffff)) {
/* Zero */
reg_copy(&CONST_Z, loaded_data);
addexponent(loaded_data, negative);
return TAG_Zero;
}
exp = ((m32 & 0x7f800000) >> 23) - SINGLE_Ebias + EXTENDED_Ebias;
m32 = (m32 & 0x7fffff) << 8;
if (exp < SINGLE_Emin + EXTENDED_Ebias) {
/* De-normals */
loaded_data->sigh = m32;
loaded_data->sigl = 0;
return normalize_no_excep(loaded_data, SINGLE_Emin, negative)
| (denormal_operand() < 0 ? FPU_Exception : 0);
} else if (exp > SINGLE_Emax + EXTENDED_Ebias) {
/* Infinity or NaN */
if (m32 == 0) {
/* +- infinity */
loaded_data->sigh = 0x80000000;
loaded_data->sigl = 0x00000000;
exp = EXP_Infinity + EXTENDED_Ebias;
tag = TAG_Special;
} else {
/* Must be a signaling or quiet NaN */
exp = EXP_NaN + EXTENDED_Ebias;
loaded_data->sigh = m32 | 0x80000000;
loaded_data->sigl = 0;
tag = TAG_Special; /* The calling function must look for NaNs */
}
} else {
loaded_data->sigh = m32 | 0x80000000;
loaded_data->sigl = 0;
tag = TAG_Valid;
}
setexponent16(loaded_data, exp | negative); /* Set the sign. */
return tag;
}
/* Get a long long from user memory */
int FPU_load_int64(long long __user *_s)
{
long long s;
int sign;
FPU_REG *st0_ptr = &st(0);
RE_ENTRANT_CHECK_OFF;
FPU_access_ok(_s, 8);
if (copy_from_user(&s, _s, 8))
FPU_abort;
RE_ENTRANT_CHECK_ON;
if (s == 0) {
reg_copy(&CONST_Z, st0_ptr);
return TAG_Zero;
}
if (s > 0)
sign = SIGN_Positive;
else {
s = -s;
sign = SIGN_Negative;
}
significand(st0_ptr) = s;
return normalize_no_excep(st0_ptr, 63, sign);
}
/* Get a long from user memory */
int FPU_load_int32(long __user *_s, FPU_REG *loaded_data)
{
long s;
int negative;
RE_ENTRANT_CHECK_OFF;
FPU_access_ok(_s, 4);
FPU_get_user(s, _s);
RE_ENTRANT_CHECK_ON;
if (s == 0) {
reg_copy(&CONST_Z, loaded_data);
return TAG_Zero;
}
if (s > 0)
negative = SIGN_Positive;
else {
s = -s;
negative = SIGN_Negative;
}
loaded_data->sigh = s;
loaded_data->sigl = 0;
return normalize_no_excep(loaded_data, 31, negative);
}
/* Get a short from user memory */
int FPU_load_int16(short __user *_s, FPU_REG *loaded_data)
{
int s, negative;
RE_ENTRANT_CHECK_OFF;
FPU_access_ok(_s, 2);
/* Cast as short to get the sign extended. */
FPU_get_user(s, _s);
RE_ENTRANT_CHECK_ON;
if (s == 0) {
reg_copy(&CONST_Z, loaded_data);
return TAG_Zero;
}
if (s > 0)
negative = SIGN_Positive;
else {
s = -s;
negative = SIGN_Negative;
}
loaded_data->sigh = s << 16;
loaded_data->sigl = 0;
return normalize_no_excep(loaded_data, 15, negative);
}
/* Get a packed bcd array from user memory */
int FPU_load_bcd(u_char __user *s)
{
FPU_REG *st0_ptr = &st(0);
int pos;
u_char bcd;
long long l = 0;
int sign;
RE_ENTRANT_CHECK_OFF;
FPU_access_ok(s, 10);
RE_ENTRANT_CHECK_ON;
for (pos = 8; pos >= 0; pos--) {
l *= 10;
RE_ENTRANT_CHECK_OFF;
FPU_get_user(bcd, s + pos);
RE_ENTRANT_CHECK_ON;
l += bcd >> 4;
l *= 10;
l += bcd & 0x0f;
}
RE_ENTRANT_CHECK_OFF;
FPU_get_user(sign, s + 9);
sign = sign & 0x80 ? SIGN_Negative : SIGN_Positive;
RE_ENTRANT_CHECK_ON;
if (l == 0) {
reg_copy(&CONST_Z, st0_ptr);
addexponent(st0_ptr, sign); /* Set the sign. */
return TAG_Zero;
} else {
significand(st0_ptr) = l;
return normalize_no_excep(st0_ptr, 63, sign);
}
}
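/*
 * BCD layout, as a worked example (illustration only): byte 0 holds
 * the two least significant digits and byte 8 the two most
 * significant, with the sign in bit 7 of byte 9. Decimal -12 is thus
 * byte 0 = 0x12, bytes 1-8 = 0x00 and byte 9 = 0x80; the loop above
 * accumulates l = 1 * 10 + 2 = 12 before the sign is applied.
 */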
/*===========================================================================*/
/* Put a long double into user memory */
int FPU_store_extended(FPU_REG *st0_ptr, u_char st0_tag,
long double __user * d)
{
/*
 * The only exception raised by an attempt to store to an extended
 * format is the Invalid Stack exception, i.e. attempting to store
 * from an empty register.
 */
if (st0_tag != TAG_Empty) {
RE_ENTRANT_CHECK_OFF;
FPU_access_ok(d, 10);
FPU_put_user(st0_ptr->sigl, (unsigned long __user *)d);
FPU_put_user(st0_ptr->sigh,
(unsigned long __user *)((u_char __user *) d + 4));
FPU_put_user(exponent16(st0_ptr),
	     (unsigned short __user *)((u_char __user *) d + 8));
RE_ENTRANT_CHECK_ON;
return 1;
}
/* Empty register (stack underflow) */
EXCEPTION(EX_StackUnder);
if (control_word & CW_Invalid) {
/* The masked response */
/* Put out the QNaN indefinite */
RE_ENTRANT_CHECK_OFF;
FPU_access_ok(d, 10);
FPU_put_user(0, (unsigned long __user *)d);
FPU_put_user(0xc0000000, 1 + (unsigned long __user *)d);
FPU_put_user(0xffff, 4 + (short __user *)d);
RE_ENTRANT_CHECK_ON;
return 1;
} else
return 0;
}
/* Put a double into user memory */
int FPU_store_double(FPU_REG *st0_ptr, u_char st0_tag, double __user *dfloat)
{
unsigned long l[2];
unsigned long increment = 0; /* avoid gcc warnings */
int precision_loss;
int exp;
FPU_REG tmp;
l[0] = 0;
l[1] = 0;
if (st0_tag == TAG_Valid) {
reg_copy(st0_ptr, &tmp);
exp = exponent(&tmp);
if (exp < DOUBLE_Emin) { /* It may be a denormal */
addexponent(&tmp, -DOUBLE_Emin + 52); /* largest exp to be 51 */
denormal_arg:
if ((precision_loss = FPU_round_to_int(&tmp, st0_tag))) {
#ifdef PECULIAR_486
/* Did it round to a non-denormal? */
/* This behaviour might be regarded as peculiar; it appears
   that the 80486 rounds to the dest precision, then
   converts to decide underflow. */
if (!((tmp.sigh == 0x00100000) && (tmp.sigl == 0) &&
      (st0_ptr->sigl & 0x000007ff)))
#endif /* PECULIAR_486 */
{
EXCEPTION(EX_Underflow);
/* This is a special case: see sec 16.2.5.1 of
   the 80486 book */
if (!(control_word & CW_Underflow))
return 0;
}
EXCEPTION(precision_loss);
if (!(control_word & CW_Precision))
return 0;
}
l[0] = tmp.sigl;
l[1] = tmp.sigh;
} else {
if (tmp.sigl & 0x000007ff) {
precision_loss = 1;
switch (control_word & CW_RC) {
case RC_RND:
/* Rounding can get a little messy... */
increment = ((tmp.sigl & 0x7ff) > 0x400) | /* nearest */
((tmp.sigl & 0xc00) == 0xc00); /* odd -> even */
break;
case RC_DOWN: /* towards -infinity */
increment = signpositive(&tmp) ? 0 : tmp.sigl & 0x7ff;
break;
case RC_UP: /* towards +infinity */
increment = signpositive(&tmp) ? tmp.sigl & 0x7ff : 0;
break;
case RC_CHOP:
increment = 0;
break;
}
/* Truncate the mantissa */
tmp.sigl &= 0xfffff800;
if (increment) {
if (tmp.sigl >= 0xfffff800) {
/* the sigl part overflows */
if (tmp.sigh == 0xffffffff) {
/* The sigh part overflows */
tmp.sigh = 0x80000000;
exp++;
if (exp >= EXP_OVER)
goto overflow;
} else {
tmp.sigh++;
}
tmp.sigl = 0x00000000;
} else {
/* We only need to increment sigl */
tmp.sigl += 0x00000800;
}
}
} else
precision_loss = 0;
l[0] = (tmp.sigl >> 11) | (tmp.sigh << 21);
l[1] = ((tmp.sigh >> 11) & 0xfffff);
if (exp > DOUBLE_Emax) {
overflow:
EXCEPTION(EX_Overflow);
if (!(control_word & CW_Overflow))
return 0;
set_precision_flag_up();
if (!(control_word & CW_Precision))
return 0;
/* This is a special case: see sec 16.2.5.1 of the 80486 book */
/* Overflow to infinity */
l[1] = 0x7ff00000; /* Set to + INF */
} else {
if (precision_loss) {
if (increment)
set_precision_flag_up();
else
set_precision_flag_down();
}
/* Add the exponent */
l[1] |= (((exp + DOUBLE_Ebias) & 0x7ff) << 20);
}
}
} else if (st0_tag == TAG_Zero) {
/* Number is zero */
} else if (st0_tag == TAG_Special) {
st0_tag = FPU_Special(st0_ptr);
if (st0_tag == TW_Denormal) {
/* A denormal will always underflow. */
#ifndef PECULIAR_486
/* An 80486 is supposed to be able to generate
a denormal exception here, but... */
/* Underflow has priority. */
if (control_word & CW_Underflow)
denormal_operand();
#endif /* PECULIAR_486 */
reg_copy(st0_ptr, &tmp);
goto denormal_arg;
} else if (st0_tag == TW_Infinity) {
l[1] = 0x7ff00000;
} else if (st0_tag == TW_NaN) {
/* Is it really a NaN ? */
if ((exponent(st0_ptr) == EXP_OVER)
&& (st0_ptr->sigh & 0x80000000)) {
/* See if we can get a valid NaN from the FPU_REG */
l[0] = (st0_ptr->sigl >> 11) | (st0_ptr->sigh << 21);
l[1] = ((st0_ptr->sigh >> 11) & 0xfffff);
if (!(st0_ptr->sigh & 0x40000000)) {
/* It is a signalling NaN */
EXCEPTION(EX_Invalid);
if (!(control_word & CW_Invalid))
return 0;
l[1] |= (0x40000000 >> 11);
}
l[1] |= 0x7ff00000;
} else {
/* It is an unsupported data type */
EXCEPTION(EX_Invalid);
if (!(control_word & CW_Invalid))
return 0;
l[1] = 0xfff80000;
}
}
} else if (st0_tag == TAG_Empty) {
/* Empty register (stack underflow) */
EXCEPTION(EX_StackUnder);
if (control_word & CW_Invalid) {
/* The masked response */
/* Put out the QNaN indefinite */
RE_ENTRANT_CHECK_OFF;
FPU_access_ok(dfloat, 8);
FPU_put_user(0, (unsigned long __user *)dfloat);
FPU_put_user(0xfff80000,
1 + (unsigned long __user *)dfloat);
RE_ENTRANT_CHECK_ON;
return 1;
} else
return 0;
}
if (getsign(st0_ptr))
l[1] |= 0x80000000;
RE_ENTRANT_CHECK_OFF;
FPU_access_ok(dfloat, 8);
FPU_put_user(l[0], (unsigned long __user *)dfloat);
FPU_put_user(l[1], 1 + (unsigned long __user *)dfloat);
RE_ENTRANT_CHECK_ON;
return 1;
}
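/*
 * Editorial note: the register keeps 64 significand bits but a double
 * holds only 53, so the low 11 bits of sigl are the rounding discard.
 * In the RC_RND case above, (sigl & 0x7ff) > 0x400 is "more than half
 * way" and (sigl & 0xc00) == 0xc00 is "exactly half way with the lowest
 * kept bit odd", which together give round-to-nearest-even.  A minimal
 * sketch of the final packing (pack_double is a hypothetical helper,
 * not part of this file):
 */
#if 0
static void pack_double(unsigned long sigh, unsigned long sigl,
			int exp, int negative, unsigned long out[2])
{
	out[0] = (sigl >> 11) | (sigh << 21);	/* low 32 mantissa bits */
	out[1] = (sigh >> 11) & 0xfffff;	/* high 20 mantissa bits */
	out[1] |= ((exp + DOUBLE_Ebias) & 0x7ff) << 20;
	if (negative)
		out[1] |= 0x80000000;
}
#endif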
/* Put a float into user memory */
int FPU_store_single(FPU_REG *st0_ptr, u_char st0_tag, float __user *single)
{
long templ = 0;
unsigned long increment = 0; /* avoid gcc warnings */
int precision_loss;
int exp;
FPU_REG tmp;
if (st0_tag == TAG_Valid) {
reg_copy(st0_ptr, &tmp);
exp = exponent(&tmp);
if (exp < SINGLE_Emin) {
addexponent(&tmp, -SINGLE_Emin + 23); /* largest exp to be 22 */
denormal_arg:
if ((precision_loss = FPU_round_to_int(&tmp, st0_tag))) {
#ifdef PECULIAR_486
/* Did it round to a non-denormal ? */
/* This behaviour might be regarded as peculiar: it appears
   that the 80486 rounds to the destination precision, then
   converts to decide underflow. */
if (!((tmp.sigl == 0x00800000) &&
      ((st0_ptr->sigh & 0x000000ff) || st0_ptr->sigl)))
#endif /* PECULIAR_486 */
{
EXCEPTION(EX_Underflow);
/* This is a special case: see sec 16.2.5.1 of
the 80486 book */
if (!(control_word & CW_Underflow))
return 0;
}
EXCEPTION(precision_loss);
if (!(control_word & CW_Precision))
return 0;
}
templ = tmp.sigl;
} else {
if (tmp.sigl | (tmp.sigh & 0x000000ff)) {
unsigned long sigh = tmp.sigh;
unsigned long sigl = tmp.sigl;
precision_loss = 1;
switch (control_word & CW_RC) {
case RC_RND:
increment = ((sigh & 0xff) > 0x80)	/* more than half */
    || (((sigh & 0xff) == 0x80) && sigl)	/* more than half */
    || ((sigh & 0x180) == 0x180);	/* round to even */
break;
case RC_DOWN: /* towards -infinity */
increment = signpositive(&tmp)
? 0 : (sigl | (sigh & 0xff));
break;
case RC_UP: /* towards +infinity */
increment = signpositive(&tmp)
? (sigl | (sigh & 0xff)) : 0;
break;
case RC_CHOP:
increment = 0;
break;
}
/* Truncate part of the mantissa */
tmp.sigl = 0;
if (increment) {
if (sigh >= 0xffffff00) {
/* The sigh part overflows */
tmp.sigh = 0x80000000;
exp++;
if (exp >= EXP_OVER)
goto overflow;
} else {
tmp.sigh &= 0xffffff00;
tmp.sigh += 0x100;
}
} else {
tmp.sigh &= 0xffffff00; /* Finish the truncation */
}
} else
precision_loss = 0;
templ = (tmp.sigh >> 8) & 0x007fffff;
if (exp > SINGLE_Emax) {
overflow:
EXCEPTION(EX_Overflow);
if (!(control_word & CW_Overflow))
return 0;
set_precision_flag_up();
if (!(control_word & CW_Precision))
return 0;
/* This is a special case: see sec 16.2.5.1 of the 80486 book. */
/* Masked response is overflow to infinity. */
templ = 0x7f800000;
} else {
if (precision_loss) {
if (increment)
set_precision_flag_up();
else
set_precision_flag_down();
}
/* Add the exponent */
templ |= ((exp + SINGLE_Ebias) & 0xff) << 23;
}
}
} else if (st0_tag == TAG_Zero) {
templ = 0;
} else if (st0_tag == TAG_Special) {
st0_tag = FPU_Special(st0_ptr);
if (st0_tag == TW_Denormal) {
reg_copy(st0_ptr, &tmp);
/* A denormal will always underflow. */
#ifndef PECULIAR_486
/* An 80486 is supposed to be able to generate
a denormal exception here, but... */
/* Underflow has priority. */
if (control_word & CW_Underflow)
denormal_operand();
#endif /* PECULIAR_486 */
goto denormal_arg;
} else if (st0_tag == TW_Infinity) {
templ = 0x7f800000;
} else if (st0_tag == TW_NaN) {
/* Is it really a NaN ? */
if ((exponent(st0_ptr) == EXP_OVER)
&& (st0_ptr->sigh & 0x80000000)) {
/* See if we can get a valid NaN from the FPU_REG */
templ = st0_ptr->sigh >> 8;
if (!(st0_ptr->sigh & 0x40000000)) {
/* It is a signalling NaN */
EXCEPTION(EX_Invalid);
if (!(control_word & CW_Invalid))
return 0;
templ |= (0x40000000 >> 8);
}
templ |= 0x7f800000;
} else {
/* It is an unsupported data type */
EXCEPTION(EX_Invalid);
if (!(control_word & CW_Invalid))
return 0;
templ = 0xffc00000;
}
}
#ifdef PARANOID
else {
EXCEPTION(EX_INTERNAL | 0x164);
return 0;
}
#endif
} else if (st0_tag == TAG_Empty) {
/* Empty register (stack underflow) */
EXCEPTION(EX_StackUnder);
if (control_word & CW_Invalid) {
/* The masked response */
/* Put out the QNaN indefinite */
RE_ENTRANT_CHECK_OFF;
FPU_access_ok(single, 4);
FPU_put_user(0xffc00000,
(unsigned long __user *)single);
RE_ENTRANT_CHECK_ON;
return 1;
} else
return 0;
}
#ifdef PARANOID
else {
EXCEPTION(EX_INTERNAL | 0x163);
return 0;
}
#endif
if (getsign(st0_ptr))
templ |= 0x80000000;
RE_ENTRANT_CHECK_OFF;
FPU_access_ok(single, 4);
FPU_put_user(templ, (unsigned long __user *)single);
RE_ENTRANT_CHECK_ON;
return 1;
}
/* Put a long long into user memory */
int FPU_store_int64(FPU_REG *st0_ptr, u_char st0_tag, long long __user *d)
{
FPU_REG t;
long long tll;
int precision_loss;
if (st0_tag == TAG_Empty) {
/* Empty register (stack underflow) */
EXCEPTION(EX_StackUnder);
goto invalid_operand;
} else if (st0_tag == TAG_Special) {
st0_tag = FPU_Special(st0_ptr);
if ((st0_tag == TW_Infinity) || (st0_tag == TW_NaN)) {
EXCEPTION(EX_Invalid);
goto invalid_operand;
}
}
reg_copy(st0_ptr, &t);
precision_loss = FPU_round_to_int(&t, st0_tag);
((long *)&tll)[0] = t.sigl;
((long *)&tll)[1] = t.sigh;
if ((precision_loss == 1) ||
((t.sigh & 0x80000000) &&
!((t.sigh == 0x80000000) && (t.sigl == 0) && signnegative(&t)))) {
EXCEPTION(EX_Invalid);
/* This is a special case: see sec 16.2.5.1 of the 80486 book */
invalid_operand:
if (control_word & CW_Invalid) {
/* Produce something like QNaN "indefinite" */
tll = 0x8000000000000000LL;
} else
return 0;
} else {
if (precision_loss)
set_precision_flag(precision_loss);
if (signnegative(&t))
tll = -tll;
}
RE_ENTRANT_CHECK_OFF;
FPU_access_ok(d, 8);
if (copy_to_user(d, &tll, 8))
FPU_abort;
RE_ENTRANT_CHECK_ON;
return 1;
}
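/*
 * Editorial note on the test above: after rounding, a set bit 63 means
 * the magnitude no longer fits in a signed 64-bit integer -- except for
 * the single pattern sigh:sigl == 0x80000000:00000000 with a negative
 * sign, which is exactly -2^63 and is allowed through.
 */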
/* Put a long into user memory */
int FPU_store_int32(FPU_REG *st0_ptr, u_char st0_tag, long __user *d)
{
FPU_REG t;
int precision_loss;
if (st0_tag == TAG_Empty) {
/* Empty register (stack underflow) */
EXCEPTION(EX_StackUnder);
goto invalid_operand;
} else if (st0_tag == TAG_Special) {
st0_tag = FPU_Special(st0_ptr);
if ((st0_tag == TW_Infinity) || (st0_tag == TW_NaN)) {
EXCEPTION(EX_Invalid);
goto invalid_operand;
}
}
reg_copy(st0_ptr, &t);
precision_loss = FPU_round_to_int(&t, st0_tag);
if (t.sigh ||
((t.sigl & 0x80000000) &&
!((t.sigl == 0x80000000) && signnegative(&t)))) {
EXCEPTION(EX_Invalid);
/* This is a special case: see sec 16.2.5.1 of the 80486 book */
invalid_operand:
if (control_word & CW_Invalid) {
/* Produce something like QNaN "indefinite" */
t.sigl = 0x80000000;
} else
return 0;
} else {
if (precision_loss)
set_precision_flag(precision_loss);
if (signnegative(&t))
t.sigl = -(long)t.sigl;
}
RE_ENTRANT_CHECK_OFF;
FPU_access_ok(d, 4);
FPU_put_user(t.sigl, (unsigned long __user *)d);
RE_ENTRANT_CHECK_ON;
return 1;
}
/* Put a short into user memory */
int FPU_store_int16(FPU_REG *st0_ptr, u_char st0_tag, short __user *d)
{
FPU_REG t;
int precision_loss;
if (st0_tag == TAG_Empty) {
/* Empty register (stack underflow) */
EXCEPTION(EX_StackUnder);
goto invalid_operand;
} else if (st0_tag == TAG_Special) {
st0_tag = FPU_Special(st0_ptr);
if ((st0_tag == TW_Infinity) || (st0_tag == TW_NaN)) {
EXCEPTION(EX_Invalid);
goto invalid_operand;
}
}
reg_copy(st0_ptr, &t);
precision_loss = FPU_round_to_int(&t, st0_tag);
if (t.sigh ||
((t.sigl & 0xffff8000) &&
!((t.sigl == 0x8000) && signnegative(&t)))) {
EXCEPTION(EX_Invalid);
/* This is a special case: see sec 16.2.5.1 of the 80486 book */
invalid_operand:
if (control_word & CW_Invalid) {
/* Produce something like QNaN "indefinite" */
t.sigl = 0x8000;
} else
return 0;
} else {
if (precision_loss)
set_precision_flag(precision_loss);
if (signnegative(&t))
t.sigl = -t.sigl;
}
RE_ENTRANT_CHECK_OFF;
FPU_access_ok(d, 2);
FPU_put_user((short)t.sigl, d);
RE_ENTRANT_CHECK_ON;
return 1;
}
/* Put a packed bcd array into user memory */
int FPU_store_bcd(FPU_REG *st0_ptr, u_char st0_tag, u_char __user *d)
{
FPU_REG t;
unsigned long long ll;
u_char b;
int i, precision_loss;
u_char sign = (getsign(st0_ptr) == SIGN_NEG) ? 0x80 : 0;
if (st0_tag == TAG_Empty) {
/* Empty register (stack underflow) */
EXCEPTION(EX_StackUnder);
goto invalid_operand;
} else if (st0_tag == TAG_Special) {
st0_tag = FPU_Special(st0_ptr);
if ((st0_tag == TW_Infinity) || (st0_tag == TW_NaN)) {
EXCEPTION(EX_Invalid);
goto invalid_operand;
}
}
reg_copy(st0_ptr, &t);
precision_loss = FPU_round_to_int(&t, st0_tag);
ll = significand(&t);
/* Check for overflow, by comparing with 999999999999999999 decimal. */
if ((t.sigh > 0x0de0b6b3) ||
((t.sigh == 0x0de0b6b3) && (t.sigl > 0xa763ffff))) {
EXCEPTION(EX_Invalid);
/* This is a special case: see sec 16.2.5.1 of the 80486 book */
invalid_operand:
if (control_word & CW_Invalid) {
/* Produce the QNaN "indefinite" */
RE_ENTRANT_CHECK_OFF;
FPU_access_ok(d, 10);
for (i = 0; i < 7; i++)
FPU_put_user(0, d + i); /* These bytes "undefined" */
FPU_put_user(0xc0, d + 7); /* This byte "undefined" */
FPU_put_user(0xff, d + 8);
FPU_put_user(0xff, d + 9);
RE_ENTRANT_CHECK_ON;
return 1;
} else
return 0;
} else if (precision_loss) {
/* Precision loss doesn't stop the data transfer */
set_precision_flag(precision_loss);
}
RE_ENTRANT_CHECK_OFF;
FPU_access_ok(d, 10);
RE_ENTRANT_CHECK_ON;
for (i = 0; i < 9; i++) {
b = FPU_div_small(&ll, 10);
b |= (FPU_div_small(&ll, 10)) << 4;
RE_ENTRANT_CHECK_OFF;
FPU_put_user(b, d + i);
RE_ENTRANT_CHECK_ON;
}
RE_ENTRANT_CHECK_OFF;
FPU_put_user(sign, d + 9);
RE_ENTRANT_CHECK_ON;
return 1;
}
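/*
 * Editorial check of the overflow constant above: eighteen packed BCD
 * digits can hold at most 10^18 - 1 = 999999999999999999, which is
 * exactly 0x0de0b6b3a763ffff; the sigh/sigl comparison is a 64-bit
 * "ll > 10^18 - 1" test done in 32-bit halves.
 */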
/*===========================================================================*/
/* r gets mangled such that sig is int, sign:
it is NOT normalized */
/* The return value (in eax) is zero if the result is exact,
if bits are changed due to rounding, truncation, etc, then
a non-zero value is returned */
/* Overflow is signaled by a non-zero return value (in eax).
In the case of overflow, the returned significand always has the
largest possible value */
int FPU_round_to_int(FPU_REG *r, u_char tag)
{
u_char very_big;
unsigned eax;
if (tag == TAG_Zero) {
/* Make sure that zero is returned */
significand(r) = 0;
return 0; /* o.k. */
}
if (exponent(r) > 63) {
r->sigl = r->sigh = ~0; /* The largest representable number */
return 1; /* overflow */
}
eax = FPU_shrxs(&r->sigl, 63 - exponent(r));
very_big = !(~(r->sigh) | ~(r->sigl)); /* test for 0xfff...fff */
#define half_or_more (eax & 0x80000000)
#define frac_part (eax)
#define more_than_half ((eax & 0x80000001) == 0x80000001)
switch (control_word & CW_RC) {
case RC_RND:
if (more_than_half /* nearest */
|| (half_or_more && (r->sigl & 1))) { /* odd -> even */
if (very_big)
return 1; /* overflow */
significand(r)++;
return PRECISION_LOST_UP;
}
break;
case RC_DOWN:
if (frac_part && getsign(r)) {
if (very_big)
return 1; /* overflow */
significand(r)++;
return PRECISION_LOST_UP;
}
break;
case RC_UP:
if (frac_part && !getsign(r)) {
if (very_big)
return 1; /* overflow */
significand(r)++;
return PRECISION_LOST_UP;
}
break;
case RC_CHOP:
break;
}
return eax ? PRECISION_LOST_DOWN : 0;
}
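/*
 * Illustrative sketch (editorial, not part of the original source): the
 * rounding decision above, assuming FPU_shrxs() returns the shifted-out
 * bits with everything below bit 31 OR-ed ("sticky") into bit 0 -- that
 * is what lets the 0x80000001 mask distinguish "more than half" from
 * "exactly half".  Incrementing the magnitude of a negative number
 * moves it toward -infinity, hence the sign tests.
 */
#if 0
static int round_up_magnitude(unsigned frac, int odd_lsb, int neg, int rc)
{
	switch (rc) {
	case RC_RND:		/* nearest, ties to even */
		return ((frac & 0x80000001) == 0x80000001) ||
		       ((frac & 0x80000000) && odd_lsb);
	case RC_DOWN:		/* toward -infinity */
		return frac && neg;
	case RC_UP:		/* toward +infinity */
		return frac && !neg;
	default:		/* RC_CHOP: truncate */
		return 0;
	}
}
#endif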
/*===========================================================================*/
u_char __user *fldenv(fpu_addr_modes addr_modes, u_char __user *s)
{
unsigned short tag_word = 0;
u_char tag;
int i;
if ((addr_modes.default_mode == VM86) ||
((addr_modes.default_mode == PM16)
^ (addr_modes.override.operand_size == OP_SIZE_PREFIX))) {
RE_ENTRANT_CHECK_OFF;
FPU_access_ok(s, 0x0e);
FPU_get_user(control_word, (unsigned short __user *)s);
FPU_get_user(partial_status, (unsigned short __user *)(s + 2));
FPU_get_user(tag_word, (unsigned short __user *)(s + 4));
FPU_get_user(instruction_address.offset,
(unsigned short __user *)(s + 6));
FPU_get_user(instruction_address.selector,
(unsigned short __user *)(s + 8));
FPU_get_user(operand_address.offset,
(unsigned short __user *)(s + 0x0a));
FPU_get_user(operand_address.selector,
(unsigned short __user *)(s + 0x0c));
RE_ENTRANT_CHECK_ON;
s += 0x0e;
if (addr_modes.default_mode == VM86) {
instruction_address.offset +=
    (instruction_address.selector & 0xf000) << 4;
operand_address.offset +=
    (operand_address.selector & 0xf000) << 4;
}
} else {
RE_ENTRANT_CHECK_OFF;
FPU_access_ok(s, 0x1c);
FPU_get_user(control_word, (unsigned short __user *)s);
FPU_get_user(partial_status, (unsigned short __user *)(s + 4));
FPU_get_user(tag_word, (unsigned short __user *)(s + 8));
FPU_get_user(instruction_address.offset,
(unsigned long __user *)(s + 0x0c));
FPU_get_user(instruction_address.selector,
(unsigned short __user *)(s + 0x10));
FPU_get_user(instruction_address.opcode,
(unsigned short __user *)(s + 0x12));
FPU_get_user(operand_address.offset,
(unsigned long __user *)(s + 0x14));
FPU_get_user(operand_address.selector,
(unsigned long __user *)(s + 0x18));
RE_ENTRANT_CHECK_ON;
s += 0x1c;
}
#ifdef PECULIAR_486
control_word &= ~0xe080;
#endif /* PECULIAR_486 */
top = (partial_status >> SW_Top_Shift) & 7;
if (partial_status & ~control_word & CW_Exceptions)
partial_status |= (SW_Summary | SW_Backward);
else
partial_status &= ~(SW_Summary | SW_Backward);
for (i = 0; i < 8; i++) {
tag = tag_word & 3;
tag_word >>= 2;
if (tag == TAG_Empty)
/* New tag is empty. Accept it */
FPU_settag(i, TAG_Empty);
else if (FPU_gettag(i) == TAG_Empty) {
/* Old tag is empty and new tag is not empty. New tag is determined
by old reg contents */
if (exponent(&fpu_register(i)) == -EXTENDED_Ebias) {
if (!(fpu_register(i).sigl | fpu_register(i).sigh))
FPU_settag(i, TAG_Zero);
else
FPU_settag(i, TAG_Special);
} else if (exponent(&fpu_register(i)) ==
0x7fff - EXTENDED_Ebias) {
FPU_settag(i, TAG_Special);
} else if (fpu_register(i).sigh & 0x80000000)
FPU_settag(i, TAG_Valid);
else
FPU_settag(i, TAG_Special); /* An Un-normal */
}
/* Else old tag is not empty and new tag is not empty. Old tag
remains correct */
}
return s;
}
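/*
 * Editorial layout summary for fldenv above (byte offsets, read off the
 * accesses in the two branches):
 *
 *   16-bit form, 0x0e bytes          32-bit form, 0x1c bytes
 *   +0x00 control word               +0x00 control word
 *   +0x02 status word                +0x04 status word
 *   +0x04 tag word                   +0x08 tag word
 *   +0x06 instruction offset         +0x0c instruction offset
 *   +0x08 instruction selector       +0x10 instruction selector
 *   +0x0a operand offset             +0x12 opcode
 *   +0x0c operand selector           +0x14 operand offset
 *                                    +0x18 operand selector
 */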
void FPU_frstor(fpu_addr_modes addr_modes, u_char __user *data_address)
{
int i, regnr;
u_char __user *s = fldenv(addr_modes, data_address);
int offset = (top & 7) * 10, other = 80 - offset;
/* Copy all registers in stack order. */
RE_ENTRANT_CHECK_OFF;
FPU_access_ok(s, 80);
FPU_copy_from_user(register_base + offset, s, other);
if (offset)
FPU_copy_from_user(register_base, s + other, offset);
RE_ENTRANT_CHECK_ON;
for (i = 0; i < 8; i++) {
regnr = (i + top) & 7;
if (FPU_gettag(regnr) != TAG_Empty)
/* The loaded data overrides all other cases. */
FPU_settag(regnr, FPU_tagof(&st(i)));
}
}
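/*
 * Editorial worked example: the saved image holds st(0)..st(7) in stack
 * order, ten bytes each, while register_base holds physical registers.
 * With top == 3, offset is 30 and other is 50: the first 50 saved bytes
 * land in physical registers 3..7 (st(0)..st(4)) and the remaining 30
 * wrap around to physical registers 0..2.
 */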
u_char __user *fstenv(fpu_addr_modes addr_modes, u_char __user *d)
{
if ((addr_modes.default_mode == VM86) ||
((addr_modes.default_mode == PM16)
^ (addr_modes.override.operand_size == OP_SIZE_PREFIX))) {
RE_ENTRANT_CHECK_OFF;
FPU_access_ok(d, 14);
#ifdef PECULIAR_486
FPU_put_user(control_word & ~0xe080, (unsigned long __user *)d);
#else
FPU_put_user(control_word, (unsigned short __user *)d);
#endif /* PECULIAR_486 */
FPU_put_user(status_word(), (unsigned short __user *)(d + 2));
FPU_put_user(fpu_tag_word, (unsigned short __user *)(d + 4));
FPU_put_user(instruction_address.offset,
(unsigned short __user *)(d + 6));
FPU_put_user(operand_address.offset,
(unsigned short __user *)(d + 0x0a));
if (addr_modes.default_mode == VM86) {
FPU_put_user((instruction_address.offset & 0xf0000) >> 4,
    (unsigned short __user *)(d + 8));
FPU_put_user((operand_address.offset & 0xf0000) >> 4,
(unsigned short __user *)(d + 0x0c));
} else {
FPU_put_user(instruction_address.selector,
(unsigned short __user *)(d + 8));
FPU_put_user(operand_address.selector,
(unsigned short __user *)(d + 0x0c));
}
RE_ENTRANT_CHECK_ON;
d += 0x0e;
} else {
RE_ENTRANT_CHECK_OFF;
FPU_access_ok(d, 7 * 4);
#ifdef PECULIAR_486
control_word &= ~0xe080;
/* An 80486 sets nearly all of the reserved bits to 1. */
control_word |= 0xffff0040;
partial_status = status_word() | 0xffff0000;
fpu_tag_word |= 0xffff0000;
I387->soft.fcs &= ~0xf8000000;
I387->soft.fos |= 0xffff0000;
#endif /* PECULIAR_486 */
if (__copy_to_user(d, &control_word, 7 * 4))
FPU_abort;
RE_ENTRANT_CHECK_ON;
d += 0x1c;
}
control_word |= CW_Exceptions;
partial_status &= ~(SW_Summary | SW_Backward);
return d;
}
void fsave(fpu_addr_modes addr_modes, u_char __user *data_address)
{
u_char __user *d;
int offset = (top & 7) * 10, other = 80 - offset;
d = fstenv(addr_modes, data_address);
RE_ENTRANT_CHECK_OFF;
FPU_access_ok(d, 80);
/* Copy all registers in stack order. */
if (__copy_to_user(d, register_base + offset, other))
FPU_abort;
if (offset)
if (__copy_to_user(d + other, register_base, offset))
FPU_abort;
RE_ENTRANT_CHECK_ON;
finit();
}
/*===========================================================================*/
| linux-master | arch/x86/math-emu/reg_ld_str.c |
// SPDX-License-Identifier: GPL-2.0
/*---------------------------------------------------------------------------+
| fpu_etc.c |
| |
| Implement a few FPU instructions. |
| |
| Copyright (C) 1992,1993,1994,1997 |
| W. Metzenthen, 22 Parker St, Ormond, Vic 3163, |
| Australia. E-mail [email protected] |
| |
| |
+---------------------------------------------------------------------------*/
#include "fpu_system.h"
#include "exception.h"
#include "fpu_emu.h"
#include "status_w.h"
#include "reg_constant.h"
static void fchs(FPU_REG *st0_ptr, u_char st0tag)
{
if (st0tag ^ TAG_Empty) {
signbyte(st0_ptr) ^= SIGN_NEG;
clear_C1();
} else
FPU_stack_underflow();
}
static void fabs(FPU_REG *st0_ptr, u_char st0tag)
{
if (st0tag ^ TAG_Empty) {
setpositive(st0_ptr);
clear_C1();
} else
FPU_stack_underflow();
}
static void ftst_(FPU_REG *st0_ptr, u_char st0tag)
{
switch (st0tag) {
case TAG_Zero:
setcc(SW_C3);
break;
case TAG_Valid:
if (getsign(st0_ptr) == SIGN_POS)
setcc(0);
else
setcc(SW_C0);
break;
case TAG_Special:
switch (FPU_Special(st0_ptr)) {
case TW_Denormal:
if (getsign(st0_ptr) == SIGN_POS)
setcc(0);
else
setcc(SW_C0);
if (denormal_operand() < 0) {
#ifdef PECULIAR_486
/* This is weird! */
if (getsign(st0_ptr) == SIGN_POS)
setcc(SW_C3);
#endif /* PECULIAR_486 */
return;
}
break;
case TW_NaN:
setcc(SW_C0 | SW_C2 | SW_C3); /* Operand is not comparable */
EXCEPTION(EX_Invalid);
break;
case TW_Infinity:
if (getsign(st0_ptr) == SIGN_POS)
setcc(0);
else
setcc(SW_C0);
break;
default:
setcc(SW_C0 | SW_C2 | SW_C3); /* Operand is not comparable */
EXCEPTION(EX_INTERNAL | 0x14);
break;
}
break;
case TAG_Empty:
setcc(SW_C0 | SW_C2 | SW_C3);
EXCEPTION(EX_StackUnder);
break;
}
}
static void fxam(FPU_REG *st0_ptr, u_char st0tag)
{
int c = 0;
switch (st0tag) {
case TAG_Empty:
c = SW_C3 | SW_C0;
break;
case TAG_Zero:
c = SW_C3;
break;
case TAG_Valid:
c = SW_C2;
break;
case TAG_Special:
switch (FPU_Special(st0_ptr)) {
case TW_Denormal:
c = SW_C2 | SW_C3; /* Denormal */
break;
case TW_NaN:
/* We also use NaN for unsupported types. */
if ((st0_ptr->sigh & 0x80000000)
&& (exponent(st0_ptr) == EXP_OVER))
c = SW_C0;
break;
case TW_Infinity:
c = SW_C2 | SW_C0;
break;
}
}
if (getsign(st0_ptr) == SIGN_NEG)
c |= SW_C1;
setcc(c);
}
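/*
 * Editorial summary of the fxam condition codes set above; C1 always
 * carries the sign, and C3/C2/C0 encode the class:
 *
 *   C3 C2 C0                      C3 C2 C0
 *    0  0  0  unsupported          0  1  0  valid (normal)
 *    0  0  1  NaN                  0  1  1  infinity
 *    1  0  0  zero                 1  1  0  denormal
 *    1  0  1  empty
 */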
static FUNC_ST0 const fp_etc_table[] = {
fchs, fabs, (FUNC_ST0) FPU_illegal, (FUNC_ST0) FPU_illegal,
ftst_, fxam, (FUNC_ST0) FPU_illegal, (FUNC_ST0) FPU_illegal
};
void FPU_etc(void)
{
(fp_etc_table[FPU_rm]) (&st(0), FPU_gettag0());
}
| linux-master | arch/x86/math-emu/fpu_etc.c |
// SPDX-License-Identifier: GPL-2.0
/*---------------------------------------------------------------------------+
| reg_constant.c |
| |
| All of the constant FPU_REGs |
| |
| Copyright (C) 1992,1993,1994,1997 |
| W. Metzenthen, 22 Parker St, Ormond, Vic 3163, |
| Australia. E-mail [email protected] |
| |
| |
+---------------------------------------------------------------------------*/
#include "fpu_system.h"
#include "fpu_emu.h"
#include "status_w.h"
#include "reg_constant.h"
#include "control_w.h"
#define MAKE_REG(s, e, l, h) { l, h, \
(u16)((EXTENDED_Ebias+(e)) | ((SIGN_##s != 0)*0x8000)) }
FPU_REG const CONST_1 = MAKE_REG(POS, 0, 0x00000000, 0x80000000);
#if 0
FPU_REG const CONST_2 = MAKE_REG(POS, 1, 0x00000000, 0x80000000);
FPU_REG const CONST_HALF = MAKE_REG(POS, -1, 0x00000000, 0x80000000);
#endif /* 0 */
static FPU_REG const CONST_L2T = MAKE_REG(POS, 1, 0xcd1b8afe, 0xd49a784b);
static FPU_REG const CONST_L2E = MAKE_REG(POS, 0, 0x5c17f0bc, 0xb8aa3b29);
FPU_REG const CONST_PI = MAKE_REG(POS, 1, 0x2168c235, 0xc90fdaa2);
FPU_REG const CONST_PI2 = MAKE_REG(POS, 0, 0x2168c235, 0xc90fdaa2);
FPU_REG const CONST_PI4 = MAKE_REG(POS, -1, 0x2168c235, 0xc90fdaa2);
static FPU_REG const CONST_LG2 = MAKE_REG(POS, -2, 0xfbcff799, 0x9a209a84);
static FPU_REG const CONST_LN2 = MAKE_REG(POS, -1, 0xd1cf79ac, 0xb17217f7);
/* Extra bits to take pi/2 to more than 128 bits precision. */
FPU_REG const CONST_PI2extra = MAKE_REG(NEG, -66,
0xfc8f8cbb, 0xece675d1);
/* Only the sign (and tag) is used in internal zeroes */
FPU_REG const CONST_Z = MAKE_REG(POS, EXP_UNDER, 0x0, 0x0);
/* Only the sign and significand (and tag) are used in internal NaNs */
/* The 80486 never generates one of these
FPU_REG const CONST_SNAN = MAKE_REG(POS, EXP_OVER, 0x00000001, 0x80000000);
*/
/* This is the real indefinite QNaN */
FPU_REG const CONST_QNaN = MAKE_REG(NEG, EXP_OVER, 0x00000000, 0xC0000000);
/* Only the sign (and tag) is used in internal infinities */
FPU_REG const CONST_INF = MAKE_REG(POS, EXP_OVER, 0x00000000, 0x80000000);
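/*
 * Editorial worked example: MAKE_REG packs a 64-bit significand (low
 * word, high word) with a biased exponent and sign.  CONST_PI is
 * MAKE_REG(POS, 1, 0x2168c235, 0xc90fdaa2): the significand
 * 0xc90fdaa22168c235, read as a 1.63 fixed-point value in [1,2),
 * times 2^1 -- pi correctly rounded to 64 significand bits.
 */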
static void fld_const(FPU_REG const * c, int adj, u_char tag)
{
FPU_REG *st_new_ptr;
if (STACK_OVERFLOW) {
FPU_stack_overflow();
return;
}
push();
reg_copy(c, st_new_ptr);
st_new_ptr->sigl += adj; /* For all our fldxxx constants, we don't need to
borrow or carry. */
FPU_settag0(tag);
clear_C1();
}
/* A fast way to find out whether x is one of RC_DOWN or RC_CHOP
(and not one of RC_RND or RC_UP).
*/
#define DOWN_OR_CHOP(x) (x & RC_DOWN)
static void fld1(int rc)
{
fld_const(&CONST_1, 0, TAG_Valid);
}
static void fldl2t(int rc)
{
fld_const(&CONST_L2T, (rc == RC_UP) ? 1 : 0, TAG_Valid);
}
static void fldl2e(int rc)
{
fld_const(&CONST_L2E, DOWN_OR_CHOP(rc) ? -1 : 0, TAG_Valid);
}
static void fldpi(int rc)
{
fld_const(&CONST_PI, DOWN_OR_CHOP(rc) ? -1 : 0, TAG_Valid);
}
static void fldlg2(int rc)
{
fld_const(&CONST_LG2, DOWN_OR_CHOP(rc) ? -1 : 0, TAG_Valid);
}
static void fldln2(int rc)
{
fld_const(&CONST_LN2, DOWN_OR_CHOP(rc) ? -1 : 0, TAG_Valid);
}
static void fldz(int rc)
{
fld_const(&CONST_Z, 0, TAG_Zero);
}
typedef void (*FUNC_RC) (int);
static FUNC_RC constants_table[] = {
fld1, fldl2t, fldl2e, fldpi, fldlg2, fldln2, fldz, (FUNC_RC) FPU_illegal
};
void fconst(void)
{
(constants_table[FPU_rm]) (control_word & CW_RC);
}
| linux-master | arch/x86/math-emu/reg_constant.c |
// SPDX-License-Identifier: GPL-2.0
/*---------------------------------------------------------------------------+
| reg_add_sub.c |
| |
| Functions to add or subtract two registers and put the result in a third. |
| |
| Copyright (C) 1992,1993,1997 |
| W. Metzenthen, 22 Parker St, Ormond, Vic 3163, Australia |
| E-mail [email protected] |
| |
| |
+---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------+
| For each function, the destination may be any FPU_REG, including one of |
| the source FPU_REGs. |
| Each function returns 0 if the answer is o.k., otherwise a non-zero |
| value is returned, indicating either an exception condition or an |
| internal error. |
+---------------------------------------------------------------------------*/
#include "exception.h"
#include "reg_constant.h"
#include "fpu_emu.h"
#include "control_w.h"
#include "fpu_system.h"
static
int add_sub_specials(FPU_REG const *a, u_char taga, u_char signa,
FPU_REG const *b, u_char tagb, u_char signb,
FPU_REG * dest, int deststnr, int control_w);
/*
Operates on st(0) and st(n), or on st(0) and temporary data.
The destination must be one of the source st(x).
*/
int FPU_add(FPU_REG const *b, u_char tagb, int deststnr, int control_w)
{
FPU_REG *a = &st(0);
FPU_REG *dest = &st(deststnr);
u_char signb = getsign(b);
u_char taga = FPU_gettag0();
u_char signa = getsign(a);
u_char saved_sign = getsign(dest);
int diff, tag, expa, expb;
if (!(taga | tagb)) {
expa = exponent(a);
expb = exponent(b);
valid_add:
/* Both registers are valid */
if (!(signa ^ signb)) {
/* signs are the same */
tag =
FPU_u_add(a, b, dest, control_w, signa, expa, expb);
} else {
/* The signs are different, so do a subtraction */
diff = expa - expb;
if (!diff) {
diff = a->sigh - b->sigh; /* This works only if the ms bits
are identical. */
if (!diff) {
diff = a->sigl > b->sigl;
if (!diff)
diff = -(a->sigl < b->sigl);
}
}
if (diff > 0) {
tag =
FPU_u_sub(a, b, dest, control_w, signa,
expa, expb);
} else if (diff < 0) {
tag =
FPU_u_sub(b, a, dest, control_w, signb,
expb, expa);
} else {
FPU_copy_to_regi(&CONST_Z, TAG_Zero, deststnr);
/* sign depends upon rounding mode */
setsign(dest, ((control_w & CW_RC) != RC_DOWN)
? SIGN_POS : SIGN_NEG);
return TAG_Zero;
}
}
if (tag < 0) {
setsign(dest, saved_sign);
return tag;
}
FPU_settagi(deststnr, tag);
return tag;
}
if (taga == TAG_Special)
taga = FPU_Special(a);
if (tagb == TAG_Special)
tagb = FPU_Special(b);
if (((taga == TAG_Valid) && (tagb == TW_Denormal))
|| ((taga == TW_Denormal) && (tagb == TAG_Valid))
|| ((taga == TW_Denormal) && (tagb == TW_Denormal))) {
FPU_REG x, y;
if (denormal_operand() < 0)
return FPU_Exception;
FPU_to_exp16(a, &x);
FPU_to_exp16(b, &y);
a = &x;
b = &y;
expa = exponent16(a);
expb = exponent16(b);
goto valid_add;
}
if ((taga == TW_NaN) || (tagb == TW_NaN)) {
if (deststnr == 0)
return real_2op_NaN(b, tagb, deststnr, a);
else
return real_2op_NaN(a, taga, deststnr, a);
}
return add_sub_specials(a, taga, signa, b, tagb, signb,
dest, deststnr, control_w);
}
/* Subtract b from a. (a-b) -> dest */
int FPU_sub(int flags, int rm, int control_w)
{
FPU_REG const *a, *b;
FPU_REG *dest;
u_char taga, tagb, signa, signb, saved_sign, sign;
int diff, tag = 0, expa, expb, deststnr;
a = &st(0);
taga = FPU_gettag0();
deststnr = 0;
if (flags & LOADED) {
b = (FPU_REG *) rm;
tagb = flags & 0x0f;
} else {
b = &st(rm);
tagb = FPU_gettagi(rm);
if (flags & DEST_RM)
deststnr = rm;
}
signa = getsign(a);
signb = getsign(b);
if (flags & REV) {
signa ^= SIGN_NEG;
signb ^= SIGN_NEG;
}
dest = &st(deststnr);
saved_sign = getsign(dest);
if (!(taga | tagb)) {
expa = exponent(a);
expb = exponent(b);
valid_subtract:
/* Both registers are valid */
diff = expa - expb;
if (!diff) {
diff = a->sigh - b->sigh; /* Works only if ms bits are identical */
if (!diff) {
diff = a->sigl > b->sigl;
if (!diff)
diff = -(a->sigl < b->sigl);
}
}
switch ((((int)signa) * 2 + signb) / SIGN_NEG) {
case 0: /* P - P */
case 3: /* N - N */
if (diff > 0) {
/* |a| > |b| */
tag =
FPU_u_sub(a, b, dest, control_w, signa,
expa, expb);
} else if (diff == 0) {
FPU_copy_to_regi(&CONST_Z, TAG_Zero, deststnr);
/* sign depends upon rounding mode */
setsign(dest, ((control_w & CW_RC) != RC_DOWN)
? SIGN_POS : SIGN_NEG);
return TAG_Zero;
} else {
sign = signa ^ SIGN_NEG;
tag =
FPU_u_sub(b, a, dest, control_w, sign, expb,
expa);
}
break;
case 1: /* P - N */
tag =
FPU_u_add(a, b, dest, control_w, SIGN_POS, expa,
expb);
break;
case 2: /* N - P */
tag =
FPU_u_add(a, b, dest, control_w, SIGN_NEG, expa,
expb);
break;
#ifdef PARANOID
default:
EXCEPTION(EX_INTERNAL | 0x111);
return -1;
#endif
}
if (tag < 0) {
setsign(dest, saved_sign);
return tag;
}
FPU_settagi(deststnr, tag);
return tag;
}
if (taga == TAG_Special)
taga = FPU_Special(a);
if (tagb == TAG_Special)
tagb = FPU_Special(b);
if (((taga == TAG_Valid) && (tagb == TW_Denormal))
|| ((taga == TW_Denormal) && (tagb == TAG_Valid))
|| ((taga == TW_Denormal) && (tagb == TW_Denormal))) {
FPU_REG x, y;
if (denormal_operand() < 0)
return FPU_Exception;
FPU_to_exp16(a, &x);
FPU_to_exp16(b, &y);
a = &x;
b = &y;
expa = exponent16(a);
expb = exponent16(b);
goto valid_subtract;
}
if ((taga == TW_NaN) || (tagb == TW_NaN)) {
FPU_REG const *d1, *d2;
if (flags & REV) {
d1 = b;
d2 = a;
} else {
d1 = a;
d2 = b;
}
if (flags & LOADED)
return real_2op_NaN(b, tagb, deststnr, d1);
if (flags & DEST_RM)
return real_2op_NaN(a, taga, deststnr, d2);
else
return real_2op_NaN(b, tagb, deststnr, d2);
}
return add_sub_specials(a, taga, signa, b, tagb, signb ^ SIGN_NEG,
dest, deststnr, control_w);
}
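/*
 * Editorial note on the switch above: with SIGN_POS == 0 and SIGN_NEG
 * == 0x80 here, (signa * 2 + signb) / SIGN_NEG maps the four sign pairs
 * to 0 (P-P), 1 (P-N), 2 (N-P) and 3 (N-N): same-sign operands become a
 * magnitude subtraction, opposite-sign operands a magnitude addition
 * carrying the sign of the minuend.
 */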
static
int add_sub_specials(FPU_REG const *a, u_char taga, u_char signa,
FPU_REG const *b, u_char tagb, u_char signb,
FPU_REG * dest, int deststnr, int control_w)
{
if (((taga == TW_Denormal) || (tagb == TW_Denormal))
&& (denormal_operand() < 0))
return FPU_Exception;
if (taga == TAG_Zero) {
if (tagb == TAG_Zero) {
/* Both are zero, result will be zero. */
u_char different_signs = signa ^ signb;
FPU_copy_to_regi(a, TAG_Zero, deststnr);
if (different_signs) {
/* Signs are different. */
/* Sign of answer depends upon rounding mode. */
setsign(dest, ((control_w & CW_RC) != RC_DOWN)
? SIGN_POS : SIGN_NEG);
} else
setsign(dest, signa); /* signa may differ from the sign of a. */
return TAG_Zero;
} else {
reg_copy(b, dest);
if ((tagb == TW_Denormal) && (b->sigh & 0x80000000)) {
/* A pseudoDenormal, convert it. */
addexponent(dest, 1);
tagb = TAG_Valid;
} else if (tagb > TAG_Empty)
tagb = TAG_Special;
setsign(dest, signb); /* signb may differ from the sign of b. */
FPU_settagi(deststnr, tagb);
return tagb;
}
} else if (tagb == TAG_Zero) {
reg_copy(a, dest);
if ((taga == TW_Denormal) && (a->sigh & 0x80000000)) {
/* A pseudoDenormal */
addexponent(dest, 1);
taga = TAG_Valid;
} else if (taga > TAG_Empty)
taga = TAG_Special;
setsign(dest, signa); /* signa may differ from the sign of a. */
FPU_settagi(deststnr, taga);
return taga;
} else if (taga == TW_Infinity) {
if ((tagb != TW_Infinity) || (signa == signb)) {
FPU_copy_to_regi(a, TAG_Special, deststnr);
setsign(dest, signa); /* signa may differ from the sign of a. */
return taga;
}
/* Infinity-Infinity is undefined. */
return arith_invalid(deststnr);
} else if (tagb == TW_Infinity) {
FPU_copy_to_regi(b, TAG_Special, deststnr);
setsign(dest, signb); /* signb may differ from the sign of b. */
return tagb;
}
#ifdef PARANOID
EXCEPTION(EX_INTERNAL | 0x101);
#endif
return FPU_Exception;
}
| linux-master | arch/x86/math-emu/reg_add_sub.c |
// SPDX-License-Identifier: GPL-2.0
/*---------------------------------------------------------------------------+
| reg_divide.c |
| |
| Divide one FPU_REG by another and put the result in a destination FPU_REG.|
| |
| Copyright (C) 1996 |
| W. Metzenthen, 22 Parker St, Ormond, Vic 3163, Australia |
| E-mail [email protected] |
| |
| Return value is the tag of the answer, or-ed with FPU_Exception if |
| one was raised, or -1 on internal error. |
| |
+---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------+
| The destination may be any FPU_REG, including one of the source FPU_REGs. |
+---------------------------------------------------------------------------*/
#include "exception.h"
#include "reg_constant.h"
#include "fpu_emu.h"
#include "fpu_system.h"
/*
Divide one register by another and put the result into a third register.
*/
int FPU_div(int flags, int rm, int control_w)
{
FPU_REG x, y;
FPU_REG const *a, *b, *st0_ptr, *st_ptr;
FPU_REG *dest;
u_char taga, tagb, signa, signb, sign, saved_sign;
int tag, deststnr;
if (flags & DEST_RM)
deststnr = rm;
else
deststnr = 0;
if (flags & REV) {
b = &st(0);
st0_ptr = b;
tagb = FPU_gettag0();
if (flags & LOADED) {
a = (FPU_REG *) rm;
taga = flags & 0x0f;
} else {
a = &st(rm);
st_ptr = a;
taga = FPU_gettagi(rm);
}
} else {
a = &st(0);
st0_ptr = a;
taga = FPU_gettag0();
if (flags & LOADED) {
b = (FPU_REG *) rm;
tagb = flags & 0x0f;
} else {
b = &st(rm);
st_ptr = b;
tagb = FPU_gettagi(rm);
}
}
signa = getsign(a);
signb = getsign(b);
sign = signa ^ signb;
dest = &st(deststnr);
saved_sign = getsign(dest);
if (!(taga | tagb)) {
/* Both regs Valid, this should be the most common case. */
reg_copy(a, &x);
reg_copy(b, &y);
setpositive(&x);
setpositive(&y);
tag = FPU_u_div(&x, &y, dest, control_w, sign);
if (tag < 0)
return tag;
FPU_settagi(deststnr, tag);
return tag;
}
if (taga == TAG_Special)
taga = FPU_Special(a);
if (tagb == TAG_Special)
tagb = FPU_Special(b);
if (((taga == TAG_Valid) && (tagb == TW_Denormal))
|| ((taga == TW_Denormal) && (tagb == TAG_Valid))
|| ((taga == TW_Denormal) && (tagb == TW_Denormal))) {
if (denormal_operand() < 0)
return FPU_Exception;
FPU_to_exp16(a, &x);
FPU_to_exp16(b, &y);
tag = FPU_u_div(&x, &y, dest, control_w, sign);
if (tag < 0)
return tag;
FPU_settagi(deststnr, tag);
return tag;
} else if ((taga <= TW_Denormal) && (tagb <= TW_Denormal)) {
if (tagb != TAG_Zero) {
/* Want to find Zero/Valid */
if (tagb == TW_Denormal) {
if (denormal_operand() < 0)
return FPU_Exception;
}
/* The result is zero. */
FPU_copy_to_regi(&CONST_Z, TAG_Zero, deststnr);
setsign(dest, sign);
return TAG_Zero;
}
/* We have an exception condition, either 0/0 or Valid/Zero. */
if (taga == TAG_Zero) {
/* 0/0 */
return arith_invalid(deststnr);
}
/* Valid/Zero */
return FPU_divide_by_zero(deststnr, sign);
}
/* Must have infinities, NaNs, etc */
else if ((taga == TW_NaN) || (tagb == TW_NaN)) {
if (flags & LOADED)
return real_2op_NaN((FPU_REG *) rm, flags & 0x0f, 0,
st0_ptr);
if (flags & DEST_RM) {
int tag;
tag = FPU_gettag0();
if (tag == TAG_Special)
tag = FPU_Special(st0_ptr);
return real_2op_NaN(st0_ptr, tag, rm,
(flags & REV) ? st0_ptr : &st(rm));
} else {
int tag;
tag = FPU_gettagi(rm);
if (tag == TAG_Special)
tag = FPU_Special(&st(rm));
return real_2op_NaN(&st(rm), tag, 0,
(flags & REV) ? st0_ptr : &st(rm));
}
} else if (taga == TW_Infinity) {
if (tagb == TW_Infinity) {
/* infinity/infinity */
return arith_invalid(deststnr);
} else {
/* tagb must be Valid or Zero */
if ((tagb == TW_Denormal) && (denormal_operand() < 0))
return FPU_Exception;
/* Infinity divided by Zero or Valid does
not raise an exception, but returns Infinity */
FPU_copy_to_regi(a, TAG_Special, deststnr);
setsign(dest, sign);
return taga;
}
} else if (tagb == TW_Infinity) {
if ((taga == TW_Denormal) && (denormal_operand() < 0))
return FPU_Exception;
/* The result is zero. */
FPU_copy_to_regi(&CONST_Z, TAG_Zero, deststnr);
setsign(dest, sign);
return TAG_Zero;
}
#ifdef PARANOID
else {
EXCEPTION(EX_INTERNAL | 0x102);
return FPU_Exception;
}
#endif /* PARANOID */
return 0;
}
| linux-master | arch/x86/math-emu/reg_divide.c |
// SPDX-License-Identifier: GPL-2.0
/*---------------------------------------------------------------------------+
| poly_tan.c |
| |
| Compute the tan of a FPU_REG, using a polynomial approximation. |
| |
| Copyright (C) 1992,1993,1994,1997,1999 |
| W. Metzenthen, 22 Parker St, Ormond, Vic 3163, |
| Australia. E-mail [email protected] |
| |
| |
+---------------------------------------------------------------------------*/
#include "exception.h"
#include "reg_constant.h"
#include "fpu_emu.h"
#include "fpu_system.h"
#include "control_w.h"
#include "poly.h"
#define HiPOWERop 3 /* odd poly, positive terms */
static const unsigned long long oddplterm[HiPOWERop] = {
0x0000000000000000LL,
0x0051a1cf08fca228LL,
0x0000000071284ff7LL
};
#define HiPOWERon 2 /* odd poly, negative terms */
static const unsigned long long oddnegterm[HiPOWERon] = {
0x1291a9a184244e80LL,
0x0000583245819c21LL
};
#define HiPOWERep 2 /* even poly, positive terms */
static const unsigned long long evenplterm[HiPOWERep] = {
0x0e848884b539e888LL,
0x00003c7f18b887daLL
};
#define HiPOWERen 2 /* even poly, negative terms */
static const unsigned long long evennegterm[HiPOWERen] = {
0xf1f0200fd51569ccLL,
0x003afb46105c4432LL
};
static const unsigned long long twothirds = 0xaaaaaaaaaaaaaaabLL;
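/*
 * Editorial check: twothirds is 2/3 in 0.64 fixed point;
 * (2/3) * 2^64 = 0xaaaaaaaaaaaaaaaa.aa..., which rounds up to the
 * 0xaaaaaaaaaaaaaaabLL used above.
 */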
/*--- poly_tan() ------------------------------------------------------------+
| |
+---------------------------------------------------------------------------*/
void poly_tan(FPU_REG *st0_ptr)
{
long int exponent;
int invert;
Xsig argSq, argSqSq, accumulatoro, accumulatore, accum,
argSignif, fix_up;
unsigned long adj;
exponent = exponent(st0_ptr);
#ifdef PARANOID
if (signnegative(st0_ptr)) { /* Can't hack a number < 0.0 */
arith_invalid(0);
return;
} /* Need a positive number */
#endif /* PARANOID */
/* Split the problem into two domains, smaller and larger than pi/4 */
if ((exponent == 0)
|| ((exponent == -1) && (st0_ptr->sigh > 0xc90fdaa2))) {
/* The argument is greater than (approx) pi/4 */
invert = 1;
accum.lsw = 0;
XSIG_LL(accum) = significand(st0_ptr);
if (exponent == 0) {
/* The argument is >= 1.0 */
/* Put the binary point at the left. */
XSIG_LL(accum) <<= 1;
}
/* pi/2 in hex is: 1.921fb54442d18469 898CC51701B839A2 52049C1 */
XSIG_LL(accum) = 0x921fb54442d18469LL - XSIG_LL(accum);
/* This is a special case which arises due to rounding. */
if (XSIG_LL(accum) == 0xffffffffffffffffLL) {
FPU_settag0(TAG_Valid);
significand(st0_ptr) = 0x8a51e04daabda360LL;
setexponent16(st0_ptr,
(0x41 + EXTENDED_Ebias) | SIGN_Negative);
return;
}
argSignif.lsw = accum.lsw;
XSIG_LL(argSignif) = XSIG_LL(accum);
exponent = -1 + norm_Xsig(&argSignif);
} else {
invert = 0;
argSignif.lsw = 0;
XSIG_LL(accum) = XSIG_LL(argSignif) = significand(st0_ptr);
if (exponent < -1) {
/* shift the argument right by the required places */
if (FPU_shrx(&XSIG_LL(accum), -1 - exponent) >=
0x80000000U)
XSIG_LL(accum)++; /* round up */
}
}
XSIG_LL(argSq) = XSIG_LL(accum);
argSq.lsw = accum.lsw;
mul_Xsig_Xsig(&argSq, &argSq);
XSIG_LL(argSqSq) = XSIG_LL(argSq);
argSqSq.lsw = argSq.lsw;
mul_Xsig_Xsig(&argSqSq, &argSqSq);
/* Compute the negative terms for the numerator polynomial */
accumulatoro.msw = accumulatoro.midw = accumulatoro.lsw = 0;
polynomial_Xsig(&accumulatoro, &XSIG_LL(argSqSq), oddnegterm,
HiPOWERon - 1);
mul_Xsig_Xsig(&accumulatoro, &argSq);
negate_Xsig(&accumulatoro);
/* Add the positive terms */
polynomial_Xsig(&accumulatoro, &XSIG_LL(argSqSq), oddplterm,
HiPOWERop - 1);
/* Compute the positive terms for the denominator polynomial */
accumulatore.msw = accumulatore.midw = accumulatore.lsw = 0;
polynomial_Xsig(&accumulatore, &XSIG_LL(argSqSq), evenplterm,
HiPOWERep - 1);
mul_Xsig_Xsig(&accumulatore, &argSq);
negate_Xsig(&accumulatore);
/* Add the negative terms */
polynomial_Xsig(&accumulatore, &XSIG_LL(argSqSq), evennegterm,
HiPOWERen - 1);
/* Multiply by arg^2 */
mul64_Xsig(&accumulatore, &XSIG_LL(argSignif));
mul64_Xsig(&accumulatore, &XSIG_LL(argSignif));
/* de-normalize and divide by 2 */
shr_Xsig(&accumulatore, -2 * (1 + exponent) + 1);
negate_Xsig(&accumulatore); /* This does 1 - accumulator */
/* Now find the ratio. */
if (accumulatore.msw == 0) {
/* accumulatoro must contain 1.0 here, (actually, 0) but it
really doesn't matter what value we use because it will
have negligible effect in later calculations
*/
XSIG_LL(accum) = 0x8000000000000000LL;
accum.lsw = 0;
} else {
div_Xsig(&accumulatoro, &accumulatore, &accum);
}
/* Multiply by 1/3 * arg^3 */
mul64_Xsig(&accum, &XSIG_LL(argSignif));
mul64_Xsig(&accum, &XSIG_LL(argSignif));
mul64_Xsig(&accum, &XSIG_LL(argSignif));
mul64_Xsig(&accum, &twothirds);
shr_Xsig(&accum, -2 * (exponent + 1));
/* tan(arg) = arg + accum */
add_two_Xsig(&accum, &argSignif, &exponent);
if (invert) {
/* We now have the value of tan(pi_2 - arg) where pi_2 is an
approximation for pi/2
*/
/* The next step is to fix the answer to compensate for the
error due to the approximation used for pi/2
*/
/* This is (approx) delta, the error in our approx for pi/2
(see above). It has an exponent of -65
*/
XSIG_LL(fix_up) = 0x898cc51701b839a2LL;
fix_up.lsw = 0;
if (exponent == 0)
adj = 0xffffffff; /* We want approx 1.0 here, but
this is close enough. */
else if (exponent > -30) {
adj = accum.msw >> -(exponent + 1); /* tan */
adj = mul_32_32(adj, adj); /* tan^2 */
} else
adj = 0;
adj = mul_32_32(0x898cc517, adj); /* delta * tan^2 */
fix_up.msw += adj;
if (!(fix_up.msw & 0x80000000)) { /* did fix_up overflow ? */
/* Yes, we need to add an msb */
shr_Xsig(&fix_up, 1);
fix_up.msw |= 0x80000000;
shr_Xsig(&fix_up, 64 + exponent);
} else
shr_Xsig(&fix_up, 65 + exponent);
add_two_Xsig(&accum, &fix_up, &exponent);
/* accum now contains tan(pi/2 - arg).
Use tan(arg) = 1.0 / tan(pi/2 - arg)
*/
accumulatoro.lsw = accumulatoro.midw = 0;
accumulatoro.msw = 0x80000000;
div_Xsig(&accumulatoro, &accum, &accum);
exponent = -exponent - 1;
}
/* Transfer the result */
round_Xsig(&accum);
FPU_settag0(TAG_Valid);
significand(st0_ptr) = XSIG_LL(accum);
setexponent16(st0_ptr, exponent + EXTENDED_Ebias); /* Result is positive. */
}
| linux-master | arch/x86/math-emu/poly_tan.c |
// SPDX-License-Identifier: GPL-2.0
/*---------------------------------------------------------------------------+
| poly_atan.c |
| |
| Compute the arctan of a FPU_REG, using a polynomial approximation. |
| |
| Copyright (C) 1992,1993,1994,1997 |
| W. Metzenthen, 22 Parker St, Ormond, Vic 3163, Australia |
| E-mail [email protected] |
| |
| |
+---------------------------------------------------------------------------*/
#include "exception.h"
#include "reg_constant.h"
#include "fpu_emu.h"
#include "fpu_system.h"
#include "status_w.h"
#include "control_w.h"
#include "poly.h"
#define HIPOWERon 6 /* odd poly, negative terms */
static const unsigned long long oddnegterms[HIPOWERon] = {
0x0000000000000000LL, /* Dummy (not for - 1.0) */
0x015328437f756467LL,
0x0005dda27b73dec6LL,
0x0000226bf2bfb91aLL,
0x000000ccc439c5f7LL,
0x0000000355438407LL
};
#define HIPOWERop 6 /* odd poly, positive terms */
static const unsigned long long oddplterms[HIPOWERop] = {
/* 0xaaaaaaaaaaaaaaabLL, transferred to fixedpterm[] */
0x0db55a71875c9ac2LL,
0x0029fce2d67880b0LL,
0x0000dfd3908b4596LL,
0x00000550fd61dab4LL,
0x0000001c9422b3f9LL,
0x000000003e3301e1LL
};
static const unsigned long long denomterm = 0xebd9b842c5c53a0eLL;
static const Xsig fixedpterm = MK_XSIG(0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa);
static const Xsig pi_signif = MK_XSIG(0xc90fdaa2, 0x2168c234, 0xc4c6628b);
/*--- poly_atan() -----------------------------------------------------------+
| |
+---------------------------------------------------------------------------*/
void poly_atan(FPU_REG *st0_ptr, u_char st0_tag,
FPU_REG *st1_ptr, u_char st1_tag)
{
u_char transformed, inverted, sign1, sign2;
int exponent;
long int dummy_exp;
Xsig accumulator, Numer, Denom, accumulatore, argSignif, argSq, argSqSq;
u_char tag;
sign1 = getsign(st0_ptr);
sign2 = getsign(st1_ptr);
if (st0_tag == TAG_Valid) {
exponent = exponent(st0_ptr);
} else {
/* This gives non-compatible stack contents... */
FPU_to_exp16(st0_ptr, st0_ptr);
exponent = exponent16(st0_ptr);
}
if (st1_tag == TAG_Valid) {
exponent -= exponent(st1_ptr);
} else {
/* This gives non-compatible stack contents... */
FPU_to_exp16(st1_ptr, st1_ptr);
exponent -= exponent16(st1_ptr);
}
if ((exponent < 0) || ((exponent == 0) &&
((st0_ptr->sigh < st1_ptr->sigh) ||
((st0_ptr->sigh == st1_ptr->sigh) &&
(st0_ptr->sigl < st1_ptr->sigl))))) {
inverted = 1;
Numer.lsw = Denom.lsw = 0;
XSIG_LL(Numer) = significand(st0_ptr);
XSIG_LL(Denom) = significand(st1_ptr);
} else {
inverted = 0;
exponent = -exponent;
Numer.lsw = Denom.lsw = 0;
XSIG_LL(Numer) = significand(st1_ptr);
XSIG_LL(Denom) = significand(st0_ptr);
}
div_Xsig(&Numer, &Denom, &argSignif);
exponent += norm_Xsig(&argSignif);
if ((exponent >= -1)
|| ((exponent == -2) && (argSignif.msw > 0xd413ccd0))) {
/* The argument is greater than sqrt(2)-1 (=0.414213562...) */
/* Convert the argument by an identity for atan */
transformed = 1;
if (exponent >= 0) {
#ifdef PARANOID
if (!((exponent == 0) &&
(argSignif.lsw == 0) && (argSignif.midw == 0) &&
(argSignif.msw == 0x80000000))) {
EXCEPTION(EX_INTERNAL | 0x104); /* There must be a logic error */
return;
}
#endif /* PARANOID */
argSignif.msw = 0; /* Make the transformed arg -> 0.0 */
} else {
Numer.lsw = Denom.lsw = argSignif.lsw;
XSIG_LL(Numer) = XSIG_LL(Denom) = XSIG_LL(argSignif);
if (exponent < -1)
shr_Xsig(&Numer, -1 - exponent);
negate_Xsig(&Numer);
shr_Xsig(&Denom, -exponent);
Denom.msw |= 0x80000000;
div_Xsig(&Numer, &Denom, &argSignif);
exponent = -1 + norm_Xsig(&argSignif);
}
} else {
transformed = 0;
}
argSq.lsw = argSignif.lsw;
argSq.midw = argSignif.midw;
argSq.msw = argSignif.msw;
mul_Xsig_Xsig(&argSq, &argSq);
argSqSq.lsw = argSq.lsw;
argSqSq.midw = argSq.midw;
argSqSq.msw = argSq.msw;
mul_Xsig_Xsig(&argSqSq, &argSqSq);
accumulatore.lsw = argSq.lsw;
XSIG_LL(accumulatore) = XSIG_LL(argSq);
shr_Xsig(&argSq, 2 * (-1 - exponent - 1));
shr_Xsig(&argSqSq, 4 * (-1 - exponent - 1));
/* Now have argSq etc with binary point at the left
.1xxxxxxxx */
/* Do the basic fixed point polynomial evaluation */
accumulator.msw = accumulator.midw = accumulator.lsw = 0;
polynomial_Xsig(&accumulator, &XSIG_LL(argSqSq),
oddplterms, HIPOWERop - 1);
mul64_Xsig(&accumulator, &XSIG_LL(argSq));
negate_Xsig(&accumulator);
polynomial_Xsig(&accumulator, &XSIG_LL(argSqSq), oddnegterms,
HIPOWERon - 1);
negate_Xsig(&accumulator);
add_two_Xsig(&accumulator, &fixedpterm, &dummy_exp);
mul64_Xsig(&accumulatore, &denomterm);
shr_Xsig(&accumulatore, 1 + 2 * (-1 - exponent));
accumulatore.msw |= 0x80000000;
div_Xsig(&accumulator, &accumulatore, &accumulator);
mul_Xsig_Xsig(&accumulator, &argSignif);
mul_Xsig_Xsig(&accumulator, &argSq);
shr_Xsig(&accumulator, 3);
negate_Xsig(&accumulator);
add_Xsig_Xsig(&accumulator, &argSignif);
if (transformed) {
/* compute pi/4 - accumulator */
shr_Xsig(&accumulator, -1 - exponent);
negate_Xsig(&accumulator);
add_Xsig_Xsig(&accumulator, &pi_signif);
exponent = -1;
}
if (inverted) {
/* compute pi/2 - accumulator */
shr_Xsig(&accumulator, -exponent);
negate_Xsig(&accumulator);
add_Xsig_Xsig(&accumulator, &pi_signif);
exponent = 0;
}
if (sign1) {
/* compute pi - accumulator */
shr_Xsig(&accumulator, 1 - exponent);
negate_Xsig(&accumulator);
add_Xsig_Xsig(&accumulator, &pi_signif);
exponent = 1;
}
exponent += round_Xsig(&accumulator);
significand(st1_ptr) = XSIG_LL(accumulator);
setexponent16(st1_ptr, exponent);
tag = FPU_round(st1_ptr, 1, 0, FULL_PRECISION, sign2);
FPU_settagi(1, tag);
set_precision_flag_up(); /* We do not really know if up or down,
use this as the default. */
}
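/*
 * Editorial summary of the reductions above (fpatan wants
 * atan(st(1)/st(0)), computed here on the magnitudes and then moved
 * into the right quadrant):
 *
 *   inverted:    |st0| < |st1|, so use atan(b/a) = pi/2 - atan(a/b)
 *   transformed: argument above sqrt(2)-1, so use
 *                atan(x) = pi/4 - atan((1-x)/(1+x))
 *   sign1 set:   st(0) was negative, so reflect: result = pi - result
 *
 * The sign of st(1) is applied last via FPU_round(..., sign2).
 */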
| linux-master | arch/x86/math-emu/poly_atan.c |
// SPDX-License-Identifier: GPL-2.0
/*---------------------------------------------------------------------------+
| get_address.c |
| |
| Get the effective address from an FPU instruction. |
| |
| Copyright (C) 1992,1993,1994,1997 |
| W. Metzenthen, 22 Parker St, Ormond, Vic 3163, |
| Australia. E-mail [email protected] |
| |
| |
+---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------+
| Note: |
| The file contains code which accesses user memory. |
| Emulator static data may change when user memory is accessed, due to |
| other processes using the emulator while swapping is in progress. |
+---------------------------------------------------------------------------*/
#include <linux/stddef.h>
#include <linux/uaccess.h>
#include <asm/vm86.h>
#include "fpu_system.h"
#include "exception.h"
#include "fpu_emu.h"
#define FPU_WRITE_BIT 0x10
static int reg_offset[] = {
offsetof(struct pt_regs, ax),
offsetof(struct pt_regs, cx),
offsetof(struct pt_regs, dx),
offsetof(struct pt_regs, bx),
offsetof(struct pt_regs, sp),
offsetof(struct pt_regs, bp),
offsetof(struct pt_regs, si),
offsetof(struct pt_regs, di)
};
#define REG_(x) (*(long *)(reg_offset[(x)] + (u_char *)FPU_info->regs))
static int reg_offset_vm86[] = {
offsetof(struct pt_regs, cs),
offsetof(struct kernel_vm86_regs, ds),
offsetof(struct kernel_vm86_regs, es),
offsetof(struct kernel_vm86_regs, fs),
offsetof(struct kernel_vm86_regs, gs),
offsetof(struct pt_regs, ss),
offsetof(struct kernel_vm86_regs, ds)
};
#define VM86_REG_(x) (*(unsigned short *) \
(reg_offset_vm86[((unsigned)x)] + (u_char *)FPU_info->regs))
static int reg_offset_pm[] = {
offsetof(struct pt_regs, cs),
offsetof(struct pt_regs, ds),
offsetof(struct pt_regs, es),
offsetof(struct pt_regs, fs),
offsetof(struct pt_regs, ds), /* dummy, not saved on stack */
offsetof(struct pt_regs, ss),
offsetof(struct pt_regs, ds)
};
#define PM_REG_(x) (*(unsigned short *) \
(reg_offset_pm[((unsigned)x)] + (u_char *)FPU_info->regs))
/* Decode the SIB byte. This function assumes mod != 0 */
static int sib(int mod, unsigned long *fpu_eip)
{
u_char ss, index, base;
long offset;
RE_ENTRANT_CHECK_OFF;
FPU_code_access_ok(1);
FPU_get_user(base, (u_char __user *) (*fpu_eip)); /* The SIB byte */
RE_ENTRANT_CHECK_ON;
(*fpu_eip)++;
ss = base >> 6;
index = (base >> 3) & 7;
base &= 7;
if ((mod == 0) && (base == 5))
offset = 0; /* No base register */
else
offset = REG_(base);
if (index == 4) {
/* No index register */
/* A non-zero ss is illegal */
if (ss)
EXCEPTION(EX_Invalid);
} else {
offset += (REG_(index)) << ss;
}
if (mod == 1) {
/* 8 bit signed displacement */
long displacement;
RE_ENTRANT_CHECK_OFF;
FPU_code_access_ok(1);
FPU_get_user(displacement, (signed char __user *)(*fpu_eip));
offset += displacement;
RE_ENTRANT_CHECK_ON;
(*fpu_eip)++;
} else if (mod == 2 || base == 5) { /* The second condition also has mod==0 */
/* 32 bit displacement */
long displacement;
RE_ENTRANT_CHECK_OFF;
FPU_code_access_ok(4);
FPU_get_user(displacement, (long __user *)(*fpu_eip));
offset += displacement;
RE_ENTRANT_CHECK_ON;
(*fpu_eip) += 4;
}
return offset;
}
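/*
 * Editorial worked example: with mod == 1 and the SIB byte 0x98,
 * ss == 2, index == 3 (ebx) and base == 0 (eax), so the code above
 * computes eax + (ebx << 2) plus the 8-bit signed displacement that
 * follows.
 */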
static unsigned long vm86_segment(u_char segment, struct address *addr)
{
segment--;
#ifdef PARANOID
if (segment > PREFIX_SS_) {
EXCEPTION(EX_INTERNAL | 0x130);
math_abort(FPU_info, SIGSEGV);
}
#endif /* PARANOID */
addr->selector = VM86_REG_(segment);
return (unsigned long)VM86_REG_(segment) << 4;
}
/* This should work for 16 and 32 bit protected mode. */
static long pm_address(u_char FPU_modrm, u_char segment,
struct address *addr, long offset)
{
struct desc_struct descriptor;
unsigned long base_address, limit, address, seg_top;
segment--;
#ifdef PARANOID
/* segment is unsigned, so this also detects if segment was 0: */
if (segment > PREFIX_SS_) {
EXCEPTION(EX_INTERNAL | 0x132);
math_abort(FPU_info, SIGSEGV);
}
#endif /* PARANOID */
switch (segment) {
case PREFIX_GS_ - 1:
/* user gs handling can be lazy, use special accessors */
savesegment(gs, addr->selector);
break;
default:
addr->selector = PM_REG_(segment);
}
descriptor = FPU_get_ldt_descriptor(addr->selector);
base_address = seg_get_base(&descriptor);
address = base_address + offset;
limit = seg_get_limit(&descriptor) + 1;
limit *= seg_get_granularity(&descriptor);
limit += base_address - 1;
if (limit < base_address)
limit = 0xffffffff;
if (seg_expands_down(&descriptor)) {
if (descriptor.g) {
seg_top = 0xffffffff;
} else {
seg_top = base_address + (1 << 20);
if (seg_top < base_address)
seg_top = 0xffffffff;
}
access_limit =
(address <= limit) || (address >= seg_top) ? 0 :
((seg_top - address) >= 255 ? 255 : seg_top - address);
} else {
access_limit =
(address > limit) || (address < base_address) ? 0 :
((limit - address) >= 254 ? 255 : limit - address + 1);
}
if (seg_execute_only(&descriptor) ||
(!seg_writable(&descriptor) && (FPU_modrm & FPU_WRITE_BIT))) {
access_limit = 0;
}
return address;
}
/*
   MOD R/M byte:  MOD == 3 has a special use for the FPU
                  SIB byte used iff R/M = 100b

      7 6      5 4 3      2 1 0
     [MOD]  [OPCODE(2)]   [R/M]

   SIB byte

      7 6      5 4 3      2 1 0
     [SS]    [ INDEX ]   [BASE]
*/
void __user *FPU_get_address(u_char FPU_modrm, unsigned long *fpu_eip,
struct address *addr, fpu_addr_modes addr_modes)
{
u_char mod;
unsigned rm = FPU_modrm & 7;
long *cpu_reg_ptr;
int address = 0; /* Initialized just to stop compiler warnings. */
/* Memory accessed via the cs selector is write protected
in `non-segmented' 32 bit protected mode. */
if (!addr_modes.default_mode && (FPU_modrm & FPU_WRITE_BIT)
&& (addr_modes.override.segment == PREFIX_CS_)) {
math_abort(FPU_info, SIGSEGV);
}
addr->selector = FPU_DS; /* Default, for 32 bit non-segmented mode. */
mod = (FPU_modrm >> 6) & 3;
if (rm == 4 && mod != 3) {
address = sib(mod, fpu_eip);
} else {
cpu_reg_ptr = &REG_(rm);
switch (mod) {
case 0:
if (rm == 5) {
/* Special case: disp32 */
RE_ENTRANT_CHECK_OFF;
FPU_code_access_ok(4);
FPU_get_user(address,
    (unsigned long __user *)(*fpu_eip));
(*fpu_eip) += 4;
RE_ENTRANT_CHECK_ON;
addr->offset = address;
return (void __user *)address;
} else {
address = *cpu_reg_ptr; /* Just return the contents
of the cpu register */
addr->offset = address;
return (void __user *)address;
}
case 1:
/* 8 bit signed displacement */
RE_ENTRANT_CHECK_OFF;
FPU_code_access_ok(1);
FPU_get_user(address, (signed char __user *)(*fpu_eip));
RE_ENTRANT_CHECK_ON;
(*fpu_eip)++;
break;
case 2:
/* 32 bit displacement */
RE_ENTRANT_CHECK_OFF;
FPU_code_access_ok(4);
FPU_get_user(address, (long __user *)(*fpu_eip));
(*fpu_eip) += 4;
RE_ENTRANT_CHECK_ON;
break;
case 3:
/* Not legal for the FPU */
EXCEPTION(EX_Invalid);
}
address += *cpu_reg_ptr;
}
addr->offset = address;
switch (addr_modes.default_mode) {
case 0:
break;
case VM86:
address += vm86_segment(addr_modes.override.segment, addr);
break;
case PM16:
case SEG32:
address = pm_address(FPU_modrm, addr_modes.override.segment,
addr, address);
break;
default:
EXCEPTION(EX_INTERNAL | 0x133);
}
return (void __user *)address;
}
void __user *FPU_get_address_16(u_char FPU_modrm, unsigned long *fpu_eip,
struct address *addr, fpu_addr_modes addr_modes)
{
u_char mod;
unsigned rm = FPU_modrm & 7;
int address = 0; /* Default used for mod == 0 */
/* Memory accessed via the cs selector is write protected
in `non-segmented' 32 bit protected mode. */
if (!addr_modes.default_mode && (FPU_modrm & FPU_WRITE_BIT)
&& (addr_modes.override.segment == PREFIX_CS_)) {
math_abort(FPU_info, SIGSEGV);
}
addr->selector = FPU_DS; /* Default, for 32 bit non-segmented mode. */
mod = (FPU_modrm >> 6) & 3;
switch (mod) {
case 0:
if (rm == 6) {
/* Special case: disp16 */
RE_ENTRANT_CHECK_OFF;
FPU_code_access_ok(2);
FPU_get_user(address,
(unsigned short __user *)(*fpu_eip));
(*fpu_eip) += 2;
RE_ENTRANT_CHECK_ON;
goto add_segment;
}
break;
case 1:
/* 8 bit signed displacement */
RE_ENTRANT_CHECK_OFF;
FPU_code_access_ok(1);
FPU_get_user(address, (signed char __user *)(*fpu_eip));
RE_ENTRANT_CHECK_ON;
(*fpu_eip)++;
break;
case 2:
/* 16 bit displacement */
RE_ENTRANT_CHECK_OFF;
FPU_code_access_ok(2);
FPU_get_user(address, (unsigned short __user *)(*fpu_eip));
(*fpu_eip) += 2;
RE_ENTRANT_CHECK_ON;
break;
case 3:
/* Not legal for the FPU */
EXCEPTION(EX_Invalid);
break;
}
switch (rm) {
case 0:
address += FPU_info->regs->bx + FPU_info->regs->si;
break;
case 1:
address += FPU_info->regs->bx + FPU_info->regs->di;
break;
case 2:
address += FPU_info->regs->bp + FPU_info->regs->si;
if (addr_modes.override.segment == PREFIX_DEFAULT)
addr_modes.override.segment = PREFIX_SS_;
break;
case 3:
address += FPU_info->regs->bp + FPU_info->regs->di;
if (addr_modes.override.segment == PREFIX_DEFAULT)
addr_modes.override.segment = PREFIX_SS_;
break;
case 4:
address += FPU_info->regs->si;
break;
case 5:
address += FPU_info->regs->di;
break;
case 6:
address += FPU_info->regs->bp;
if (addr_modes.override.segment == PREFIX_DEFAULT)
addr_modes.override.segment = PREFIX_SS_;
break;
case 7:
address += FPU_info->regs->bx;
break;
}
add_segment:
address &= 0xffff;
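	/* 16-bit effective addresses wrap modulo 64K; e.g. bx = 0xfffe
	   with a displacement of 4 yields offset 0x0002. */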
addr->offset = address;
switch (addr_modes.default_mode) {
case 0:
break;
case VM86:
address += vm86_segment(addr_modes.override.segment, addr);
break;
case PM16:
case SEG32:
address = pm_address(FPU_modrm, addr_modes.override.segment,
addr, address);
break;
default:
EXCEPTION(EX_INTERNAL | 0x131);
}
return (void __user *)address;
}
| linux-master | arch/x86/math-emu/get_address.c |
// SPDX-License-Identifier: GPL-2.0
/*---------------------------------------------------------------------------+
| poly_2xm1.c |
| |
| Function to compute 2^x-1 by a polynomial approximation. |
| |
| Copyright (C) 1992,1993,1994,1997 |
| W. Metzenthen, 22 Parker St, Ormond, Vic 3163, Australia |
| E-mail [email protected] |
| |
| |
+---------------------------------------------------------------------------*/
#include "exception.h"
#include "reg_constant.h"
#include "fpu_emu.h"
#include "fpu_system.h"
#include "control_w.h"
#include "poly.h"
#define HIPOWER 11
static const unsigned long long lterms[HIPOWER] = {
0x0000000000000000LL, /* This term done separately as 12 bytes */
0xf5fdeffc162c7543LL,
0x1c6b08d704a0bfa6LL,
0x0276556df749cc21LL,
0x002bb0ffcf14f6b8LL,
0x0002861225ef751cLL,
0x00001ffcbfcd5422LL,
0x00000162c005d5f1LL,
0x0000000da96ccb1bLL,
0x0000000078d1b897LL,
0x000000000422b029LL
};
static const Xsig hiterm = MK_XSIG(0xb17217f7, 0xd1cf79ab, 0xc8a39194);
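/* To 64 bits, hiterm is ln(2) = 0.b17217f7d1cf79ab... in hex; it scales
   the leading term, since 2^x - 1 = x*ln(2) + x^2*ln(2)^2/2! + ...
   The low 32 bits differ slightly from pure ln(2), presumably an
   artefact of the minimax fit used for the lterms above. */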
/* Four slices: 0.0 : 0.25 : 0.50 : 0.75 : 1.0,
These numbers are 2^(1/4), 2^(1/2), and 2^(3/4)
*/
static const Xsig shiftterm0 = MK_XSIG(0, 0, 0);
static const Xsig shiftterm1 = MK_XSIG(0x9837f051, 0x8db8a96f, 0x46ad2318);
static const Xsig shiftterm2 = MK_XSIG(0xb504f333, 0xf9de6484, 0x597d89b3);
static const Xsig shiftterm3 = MK_XSIG(0xd744fcca, 0xd69d6af4, 0x39a68bb9);
static const Xsig *shiftterm[] = { &shiftterm0, &shiftterm1,
&shiftterm2, &shiftterm3
};
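/* shiftterm1..3 are 2^(1/4), 2^(1/2) and 2^(3/4) scaled into [0.5, 1.0):
   shiftterm2, for example, is 0.b504f333f9de6484597d89b3 hex = sqrt(2)/2.
   shiftterm0 is never used, since the shift == 0 path below skips the
   multiply. */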
/*--- poly_2xm1() -----------------------------------------------------------+
| Requires st(0) which is TAG_Valid and < 1. |
+---------------------------------------------------------------------------*/
int poly_2xm1(u_char sign, FPU_REG *arg, FPU_REG *result)
{
long int exponent, shift;
unsigned long long Xll;
Xsig accumulator, Denom, argSignif;
u_char tag;
exponent = exponent16(arg);
#ifdef PARANOID
if (exponent >= 0) { /* Don't want a |number| >= 1.0 */
/* Number negative, too large, or not Valid. */
EXCEPTION(EX_INTERNAL | 0x127);
return 1;
}
#endif /* PARANOID */
argSignif.lsw = 0;
XSIG_LL(argSignif) = Xll = significand(arg);
if (exponent == -1) {
shift = (argSignif.msw & 0x40000000) ? 3 : 2;
/* subtract 0.5 or 0.75 */
exponent -= 2;
XSIG_LL(argSignif) <<= 2;
Xll <<= 2;
} else if (exponent == -2) {
shift = 1;
/* subtract 0.25 */
exponent--;
XSIG_LL(argSignif) <<= 1;
Xll <<= 1;
} else
shift = 0;
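	/* The argument has now been classified into one of the four
	   quarter-intervals: shift indexes shiftterm[], and the left
	   shifts above strip the leading significand bits, in effect
	   leaving x minus the interval base (0.25, 0.5 or 0.75) in
	   argSignif. */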
if (exponent < -2) {
/* Shift the argument right by the required places. */
if (FPU_shrx(&Xll, -2 - exponent) >= 0x80000000U)
Xll++; /* round up */
}
accumulator.lsw = accumulator.midw = accumulator.msw = 0;
polynomial_Xsig(&accumulator, &Xll, lterms, HIPOWER - 1);
mul_Xsig_Xsig(&accumulator, &argSignif);
shr_Xsig(&accumulator, 3);
mul_Xsig_Xsig(&argSignif, &hiterm); /* The leading term */
add_two_Xsig(&accumulator, &argSignif, &exponent);
if (shift) {
		/* The argument is large; use the identity
		   f(x+a) = 2^a * (f(x) + 1) - 1,
		   with 2^a taken from shiftterm[].
		 */
shr_Xsig(&accumulator, -exponent);
accumulator.msw |= 0x80000000; /* add 1.0 */
mul_Xsig_Xsig(&accumulator, shiftterm[shift]);
accumulator.msw &= 0x3fffffff; /* subtract 1.0 */
exponent = 1;
}
if (sign != SIGN_POS) {
/* The argument is negative, use the identity:
f(-x) = -f(x) / (1 + f(x))
*/
Denom.lsw = accumulator.lsw;
XSIG_LL(Denom) = XSIG_LL(accumulator);
if (exponent < 0)
shr_Xsig(&Denom, -exponent);
else if (exponent > 0) {
/* exponent must be 1 here */
XSIG_LL(Denom) <<= 1;
if (Denom.lsw & 0x80000000)
XSIG_LL(Denom) |= 1;
(Denom.lsw) <<= 1;
}
Denom.msw |= 0x80000000; /* add 1.0 */
div_Xsig(&accumulator, &Denom, &accumulator);
}
/* Convert to 64 bit signed-compatible */
exponent += round_Xsig(&accumulator);
result = &st(0);
significand(result) = XSIG_LL(accumulator);
setexponent16(result, exponent);
tag = FPU_round(result, 1, 0, FULL_PRECISION, sign);
setsign(result, sign);
FPU_settag0(tag);
return 0;
}
| linux-master | arch/x86/math-emu/poly_2xm1.c |
// SPDX-License-Identifier: GPL-2.0
/*---------------------------------------------------------------------------+
| fpu_entry.c |
| |
| The entry functions for wm-FPU-emu |
| |
| Copyright (C) 1992,1993,1994,1996,1997 |
| W. Metzenthen, 22 Parker St, Ormond, Vic 3163, Australia |
| E-mail [email protected] |
| |
| See the files "README" and "COPYING" for further copyright and warranty |
| information. |
| |
+---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------+
| Note: |
| The file contains code which accesses user memory. |
| Emulator static data may change when user memory is accessed, due to |
| other processes using the emulator while swapping is in progress. |
+---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------+
| math_emulate(), restore_i387_soft() and save_i387_soft() are the only |
| entry points for wm-FPU-emu. |
+---------------------------------------------------------------------------*/
#include <linux/signal.h>
#include <linux/regset.h>
#include <linux/uaccess.h>
#include <asm/traps.h>
#include <asm/user.h>
#include <asm/fpu/api.h>
#include <asm/fpu/regset.h>
#include "fpu_system.h"
#include "fpu_emu.h"
#include "exception.h"
#include "control_w.h"
#include "status_w.h"
#define __BAD__ FPU_illegal /* Illegal on an 80486, causes SIGILL */
/* fcmovCC and f(u)comi(p) are enabled if CPUID(1).EDX(15) "cmov" is set */
/* WARNING: "u" entries are not documented by Intel in their 80486 manual
and may not work on FPU clones or later Intel FPUs.
Changes to support them provided by Linus Torvalds. */
static FUNC const st_instr_table[64] = {
/* Opcode: d8 d9 da db */
/* dc dd de df */
/* c0..7 */ fadd__, fld_i_, fcmovb, fcmovnb,
/* c0..7 */ fadd_i, ffree_, faddp_, ffreep,/*u*/
/* c8..f */ fmul__, fxch_i, fcmove, fcmovne,
/* c8..f */ fmul_i, fxch_i,/*u*/ fmulp_, fxch_i,/*u*/
/* d0..7 */ fcom_st, fp_nop, fcmovbe, fcmovnbe,
/* d0..7 */ fcom_st,/*u*/ fst_i_, fcompst,/*u*/ fstp_i,/*u*/
/* d8..f */ fcompst, fstp_i,/*u*/ fcmovu, fcmovnu,
/* d8..f */ fcompst,/*u*/ fstp_i, fcompp, fstp_i,/*u*/
/* e0..7 */ fsub__, FPU_etc, __BAD__, finit_,
/* e0..7 */ fsubri, fucom_, fsubrp, fstsw_,
/* e8..f */ fsubr_, fconst, fucompp, fucomi_,
/* e8..f */ fsub_i, fucomp, fsubp_, fucomip,
/* f0..7 */ fdiv__, FPU_triga, __BAD__, fcomi_,
/* f0..7 */ fdivri, __BAD__, fdivrp, fcomip,
/* f8..f */ fdivr_, FPU_trigb, __BAD__, __BAD__,
/* f8..f */ fdiv_i, __BAD__, fdivp_, __BAD__,
};
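/* The table is indexed by (FPU_modrm & 0x38) | (byte1 & 7): the modrm
   reg field (bits 5..3, kept in place) selects one of the eight row
   pairs above, and the low three bits of the first opcode byte
   (d8..df) select the column. */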
#define _NONE_ 0 /* Take no special action */
#define _REG0_ 1 /* Need to check for not empty st(0) */
#define _REGI_ 2 /* Need to check for not empty st(0) and st(rm) */
#define _REGi_ 0 /* Uses st(rm) */
#define _PUSH_ 3 /* Need to check for space to push onto stack */
#define _null_ 4 /* Function illegal or not implemented */
#define _REGIi 5 /* Uses st(0) and st(rm), result to st(rm) */
#define _REGIp 6 /* Uses st(0) and st(rm), result to st(rm) then pop */
#define _REGIc 0 /* Compare st(0) and st(rm) */
#define _REGIn 0 /* Uses st(0) and st(rm), but handle checks later */
static u_char const type_table[64] = {
/* Opcode: d8 d9 da db dc dd de df */
/* c0..7 */ _REGI_, _NONE_, _REGIn, _REGIn, _REGIi, _REGi_, _REGIp, _REGi_,
/* c8..f */ _REGI_, _REGIn, _REGIn, _REGIn, _REGIi, _REGI_, _REGIp, _REGI_,
/* d0..7 */ _REGIc, _NONE_, _REGIn, _REGIn, _REGIc, _REG0_, _REGIc, _REG0_,
/* d8..f */ _REGIc, _REG0_, _REGIn, _REGIn, _REGIc, _REG0_, _REGIc, _REG0_,
/* e0..7 */ _REGI_, _NONE_, _null_, _NONE_, _REGIi, _REGIc, _REGIp, _NONE_,
/* e8..f */ _REGI_, _NONE_, _REGIc, _REGIc, _REGIi, _REGIc, _REGIp, _REGIc,
/* f0..7 */ _REGI_, _NONE_, _null_, _REGIc, _REGIi, _null_, _REGIp, _REGIc,
/* f8..f */ _REGI_, _NONE_, _null_, _null_, _REGIi, _null_, _REGIp, _null_,
};
#ifdef RE_ENTRANT_CHECKING
u_char emulating = 0;
#endif /* RE_ENTRANT_CHECKING */
static int valid_prefix(u_char *Byte, u_char __user ** fpu_eip,
overrides * override);
void math_emulate(struct math_emu_info *info)
{
u_char FPU_modrm, byte1;
unsigned short code;
fpu_addr_modes addr_modes;
int unmasked;
FPU_REG loaded_data;
FPU_REG *st0_ptr;
u_char loaded_tag, st0_tag;
void __user *data_address;
struct address data_sel_off;
struct address entry_sel_off;
unsigned long code_base = 0;
unsigned long code_limit = 0; /* Initialized to stop compiler warnings */
struct desc_struct code_descriptor;
#ifdef RE_ENTRANT_CHECKING
if (emulating) {
printk("ERROR: wm-FPU-emu is not RE-ENTRANT!\n");
}
RE_ENTRANT_CHECK_ON;
#endif /* RE_ENTRANT_CHECKING */
FPU_info = info;
FPU_ORIG_EIP = FPU_EIP;
if ((FPU_EFLAGS & 0x00020000) != 0) {
/* Virtual 8086 mode */
addr_modes.default_mode = VM86;
FPU_EIP += code_base = FPU_CS << 4;
code_limit = code_base + 0xffff; /* Assumes code_base <= 0xffff0000 */
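		/* vm86 linear addresses are segment * 16 + offset;
		   CS = 0x1234, for example, gives code_base = 0x12340. */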
} else if (FPU_CS == __USER_CS && FPU_DS == __USER_DS) {
addr_modes.default_mode = 0;
} else if (FPU_CS == __KERNEL_CS) {
printk("math_emulate: %04x:%08lx\n", FPU_CS, FPU_EIP);
panic("Math emulation needed in kernel");
} else {
if ((FPU_CS & 4) != 4) { /* Must be in the LDT */
/* Can only handle segmented addressing via the LDT
for now, and it must be 16 bit */
printk("FPU emulator: Unsupported addressing mode\n");
math_abort(FPU_info, SIGILL);
}
code_descriptor = FPU_get_ldt_descriptor(FPU_CS);
if (code_descriptor.d) {
/* The above test may be wrong, the book is not clear */
/* Segmented 32 bit protected mode */
addr_modes.default_mode = SEG32;
} else {
/* 16 bit protected mode */
addr_modes.default_mode = PM16;
}
FPU_EIP += code_base = seg_get_base(&code_descriptor);
code_limit = seg_get_limit(&code_descriptor) + 1;
code_limit *= seg_get_granularity(&code_descriptor);
code_limit += code_base - 1;
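		/* Illustrative example (assuming seg_get_granularity()
		   returns the byte scale, 1 or 4096): a limit of 0x0f
		   with 4K granularity spans (0x0f + 1) * 0x1000 = 0x10000
		   bytes, so the last valid byte is at code_base + 0xffff. */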
if (code_limit < code_base)
code_limit = 0xffffffff;
}
FPU_lookahead = !(FPU_EFLAGS & X86_EFLAGS_TF);
if (!valid_prefix(&byte1, (u_char __user **) & FPU_EIP,
&addr_modes.override)) {
RE_ENTRANT_CHECK_OFF;
printk
("FPU emulator: Unknown prefix byte 0x%02x, probably due to\n"
"FPU emulator: self-modifying code! (emulation impossible)\n",
byte1);
RE_ENTRANT_CHECK_ON;
EXCEPTION(EX_INTERNAL | 0x126);
math_abort(FPU_info, SIGILL);
}
do_another_FPU_instruction:
no_ip_update = 0;
FPU_EIP++; /* We have fetched the prefix and first code bytes. */
if (addr_modes.default_mode) {
/* This checks for the minimum instruction bytes.
We also need to check any extra (address mode) code access. */
if (FPU_EIP > code_limit)
math_abort(FPU_info, SIGSEGV);
}
if ((byte1 & 0xf8) != 0xd8) {
if (byte1 == FWAIT_OPCODE) {
if (partial_status & SW_Summary)
goto do_the_FPU_interrupt;
else
goto FPU_fwait_done;
}
#ifdef PARANOID
EXCEPTION(EX_INTERNAL | 0x128);
math_abort(FPU_info, SIGILL);
#endif /* PARANOID */
}
RE_ENTRANT_CHECK_OFF;
FPU_code_access_ok(1);
FPU_get_user(FPU_modrm, (u_char __user *) FPU_EIP);
RE_ENTRANT_CHECK_ON;
FPU_EIP++;
if (partial_status & SW_Summary) {
/* Ignore the error for now if the current instruction is a no-wait
control instruction */
/* The 80486 manual contradicts itself on this topic,
but a real 80486 uses the following instructions:
		   fninit, fnstenv, fnsave, fnstsw, fnstcw, fnclex.
*/
code = (FPU_modrm << 8) | byte1;
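		/* For example, fnclex is db e2: byte1 = 0xdb and
		   FPU_modrm = 0xe2 give code = 0xe2db, and
		   0xe2db & 0xf803 == 0xe003 matches the first test. */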
if (!((((code & 0xf803) == 0xe003) || /* fnclex, fninit, fnstsw */
(((code & 0x3003) == 0x3001) && /* fnsave, fnstcw, fnstenv,
fnstsw */
((code & 0xc000) != 0xc000))))) {
/*
* We need to simulate the action of the kernel to FPU
* interrupts here.
*/
do_the_FPU_interrupt:
FPU_EIP = FPU_ORIG_EIP; /* Point to current FPU instruction. */
RE_ENTRANT_CHECK_OFF;
current->thread.trap_nr = X86_TRAP_MF;
current->thread.error_code = 0;
send_sig(SIGFPE, current, 1);
return;
}
}
entry_sel_off.offset = FPU_ORIG_EIP;
entry_sel_off.selector = FPU_CS;
entry_sel_off.opcode = (byte1 << 8) | FPU_modrm;
entry_sel_off.empty = 0;
FPU_rm = FPU_modrm & 7;
if (FPU_modrm < 0300) {
/* All of these instructions use the mod/rm byte to get a data address */
if ((addr_modes.default_mode & SIXTEEN)
^ (addr_modes.override.address_size == ADDR_SIZE_PREFIX))
data_address =
FPU_get_address_16(FPU_modrm, &FPU_EIP,
&data_sel_off, addr_modes);
else
data_address =
FPU_get_address(FPU_modrm, &FPU_EIP, &data_sel_off,
addr_modes);
if (addr_modes.default_mode) {
if (FPU_EIP - 1 > code_limit)
math_abort(FPU_info, SIGSEGV);
}
if (!(byte1 & 1)) {
unsigned short status1 = partial_status;
st0_ptr = &st(0);
st0_tag = FPU_gettag0();
/* Stack underflow has priority */
if (NOT_EMPTY_ST0) {
if (addr_modes.default_mode & PROTECTED) {
/* This table works for 16 and 32 bit protected mode */
if (access_limit <
data_sizes_16[(byte1 >> 1) & 3])
math_abort(FPU_info, SIGSEGV);
}
unmasked = 0; /* Do this here to stop compiler warnings. */
switch ((byte1 >> 1) & 3) {
case 0:
unmasked =
FPU_load_single((float __user *)
data_address,
&loaded_data);
loaded_tag = unmasked & 0xff;
unmasked &= ~0xff;
break;
case 1:
loaded_tag =
FPU_load_int32((long __user *)
data_address,
&loaded_data);
break;
case 2:
unmasked =
FPU_load_double((double __user *)
data_address,
&loaded_data);
loaded_tag = unmasked & 0xff;
unmasked &= ~0xff;
break;
case 3:
default: /* Used here to suppress gcc warnings. */
loaded_tag =
FPU_load_int16((short __user *)
data_address,
&loaded_data);
break;
}
/* No more access to user memory, it is safe
to use static data now */
/* NaN operands have the next priority. */
/* We have to delay looking at st(0) until after
loading the data, because that data might contain an SNaN */
if (((st0_tag == TAG_Special) && isNaN(st0_ptr))
|| ((loaded_tag == TAG_Special)
&& isNaN(&loaded_data))) {
/* Restore the status word; we might have loaded a
denormal. */
partial_status = status1;
if ((FPU_modrm & 0x30) == 0x10) {
/* fcom or fcomp */
EXCEPTION(EX_Invalid);
setcc(SW_C3 | SW_C2 | SW_C0);
if ((FPU_modrm & 0x08)
&& (control_word &
CW_Invalid))
FPU_pop(); /* fcomp, masked, so we pop. */
} else {
if (loaded_tag == TAG_Special)
loaded_tag =
FPU_Special
(&loaded_data);
#ifdef PECULIAR_486
/* This is not really needed, but gives behaviour
identical to an 80486 */
if ((FPU_modrm & 0x28) == 0x20)
/* fdiv or fsub */
real_2op_NaN
(&loaded_data,
loaded_tag, 0,
&loaded_data);
else
#endif /* PECULIAR_486 */
/* fadd, fdivr, fmul, or fsubr */
real_2op_NaN
(&loaded_data,
loaded_tag, 0,
st0_ptr);
}
goto reg_mem_instr_done;
}
if (unmasked && !((FPU_modrm & 0x30) == 0x10)) {
/* Is not a comparison instruction. */
if ((FPU_modrm & 0x38) == 0x38) {
/* fdivr */
if ((st0_tag == TAG_Zero) &&
((loaded_tag == TAG_Valid)
|| (loaded_tag ==
TAG_Special
&&
isdenormal
(&loaded_data)))) {
if (FPU_divide_by_zero
(0,
getsign
(&loaded_data))
< 0) {
/* We use the fact here that the unmasked
exception in the loaded data was for a
denormal operand */
/* Restore the state of the denormal op bit */
partial_status
&=
~SW_Denorm_Op;
partial_status
|=
status1 &
SW_Denorm_Op;
} else
setsign(st0_ptr,
getsign
(&loaded_data));
}
}
goto reg_mem_instr_done;
}
switch ((FPU_modrm >> 3) & 7) {
case 0: /* fadd */
clear_C1();
FPU_add(&loaded_data, loaded_tag, 0,
control_word);
break;
case 1: /* fmul */
clear_C1();
FPU_mul(&loaded_data, loaded_tag, 0,
control_word);
break;
case 2: /* fcom */
FPU_compare_st_data(&loaded_data,
loaded_tag);
break;
case 3: /* fcomp */
if (!FPU_compare_st_data
(&loaded_data, loaded_tag)
&& !unmasked)
FPU_pop();
break;
case 4: /* fsub */
clear_C1();
FPU_sub(LOADED | loaded_tag,
(int)&loaded_data,
control_word);
break;
case 5: /* fsubr */
clear_C1();
FPU_sub(REV | LOADED | loaded_tag,
(int)&loaded_data,
control_word);
break;
case 6: /* fdiv */
clear_C1();
FPU_div(LOADED | loaded_tag,
(int)&loaded_data,
control_word);
break;
case 7: /* fdivr */
clear_C1();
if (st0_tag == TAG_Zero)
partial_status = status1; /* Undo any denorm tag,
zero-divide has priority. */
FPU_div(REV | LOADED | loaded_tag,
(int)&loaded_data,
control_word);
break;
}
} else {
if ((FPU_modrm & 0x30) == 0x10) {
/* The instruction is fcom or fcomp */
EXCEPTION(EX_StackUnder);
setcc(SW_C3 | SW_C2 | SW_C0);
if ((FPU_modrm & 0x08)
&& (control_word & CW_Invalid))
FPU_pop(); /* fcomp */
} else
FPU_stack_underflow();
}
reg_mem_instr_done:
operand_address = data_sel_off;
} else {
if (!(no_ip_update =
FPU_load_store(((FPU_modrm & 0x38) | (byte1 & 6))
>> 1, addr_modes, data_address))) {
operand_address = data_sel_off;
}
}
} else {
/* None of these instructions access user memory */
u_char instr_index = (FPU_modrm & 0x38) | (byte1 & 7);
#ifdef PECULIAR_486
/* This is supposed to be undefined, but a real 80486 seems
to do this: */
operand_address.offset = 0;
operand_address.selector = FPU_DS;
#endif /* PECULIAR_486 */
st0_ptr = &st(0);
st0_tag = FPU_gettag0();
switch (type_table[(int)instr_index]) {
case _NONE_: /* also _REGIc: _REGIn */
break;
case _REG0_:
if (!NOT_EMPTY_ST0) {
FPU_stack_underflow();
goto FPU_instruction_done;
}
break;
case _REGIi:
if (!NOT_EMPTY_ST0 || !NOT_EMPTY(FPU_rm)) {
FPU_stack_underflow_i(FPU_rm);
goto FPU_instruction_done;
}
break;
case _REGIp:
if (!NOT_EMPTY_ST0 || !NOT_EMPTY(FPU_rm)) {
FPU_stack_underflow_pop(FPU_rm);
goto FPU_instruction_done;
}
break;
case _REGI_:
if (!NOT_EMPTY_ST0 || !NOT_EMPTY(FPU_rm)) {
FPU_stack_underflow();
goto FPU_instruction_done;
}
break;
case _PUSH_: /* Only used by the fld st(i) instruction */
break;
case _null_:
FPU_illegal();
goto FPU_instruction_done;
default:
EXCEPTION(EX_INTERNAL | 0x111);
goto FPU_instruction_done;
}
(*st_instr_table[(int)instr_index]) ();
FPU_instruction_done:
;
}
if (!no_ip_update)
instruction_address = entry_sel_off;
FPU_fwait_done:
#ifdef DEBUG
RE_ENTRANT_CHECK_OFF;
FPU_printall();
RE_ENTRANT_CHECK_ON;
#endif /* DEBUG */
if (FPU_lookahead && !need_resched()) {
FPU_ORIG_EIP = FPU_EIP - code_base;
if (valid_prefix(&byte1, (u_char __user **) & FPU_EIP,
&addr_modes.override))
goto do_another_FPU_instruction;
}
if (addr_modes.default_mode)
FPU_EIP -= code_base;
RE_ENTRANT_CHECK_OFF;
}
/* Support for prefix bytes is not yet complete. To properly handle
all prefix bytes, further changes are needed in the emulator code
which accesses user address space. Access to separate segments is
important for msdos emulation. */
static int valid_prefix(u_char *Byte, u_char __user **fpu_eip,
overrides * override)
{
u_char byte;
u_char __user *ip = *fpu_eip;
*override = (overrides) {
0, 0, PREFIX_DEFAULT}; /* defaults */
RE_ENTRANT_CHECK_OFF;
FPU_code_access_ok(1);
FPU_get_user(byte, ip);
RE_ENTRANT_CHECK_ON;
while (1) {
switch (byte) {
case ADDR_SIZE_PREFIX:
override->address_size = ADDR_SIZE_PREFIX;
goto do_next_byte;
case OP_SIZE_PREFIX:
override->operand_size = OP_SIZE_PREFIX;
goto do_next_byte;
case PREFIX_CS:
override->segment = PREFIX_CS_;
goto do_next_byte;
case PREFIX_ES:
override->segment = PREFIX_ES_;
goto do_next_byte;
case PREFIX_SS:
override->segment = PREFIX_SS_;
goto do_next_byte;
case PREFIX_FS:
override->segment = PREFIX_FS_;
goto do_next_byte;
case PREFIX_GS:
override->segment = PREFIX_GS_;
goto do_next_byte;
case PREFIX_DS:
override->segment = PREFIX_DS_;
goto do_next_byte;
/* lock is not a valid prefix for FPU instructions,
let the cpu handle it to generate a SIGILL. */
/* case PREFIX_LOCK: */
/* rep.. prefixes have no meaning for FPU instructions */
case PREFIX_REPE:
case PREFIX_REPNE:
do_next_byte:
ip++;
RE_ENTRANT_CHECK_OFF;
FPU_code_access_ok(1);
FPU_get_user(byte, ip);
RE_ENTRANT_CHECK_ON;
break;
case FWAIT_OPCODE:
*Byte = byte;
return 1;
default:
if ((byte & 0xf8) == 0xd8) {
*Byte = byte;
*fpu_eip = ip;
return 1;
} else {
/* Not a valid sequence of prefix bytes followed by
an FPU instruction. */
*Byte = byte; /* Needed for error message. */
return 0;
}
}
}
}
void math_abort(struct math_emu_info *info, unsigned int signal)
{
FPU_EIP = FPU_ORIG_EIP;
current->thread.trap_nr = X86_TRAP_MF;
current->thread.error_code = 0;
send_sig(signal, current, 1);
RE_ENTRANT_CHECK_OFF;
__asm__("movl %0,%%esp ; ret": :"g"(((long)info) - 4));
#ifdef PARANOID
printk("ERROR: wm-FPU-emu math_abort failed!\n");
#endif /* PARANOID */
}
#define S387 ((struct swregs_state *)s387)
#define sstatus_word() \
((S387->swd & ~SW_Top & 0xffff) | ((S387->ftop << SW_Top_Shift) & SW_Top))
int fpregs_soft_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
struct swregs_state *s387 = &target->thread.fpu.fpstate->regs.soft;
void *space = s387->st_space;
int ret;
int offset, other, i, tags, regnr, tag, newtop;
RE_ENTRANT_CHECK_OFF;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, s387, 0,
offsetof(struct swregs_state, st_space));
RE_ENTRANT_CHECK_ON;
if (ret)
return ret;
S387->ftop = (S387->swd >> SW_Top_Shift) & 7;
offset = (S387->ftop & 7) * 10;
other = 80 - offset;
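	/* The user buffer holds the registers in stack order, st(0)
	   first.  With ftop = 2, for example, the 60 bytes for physical
	   registers 2..7 are copied first, then the 20 bytes for
	   registers 0..1. */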
RE_ENTRANT_CHECK_OFF;
/* Copy all registers in stack order. */
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
space + offset, 0, other);
if (!ret && offset)
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
space, 0, offset);
RE_ENTRANT_CHECK_ON;
/* The tags may need to be corrected now. */
tags = S387->twd;
newtop = S387->ftop;
for (i = 0; i < 8; i++) {
regnr = (i + newtop) & 7;
if (((tags >> ((regnr & 7) * 2)) & 3) != TAG_Empty) {
/* The loaded data over-rides all other cases. */
tag =
FPU_tagof((FPU_REG *) ((u_char *) S387->st_space +
10 * regnr));
tags &= ~(3 << (regnr * 2));
tags |= (tag & 3) << (regnr * 2);
}
}
S387->twd = tags;
return ret;
}
int fpregs_soft_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
struct swregs_state *s387 = &target->thread.fpu.fpstate->regs.soft;
const void *space = s387->st_space;
int offset = (S387->ftop & 7) * 10, other = 80 - offset;
RE_ENTRANT_CHECK_OFF;
#ifdef PECULIAR_486
S387->cwd &= ~0xe080;
/* An 80486 sets nearly all of the reserved bits to 1. */
S387->cwd |= 0xffff0040;
S387->swd = sstatus_word() | 0xffff0000;
S387->twd |= 0xffff0000;
S387->fcs &= ~0xf8000000;
S387->fos |= 0xffff0000;
#endif /* PECULIAR_486 */
membuf_write(&to, s387, offsetof(struct swregs_state, st_space));
membuf_write(&to, space + offset, other);
membuf_write(&to, space, offset);
RE_ENTRANT_CHECK_ON;
return 0;
}
| linux-master | arch/x86/math-emu/fpu_entry.c |
// SPDX-License-Identifier: GPL-2.0
/*---------------------------------------------------------------------------+
| fpu_trig.c |
| |
| Implementation of the FPU "transcendental" functions. |
| |
| Copyright (C) 1992,1993,1994,1997,1999 |
| W. Metzenthen, 22 Parker St, Ormond, Vic 3163, |
| Australia. E-mail [email protected] |
| |
| |
+---------------------------------------------------------------------------*/
#include "fpu_system.h"
#include "exception.h"
#include "fpu_emu.h"
#include "status_w.h"
#include "control_w.h"
#include "reg_constant.h"
static void rem_kernel(unsigned long long st0, unsigned long long *y,
unsigned long long st1, unsigned long long q, int n);
#define BETTER_THAN_486
#define FCOS 4
/* Used only by fptan, fsin, fcos, and fsincos. */
/* This routine produces very accurate results, similar to
using a value of pi with more than 128 bits precision. */
/* Limited measurements show no results worse than 64 bit precision
except for the results for arguments close to 2^63, where the
precision of the result sometimes degrades to about 63.9 bits */
static int trig_arg(FPU_REG *st0_ptr, int even)
{
FPU_REG tmp;
u_char tmptag;
unsigned long long q;
int old_cw = control_word, saved_status = partial_status;
int tag, st0_tag = TAG_Valid;
if (exponent(st0_ptr) >= 63) {
partial_status |= SW_C2; /* Reduction incomplete. */
return -1;
}
control_word &= ~CW_RC;
control_word |= RC_CHOP;
setpositive(st0_ptr);
tag = FPU_u_div(st0_ptr, &CONST_PI2, &tmp, PR_64_BITS | RC_CHOP | 0x3f,
SIGN_POS);
FPU_round_to_int(&tmp, tag); /* Fortunately, this can't overflow
to 2^64 */
q = significand(&tmp);
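	/* Because RC_CHOP was selected above, q = floor(|st0| / (pi/2)),
	   the number of whole quadrants in the argument. */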
if (q) {
rem_kernel(significand(st0_ptr),
&significand(&tmp),
significand(&CONST_PI2),
q, exponent(st0_ptr) - exponent(&CONST_PI2));
setexponent16(&tmp, exponent(&CONST_PI2));
st0_tag = FPU_normalize(&tmp);
FPU_copy_to_reg0(&tmp, st0_tag);
}
if ((even && !(q & 1)) || (!even && (q & 1))) {
st0_tag =
FPU_sub(REV | LOADED | TAG_Valid, (int)&CONST_PI2,
FULL_PRECISION);
#ifdef BETTER_THAN_486
/* So far, the results are exact but based upon a 64 bit
precision approximation to pi/2. The technique used
now is equivalent to using an approximation to pi/2 which
is accurate to about 128 bits. */
if ((exponent(st0_ptr) <= exponent(&CONST_PI2extra) + 64)
|| (q > 1)) {
/* This code gives the effect of having pi/2 to better than
128 bits precision. */
significand(&tmp) = q + 1;
setexponent16(&tmp, 63);
FPU_normalize(&tmp);
tmptag =
FPU_u_mul(&CONST_PI2extra, &tmp, &tmp,
FULL_PRECISION, SIGN_POS,
exponent(&CONST_PI2extra) +
exponent(&tmp));
setsign(&tmp, getsign(&CONST_PI2extra));
st0_tag = FPU_add(&tmp, tmptag, 0, FULL_PRECISION);
if (signnegative(st0_ptr)) {
/* CONST_PI2extra is negative, so the result of the addition
can be negative. This means that the argument is actually
in a different quadrant. The correction is always < pi/2,
so it can't overflow into yet another quadrant. */
setpositive(st0_ptr);
q++;
}
}
#endif /* BETTER_THAN_486 */
}
#ifdef BETTER_THAN_486
else {
/* So far, the results are exact but based upon a 64 bit
precision approximation to pi/2. The technique used
now is equivalent to using an approximation to pi/2 which
is accurate to about 128 bits. */
if (((q > 0)
&& (exponent(st0_ptr) <= exponent(&CONST_PI2extra) + 64))
|| (q > 1)) {
			/* This code gives the effect of having pi/2 to better than
128 bits precision. */
significand(&tmp) = q;
setexponent16(&tmp, 63);
FPU_normalize(&tmp); /* This must return TAG_Valid */
tmptag =
FPU_u_mul(&CONST_PI2extra, &tmp, &tmp,
FULL_PRECISION, SIGN_POS,
exponent(&CONST_PI2extra) +
exponent(&tmp));
setsign(&tmp, getsign(&CONST_PI2extra));
st0_tag = FPU_sub(LOADED | (tmptag & 0x0f), (int)&tmp,
FULL_PRECISION);
if ((exponent(st0_ptr) == exponent(&CONST_PI2)) &&
((st0_ptr->sigh > CONST_PI2.sigh)
|| ((st0_ptr->sigh == CONST_PI2.sigh)
&& (st0_ptr->sigl > CONST_PI2.sigl)))) {
/* CONST_PI2extra is negative, so the result of the
subtraction can be larger than pi/2. This means
that the argument is actually in a different quadrant.
The correction is always < pi/2, so it can't overflow
into yet another quadrant. */
st0_tag =
FPU_sub(REV | LOADED | TAG_Valid,
(int)&CONST_PI2, FULL_PRECISION);
q++;
}
}
}
#endif /* BETTER_THAN_486 */
FPU_settag0(st0_tag);
control_word = old_cw;
partial_status = saved_status & ~SW_C2; /* Reduction complete. */
return (q & 3) | even;
}
/* Convert a long to register */
static void convert_l2reg(long const *arg, int deststnr)
{
int tag;
long num = *arg;
u_char sign;
FPU_REG *dest = &st(deststnr);
if (num == 0) {
FPU_copy_to_regi(&CONST_Z, TAG_Zero, deststnr);
return;
}
if (num > 0) {
sign = SIGN_POS;
} else {
num = -num;
sign = SIGN_NEG;
}
dest->sigh = num;
dest->sigl = 0;
setexponent16(dest, 31);
tag = FPU_normalize(dest);
FPU_settagi(deststnr, tag);
setsign(dest, sign);
return;
}
static void single_arg_error(FPU_REG *st0_ptr, u_char st0_tag)
{
if (st0_tag == TAG_Empty)
FPU_stack_underflow(); /* Puts a QNaN in st(0) */
else if (st0_tag == TW_NaN)
real_1op_NaN(st0_ptr); /* return with a NaN in st(0) */
#ifdef PARANOID
else
EXCEPTION(EX_INTERNAL | 0x0112);
#endif /* PARANOID */
}
static void single_arg_2_error(FPU_REG *st0_ptr, u_char st0_tag)
{
int isNaN;
switch (st0_tag) {
case TW_NaN:
isNaN = (exponent(st0_ptr) == EXP_OVER)
&& (st0_ptr->sigh & 0x80000000);
if (isNaN && !(st0_ptr->sigh & 0x40000000)) { /* Signaling ? */
EXCEPTION(EX_Invalid);
if (control_word & CW_Invalid) {
/* The masked response */
/* Convert to a QNaN */
st0_ptr->sigh |= 0x40000000;
push();
FPU_copy_to_reg0(st0_ptr, TAG_Special);
}
} else if (isNaN) {
/* A QNaN */
push();
FPU_copy_to_reg0(st0_ptr, TAG_Special);
} else {
/* pseudoNaN or other unsupported */
EXCEPTION(EX_Invalid);
if (control_word & CW_Invalid) {
/* The masked response */
FPU_copy_to_reg0(&CONST_QNaN, TAG_Special);
push();
FPU_copy_to_reg0(&CONST_QNaN, TAG_Special);
}
}
break; /* return with a NaN in st(0) */
#ifdef PARANOID
default:
EXCEPTION(EX_INTERNAL | 0x0112);
#endif /* PARANOID */
}
}
/*---------------------------------------------------------------------------*/
static void f2xm1(FPU_REG *st0_ptr, u_char tag)
{
FPU_REG a;
clear_C1();
if (tag == TAG_Valid) {
/* For an 80486 FPU, the result is undefined if the arg is >= 1.0 */
if (exponent(st0_ptr) < 0) {
denormal_arg:
FPU_to_exp16(st0_ptr, &a);
/* poly_2xm1(x) requires 0 < st(0) < 1. */
poly_2xm1(getsign(st0_ptr), &a, st0_ptr);
}
set_precision_flag_up(); /* 80486 appears to always do this */
return;
}
if (tag == TAG_Zero)
return;
if (tag == TAG_Special)
tag = FPU_Special(st0_ptr);
switch (tag) {
case TW_Denormal:
if (denormal_operand() < 0)
return;
goto denormal_arg;
case TW_Infinity:
if (signnegative(st0_ptr)) {
/* -infinity gives -1 (p16-10) */
FPU_copy_to_reg0(&CONST_1, TAG_Valid);
setnegative(st0_ptr);
}
return;
default:
single_arg_error(st0_ptr, tag);
}
}
static void fptan(FPU_REG *st0_ptr, u_char st0_tag)
{
FPU_REG *st_new_ptr;
int q;
u_char arg_sign = getsign(st0_ptr);
/* Stack underflow has higher priority */
if (st0_tag == TAG_Empty) {
FPU_stack_underflow(); /* Puts a QNaN in st(0) */
if (control_word & CW_Invalid) {
st_new_ptr = &st(-1);
push();
FPU_stack_underflow(); /* Puts a QNaN in the new st(0) */
}
return;
}
if (STACK_OVERFLOW) {
FPU_stack_overflow();
return;
}
if (st0_tag == TAG_Valid) {
if (exponent(st0_ptr) > -40) {
if ((q = trig_arg(st0_ptr, 0)) == -1) {
/* Operand is out of range */
return;
}
poly_tan(st0_ptr);
setsign(st0_ptr, (q & 1) ^ (arg_sign != 0));
set_precision_flag_up(); /* We do not really know if up or down */
} else {
/* For a small arg, the result == the argument */
/* Underflow may happen */
denormal_arg:
FPU_to_exp16(st0_ptr, st0_ptr);
st0_tag =
FPU_round(st0_ptr, 1, 0, FULL_PRECISION, arg_sign);
FPU_settag0(st0_tag);
}
push();
FPU_copy_to_reg0(&CONST_1, TAG_Valid);
return;
}
if (st0_tag == TAG_Zero) {
push();
FPU_copy_to_reg0(&CONST_1, TAG_Valid);
setcc(0);
return;
}
if (st0_tag == TAG_Special)
st0_tag = FPU_Special(st0_ptr);
if (st0_tag == TW_Denormal) {
if (denormal_operand() < 0)
return;
goto denormal_arg;
}
if (st0_tag == TW_Infinity) {
/* The 80486 treats infinity as an invalid operand */
if (arith_invalid(0) >= 0) {
st_new_ptr = &st(-1);
push();
arith_invalid(0);
}
return;
}
single_arg_2_error(st0_ptr, st0_tag);
}
static void fxtract(FPU_REG *st0_ptr, u_char st0_tag)
{
FPU_REG *st_new_ptr;
u_char sign;
register FPU_REG *st1_ptr = st0_ptr; /* anticipate */
if (STACK_OVERFLOW) {
FPU_stack_overflow();
return;
}
clear_C1();
if (st0_tag == TAG_Valid) {
long e;
push();
sign = getsign(st1_ptr);
reg_copy(st1_ptr, st_new_ptr);
setexponent16(st_new_ptr, exponent(st_new_ptr));
denormal_arg:
e = exponent16(st_new_ptr);
convert_l2reg(&e, 1);
setexponentpos(st_new_ptr, 0);
setsign(st_new_ptr, sign);
FPU_settag0(TAG_Valid); /* Needed if arg was a denormal */
return;
} else if (st0_tag == TAG_Zero) {
sign = getsign(st0_ptr);
if (FPU_divide_by_zero(0, SIGN_NEG) < 0)
return;
push();
FPU_copy_to_reg0(&CONST_Z, TAG_Zero);
setsign(st_new_ptr, sign);
return;
}
if (st0_tag == TAG_Special)
st0_tag = FPU_Special(st0_ptr);
if (st0_tag == TW_Denormal) {
if (denormal_operand() < 0)
return;
push();
sign = getsign(st1_ptr);
FPU_to_exp16(st1_ptr, st_new_ptr);
goto denormal_arg;
} else if (st0_tag == TW_Infinity) {
sign = getsign(st0_ptr);
setpositive(st0_ptr);
push();
FPU_copy_to_reg0(&CONST_INF, TAG_Special);
setsign(st_new_ptr, sign);
return;
} else if (st0_tag == TW_NaN) {
if (real_1op_NaN(st0_ptr) < 0)
return;
push();
FPU_copy_to_reg0(st0_ptr, TAG_Special);
return;
} else if (st0_tag == TAG_Empty) {
/* Is this the correct behaviour? */
if (control_word & EX_Invalid) {
FPU_stack_underflow();
push();
FPU_stack_underflow();
} else
EXCEPTION(EX_StackUnder);
}
#ifdef PARANOID
else
EXCEPTION(EX_INTERNAL | 0x119);
#endif /* PARANOID */
}
static void fdecstp(void)
{
clear_C1();
top--;
}
static void fincstp(void)
{
clear_C1();
top++;
}
static void fsqrt_(FPU_REG *st0_ptr, u_char st0_tag)
{
int expon;
clear_C1();
if (st0_tag == TAG_Valid) {
u_char tag;
if (signnegative(st0_ptr)) {
arith_invalid(0); /* sqrt(negative) is invalid */
return;
}
/* make st(0) in [1.0 .. 4.0) */
expon = exponent(st0_ptr);
denormal_arg:
setexponent16(st0_ptr, (expon & 1));
/* Do the computation, the sign of the result will be positive. */
tag = wm_sqrt(st0_ptr, 0, 0, control_word, SIGN_POS);
addexponent(st0_ptr, expon >> 1);
FPU_settag0(tag);
return;
}
if (st0_tag == TAG_Zero)
return;
if (st0_tag == TAG_Special)
st0_tag = FPU_Special(st0_ptr);
if (st0_tag == TW_Infinity) {
if (signnegative(st0_ptr))
arith_invalid(0); /* sqrt(-Infinity) is invalid */
return;
} else if (st0_tag == TW_Denormal) {
if (signnegative(st0_ptr)) {
arith_invalid(0); /* sqrt(negative) is invalid */
return;
}
if (denormal_operand() < 0)
return;
FPU_to_exp16(st0_ptr, st0_ptr);
expon = exponent16(st0_ptr);
goto denormal_arg;
}
single_arg_error(st0_ptr, st0_tag);
}
static void frndint_(FPU_REG *st0_ptr, u_char st0_tag)
{
int flags, tag;
if (st0_tag == TAG_Valid) {
u_char sign;
denormal_arg:
sign = getsign(st0_ptr);
if (exponent(st0_ptr) > 63)
return;
if (st0_tag == TW_Denormal) {
if (denormal_operand() < 0)
return;
}
/* Fortunately, this can't overflow to 2^64 */
if ((flags = FPU_round_to_int(st0_ptr, st0_tag)))
set_precision_flag(flags);
setexponent16(st0_ptr, 63);
tag = FPU_normalize(st0_ptr);
setsign(st0_ptr, sign);
FPU_settag0(tag);
return;
}
if (st0_tag == TAG_Zero)
return;
if (st0_tag == TAG_Special)
st0_tag = FPU_Special(st0_ptr);
if (st0_tag == TW_Denormal)
goto denormal_arg;
else if (st0_tag == TW_Infinity)
return;
else
single_arg_error(st0_ptr, st0_tag);
}
static int f_sin(FPU_REG *st0_ptr, u_char tag)
{
u_char arg_sign = getsign(st0_ptr);
if (tag == TAG_Valid) {
int q;
if (exponent(st0_ptr) > -40) {
if ((q = trig_arg(st0_ptr, 0)) == -1) {
/* Operand is out of range */
return 1;
}
poly_sine(st0_ptr);
if (q & 2)
changesign(st0_ptr);
setsign(st0_ptr, getsign(st0_ptr) ^ arg_sign);
/* We do not really know if up or down */
set_precision_flag_up();
return 0;
} else {
/* For a small arg, the result == the argument */
set_precision_flag_up(); /* Must be up. */
return 0;
}
}
if (tag == TAG_Zero) {
setcc(0);
return 0;
}
if (tag == TAG_Special)
tag = FPU_Special(st0_ptr);
if (tag == TW_Denormal) {
if (denormal_operand() < 0)
return 1;
/* For a small arg, the result == the argument */
/* Underflow may happen */
FPU_to_exp16(st0_ptr, st0_ptr);
tag = FPU_round(st0_ptr, 1, 0, FULL_PRECISION, arg_sign);
FPU_settag0(tag);
return 0;
} else if (tag == TW_Infinity) {
/* The 80486 treats infinity as an invalid operand */
arith_invalid(0);
return 1;
} else {
single_arg_error(st0_ptr, tag);
return 1;
}
}
static void fsin(FPU_REG *st0_ptr, u_char tag)
{
f_sin(st0_ptr, tag);
}
static int f_cos(FPU_REG *st0_ptr, u_char tag)
{
u_char st0_sign;
st0_sign = getsign(st0_ptr);
if (tag == TAG_Valid) {
int q;
if (exponent(st0_ptr) > -40) {
if ((exponent(st0_ptr) < 0)
|| ((exponent(st0_ptr) == 0)
&& (significand(st0_ptr) <=
0xc90fdaa22168c234LL))) {
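				/* 0xc90fdaa22168c234 is the 64-bit
				   significand of pi/2, so this branch
				   covers |st0| <= pi/2, where cos can be
				   computed without argument reduction. */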
poly_cos(st0_ptr);
/* We do not really know if up or down */
set_precision_flag_down();
return 0;
} else if ((q = trig_arg(st0_ptr, FCOS)) != -1) {
poly_sine(st0_ptr);
if ((q + 1) & 2)
changesign(st0_ptr);
/* We do not really know if up or down */
set_precision_flag_down();
return 0;
} else {
/* Operand is out of range */
return 1;
}
} else {
denormal_arg:
setcc(0);
FPU_copy_to_reg0(&CONST_1, TAG_Valid);
#ifdef PECULIAR_486
set_precision_flag_down(); /* 80486 appears to do this. */
#else
set_precision_flag_up(); /* Must be up. */
#endif /* PECULIAR_486 */
return 0;
}
} else if (tag == TAG_Zero) {
FPU_copy_to_reg0(&CONST_1, TAG_Valid);
setcc(0);
return 0;
}
if (tag == TAG_Special)
tag = FPU_Special(st0_ptr);
if (tag == TW_Denormal) {
if (denormal_operand() < 0)
return 1;
goto denormal_arg;
} else if (tag == TW_Infinity) {
/* The 80486 treats infinity as an invalid operand */
arith_invalid(0);
return 1;
} else {
single_arg_error(st0_ptr, tag); /* requires st0_ptr == &st(0) */
return 1;
}
}
static void fcos(FPU_REG *st0_ptr, u_char st0_tag)
{
f_cos(st0_ptr, st0_tag);
}
static void fsincos(FPU_REG *st0_ptr, u_char st0_tag)
{
FPU_REG *st_new_ptr;
FPU_REG arg;
u_char tag;
/* Stack underflow has higher priority */
if (st0_tag == TAG_Empty) {
FPU_stack_underflow(); /* Puts a QNaN in st(0) */
if (control_word & CW_Invalid) {
st_new_ptr = &st(-1);
push();
FPU_stack_underflow(); /* Puts a QNaN in the new st(0) */
}
return;
}
if (STACK_OVERFLOW) {
FPU_stack_overflow();
return;
}
if (st0_tag == TAG_Special)
tag = FPU_Special(st0_ptr);
else
tag = st0_tag;
if (tag == TW_NaN) {
single_arg_2_error(st0_ptr, TW_NaN);
return;
} else if (tag == TW_Infinity) {
/* The 80486 treats infinity as an invalid operand */
if (arith_invalid(0) >= 0) {
/* Masked response */
push();
arith_invalid(0);
}
return;
}
reg_copy(st0_ptr, &arg);
if (!f_sin(st0_ptr, st0_tag)) {
push();
FPU_copy_to_reg0(&arg, st0_tag);
f_cos(&st(0), st0_tag);
} else {
/* An error, so restore st(0) */
FPU_copy_to_reg0(&arg, st0_tag);
}
}
/*---------------------------------------------------------------------------*/
/* The following all require two arguments: st(0) and st(1) */
/* A lean, mean kernel for the fprem instructions. This relies upon
the division and rounding to an integer in do_fprem giving an
exact result. Because of this, rem_kernel() needs to deal only with
the least significant 64 bits, the more significant bits of the
result must be zero.
*/
static void rem_kernel(unsigned long long st0, unsigned long long *y,
unsigned long long st1, unsigned long long q, int n)
{
int dummy;
unsigned long long x;
x = st0 << n;
/* Do the required multiplication and subtraction in the one operation */
/* lsw x -= lsw st1 * lsw q */
asm volatile ("mull %4; subl %%eax,%0; sbbl %%edx,%1":"=m"
(((unsigned *)&x)[0]), "=m"(((unsigned *)&x)[1]),
"=a"(dummy)
:"2"(((unsigned *)&st1)[0]), "m"(((unsigned *)&q)[0])
:"%dx");
/* msw x -= msw st1 * lsw q */
asm volatile ("mull %3; subl %%eax,%0":"=m" (((unsigned *)&x)[1]),
"=a"(dummy)
:"1"(((unsigned *)&st1)[1]), "m"(((unsigned *)&q)[0])
:"%dx");
/* msw x -= lsw st1 * msw q */
asm volatile ("mull %3; subl %%eax,%0":"=m" (((unsigned *)&x)[1]),
"=a"(dummy)
:"1"(((unsigned *)&st1)[0]), "m"(((unsigned *)&q)[1])
:"%dx");
*y = x;
}
/* Remainder of st(0) / st(1) */
/* This routine produces exact results, i.e. there is never any
rounding or truncation, etc of the result. */
static void do_fprem(FPU_REG *st0_ptr, u_char st0_tag, int round)
{
FPU_REG *st1_ptr = &st(1);
u_char st1_tag = FPU_gettagi(1);
if (!((st0_tag ^ TAG_Valid) | (st1_tag ^ TAG_Valid))) {
FPU_REG tmp, st0, st1;
u_char st0_sign, st1_sign;
u_char tmptag;
int tag;
int old_cw;
int expdif;
long long q;
unsigned short saved_status;
int cc;
fprem_valid:
/* Convert registers for internal use. */
st0_sign = FPU_to_exp16(st0_ptr, &st0);
st1_sign = FPU_to_exp16(st1_ptr, &st1);
expdif = exponent16(&st0) - exponent16(&st1);
old_cw = control_word;
cc = 0;
/* We want the status following the denorm tests, but don't want
the status changed by the arithmetic operations. */
saved_status = partial_status;
control_word &= ~CW_RC;
control_word |= RC_CHOP;
if (expdif < 64) {
/* This should be the most common case */
if (expdif > -2) {
u_char sign = st0_sign ^ st1_sign;
tag = FPU_u_div(&st0, &st1, &tmp,
PR_64_BITS | RC_CHOP | 0x3f,
sign);
setsign(&tmp, sign);
if (exponent(&tmp) >= 0) {
FPU_round_to_int(&tmp, tag); /* Fortunately, this can't
overflow to 2^64 */
q = significand(&tmp);
rem_kernel(significand(&st0),
&significand(&tmp),
significand(&st1),
q, expdif);
setexponent16(&tmp, exponent16(&st1));
} else {
reg_copy(&st0, &tmp);
q = 0;
}
if ((round == RC_RND)
&& (tmp.sigh & 0xc0000000)) {
/* We may need to subtract st(1) once more,
to get a result <= 1/2 of st(1). */
unsigned long long x;
expdif =
exponent16(&st1) - exponent16(&tmp);
if (expdif <= 1) {
if (expdif == 0)
x = significand(&st1) -
significand(&tmp);
else /* expdif is 1 */
x = (significand(&st1)
<< 1) -
significand(&tmp);
if ((x < significand(&tmp)) ||
/* or equi-distant (from 0 & st(1)) and q is odd */
((x == significand(&tmp))
&& (q & 1))) {
st0_sign = !st0_sign;
significand(&tmp) = x;
q++;
}
}
}
if (q & 4)
cc |= SW_C0;
if (q & 2)
cc |= SW_C3;
if (q & 1)
cc |= SW_C1;
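				/* fprem reports the low three bits of
				   the quotient in the status word:
				   q bit 2 in C0, bit 1 in C3, bit 0
				   in C1. */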
} else {
control_word = old_cw;
setcc(0);
return;
}
} else {
/* There is a large exponent difference ( >= 64 ) */
/* To make much sense, the code in this section should
be done at high precision. */
int exp_1, N;
u_char sign;
/* prevent overflow here */
/* N is 'a number between 32 and 63' (p26-113) */
reg_copy(&st0, &tmp);
tmptag = st0_tag;
N = (expdif & 0x0000001f) + 32; /* This choice gives results
identical to an AMD 486 */
setexponent16(&tmp, N);
exp_1 = exponent16(&st1);
setexponent16(&st1, 0);
expdif -= N;
sign = getsign(&tmp) ^ st1_sign;
tag =
FPU_u_div(&tmp, &st1, &tmp,
PR_64_BITS | RC_CHOP | 0x3f, sign);
setsign(&tmp, sign);
FPU_round_to_int(&tmp, tag); /* Fortunately, this can't
overflow to 2^64 */
rem_kernel(significand(&st0),
&significand(&tmp),
significand(&st1),
significand(&tmp), exponent(&tmp)
);
setexponent16(&tmp, exp_1 + expdif);
/* It is possible for the operation to be complete here.
What does the IEEE standard say? The Intel 80486 manual
implies that the operation will never be completed at this
point, and the behaviour of a real 80486 confirms this.
*/
if (!(tmp.sigh | tmp.sigl)) {
/* The result is zero */
control_word = old_cw;
partial_status = saved_status;
FPU_copy_to_reg0(&CONST_Z, TAG_Zero);
setsign(&st0, st0_sign);
#ifdef PECULIAR_486
setcc(SW_C2);
#else
setcc(0);
#endif /* PECULIAR_486 */
return;
}
cc = SW_C2;
}
control_word = old_cw;
partial_status = saved_status;
tag = FPU_normalize_nuo(&tmp);
reg_copy(&tmp, st0_ptr);
/* The only condition to be looked for is underflow,
and it can occur here only if underflow is unmasked. */
if ((exponent16(&tmp) <= EXP_UNDER) && (tag != TAG_Zero)
&& !(control_word & CW_Underflow)) {
setcc(cc);
tag = arith_underflow(st0_ptr);
setsign(st0_ptr, st0_sign);
FPU_settag0(tag);
return;
} else if ((exponent16(&tmp) > EXP_UNDER) || (tag == TAG_Zero)) {
stdexp(st0_ptr);
setsign(st0_ptr, st0_sign);
} else {
tag =
FPU_round(st0_ptr, 0, 0, FULL_PRECISION, st0_sign);
}
FPU_settag0(tag);
setcc(cc);
return;
}
if (st0_tag == TAG_Special)
st0_tag = FPU_Special(st0_ptr);
if (st1_tag == TAG_Special)
st1_tag = FPU_Special(st1_ptr);
if (((st0_tag == TAG_Valid) && (st1_tag == TW_Denormal))
|| ((st0_tag == TW_Denormal) && (st1_tag == TAG_Valid))
|| ((st0_tag == TW_Denormal) && (st1_tag == TW_Denormal))) {
if (denormal_operand() < 0)
return;
goto fprem_valid;
} else if ((st0_tag == TAG_Empty) || (st1_tag == TAG_Empty)) {
FPU_stack_underflow();
return;
} else if (st0_tag == TAG_Zero) {
if (st1_tag == TAG_Valid) {
setcc(0);
return;
} else if (st1_tag == TW_Denormal) {
if (denormal_operand() < 0)
return;
setcc(0);
return;
} else if (st1_tag == TAG_Zero) {
arith_invalid(0);
return;
} /* fprem(?,0) always invalid */
else if (st1_tag == TW_Infinity) {
setcc(0);
return;
}
} else if ((st0_tag == TAG_Valid) || (st0_tag == TW_Denormal)) {
if (st1_tag == TAG_Zero) {
arith_invalid(0); /* fprem(Valid,Zero) is invalid */
return;
} else if (st1_tag != TW_NaN) {
if (((st0_tag == TW_Denormal)
|| (st1_tag == TW_Denormal))
&& (denormal_operand() < 0))
return;
if (st1_tag == TW_Infinity) {
/* fprem(Valid,Infinity) is o.k. */
setcc(0);
return;
}
}
} else if (st0_tag == TW_Infinity) {
if (st1_tag != TW_NaN) {
arith_invalid(0); /* fprem(Infinity,?) is invalid */
return;
}
}
/* One of the registers must contain a NaN if we got here. */
#ifdef PARANOID
if ((st0_tag != TW_NaN) && (st1_tag != TW_NaN))
EXCEPTION(EX_INTERNAL | 0x118);
#endif /* PARANOID */
real_2op_NaN(st1_ptr, st1_tag, 0, st1_ptr);
}
/* fyl2x: ST(1) <- ST(1) * log2(ST(0)); pop ST */
static void fyl2x(FPU_REG *st0_ptr, u_char st0_tag)
{
FPU_REG *st1_ptr = &st(1), exponent;
u_char st1_tag = FPU_gettagi(1);
u_char sign;
int e, tag;
clear_C1();
if ((st0_tag == TAG_Valid) && (st1_tag == TAG_Valid)) {
both_valid:
/* Both regs are Valid or Denormal */
if (signpositive(st0_ptr)) {
if (st0_tag == TW_Denormal)
FPU_to_exp16(st0_ptr, st0_ptr);
else
/* Convert st(0) for internal use. */
setexponent16(st0_ptr, exponent(st0_ptr));
if ((st0_ptr->sigh == 0x80000000)
&& (st0_ptr->sigl == 0)) {
/* Special case. The result can be precise. */
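				/* A significand of 0x8000000000000000
				   means st(0) is an exact power of two,
				   so log2(st(0)) is exactly its exponent
				   and the result is just that integer
				   times st(1). */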
u_char esign;
e = exponent16(st0_ptr);
if (e >= 0) {
exponent.sigh = e;
esign = SIGN_POS;
} else {
exponent.sigh = -e;
esign = SIGN_NEG;
}
exponent.sigl = 0;
setexponent16(&exponent, 31);
tag = FPU_normalize_nuo(&exponent);
stdexp(&exponent);
setsign(&exponent, esign);
tag =
FPU_mul(&exponent, tag, 1, FULL_PRECISION);
if (tag >= 0)
FPU_settagi(1, tag);
} else {
/* The usual case */
sign = getsign(st1_ptr);
if (st1_tag == TW_Denormal)
FPU_to_exp16(st1_ptr, st1_ptr);
else
/* Convert st(1) for internal use. */
setexponent16(st1_ptr,
exponent(st1_ptr));
poly_l2(st0_ptr, st1_ptr, sign);
}
} else {
/* negative */
if (arith_invalid(1) < 0)
return;
}
FPU_pop();
return;
}
if (st0_tag == TAG_Special)
st0_tag = FPU_Special(st0_ptr);
if (st1_tag == TAG_Special)
st1_tag = FPU_Special(st1_ptr);
if ((st0_tag == TAG_Empty) || (st1_tag == TAG_Empty)) {
FPU_stack_underflow_pop(1);
return;
} else if ((st0_tag <= TW_Denormal) && (st1_tag <= TW_Denormal)) {
if (st0_tag == TAG_Zero) {
if (st1_tag == TAG_Zero) {
/* Both args zero is invalid */
if (arith_invalid(1) < 0)
return;
} else {
u_char sign;
sign = getsign(st1_ptr) ^ SIGN_NEG;
if (FPU_divide_by_zero(1, sign) < 0)
return;
setsign(st1_ptr, sign);
}
} else if (st1_tag == TAG_Zero) {
/* st(1) contains zero, st(0) valid <> 0 */
/* Zero is the valid answer */
sign = getsign(st1_ptr);
if (signnegative(st0_ptr)) {
/* log(negative) */
if (arith_invalid(1) < 0)
return;
} else if ((st0_tag == TW_Denormal)
&& (denormal_operand() < 0))
return;
else {
if (exponent(st0_ptr) < 0)
sign ^= SIGN_NEG;
FPU_copy_to_reg1(&CONST_Z, TAG_Zero);
setsign(st1_ptr, sign);
}
} else {
/* One or both operands are denormals. */
if (denormal_operand() < 0)
return;
goto both_valid;
}
} else if ((st0_tag == TW_NaN) || (st1_tag == TW_NaN)) {
if (real_2op_NaN(st0_ptr, st0_tag, 1, st0_ptr) < 0)
return;
}
/* One or both arg must be an infinity */
else if (st0_tag == TW_Infinity) {
if ((signnegative(st0_ptr)) || (st1_tag == TAG_Zero)) {
/* log(-infinity) or 0*log(infinity) */
if (arith_invalid(1) < 0)
return;
} else {
u_char sign = getsign(st1_ptr);
if ((st1_tag == TW_Denormal)
&& (denormal_operand() < 0))
return;
FPU_copy_to_reg1(&CONST_INF, TAG_Special);
setsign(st1_ptr, sign);
}
}
/* st(1) must be infinity here */
else if (((st0_tag == TAG_Valid) || (st0_tag == TW_Denormal))
&& (signpositive(st0_ptr))) {
if (exponent(st0_ptr) >= 0) {
if ((exponent(st0_ptr) == 0) &&
(st0_ptr->sigh == 0x80000000) &&
(st0_ptr->sigl == 0)) {
/* st(0) holds 1.0 */
/* infinity*log(1) */
if (arith_invalid(1) < 0)
return;
}
/* else st(0) is positive and > 1.0 */
} else {
/* st(0) is positive and < 1.0 */
if ((st0_tag == TW_Denormal)
&& (denormal_operand() < 0))
return;
changesign(st1_ptr);
}
} else {
/* st(0) must be zero or negative */
if (st0_tag == TAG_Zero) {
/* This should be invalid, but a real 80486 is happy with it. */
#ifndef PECULIAR_486
sign = getsign(st1_ptr);
if (FPU_divide_by_zero(1, sign) < 0)
return;
#endif /* PECULIAR_486 */
changesign(st1_ptr);
} else if (arith_invalid(1) < 0) /* log(negative) */
return;
}
FPU_pop();
}
static void fpatan(FPU_REG *st0_ptr, u_char st0_tag)
{
FPU_REG *st1_ptr = &st(1);
u_char st1_tag = FPU_gettagi(1);
int tag;
clear_C1();
if (!((st0_tag ^ TAG_Valid) | (st1_tag ^ TAG_Valid))) {
valid_atan:
poly_atan(st0_ptr, st0_tag, st1_ptr, st1_tag);
FPU_pop();
return;
}
if (st0_tag == TAG_Special)
st0_tag = FPU_Special(st0_ptr);
if (st1_tag == TAG_Special)
st1_tag = FPU_Special(st1_ptr);
if (((st0_tag == TAG_Valid) && (st1_tag == TW_Denormal))
|| ((st0_tag == TW_Denormal) && (st1_tag == TAG_Valid))
|| ((st0_tag == TW_Denormal) && (st1_tag == TW_Denormal))) {
if (denormal_operand() < 0)
return;
goto valid_atan;
} else if ((st0_tag == TAG_Empty) || (st1_tag == TAG_Empty)) {
FPU_stack_underflow_pop(1);
return;
} else if ((st0_tag == TW_NaN) || (st1_tag == TW_NaN)) {
if (real_2op_NaN(st0_ptr, st0_tag, 1, st0_ptr) >= 0)
FPU_pop();
return;
} else if ((st0_tag == TW_Infinity) || (st1_tag == TW_Infinity)) {
u_char sign = getsign(st1_ptr);
if (st0_tag == TW_Infinity) {
if (st1_tag == TW_Infinity) {
if (signpositive(st0_ptr)) {
FPU_copy_to_reg1(&CONST_PI4, TAG_Valid);
} else {
setpositive(st1_ptr);
tag =
FPU_u_add(&CONST_PI4, &CONST_PI2,
st1_ptr, FULL_PRECISION,
SIGN_POS,
exponent(&CONST_PI4),
exponent(&CONST_PI2));
if (tag >= 0)
FPU_settagi(1, tag);
}
} else {
if ((st1_tag == TW_Denormal)
&& (denormal_operand() < 0))
return;
if (signpositive(st0_ptr)) {
FPU_copy_to_reg1(&CONST_Z, TAG_Zero);
setsign(st1_ptr, sign); /* An 80486 preserves the sign */
FPU_pop();
return;
} else {
FPU_copy_to_reg1(&CONST_PI, TAG_Valid);
}
}
} else {
/* st(1) is infinity, st(0) not infinity */
if ((st0_tag == TW_Denormal)
&& (denormal_operand() < 0))
return;
FPU_copy_to_reg1(&CONST_PI2, TAG_Valid);
}
setsign(st1_ptr, sign);
} else if (st1_tag == TAG_Zero) {
/* st(0) must be valid or zero */
u_char sign = getsign(st1_ptr);
if ((st0_tag == TW_Denormal) && (denormal_operand() < 0))
return;
if (signpositive(st0_ptr)) {
/* An 80486 preserves the sign */
FPU_pop();
return;
}
FPU_copy_to_reg1(&CONST_PI, TAG_Valid);
setsign(st1_ptr, sign);
} else if (st0_tag == TAG_Zero) {
/* st(1) must be TAG_Valid here */
u_char sign = getsign(st1_ptr);
if ((st1_tag == TW_Denormal) && (denormal_operand() < 0))
return;
FPU_copy_to_reg1(&CONST_PI2, TAG_Valid);
setsign(st1_ptr, sign);
}
#ifdef PARANOID
else
EXCEPTION(EX_INTERNAL | 0x125);
#endif /* PARANOID */
FPU_pop();
set_precision_flag_up(); /* We do not really know if up or down */
}
static void fprem(FPU_REG *st0_ptr, u_char st0_tag)
{
do_fprem(st0_ptr, st0_tag, RC_CHOP);
}
static void fprem1(FPU_REG *st0_ptr, u_char st0_tag)
{
do_fprem(st0_ptr, st0_tag, RC_RND);
}
static void fyl2xp1(FPU_REG *st0_ptr, u_char st0_tag)
{
u_char sign, sign1;
FPU_REG *st1_ptr = &st(1), a, b;
u_char st1_tag = FPU_gettagi(1);
clear_C1();
if (!((st0_tag ^ TAG_Valid) | (st1_tag ^ TAG_Valid))) {
valid_yl2xp1:
sign = getsign(st0_ptr);
sign1 = getsign(st1_ptr);
FPU_to_exp16(st0_ptr, &a);
FPU_to_exp16(st1_ptr, &b);
if (poly_l2p1(sign, sign1, &a, &b, st1_ptr))
return;
FPU_pop();
return;
}
if (st0_tag == TAG_Special)
st0_tag = FPU_Special(st0_ptr);
if (st1_tag == TAG_Special)
st1_tag = FPU_Special(st1_ptr);
if (((st0_tag == TAG_Valid) && (st1_tag == TW_Denormal))
|| ((st0_tag == TW_Denormal) && (st1_tag == TAG_Valid))
|| ((st0_tag == TW_Denormal) && (st1_tag == TW_Denormal))) {
if (denormal_operand() < 0)
return;
goto valid_yl2xp1;
} else if ((st0_tag == TAG_Empty) | (st1_tag == TAG_Empty)) {
FPU_stack_underflow_pop(1);
return;
} else if (st0_tag == TAG_Zero) {
switch (st1_tag) {
case TW_Denormal:
if (denormal_operand() < 0)
return;
fallthrough;
case TAG_Zero:
case TAG_Valid:
setsign(st0_ptr, getsign(st0_ptr) ^ getsign(st1_ptr));
FPU_copy_to_reg1(st0_ptr, st0_tag);
break;
case TW_Infinity:
/* Infinity*log(1) */
if (arith_invalid(1) < 0)
return;
break;
case TW_NaN:
if (real_2op_NaN(st0_ptr, st0_tag, 1, st0_ptr) < 0)
return;
break;
default:
#ifdef PARANOID
EXCEPTION(EX_INTERNAL | 0x116);
return;
#endif /* PARANOID */
break;
}
} else if ((st0_tag == TAG_Valid) || (st0_tag == TW_Denormal)) {
switch (st1_tag) {
case TAG_Zero:
if (signnegative(st0_ptr)) {
if (exponent(st0_ptr) >= 0) {
/* st(0) holds <= -1.0 */
#ifdef PECULIAR_486 /* Stupid 80486 doesn't worry about log(negative). */
changesign(st1_ptr);
#else
if (arith_invalid(1) < 0)
return;
#endif /* PECULIAR_486 */
} else if ((st0_tag == TW_Denormal)
&& (denormal_operand() < 0))
return;
else
changesign(st1_ptr);
} else if ((st0_tag == TW_Denormal)
&& (denormal_operand() < 0))
return;
break;
case TW_Infinity:
if (signnegative(st0_ptr)) {
if ((exponent(st0_ptr) >= 0) &&
!((st0_ptr->sigh == 0x80000000) &&
(st0_ptr->sigl == 0))) {
/* st(0) holds < -1.0 */
#ifdef PECULIAR_486 /* Stupid 80486 doesn't worry about log(negative). */
changesign(st1_ptr);
#else
if (arith_invalid(1) < 0)
return;
#endif /* PECULIAR_486 */
} else if ((st0_tag == TW_Denormal)
&& (denormal_operand() < 0))
return;
else
changesign(st1_ptr);
} else if ((st0_tag == TW_Denormal)
&& (denormal_operand() < 0))
return;
break;
case TW_NaN:
if (real_2op_NaN(st0_ptr, st0_tag, 1, st0_ptr) < 0)
return;
}
} else if (st0_tag == TW_NaN) {
if (real_2op_NaN(st0_ptr, st0_tag, 1, st0_ptr) < 0)
return;
} else if (st0_tag == TW_Infinity) {
if (st1_tag == TW_NaN) {
if (real_2op_NaN(st0_ptr, st0_tag, 1, st0_ptr) < 0)
return;
} else if (signnegative(st0_ptr)) {
#ifndef PECULIAR_486
/* This should have higher priority than denormals, but... */
if (arith_invalid(1) < 0) /* log(-infinity) */
return;
#endif /* PECULIAR_486 */
if ((st1_tag == TW_Denormal)
&& (denormal_operand() < 0))
return;
#ifdef PECULIAR_486
/* Denormal operands actually get higher priority */
if (arith_invalid(1) < 0) /* log(-infinity) */
return;
#endif /* PECULIAR_486 */
} else if (st1_tag == TAG_Zero) {
/* log(infinity) */
if (arith_invalid(1) < 0)
return;
}
/* st(1) must be valid here. */
else if ((st1_tag == TW_Denormal) && (denormal_operand() < 0))
return;
/* The Manual says that log(Infinity) is invalid, but a real
80486 sensibly says that it is o.k. */
else {
u_char sign = getsign(st1_ptr);
FPU_copy_to_reg1(&CONST_INF, TAG_Special);
setsign(st1_ptr, sign);
}
}
#ifdef PARANOID
else {
EXCEPTION(EX_INTERNAL | 0x117);
return;
}
#endif /* PARANOID */
FPU_pop();
return;
}
static void fscale(FPU_REG *st0_ptr, u_char st0_tag)
{
FPU_REG *st1_ptr = &st(1);
u_char st1_tag = FPU_gettagi(1);
int old_cw = control_word;
u_char sign = getsign(st0_ptr);
clear_C1();
if (!((st0_tag ^ TAG_Valid) | (st1_tag ^ TAG_Valid))) {
long scale;
FPU_REG tmp;
/* Convert register for internal use. */
setexponent16(st0_ptr, exponent(st0_ptr));
valid_scale:
if (exponent(st1_ptr) > 30) {
/* 2^31 is far too large, would require 2^(2^30) or 2^(-2^30) */
if (signpositive(st1_ptr)) {
EXCEPTION(EX_Overflow);
FPU_copy_to_reg0(&CONST_INF, TAG_Special);
} else {
EXCEPTION(EX_Underflow);
FPU_copy_to_reg0(&CONST_Z, TAG_Zero);
}
setsign(st0_ptr, sign);
return;
}
control_word &= ~CW_RC;
control_word |= RC_CHOP;
reg_copy(st1_ptr, &tmp);
FPU_round_to_int(&tmp, st1_tag); /* This can never overflow here */
control_word = old_cw;
scale = signnegative(st1_ptr) ? -tmp.sigl : tmp.sigl;
scale += exponent16(st0_ptr);
setexponent16(st0_ptr, scale);
/* Use FPU_round() to properly detect under/overflow etc */
FPU_round(st0_ptr, 0, 0, control_word, sign);
return;
}
if (st0_tag == TAG_Special)
st0_tag = FPU_Special(st0_ptr);
if (st1_tag == TAG_Special)
st1_tag = FPU_Special(st1_ptr);
if ((st0_tag == TAG_Valid) || (st0_tag == TW_Denormal)) {
switch (st1_tag) {
case TAG_Valid:
/* st(0) must be a denormal */
if ((st0_tag == TW_Denormal)
&& (denormal_operand() < 0))
return;
FPU_to_exp16(st0_ptr, st0_ptr); /* Will not be left on stack */
goto valid_scale;
case TAG_Zero:
if (st0_tag == TW_Denormal)
denormal_operand();
return;
case TW_Denormal:
denormal_operand();
return;
case TW_Infinity:
if ((st0_tag == TW_Denormal)
&& (denormal_operand() < 0))
return;
if (signpositive(st1_ptr))
FPU_copy_to_reg0(&CONST_INF, TAG_Special);
else
FPU_copy_to_reg0(&CONST_Z, TAG_Zero);
setsign(st0_ptr, sign);
return;
case TW_NaN:
real_2op_NaN(st1_ptr, st1_tag, 0, st0_ptr);
return;
}
} else if (st0_tag == TAG_Zero) {
switch (st1_tag) {
case TAG_Valid:
case TAG_Zero:
return;
case TW_Denormal:
denormal_operand();
return;
case TW_Infinity:
if (signpositive(st1_ptr))
arith_invalid(0); /* Zero scaled by +Infinity */
return;
case TW_NaN:
real_2op_NaN(st1_ptr, st1_tag, 0, st0_ptr);
return;
}
} else if (st0_tag == TW_Infinity) {
switch (st1_tag) {
case TAG_Valid:
case TAG_Zero:
return;
case TW_Denormal:
denormal_operand();
return;
case TW_Infinity:
if (signnegative(st1_ptr))
arith_invalid(0); /* Infinity scaled by -Infinity */
return;
case TW_NaN:
real_2op_NaN(st1_ptr, st1_tag, 0, st0_ptr);
return;
}
} else if (st0_tag == TW_NaN) {
if (st1_tag != TAG_Empty) {
real_2op_NaN(st1_ptr, st1_tag, 0, st0_ptr);
return;
}
}
#ifdef PARANOID
if (!((st0_tag == TAG_Empty) || (st1_tag == TAG_Empty))) {
EXCEPTION(EX_INTERNAL | 0x115);
return;
}
#endif
/* At least one of st(0), st(1) must be empty */
FPU_stack_underflow();
}
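/*
 * Worked example (editor's note, not in the original source): fscale
 * computes st(0) * 2^trunc(st(1)), with st(1) truncated toward zero via
 * the temporary RC_CHOP rounding mode above.  For st(0) = 3.0 and
 * st(1) = 4.7 the scale is 4 and the result is 3.0 * 2^4 = 48.0; for
 * st(1) = -1.2 the scale is -1 and the result is 1.5.
 */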
/*---------------------------------------------------------------------------*/
static FUNC_ST0 const trig_table_a[] = {
f2xm1, fyl2x, fptan, fpatan,
fxtract, fprem1, (FUNC_ST0) fdecstp, (FUNC_ST0) fincstp
};
void FPU_triga(void)
{
(trig_table_a[FPU_rm]) (&st(0), FPU_gettag0());
}
static FUNC_ST0 const trig_table_b[] = {
fprem, fyl2xp1, fsqrt_, fsincos, frndint_, fscale, fsin, fcos
};
void FPU_trigb(void)
{
(trig_table_b[FPU_rm]) (&st(0), FPU_gettag0());
}
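/*
 * Editor's note (illustrative, not in the original source): FPU_rm is the
 * low three bits of the modrm byte, so these tables decode the d9 f0-ff
 * opcode group: "d9 f0" (rm=0) reaches f2xm1 through trig_table_a, while
 * "d9 fa" (rm=2) and "d9 fe" (rm=6) reach fsqrt_ and fsin through
 * trig_table_b.
 */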
| linux-master | arch/x86/math-emu/fpu_trig.c |
// SPDX-License-Identifier: GPL-2.0
/*---------------------------------------------------------------------------+
| load_store.c |
| |
| This file contains most of the code to interpret the FPU instructions |
| which load and store from user memory. |
| |
| Copyright (C) 1992,1993,1994,1997 |
| W. Metzenthen, 22 Parker St, Ormond, Vic 3163, |
| Australia. E-mail [email protected] |
| |
| |
+---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------+
| Note: |
| The file contains code which accesses user memory. |
| Emulator static data may change when user memory is accessed, due to |
| other processes using the emulator while swapping is in progress. |
+---------------------------------------------------------------------------*/
#include <linux/uaccess.h>
#include "fpu_system.h"
#include "exception.h"
#include "fpu_emu.h"
#include "status_w.h"
#include "control_w.h"
#define _NONE_ 0 /* st0_ptr etc not needed */
#define _REG0_ 1 /* Will be storing st(0) */
#define _PUSH_ 3 /* Need to check for space to push onto stack */
#define _null_ 4 /* Function illegal or not implemented */
#define pop_0() { FPU_settag0(TAG_Empty); top++; }
/* index is a 5-bit value: (3-bit FPU_modrm.reg field | opcode[2,1]) */
static u_char const type_table[32] = {
_PUSH_, _PUSH_, _PUSH_, _PUSH_, /* /0: d9:fld f32, db:fild m32, dd:fld f64, df:fild m16 */
_null_, _REG0_, _REG0_, _REG0_, /* /1: d9:undef, db,dd,df:fisttp m32/64/16 */
_REG0_, _REG0_, _REG0_, _REG0_, /* /2: d9:fst f32, db:fist m32, dd:fst f64, df:fist m16 */
_REG0_, _REG0_, _REG0_, _REG0_, /* /3: d9:fstp f32, db:fistp m32, dd:fstp f64, df:fistp m16 */
_NONE_, _null_, _NONE_, _PUSH_,
_NONE_, _PUSH_, _null_, _PUSH_,
_NONE_, _null_, _NONE_, _REG0_,
_NONE_, _REG0_, _NONE_, _REG0_
};
u_char const data_sizes_16[32] = {
4, 4, 8, 2,
0, 4, 8, 2, /* /1: d9:undef, db,dd,df:fisttp */
4, 4, 8, 2,
4, 4, 8, 2,
14, 0, 94, 10, 2, 10, 0, 8,
14, 0, 94, 10, 2, 10, 2, 8
};
static u_char const data_sizes_32[32] = {
4, 4, 8, 2,
0, 4, 8, 2, /* /1: d9:undef, db,dd,df:fisttp */
4, 4, 8, 2,
4, 4, 8, 2,
28, 0, 108, 10, 2, 10, 0, 8,
28, 0, 108, 10, 2, 10, 2, 8
};
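/*
 * Illustrative decode example (editor's note, not in the original source):
 * the index is (reg << 2) | opcode[2,1].  "fstp m32real" is d9 /3 and
 * bits [2,1] of 0xd9 are 00, so type = (3 << 2) | 0 = 014 (octal);
 * "fild m64int" is df /5 and bits [2,1] of 0xdf are 11, so
 * type = (5 << 2) | 3 = 027.  Both match the case labels further down.
 */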
int FPU_load_store(u_char type, fpu_addr_modes addr_modes,
void __user * data_address)
{
FPU_REG loaded_data;
FPU_REG *st0_ptr;
u_char st0_tag = TAG_Empty; /* This is just to stop a gcc warning. */
u_char loaded_tag;
int sv_cw;
st0_ptr = NULL; /* Initialized just to stop compiler warnings. */
if (addr_modes.default_mode & PROTECTED) {
if (addr_modes.default_mode == SEG32) {
if (access_limit < data_sizes_32[type])
math_abort(FPU_info, SIGSEGV);
} else if (addr_modes.default_mode == PM16) {
if (access_limit < data_sizes_16[type])
math_abort(FPU_info, SIGSEGV);
}
#ifdef PARANOID
else
EXCEPTION(EX_INTERNAL | 0x140);
#endif /* PARANOID */
}
switch (type_table[type]) {
case _NONE_:
break;
case _REG0_:
st0_ptr = &st(0); /* Some of these instructions pop after
storing */
st0_tag = FPU_gettag0();
break;
case _PUSH_:
{
if (FPU_gettagi(-1) != TAG_Empty) {
FPU_stack_overflow();
return 0;
}
top--;
st0_ptr = &st(0);
}
break;
case _null_:
FPU_illegal();
return 0;
#ifdef PARANOID
default:
EXCEPTION(EX_INTERNAL | 0x141);
return 0;
#endif /* PARANOID */
}
switch (type) {
/* type is a 5-bit value: (3-bit FPU_modrm.reg field | opcode[2,1]) */
case 000: /* fld m32real (d9 /0) */
clear_C1();
loaded_tag =
FPU_load_single((float __user *)data_address, &loaded_data);
if ((loaded_tag == TAG_Special)
&& isNaN(&loaded_data)
&& (real_1op_NaN(&loaded_data) < 0)) {
top++;
break;
}
FPU_copy_to_reg0(&loaded_data, loaded_tag);
break;
case 001: /* fild m32int (db /0) */
clear_C1();
loaded_tag =
FPU_load_int32((long __user *)data_address, &loaded_data);
FPU_copy_to_reg0(&loaded_data, loaded_tag);
break;
case 002: /* fld m64real (dd /0) */
clear_C1();
loaded_tag =
FPU_load_double((double __user *)data_address,
&loaded_data);
if ((loaded_tag == TAG_Special)
&& isNaN(&loaded_data)
&& (real_1op_NaN(&loaded_data) < 0)) {
top++;
break;
}
FPU_copy_to_reg0(&loaded_data, loaded_tag);
break;
case 003: /* fild m16int (df /0) */
clear_C1();
loaded_tag =
FPU_load_int16((short __user *)data_address, &loaded_data);
FPU_copy_to_reg0(&loaded_data, loaded_tag);
break;
/* case 004: undefined (d9 /1) */
/* fisttp are enabled if CPUID(1).ECX(0) "sse3" is set */
case 005: /* fisttp m32int (db /1) */
clear_C1();
sv_cw = control_word;
control_word |= RC_CHOP;
if (FPU_store_int32
(st0_ptr, st0_tag, (long __user *)data_address))
pop_0(); /* pop only if the number was actually stored
(see the 80486 manual p16-28) */
control_word = sv_cw;
break;
case 006: /* fisttp m64int (dd /1) */
clear_C1();
sv_cw = control_word;
control_word |= RC_CHOP;
if (FPU_store_int64
(st0_ptr, st0_tag, (long long __user *)data_address))
pop_0(); /* pop only if the number was actually stored
(see the 80486 manual p16-28) */
control_word = sv_cw;
break;
case 007: /* fisttp m16int (df /1) */
clear_C1();
sv_cw = control_word;
control_word |= RC_CHOP;
if (FPU_store_int16
(st0_ptr, st0_tag, (short __user *)data_address))
pop_0(); /* pop only if the number was actually stored
(see the 80486 manual p16-28) */
control_word = sv_cw;
break;
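	/*
	 * Editor's note (illustrative, not in the original source): fisttp
	 * always truncates toward zero regardless of the current rounding
	 * mode, so the three cases above force RC_CHOP and then restore the
	 * saved control word.  With st(0) = -3.7, fisttp stores -3 even
	 * under round-to-nearest.
	 */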
case 010: /* fst m32real */
clear_C1();
FPU_store_single(st0_ptr, st0_tag,
(float __user *)data_address);
break;
case 011: /* fist m32int */
clear_C1();
FPU_store_int32(st0_ptr, st0_tag, (long __user *)data_address);
break;
case 012: /* fst m64real */
clear_C1();
FPU_store_double(st0_ptr, st0_tag,
(double __user *)data_address);
break;
case 013: /* fist m16int */
clear_C1();
FPU_store_int16(st0_ptr, st0_tag, (short __user *)data_address);
break;
case 014: /* fstp m32real */
clear_C1();
if (FPU_store_single
(st0_ptr, st0_tag, (float __user *)data_address))
pop_0(); /* pop only if the number was actually stored
(see the 80486 manual p16-28) */
break;
case 015: /* fistp m32int */
clear_C1();
if (FPU_store_int32
(st0_ptr, st0_tag, (long __user *)data_address))
pop_0(); /* pop only if the number was actually stored
(see the 80486 manual p16-28) */
break;
case 016: /* fstp m64real */
clear_C1();
if (FPU_store_double
(st0_ptr, st0_tag, (double __user *)data_address))
pop_0(); /* pop only if the number was actually stored
(see the 80486 manual p16-28) */
break;
case 017: /* fistp m16int */
clear_C1();
if (FPU_store_int16
(st0_ptr, st0_tag, (short __user *)data_address))
pop_0(); /* pop only if the number was actually stored
(see the 80486 manual p16-28) */
break;
case 020: /* fldenv m14/28byte */
fldenv(addr_modes, (u_char __user *) data_address);
/* Ensure that the values just loaded are not changed by
fix-up operations. */
return 1;
case 022: /* frstor m94/108byte */
FPU_frstor(addr_modes, (u_char __user *) data_address);
/* Ensure that the values just loaded are not changed by
fix-up operations. */
return 1;
case 023: /* fbld m80dec */
clear_C1();
loaded_tag = FPU_load_bcd((u_char __user *) data_address);
FPU_settag0(loaded_tag);
break;
case 024: /* fldcw */
RE_ENTRANT_CHECK_OFF;
FPU_access_ok(data_address, 2);
FPU_get_user(control_word,
(unsigned short __user *)data_address);
RE_ENTRANT_CHECK_ON;
if (partial_status & ~control_word & CW_Exceptions)
partial_status |= (SW_Summary | SW_Backward);
else
partial_status &= ~(SW_Summary | SW_Backward);
#ifdef PECULIAR_486
control_word |= 0x40; /* An 80486 appears to always set this bit */
#endif /* PECULIAR_486 */
return 1;
case 025: /* fld m80real */
clear_C1();
loaded_tag =
FPU_load_extended((long double __user *)data_address, 0);
FPU_settag0(loaded_tag);
break;
case 027: /* fild m64int */
clear_C1();
loaded_tag = FPU_load_int64((long long __user *)data_address);
if (loaded_tag == TAG_Error)
return 0;
FPU_settag0(loaded_tag);
break;
case 030: /* fstenv m14/28byte */
fstenv(addr_modes, (u_char __user *) data_address);
return 1;
case 032: /* fsave */
fsave(addr_modes, (u_char __user *) data_address);
return 1;
case 033: /* fbstp m80dec */
clear_C1();
if (FPU_store_bcd
(st0_ptr, st0_tag, (u_char __user *) data_address))
pop_0(); /* pop only if the number was actually stored
(see the 80486 manual p16-28) */
break;
case 034: /* fstcw m16int */
RE_ENTRANT_CHECK_OFF;
FPU_access_ok(data_address, 2);
FPU_put_user(control_word,
(unsigned short __user *)data_address);
RE_ENTRANT_CHECK_ON;
return 1;
case 035: /* fstp m80real */
clear_C1();
if (FPU_store_extended
(st0_ptr, st0_tag, (long double __user *)data_address))
pop_0(); /* pop only if the number was actually stored
(see the 80486 manual p16-28) */
break;
case 036: /* fstsw m2byte */
RE_ENTRANT_CHECK_OFF;
FPU_access_ok(data_address, 2);
FPU_put_user(status_word(),
(unsigned short __user *)data_address);
RE_ENTRANT_CHECK_ON;
return 1;
case 037: /* fistp m64int */
clear_C1();
if (FPU_store_int64
(st0_ptr, st0_tag, (long long __user *)data_address))
pop_0(); /* pop only if the number was actually stored
(see the 80486 manual p16-28) */
break;
}
return 0;
}
| linux-master | arch/x86/math-emu/load_store.c |
// SPDX-License-Identifier: GPL-2.0
/*---------------------------------------------------------------------------+
| fpu_aux.c |
| |
| Code to implement some of the FPU auxiliary instructions. |
| |
| Copyright (C) 1992,1993,1994,1997 |
| W. Metzenthen, 22 Parker St, Ormond, Vic 3163, Australia |
| E-mail [email protected] |
| |
| |
+---------------------------------------------------------------------------*/
#include "fpu_system.h"
#include "exception.h"
#include "fpu_emu.h"
#include "status_w.h"
#include "control_w.h"
static void fnop(void)
{
}
static void fclex(void)
{
partial_status &=
~(SW_Backward | SW_Summary | SW_Stack_Fault | SW_Precision |
SW_Underflow | SW_Overflow | SW_Zero_Div | SW_Denorm_Op |
SW_Invalid);
no_ip_update = 1;
}
/* Needs to be externally visible */
void fpstate_init_soft(struct swregs_state *soft)
{
struct address *oaddr, *iaddr;
memset(soft, 0, sizeof(*soft));
soft->cwd = 0x037f;
soft->swd = 0;
soft->ftop = 0; /* We don't keep top in the status word internally. */
soft->twd = 0xffff;
/* The behaviour is different from that detailed in
Section 15.1.6 of the Intel manual */
oaddr = (struct address *)&soft->foo;
oaddr->offset = 0;
oaddr->selector = 0;
iaddr = (struct address *)&soft->fip;
iaddr->offset = 0;
iaddr->selector = 0;
iaddr->opcode = 0;
soft->no_update = 1;
}
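/*
 * Editor's note (clarification, not in the original source): cwd = 0x037f
 * and twd = 0xffff are the architectural post-FNINIT values: all
 * exceptions masked, 64-bit precision, round-to-nearest, and every
 * register tagged empty.
 */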
void finit(void)
{
fpstate_init_soft(&current->thread.fpu.fpstate->regs.soft);
}
/*
* These are nops on the i387..
*/
#define feni fnop
#define fdisi fnop
#define fsetpm fnop
static FUNC const finit_table[] = {
feni, fdisi, fclex, finit,
fsetpm, FPU_illegal, FPU_illegal, FPU_illegal
};
void finit_(void)
{
(finit_table[FPU_rm]) ();
}
static void fstsw_ax(void)
{
*(short *)&FPU_EAX = status_word();
no_ip_update = 1;
}
static FUNC const fstsw_table[] = {
fstsw_ax, FPU_illegal, FPU_illegal, FPU_illegal,
FPU_illegal, FPU_illegal, FPU_illegal, FPU_illegal
};
void fstsw_(void)
{
(fstsw_table[FPU_rm]) ();
}
static FUNC const fp_nop_table[] = {
fnop, FPU_illegal, FPU_illegal, FPU_illegal,
FPU_illegal, FPU_illegal, FPU_illegal, FPU_illegal
};
void fp_nop(void)
{
(fp_nop_table[FPU_rm]) ();
}
void fld_i_(void)
{
FPU_REG *st_new_ptr;
int i;
u_char tag;
if (STACK_OVERFLOW) {
FPU_stack_overflow();
return;
}
/* fld st(i) */
i = FPU_rm;
if (NOT_EMPTY(i)) {
reg_copy(&st(i), st_new_ptr);
tag = FPU_gettagi(i);
push();
FPU_settag0(tag);
} else {
if (control_word & CW_Invalid) {
/* The masked response */
FPU_stack_underflow();
} else
EXCEPTION(EX_StackUnder);
}
}
void fxch_i(void)
{
/* fxch st(i) */
FPU_REG t;
int i = FPU_rm;
FPU_REG *st0_ptr = &st(0), *sti_ptr = &st(i);
long tag_word = fpu_tag_word;
int regnr = top & 7, regnri = ((regnr + i) & 7);
u_char st0_tag = (tag_word >> (regnr * 2)) & 3;
u_char sti_tag = (tag_word >> (regnri * 2)) & 3;
if (st0_tag == TAG_Empty) {
if (sti_tag == TAG_Empty) {
FPU_stack_underflow();
FPU_stack_underflow_i(i);
return;
}
if (control_word & CW_Invalid) {
/* Masked response */
FPU_copy_to_reg0(sti_ptr, sti_tag);
}
FPU_stack_underflow_i(i);
return;
}
if (sti_tag == TAG_Empty) {
if (control_word & CW_Invalid) {
/* Masked response */
FPU_copy_to_regi(st0_ptr, st0_tag, i);
}
FPU_stack_underflow();
return;
}
clear_C1();
reg_copy(st0_ptr, &t);
reg_copy(sti_ptr, st0_ptr);
reg_copy(&t, sti_ptr);
tag_word &= ~(3 << (regnr * 2)) & ~(3 << (regnri * 2));
tag_word |= (sti_tag << (regnr * 2)) | (st0_tag << (regnri * 2));
fpu_tag_word = tag_word;
}
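/*
 * Worked example (editor's note, not in the original source): fxch swaps
 * both the register contents (the three reg_copy() calls) and the two
 * 2-bit tag fields.  With top == 0 and i == 2, the tag bits at positions
 * 0-1 and 4-5 of fpu_tag_word are exchanged along with st(0) and st(2).
 */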
static void fcmovCC(void)
{
/* fcmovCC st(i) */
int i = FPU_rm;
FPU_REG *st0_ptr = &st(0);
FPU_REG *sti_ptr = &st(i);
long tag_word = fpu_tag_word;
int regnr = top & 7;
int regnri = (top + i) & 7;
u_char sti_tag = (tag_word >> (regnri * 2)) & 3;
if (sti_tag == TAG_Empty) {
FPU_stack_underflow();
clear_C1();
return;
}
reg_copy(sti_ptr, st0_ptr);
tag_word &= ~(3 << (regnr * 2));
tag_word |= (sti_tag << (regnr * 2));
fpu_tag_word = tag_word;
}
void fcmovb(void)
{
if (FPU_EFLAGS & X86_EFLAGS_CF)
fcmovCC();
}
void fcmove(void)
{
if (FPU_EFLAGS & X86_EFLAGS_ZF)
fcmovCC();
}
void fcmovbe(void)
{
if (FPU_EFLAGS & (X86_EFLAGS_CF|X86_EFLAGS_ZF))
fcmovCC();
}
void fcmovu(void)
{
if (FPU_EFLAGS & X86_EFLAGS_PF)
fcmovCC();
}
void fcmovnb(void)
{
if (!(FPU_EFLAGS & X86_EFLAGS_CF))
fcmovCC();
}
void fcmovne(void)
{
if (!(FPU_EFLAGS & X86_EFLAGS_ZF))
fcmovCC();
}
void fcmovnbe(void)
{
if (!(FPU_EFLAGS & (X86_EFLAGS_CF|X86_EFLAGS_ZF)))
fcmovCC();
}
void fcmovnu(void)
{
if (!(FPU_EFLAGS & X86_EFLAGS_PF))
fcmovCC();
}
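/*
 * Editor's note (summary, not in the original source): the eight wrappers
 * above test EFLAGS exactly like the integer CMOVcc conditions: b tests
 * CF, e tests ZF, be tests CF|ZF, u tests PF, and the n* forms are the
 * negations.  fcmovCC() itself copies st(i) into st(0) and rewrites
 * st(0)'s tag field.
 */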
void ffree_(void)
{
/* ffree st(i) */
FPU_settagi(FPU_rm, TAG_Empty);
}
void ffreep(void)
{
/* ffree st(i) + pop - unofficial code */
FPU_settagi(FPU_rm, TAG_Empty);
FPU_pop();
}
void fst_i_(void)
{
/* fst st(i) */
FPU_copy_to_regi(&st(0), FPU_gettag0(), FPU_rm);
}
void fstp_i(void)
{
/* fstp st(i) */
FPU_copy_to_regi(&st(0), FPU_gettag0(), FPU_rm);
FPU_pop();
}
| linux-master | arch/x86/math-emu/fpu_aux.c |
// SPDX-License-Identifier: GPL-2.0
/*---------------------------------------------------------------------------+
| poly_l2.c |
| |
| Compute the base 2 log of a FPU_REG, using a polynomial approximation. |
| |
| Copyright (C) 1992,1993,1994,1997 |
| W. Metzenthen, 22 Parker St, Ormond, Vic 3163, Australia |
| E-mail [email protected] |
| |
| |
+---------------------------------------------------------------------------*/
#include "exception.h"
#include "reg_constant.h"
#include "fpu_emu.h"
#include "fpu_system.h"
#include "control_w.h"
#include "poly.h"
static void log2_kernel(FPU_REG const *arg, u_char argsign,
Xsig * accum_result, long int *expon);
/*--- poly_l2() -------------------------------------------------------------+
| Base 2 logarithm by a polynomial approximation. |
+---------------------------------------------------------------------------*/
void poly_l2(FPU_REG *st0_ptr, FPU_REG *st1_ptr, u_char st1_sign)
{
long int exponent, expon, expon_expon;
Xsig accumulator, expon_accum, yaccum;
u_char sign, argsign;
FPU_REG x;
int tag;
exponent = exponent16(st0_ptr);
/* From st0_ptr, make a number > sqrt(2)/2 and < sqrt(2) */
if (st0_ptr->sigh > (unsigned)0xb504f334) {
/* Treat as sqrt(2)/2 < st0_ptr < 1 */
significand(&x) = -significand(st0_ptr);
setexponent16(&x, -1);
exponent++;
argsign = SIGN_NEG;
} else {
/* Treat as 1 <= st0_ptr < sqrt(2) */
x.sigh = st0_ptr->sigh - 0x80000000;
x.sigl = st0_ptr->sigl;
setexponent16(&x, 0);
argsign = SIGN_POS;
}
tag = FPU_normalize_nuo(&x);
if (tag == TAG_Zero) {
expon = 0;
accumulator.msw = accumulator.midw = accumulator.lsw = 0;
} else {
log2_kernel(&x, argsign, &accumulator, &expon);
}
if (exponent < 0) {
sign = SIGN_NEG;
exponent = -exponent;
} else
sign = SIGN_POS;
expon_accum.msw = exponent;
expon_accum.midw = expon_accum.lsw = 0;
if (exponent) {
expon_expon = 31 + norm_Xsig(&expon_accum);
shr_Xsig(&accumulator, expon_expon - expon);
if (sign ^ argsign)
negate_Xsig(&accumulator);
add_Xsig_Xsig(&accumulator, &expon_accum);
} else {
expon_expon = expon;
sign = argsign;
}
yaccum.lsw = 0;
XSIG_LL(yaccum) = significand(st1_ptr);
mul_Xsig_Xsig(&accumulator, &yaccum);
expon_expon += round_Xsig(&accumulator);
if (accumulator.msw == 0) {
FPU_copy_to_reg1(&CONST_Z, TAG_Zero);
return;
}
significand(st1_ptr) = XSIG_LL(accumulator);
setexponent16(st1_ptr, expon_expon + exponent16(st1_ptr) + 1);
tag = FPU_round(st1_ptr, 1, 0, FULL_PRECISION, sign ^ st1_sign);
FPU_settagi(1, tag);
set_precision_flag_up(); /* 80486 appears to always do this */
return;
}
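/*
 * Worked example (editor's note, not in the original source): the range
 * reduction above expresses st(0) as 2^exponent * m with
 * sqrt(2)/2 < m < sqrt(2), so log2(st(0)) = exponent + log2(m) and
 * |log2(m)| <= 1/2.  For st(0) = 10.0: m = 1.25, exponent = 3, and fyl2x
 * returns st(1) * (3 + log2(1.25)) ~= st(1) * 3.321928.
 */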
/*--- poly_l2p1() -----------------------------------------------------------+
| Base 2 logarithm by a polynomial approximation. |
| log2(x+1) |
+---------------------------------------------------------------------------*/
int poly_l2p1(u_char sign0, u_char sign1,
FPU_REG * st0_ptr, FPU_REG * st1_ptr, FPU_REG * dest)
{
u_char tag;
long int exponent;
Xsig accumulator, yaccum;
if (exponent16(st0_ptr) < 0) {
log2_kernel(st0_ptr, sign0, &accumulator, &exponent);
yaccum.lsw = 0;
XSIG_LL(yaccum) = significand(st1_ptr);
mul_Xsig_Xsig(&accumulator, &yaccum);
exponent += round_Xsig(&accumulator);
exponent += exponent16(st1_ptr) + 1;
if (exponent < EXP_WAY_UNDER)
exponent = EXP_WAY_UNDER;
significand(dest) = XSIG_LL(accumulator);
setexponent16(dest, exponent);
tag = FPU_round(dest, 1, 0, FULL_PRECISION, sign0 ^ sign1);
FPU_settagi(1, tag);
if (tag == TAG_Valid)
set_precision_flag_up(); /* 80486 appears to always do this */
} else {
/* The magnitude of st0_ptr is far too large. */
if (sign0 != SIGN_POS) {
/* Trying to get the log of a negative number. */
#ifdef PECULIAR_486 /* Stupid 80486 doesn't worry about log(negative). */
changesign(st1_ptr);
#else
if (arith_invalid(1) < 0)
return 1;
#endif /* PECULIAR_486 */
}
/* 80486 appears to do this */
if (sign0 == SIGN_NEG)
set_precision_flag_down();
else
set_precision_flag_up();
}
if (exponent(dest) <= EXP_UNDER)
EXCEPTION(EX_Underflow);
return 0;
}
#undef HIPOWER
#define HIPOWER 10
static const unsigned long long logterms[HIPOWER] = {
0x2a8eca5705fc2ef0LL,
0xf6384ee1d01febceLL,
0x093bb62877cdf642LL,
0x006985d8a9ec439bLL,
0x0005212c4f55a9c8LL,
0x00004326a16927f0LL,
0x0000038d1d80a0e7LL,
0x0000003141cc80c6LL,
0x00000002b1668c9fLL,
0x000000002c7a46aaLL
};
static const unsigned long leadterm = 0xb8000000;
/*--- log2_kernel() ---------------------------------------------------------+
| Base 2 logarithm by a polynomial approximation. |
| log2(x+1) |
+---------------------------------------------------------------------------*/
static void log2_kernel(FPU_REG const *arg, u_char argsign, Xsig *accum_result,
long int *expon)
{
long int exponent, adj;
unsigned long long Xsq;
Xsig accumulator, Numer, Denom, argSignif, arg_signif;
exponent = exponent16(arg);
Numer.lsw = Denom.lsw = 0;
XSIG_LL(Numer) = XSIG_LL(Denom) = significand(arg);
if (argsign == SIGN_POS) {
shr_Xsig(&Denom, 2 - (1 + exponent));
Denom.msw |= 0x80000000;
div_Xsig(&Numer, &Denom, &argSignif);
} else {
shr_Xsig(&Denom, 1 - (1 + exponent));
negate_Xsig(&Denom);
if (Denom.msw & 0x80000000) {
div_Xsig(&Numer, &Denom, &argSignif);
exponent++;
} else {
/* Denom must be 1.0 */
argSignif.lsw = Numer.lsw;
argSignif.midw = Numer.midw;
argSignif.msw = Numer.msw;
}
}
#ifndef PECULIAR_486
/* Should check here that |local_arg| is within the valid range */
if (exponent >= -2) {
if ((exponent > -2) || (argSignif.msw > (unsigned)0xafb0ccc0)) {
/* The argument is too large */
}
}
#endif /* PECULIAR_486 */
arg_signif.lsw = argSignif.lsw;
XSIG_LL(arg_signif) = XSIG_LL(argSignif);
adj = norm_Xsig(&argSignif);
accumulator.lsw = argSignif.lsw;
XSIG_LL(accumulator) = XSIG_LL(argSignif);
mul_Xsig_Xsig(&accumulator, &accumulator);
shr_Xsig(&accumulator, 2 * (-1 - (1 + exponent + adj)));
Xsq = XSIG_LL(accumulator);
if (accumulator.lsw & 0x80000000)
Xsq++;
accumulator.msw = accumulator.midw = accumulator.lsw = 0;
/* Do the basic fixed point polynomial evaluation */
polynomial_Xsig(&accumulator, &Xsq, logterms, HIPOWER - 1);
mul_Xsig_Xsig(&accumulator, &argSignif);
shr_Xsig(&accumulator, 6 - adj);
mul32_Xsig(&arg_signif, leadterm);
add_two_Xsig(&accumulator, &arg_signif, &exponent);
*expon = exponent + 1;
accum_result->lsw = accumulator.lsw;
accum_result->midw = accumulator.midw;
accum_result->msw = accumulator.msw;
}
| linux-master | arch/x86/math-emu/poly_l2.c |
// SPDX-License-Identifier: GPL-2.0
/*---------------------------------------------------------------------------+
| poly_sin.c |
| |
| Computation of an approximation of the sin function and the cosine |
| function by a polynomial. |
| |
| Copyright (C) 1992,1993,1994,1997,1999 |
| W. Metzenthen, 22 Parker St, Ormond, Vic 3163, Australia |
| E-mail [email protected] |
| |
| |
+---------------------------------------------------------------------------*/
#include "exception.h"
#include "reg_constant.h"
#include "fpu_emu.h"
#include "fpu_system.h"
#include "control_w.h"
#include "poly.h"
#define N_COEFF_P 4
#define N_COEFF_N 4
static const unsigned long long pos_terms_l[N_COEFF_P] = {
0xaaaaaaaaaaaaaaabLL,
0x00d00d00d00cf906LL,
0x000006b99159a8bbLL,
0x000000000d7392e6LL
};
static const unsigned long long neg_terms_l[N_COEFF_N] = {
0x2222222222222167LL,
0x0002e3bc74aab624LL,
0x0000000b09229062LL,
0x00000000000c7973LL
};
#define N_COEFF_PH 4
#define N_COEFF_NH 4
static const unsigned long long pos_terms_h[N_COEFF_PH] = {
0x0000000000000000LL,
0x05b05b05b05b0406LL,
0x000049f93edd91a9LL,
0x00000000c9c9ed62LL
};
static const unsigned long long neg_terms_h[N_COEFF_NH] = {
0xaaaaaaaaaaaaaa98LL,
0x001a01a01a019064LL,
0x0000008f76c68a77LL,
0x0000000000d58f5eLL
};
/*--- poly_sine() -----------------------------------------------------------+
| |
+---------------------------------------------------------------------------*/
void poly_sine(FPU_REG *st0_ptr)
{
int exponent, echange;
Xsig accumulator, argSqrd, argTo4;
unsigned long fix_up, adj;
unsigned long long fixed_arg;
FPU_REG result;
exponent = exponent(st0_ptr);
accumulator.lsw = accumulator.midw = accumulator.msw = 0;
/* Split into two ranges, for arguments below and above 1.0 */
/* The boundary between upper and lower is approx 0.88309101259 */
if ((exponent < -1)
|| ((exponent == -1) && (st0_ptr->sigh <= 0xe21240aa))) {
/* The argument is <= 0.88309101259 */
argSqrd.msw = st0_ptr->sigh;
argSqrd.midw = st0_ptr->sigl;
argSqrd.lsw = 0;
mul64_Xsig(&argSqrd, &significand(st0_ptr));
shr_Xsig(&argSqrd, 2 * (-1 - exponent));
argTo4.msw = argSqrd.msw;
argTo4.midw = argSqrd.midw;
argTo4.lsw = argSqrd.lsw;
mul_Xsig_Xsig(&argTo4, &argTo4);
polynomial_Xsig(&accumulator, &XSIG_LL(argTo4), neg_terms_l,
N_COEFF_N - 1);
mul_Xsig_Xsig(&accumulator, &argSqrd);
negate_Xsig(&accumulator);
polynomial_Xsig(&accumulator, &XSIG_LL(argTo4), pos_terms_l,
N_COEFF_P - 1);
shr_Xsig(&accumulator, 2); /* Divide by four */
accumulator.msw |= 0x80000000; /* Add 1.0 */
mul64_Xsig(&accumulator, &significand(st0_ptr));
mul64_Xsig(&accumulator, &significand(st0_ptr));
mul64_Xsig(&accumulator, &significand(st0_ptr));
/* Divide by four, FPU_REG compatible, etc */
exponent = 3 * exponent;
/* The minimum exponent difference is 3 */
shr_Xsig(&accumulator, exponent(st0_ptr) - exponent);
negate_Xsig(&accumulator);
XSIG_LL(accumulator) += significand(st0_ptr);
echange = round_Xsig(&accumulator);
setexponentpos(&result, exponent(st0_ptr) + echange);
} else {
/* The argument is > 0.88309101259 */
/* We use sin(st(0)) = cos(pi/2-st(0)) */
fixed_arg = significand(st0_ptr);
if (exponent == 0) {
/* The argument is >= 1.0 */
/* Put the binary point at the left. */
fixed_arg <<= 1;
}
/* pi/2 in hex is: 1.921fb54442d18469 898CC51701B839A2 52049C1 */
fixed_arg = 0x921fb54442d18469LL - fixed_arg;
/* There is a special case which arises due to rounding, to fix here. */
if (fixed_arg == 0xffffffffffffffffLL)
fixed_arg = 0;
XSIG_LL(argSqrd) = fixed_arg;
argSqrd.lsw = 0;
mul64_Xsig(&argSqrd, &fixed_arg);
XSIG_LL(argTo4) = XSIG_LL(argSqrd);
argTo4.lsw = argSqrd.lsw;
mul_Xsig_Xsig(&argTo4, &argTo4);
polynomial_Xsig(&accumulator, &XSIG_LL(argTo4), neg_terms_h,
N_COEFF_NH - 1);
mul_Xsig_Xsig(&accumulator, &argSqrd);
negate_Xsig(&accumulator);
polynomial_Xsig(&accumulator, &XSIG_LL(argTo4), pos_terms_h,
N_COEFF_PH - 1);
negate_Xsig(&accumulator);
mul64_Xsig(&accumulator, &fixed_arg);
mul64_Xsig(&accumulator, &fixed_arg);
shr_Xsig(&accumulator, 3);
negate_Xsig(&accumulator);
add_Xsig_Xsig(&accumulator, &argSqrd);
shr_Xsig(&accumulator, 1);
accumulator.lsw |= 1; /* A zero accumulator here would cause problems */
negate_Xsig(&accumulator);
/* The basic computation is complete. Now fix the answer to
compensate for the error due to the approximation used for
pi/2
*/
/* This has an exponent of -65 */
fix_up = 0x898cc517;
/* The fix-up needs to be improved for larger args */
if (argSqrd.msw & 0xffc00000) {
/* Get about 32 bit precision in these: */
fix_up -= mul_32_32(0x898cc517, argSqrd.msw) / 6;
}
fix_up = mul_32_32(fix_up, LL_MSW(fixed_arg));
adj = accumulator.lsw; /* temp save */
accumulator.lsw -= fix_up;
if (accumulator.lsw > adj)
XSIG_LL(accumulator)--;
echange = round_Xsig(&accumulator);
setexponentpos(&result, echange - 1);
}
significand(&result) = XSIG_LL(accumulator);
setsign(&result, getsign(st0_ptr));
FPU_copy_to_reg0(&result, TAG_Valid);
#ifdef PARANOID
if ((exponent(&result) >= 0)
&& (significand(&result) > 0x8000000000000000LL)) {
EXCEPTION(EX_INTERNAL | 0x150);
}
#endif /* PARANOID */
}
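/*
 * Editor's note (illustrative, not in the original source): the split at
 * ~0.88309101259 keeps the polynomial argument small.  Below the boundary
 * sin(x) is summed directly as a series in x^2; above it the identity
 * sin(x) = cos(pi/2 - x) is used, and the fix-up term compensates for
 * truncating pi/2 to 64 bits (0x898cc517 is the next word of pi/2's
 * significand, weighted 2^-65).
 */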
/*--- poly_cos() ------------------------------------------------------------+
| |
+---------------------------------------------------------------------------*/
void poly_cos(FPU_REG *st0_ptr)
{
FPU_REG result;
long int exponent, exp2, echange;
Xsig accumulator, argSqrd, fix_up, argTo4;
unsigned long long fixed_arg;
#ifdef PARANOID
if ((exponent(st0_ptr) > 0)
|| ((exponent(st0_ptr) == 0)
&& (significand(st0_ptr) > 0xc90fdaa22168c234LL))) {
EXCEPTION(EX_Invalid);
FPU_copy_to_reg0(&CONST_QNaN, TAG_Special);
return;
}
#endif /* PARANOID */
exponent = exponent(st0_ptr);
accumulator.lsw = accumulator.midw = accumulator.msw = 0;
if ((exponent < -1)
|| ((exponent == -1) && (st0_ptr->sigh <= 0xb00d6f54))) {
/* arg is < 0.687705 */
argSqrd.msw = st0_ptr->sigh;
argSqrd.midw = st0_ptr->sigl;
argSqrd.lsw = 0;
mul64_Xsig(&argSqrd, &significand(st0_ptr));
if (exponent < -1) {
/* shift the argument right by the required places */
shr_Xsig(&argSqrd, 2 * (-1 - exponent));
}
argTo4.msw = argSqrd.msw;
argTo4.midw = argSqrd.midw;
argTo4.lsw = argSqrd.lsw;
mul_Xsig_Xsig(&argTo4, &argTo4);
polynomial_Xsig(&accumulator, &XSIG_LL(argTo4), neg_terms_h,
N_COEFF_NH - 1);
mul_Xsig_Xsig(&accumulator, &argSqrd);
negate_Xsig(&accumulator);
polynomial_Xsig(&accumulator, &XSIG_LL(argTo4), pos_terms_h,
N_COEFF_PH - 1);
negate_Xsig(&accumulator);
mul64_Xsig(&accumulator, &significand(st0_ptr));
mul64_Xsig(&accumulator, &significand(st0_ptr));
shr_Xsig(&accumulator, -2 * (1 + exponent));
shr_Xsig(&accumulator, 3);
negate_Xsig(&accumulator);
add_Xsig_Xsig(&accumulator, &argSqrd);
shr_Xsig(&accumulator, 1);
/* It doesn't matter if accumulator is all zero here, the
following code will work ok */
negate_Xsig(&accumulator);
if (accumulator.lsw & 0x80000000)
XSIG_LL(accumulator)++;
if (accumulator.msw == 0) {
/* The result is 1.0 */
FPU_copy_to_reg0(&CONST_1, TAG_Valid);
return;
} else {
significand(&result) = XSIG_LL(accumulator);
/* will be a valid positive nr with expon = -1 */
setexponentpos(&result, -1);
}
} else {
fixed_arg = significand(st0_ptr);
if (exponent == 0) {
/* The argument is >= 1.0 */
/* Put the binary point at the left. */
fixed_arg <<= 1;
}
/* pi/2 in hex is: 1.921fb54442d18469 898CC51701B839A2 52049C1 */
fixed_arg = 0x921fb54442d18469LL - fixed_arg;
/* There is a special case which arises due to rounding, to fix here. */
if (fixed_arg == 0xffffffffffffffffLL)
fixed_arg = 0;
exponent = -1;
exp2 = -1;
/* A shift is needed here only for a narrow range of arguments,
i.e. for fixed_arg approx 2^-32, but we pick up more... */
if (!(LL_MSW(fixed_arg) & 0xffff0000)) {
fixed_arg <<= 16;
exponent -= 16;
exp2 -= 16;
}
XSIG_LL(argSqrd) = fixed_arg;
argSqrd.lsw = 0;
mul64_Xsig(&argSqrd, &fixed_arg);
if (exponent < -1) {
/* shift the argument right by the required places */
shr_Xsig(&argSqrd, 2 * (-1 - exponent));
}
argTo4.msw = argSqrd.msw;
argTo4.midw = argSqrd.midw;
argTo4.lsw = argSqrd.lsw;
mul_Xsig_Xsig(&argTo4, &argTo4);
polynomial_Xsig(&accumulator, &XSIG_LL(argTo4), neg_terms_l,
N_COEFF_N - 1);
mul_Xsig_Xsig(&accumulator, &argSqrd);
negate_Xsig(&accumulator);
polynomial_Xsig(&accumulator, &XSIG_LL(argTo4), pos_terms_l,
N_COEFF_P - 1);
shr_Xsig(&accumulator, 2); /* Divide by four */
accumulator.msw |= 0x80000000; /* Add 1.0 */
mul64_Xsig(&accumulator, &fixed_arg);
mul64_Xsig(&accumulator, &fixed_arg);
mul64_Xsig(&accumulator, &fixed_arg);
/* Divide by four, FPU_REG compatible, etc */
exponent = 3 * exponent;
/* The minimum exponent difference is 3 */
shr_Xsig(&accumulator, exp2 - exponent);
negate_Xsig(&accumulator);
XSIG_LL(accumulator) += fixed_arg;
/* The basic computation is complete. Now fix the answer to
compensate for the error due to the approximation used for
pi/2
*/
/* This has an exponent of -65 */
XSIG_LL(fix_up) = 0x898cc51701b839a2ll;
fix_up.lsw = 0;
/* The fix-up needs to be improved for larger args */
if (argSqrd.msw & 0xffc00000) {
/* Get about 32 bit precision in these: */
fix_up.msw -= mul_32_32(0x898cc517, argSqrd.msw) / 2;
fix_up.msw += mul_32_32(0x898cc517, argTo4.msw) / 24;
}
exp2 += norm_Xsig(&accumulator);
shr_Xsig(&accumulator, 1); /* Prevent overflow */
exp2++;
shr_Xsig(&fix_up, 65 + exp2);
add_Xsig_Xsig(&accumulator, &fix_up);
echange = round_Xsig(&accumulator);
setexponentpos(&result, exp2 + echange);
significand(&result) = XSIG_LL(accumulator);
}
FPU_copy_to_reg0(&result, TAG_Valid);
#ifdef PARANOID
if ((exponent(&result) >= 0)
&& (significand(&result) > 0x8000000000000000LL)) {
EXCEPTION(EX_INTERNAL | 0x151);
}
#endif /* PARANOID */
}
| linux-master | arch/x86/math-emu/poly_sin.c |
// SPDX-License-Identifier: GPL-2.0
/*---------------------------------------------------------------------------+
| reg_compare.c |
| |
| Compare two floating point registers |
| |
| Copyright (C) 1992,1993,1994,1997 |
| W. Metzenthen, 22 Parker St, Ormond, Vic 3163, Australia |
| E-mail [email protected] |
| |
| |
+---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------+
| compare() is the core FPU_REG comparison function |
+---------------------------------------------------------------------------*/
#include "fpu_system.h"
#include "exception.h"
#include "fpu_emu.h"
#include "control_w.h"
#include "status_w.h"
static int compare(FPU_REG const *b, int tagb)
{
int diff, exp0, expb;
u_char st0_tag;
FPU_REG *st0_ptr;
FPU_REG x, y;
u_char st0_sign, signb = getsign(b);
st0_ptr = &st(0);
st0_tag = FPU_gettag0();
st0_sign = getsign(st0_ptr);
if (tagb == TAG_Special)
tagb = FPU_Special(b);
if (st0_tag == TAG_Special)
st0_tag = FPU_Special(st0_ptr);
if (((st0_tag != TAG_Valid) && (st0_tag != TW_Denormal))
|| ((tagb != TAG_Valid) && (tagb != TW_Denormal))) {
if (st0_tag == TAG_Zero) {
if (tagb == TAG_Zero)
return COMP_A_eq_B;
if (tagb == TAG_Valid)
return ((signb ==
SIGN_POS) ? COMP_A_lt_B : COMP_A_gt_B);
if (tagb == TW_Denormal)
return ((signb ==
SIGN_POS) ? COMP_A_lt_B : COMP_A_gt_B)
| COMP_Denormal;
} else if (tagb == TAG_Zero) {
if (st0_tag == TAG_Valid)
return ((st0_sign ==
SIGN_POS) ? COMP_A_gt_B : COMP_A_lt_B);
if (st0_tag == TW_Denormal)
return ((st0_sign ==
SIGN_POS) ? COMP_A_gt_B : COMP_A_lt_B)
| COMP_Denormal;
}
if (st0_tag == TW_Infinity) {
if ((tagb == TAG_Valid) || (tagb == TAG_Zero))
return ((st0_sign ==
SIGN_POS) ? COMP_A_gt_B : COMP_A_lt_B);
else if (tagb == TW_Denormal)
return ((st0_sign ==
SIGN_POS) ? COMP_A_gt_B : COMP_A_lt_B)
| COMP_Denormal;
else if (tagb == TW_Infinity) {
/* The 80486 book says that infinities can be equal! */
return (st0_sign == signb) ? COMP_A_eq_B :
((st0_sign ==
SIGN_POS) ? COMP_A_gt_B : COMP_A_lt_B);
}
/* Fall through to the NaN code */
} else if (tagb == TW_Infinity) {
if ((st0_tag == TAG_Valid) || (st0_tag == TAG_Zero))
return ((signb ==
SIGN_POS) ? COMP_A_lt_B : COMP_A_gt_B);
if (st0_tag == TW_Denormal)
return ((signb ==
SIGN_POS) ? COMP_A_lt_B : COMP_A_gt_B)
| COMP_Denormal;
/* Fall through to the NaN code */
}
/* The only possibility now should be that one of the arguments
is a NaN */
if ((st0_tag == TW_NaN) || (tagb == TW_NaN)) {
int signalling = 0, unsupported = 0;
if (st0_tag == TW_NaN) {
signalling =
(st0_ptr->sigh & 0xc0000000) == 0x80000000;
unsupported = !((exponent(st0_ptr) == EXP_OVER)
&& (st0_ptr->
sigh & 0x80000000));
}
if (tagb == TW_NaN) {
signalling |=
(b->sigh & 0xc0000000) == 0x80000000;
unsupported |= !((exponent(b) == EXP_OVER)
&& (b->sigh & 0x80000000));
}
if (signalling || unsupported)
return COMP_No_Comp | COMP_SNaN | COMP_NaN;
else
/* Neither is a signaling NaN */
return COMP_No_Comp | COMP_NaN;
}
EXCEPTION(EX_Invalid);
}
if (st0_sign != signb) {
return ((st0_sign == SIGN_POS) ? COMP_A_gt_B : COMP_A_lt_B)
| (((st0_tag == TW_Denormal) || (tagb == TW_Denormal)) ?
COMP_Denormal : 0);
}
if ((st0_tag == TW_Denormal) || (tagb == TW_Denormal)) {
FPU_to_exp16(st0_ptr, &x);
FPU_to_exp16(b, &y);
st0_ptr = &x;
b = &y;
exp0 = exponent16(st0_ptr);
expb = exponent16(b);
} else {
exp0 = exponent(st0_ptr);
expb = exponent(b);
}
#ifdef PARANOID
if (!(st0_ptr->sigh & 0x80000000))
EXCEPTION(EX_Invalid);
if (!(b->sigh & 0x80000000))
EXCEPTION(EX_Invalid);
#endif /* PARANOID */
diff = exp0 - expb;
if (diff == 0) {
diff = st0_ptr->sigh - b->sigh; /* Works only if ms bits are
identical */
if (diff == 0) {
diff = st0_ptr->sigl > b->sigl;
if (diff == 0)
diff = -(st0_ptr->sigl < b->sigl);
}
}
if (diff > 0) {
return ((st0_sign == SIGN_POS) ? COMP_A_gt_B : COMP_A_lt_B)
| (((st0_tag == TW_Denormal) || (tagb == TW_Denormal)) ?
COMP_Denormal : 0);
}
if (diff < 0) {
return ((st0_sign == SIGN_POS) ? COMP_A_lt_B : COMP_A_gt_B)
| (((st0_tag == TW_Denormal) || (tagb == TW_Denormal)) ?
COMP_Denormal : 0);
}
return COMP_A_eq_B
| (((st0_tag == TW_Denormal) || (tagb == TW_Denormal)) ?
COMP_Denormal : 0);
}
/* This function requires that st(0) is not empty */
int FPU_compare_st_data(FPU_REG const *loaded_data, u_char loaded_tag)
{
int f, c;
c = compare(loaded_data, loaded_tag);
if (c & COMP_NaN) {
EXCEPTION(EX_Invalid);
f = SW_C3 | SW_C2 | SW_C0;
} else
switch (c & 7) {
case COMP_A_lt_B:
f = SW_C0;
break;
case COMP_A_eq_B:
f = SW_C3;
break;
case COMP_A_gt_B:
f = 0;
break;
case COMP_No_Comp:
f = SW_C3 | SW_C2 | SW_C0;
break;
default:
#ifdef PARANOID
EXCEPTION(EX_INTERNAL | 0x121);
#endif /* PARANOID */
f = SW_C3 | SW_C2 | SW_C0;
break;
}
setcc(f);
if (c & COMP_Denormal) {
return denormal_operand() < 0;
}
return 0;
}
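/*
 * Editor's note (summary, not in the original source): the switch above
 * maps compare() results onto the condition codes exactly as the fcom
 * family documents them: A < B sets C0, A == B sets C3, A > B clears
 * C3/C2/C0, and an unordered (NaN) result sets all of C3, C2 and C0.
 */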
static int compare_st_st(int nr)
{
int f, c;
FPU_REG *st_ptr;
if (!NOT_EMPTY(0) || !NOT_EMPTY(nr)) {
setcc(SW_C3 | SW_C2 | SW_C0);
/* Stack fault */
EXCEPTION(EX_StackUnder);
return !(control_word & CW_Invalid);
}
st_ptr = &st(nr);
c = compare(st_ptr, FPU_gettagi(nr));
if (c & COMP_NaN) {
setcc(SW_C3 | SW_C2 | SW_C0);
EXCEPTION(EX_Invalid);
return !(control_word & CW_Invalid);
} else
switch (c & 7) {
case COMP_A_lt_B:
f = SW_C0;
break;
case COMP_A_eq_B:
f = SW_C3;
break;
case COMP_A_gt_B:
f = 0;
break;
case COMP_No_Comp:
f = SW_C3 | SW_C2 | SW_C0;
break;
default:
#ifdef PARANOID
EXCEPTION(EX_INTERNAL | 0x122);
#endif /* PARANOID */
f = SW_C3 | SW_C2 | SW_C0;
break;
}
setcc(f);
if (c & COMP_Denormal) {
return denormal_operand() < 0;
}
return 0;
}
static int compare_i_st_st(int nr)
{
int f, c;
FPU_REG *st_ptr;
if (!NOT_EMPTY(0) || !NOT_EMPTY(nr)) {
FPU_EFLAGS |= (X86_EFLAGS_ZF | X86_EFLAGS_PF | X86_EFLAGS_CF);
/* Stack fault */
EXCEPTION(EX_StackUnder);
return !(control_word & CW_Invalid);
}
partial_status &= ~SW_C0;
st_ptr = &st(nr);
c = compare(st_ptr, FPU_gettagi(nr));
if (c & COMP_NaN) {
FPU_EFLAGS |= (X86_EFLAGS_ZF | X86_EFLAGS_PF | X86_EFLAGS_CF);
EXCEPTION(EX_Invalid);
return !(control_word & CW_Invalid);
}
switch (c & 7) {
case COMP_A_lt_B:
f = X86_EFLAGS_CF;
break;
case COMP_A_eq_B:
f = X86_EFLAGS_ZF;
break;
case COMP_A_gt_B:
f = 0;
break;
case COMP_No_Comp:
f = X86_EFLAGS_ZF | X86_EFLAGS_PF | X86_EFLAGS_CF;
break;
default:
#ifdef PARANOID
EXCEPTION(EX_INTERNAL | 0x122);
#endif /* PARANOID */
f = 0;
break;
}
FPU_EFLAGS = (FPU_EFLAGS & ~(X86_EFLAGS_ZF | X86_EFLAGS_PF | X86_EFLAGS_CF)) | f;
if (c & COMP_Denormal) {
return denormal_operand() < 0;
}
return 0;
}
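/*
 * Editor's note (summary, not in the original source): the P6 fcomi
 * family reports through EFLAGS instead of the FPU status word: A < B
 * sets CF, A == B sets ZF, A > B clears CF/ZF/PF, and an unordered
 * result sets ZF, PF and CF, mirroring the setcc() mapping above.
 */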
static int compare_u_st_st(int nr)
{
int f = 0, c;
FPU_REG *st_ptr;
if (!NOT_EMPTY(0) || !NOT_EMPTY(nr)) {
setcc(SW_C3 | SW_C2 | SW_C0);
/* Stack fault */
EXCEPTION(EX_StackUnder);
return !(control_word & CW_Invalid);
}
st_ptr = &st(nr);
c = compare(st_ptr, FPU_gettagi(nr));
if (c & COMP_NaN) {
setcc(SW_C3 | SW_C2 | SW_C0);
if (c & COMP_SNaN) { /* This is the only difference between
un-ordered and ordinary comparisons */
EXCEPTION(EX_Invalid);
return !(control_word & CW_Invalid);
}
return 0;
} else
switch (c & 7) {
case COMP_A_lt_B:
f = SW_C0;
break;
case COMP_A_eq_B:
f = SW_C3;
break;
case COMP_A_gt_B:
f = 0;
break;
case COMP_No_Comp:
f = SW_C3 | SW_C2 | SW_C0;
break;
#ifdef PARANOID
default:
EXCEPTION(EX_INTERNAL | 0x123);
f = SW_C3 | SW_C2 | SW_C0;
break;
#endif /* PARANOID */
}
setcc(f);
if (c & COMP_Denormal) {
return denormal_operand() < 0;
}
return 0;
}
static int compare_ui_st_st(int nr)
{
int f = 0, c;
FPU_REG *st_ptr;
if (!NOT_EMPTY(0) || !NOT_EMPTY(nr)) {
FPU_EFLAGS |= (X86_EFLAGS_ZF | X86_EFLAGS_PF | X86_EFLAGS_CF);
/* Stack fault */
EXCEPTION(EX_StackUnder);
return !(control_word & CW_Invalid);
}
partial_status &= ~SW_C0;
st_ptr = &st(nr);
c = compare(st_ptr, FPU_gettagi(nr));
if (c & COMP_NaN) {
FPU_EFLAGS |= (X86_EFLAGS_ZF | X86_EFLAGS_PF | X86_EFLAGS_CF);
if (c & COMP_SNaN) { /* This is the only difference between
un-ordered and ordinary comparisons */
EXCEPTION(EX_Invalid);
return !(control_word & CW_Invalid);
}
return 0;
}
switch (c & 7) {
case COMP_A_lt_B:
f = X86_EFLAGS_CF;
break;
case COMP_A_eq_B:
f = X86_EFLAGS_ZF;
break;
case COMP_A_gt_B:
f = 0;
break;
case COMP_No_Comp:
f = X86_EFLAGS_ZF | X86_EFLAGS_PF | X86_EFLAGS_CF;
break;
#ifdef PARANOID
default:
EXCEPTION(EX_INTERNAL | 0x123);
f = 0;
break;
#endif /* PARANOID */
}
FPU_EFLAGS = (FPU_EFLAGS & ~(X86_EFLAGS_ZF | X86_EFLAGS_PF | X86_EFLAGS_CF)) | f;
if (c & COMP_Denormal) {
return denormal_operand() < 0;
}
return 0;
}
/*---------------------------------------------------------------------------*/
void fcom_st(void)
{
/* fcom st(i) */
compare_st_st(FPU_rm);
}
void fcompst(void)
{
/* fcomp st(i) */
if (!compare_st_st(FPU_rm))
FPU_pop();
}
void fcompp(void)
{
/* fcompp */
if (FPU_rm != 1) {
FPU_illegal();
return;
}
if (!compare_st_st(1))
poppop();
}
void fucom_(void)
{
/* fucom st(i) */
compare_u_st_st(FPU_rm);
}
void fucomp(void)
{
/* fucomp st(i) */
if (!compare_u_st_st(FPU_rm))
FPU_pop();
}
void fucompp(void)
{
/* fucompp */
if (FPU_rm == 1) {
if (!compare_u_st_st(1))
poppop();
} else
FPU_illegal();
}
/* P6+ compare-to-EFLAGS ops */
void fcomi_(void)
{
/* fcomi st(i) */
compare_i_st_st(FPU_rm);
}
void fcomip(void)
{
/* fcomip st(i) */
if (!compare_i_st_st(FPU_rm))
FPU_pop();
}
void fucomi_(void)
{
/* fucomi st(i) */
compare_ui_st_st(FPU_rm);
}
void fucomip(void)
{
/* fucomip st(i) */
if (!compare_ui_st_st(FPU_rm))
FPU_pop();
}
| linux-master | arch/x86/math-emu/reg_compare.c |
// SPDX-License-Identifier: GPL-2.0
/*---------------------------------------------------------------------------+
| fpu_arith.c |
| |
| Code to implement the FPU register/register arithmetic instructions |
| |
| Copyright (C) 1992,1993,1997 |
| W. Metzenthen, 22 Parker St, Ormond, Vic 3163, Australia |
| E-mail [email protected] |
| |
| |
+---------------------------------------------------------------------------*/
#include "fpu_system.h"
#include "fpu_emu.h"
#include "control_w.h"
#include "status_w.h"
void fadd__(void)
{
/* fadd st,st(i) */
int i = FPU_rm;
clear_C1();
FPU_add(&st(i), FPU_gettagi(i), 0, control_word);
}
void fmul__(void)
{
/* fmul st,st(i) */
int i = FPU_rm;
clear_C1();
FPU_mul(&st(i), FPU_gettagi(i), 0, control_word);
}
void fsub__(void)
{
/* fsub st,st(i) */
clear_C1();
FPU_sub(0, FPU_rm, control_word);
}
void fsubr_(void)
{
/* fsubr st,st(i) */
clear_C1();
FPU_sub(REV, FPU_rm, control_word);
}
void fdiv__(void)
{
/* fdiv st,st(i) */
clear_C1();
FPU_div(0, FPU_rm, control_word);
}
void fdivr_(void)
{
/* fdivr st,st(i) */
clear_C1();
FPU_div(REV, FPU_rm, control_word);
}
void fadd_i(void)
{
/* fadd st(i),st */
int i = FPU_rm;
clear_C1();
FPU_add(&st(i), FPU_gettagi(i), i, control_word);
}
void fmul_i(void)
{
/* fmul st(i),st */
clear_C1();
FPU_mul(&st(0), FPU_gettag0(), FPU_rm, control_word);
}
void fsubri(void)
{
/* fsubr st(i),st */
clear_C1();
FPU_sub(DEST_RM, FPU_rm, control_word);
}
void fsub_i(void)
{
/* fsub st(i),st */
clear_C1();
FPU_sub(REV | DEST_RM, FPU_rm, control_word);
}
void fdivri(void)
{
/* fdivr st(i),st */
clear_C1();
FPU_div(DEST_RM, FPU_rm, control_word);
}
void fdiv_i(void)
{
/* fdiv st(i),st */
clear_C1();
FPU_div(REV | DEST_RM, FPU_rm, control_word);
}
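/*
 * Editor's note (summary, not in the original source): the flag bits
 * passed to FPU_sub()/FPU_div() select the addressing variant: REV
 * reverses the operand order (the fsubr/fdivr forms) and DEST_RM makes
 * st(i) rather than st(0) the destination (the "st(i),st" forms).  The
 * *p variants below additionally pop the stack on success.
 */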
void faddp_(void)
{
/* faddp st(i),st */
int i = FPU_rm;
clear_C1();
if (FPU_add(&st(i), FPU_gettagi(i), i, control_word) >= 0)
FPU_pop();
}
void fmulp_(void)
{
/* fmulp st(i),st */
clear_C1();
if (FPU_mul(&st(0), FPU_gettag0(), FPU_rm, control_word) >= 0)
FPU_pop();
}
void fsubrp(void)
{
/* fsubrp st(i),st */
clear_C1();
if (FPU_sub(DEST_RM, FPU_rm, control_word) >= 0)
FPU_pop();
}
void fsubp_(void)
{
/* fsubp st(i),st */
clear_C1();
if (FPU_sub(REV | DEST_RM, FPU_rm, control_word) >= 0)
FPU_pop();
}
void fdivrp(void)
{
/* fdivrp st(i),st */
clear_C1();
if (FPU_div(DEST_RM, FPU_rm, control_word) >= 0)
FPU_pop();
}
void fdivp_(void)
{
/* fdivp st(i),st */
clear_C1();
if (FPU_div(REV | DEST_RM, FPU_rm, control_word) >= 0)
FPU_pop();
}
| linux-master | arch/x86/math-emu/fpu_arith.c |
// SPDX-License-Identifier: GPL-2.0
/*---------------------------------------------------------------------------+
| fpu_tags.c |
| |
| Set FPU register tags. |
| |
| Copyright (C) 1997 |
| W. Metzenthen, 22 Parker St, Ormond, Vic 3163, Australia |
| E-mail [email protected] |
| |
| |
+---------------------------------------------------------------------------*/
#include "fpu_emu.h"
#include "fpu_system.h"
#include "exception.h"
void FPU_pop(void)
{
fpu_tag_word |= 3 << ((top & 7) * 2);
top++;
}
int FPU_gettag0(void)
{
return (fpu_tag_word >> ((top & 7) * 2)) & 3;
}
int FPU_gettagi(int stnr)
{
return (fpu_tag_word >> (((top + stnr) & 7) * 2)) & 3;
}
int FPU_gettag(int regnr)
{
return (fpu_tag_word >> ((regnr & 7) * 2)) & 3;
}
void FPU_settag0(int tag)
{
int regnr = top;
regnr &= 7;
fpu_tag_word &= ~(3 << (regnr * 2));
fpu_tag_word |= (tag & 3) << (regnr * 2);
}
void FPU_settagi(int stnr, int tag)
{
int regnr = stnr + top;
regnr &= 7;
fpu_tag_word &= ~(3 << (regnr * 2));
fpu_tag_word |= (tag & 3) << (regnr * 2);
}
void FPU_settag(int regnr, int tag)
{
regnr &= 7;
fpu_tag_word &= ~(3 << (regnr * 2));
fpu_tag_word |= (tag & 3) << (regnr * 2);
}
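/*
 * Worked example (editor's note, not in the original source): each of the
 * eight physical registers owns two bits of fpu_tag_word, and st(i) lives
 * in physical register (top + i) & 7.  With top == 6,
 * FPU_settagi(1, TAG_Zero) touches register 7, i.e. it clears and sets
 * bits 14-15 of the tag word.
 */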
int FPU_Special(FPU_REG const *ptr)
{
int exp = exponent(ptr);
if (exp == EXP_BIAS + EXP_UNDER)
return TW_Denormal;
else if (exp != EXP_BIAS + EXP_OVER)
return TW_NaN;
else if ((ptr->sigh == 0x80000000) && (ptr->sigl == 0))
return TW_Infinity;
return TW_NaN;
}
int isNaN(FPU_REG const *ptr)
{
return ((exponent(ptr) == EXP_BIAS + EXP_OVER)
&& !((ptr->sigh == 0x80000000) && (ptr->sigl == 0)));
}
int FPU_empty_i(int stnr)
{
int regnr = (top + stnr) & 7;
return ((fpu_tag_word >> (regnr * 2)) & 3) == TAG_Empty;
}
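/*
 * Editor's note (clarification, not in the original source): despite its
 * name, FPU_stackoverflow() only tests for overflow; it points
 * *st_new_ptr at the would-be new st(0) and returns true when that slot,
 * physical register (top - 1) & 7, is not empty, i.e. when a push would
 * overflow.
 */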
int FPU_stackoverflow(FPU_REG ** st_new_ptr)
{
*st_new_ptr = &st(-1);
return ((fpu_tag_word >> (((top - 1) & 7) * 2)) & 3) != TAG_Empty;
}
void FPU_copy_to_regi(FPU_REG const *r, u_char tag, int stnr)
{
reg_copy(r, &st(stnr));
FPU_settagi(stnr, tag);
}
void FPU_copy_to_reg1(FPU_REG const *r, u_char tag)
{
reg_copy(r, &st(1));
FPU_settagi(1, tag);
}
void FPU_copy_to_reg0(FPU_REG const *r, u_char tag)
{
int regnr = top;
regnr &= 7;
reg_copy(r, &st(0));
fpu_tag_word &= ~(3 << (regnr * 2));
fpu_tag_word |= (tag & 3) << (regnr * 2);
}
| linux-master | arch/x86/math-emu/fpu_tags.c |
// SPDX-License-Identifier: GPL-2.0
/*---------------------------------------------------------------------------+
| reg_convert.c |
| |
| Convert register representation. |
| |
| Copyright (C) 1992,1993,1994,1996,1997 |
| W. Metzenthen, 22 Parker St, Ormond, Vic 3163, Australia |
| E-mail [email protected] |
| |
| |
+---------------------------------------------------------------------------*/
#include "exception.h"
#include "fpu_emu.h"
int FPU_to_exp16(FPU_REG const *a, FPU_REG *x)
{
int sign = getsign(a);
*(long long *)&(x->sigl) = *(const long long *)&(a->sigl);
/* Set up the exponent as a 16 bit quantity. */
setexponent16(x, exponent(a));
if (exponent16(x) == EXP_UNDER) {
/* The number is a de-normal or pseudodenormal. */
/* We only deal with the significand and exponent. */
if (x->sigh & 0x80000000) {
/* Is a pseudodenormal. */
/* This is non-80486 behaviour because the number
loses its 'denormal' identity. */
addexponent(x, 1);
} else {
/* Is a denormal. */
addexponent(x, 1);
FPU_normalize_nuo(x);
}
}
if (!(x->sigh & 0x80000000)) {
EXCEPTION(EX_INTERNAL | 0x180);
}
return sign;
}
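/*
 * Editor's note (clarification, not in the original source): on return, x
 * holds the same magnitude as a but with a 16-bit exponent and a
 * normalized significand (bit 31 of sigh set), which is why the internal-
 * error check above fires if that bit ends up clear.  The sign is
 * returned separately instead of being kept in x.
 */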
| linux-master | arch/x86/math-emu/reg_convert.c |
// SPDX-License-Identifier: GPL-2.0
/*---------------------------------------------------------------------------+
| errors.c |
| |
| The error handling functions for wm-FPU-emu |
| |
| Copyright (C) 1992,1993,1994,1996 |
| W. Metzenthen, 22 Parker St, Ormond, Vic 3163, Australia |
| E-mail [email protected] |
| |
| |
+---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------+
| Note: |
| The file contains code which accesses user memory. |
| Emulator static data may change when user memory is accessed, due to |
| other processes using the emulator while swapping is in progress. |
+---------------------------------------------------------------------------*/
#include <linux/signal.h>
#include <linux/uaccess.h>
#include "fpu_emu.h"
#include "fpu_system.h"
#include "exception.h"
#include "status_w.h"
#include "control_w.h"
#include "reg_constant.h"
#include "version.h"
#undef PRINT_MESSAGES
#if 0
void Un_impl(void)
{
u_char byte1, FPU_modrm;
unsigned long address = FPU_ORIG_EIP;
RE_ENTRANT_CHECK_OFF;
/* No need to check access_ok(), we have previously fetched these bytes. */
printk("Unimplemented FPU Opcode at eip=%p : ", (void __user *)address);
if (FPU_CS == __USER_CS) {
while (1) {
FPU_get_user(byte1, (u_char __user *) address);
if ((byte1 & 0xf8) == 0xd8)
break;
printk("[%02x]", byte1);
address++;
}
printk("%02x ", byte1);
FPU_get_user(FPU_modrm, 1 + (u_char __user *) address);
if (FPU_modrm >= 0300)
printk("%02x (%02x+%d)\n", FPU_modrm, FPU_modrm & 0xf8,
FPU_modrm & 7);
else
printk("/%d\n", (FPU_modrm >> 3) & 7);
} else {
printk("cs selector = %04x\n", FPU_CS);
}
RE_ENTRANT_CHECK_ON;
EXCEPTION(EX_Invalid);
}
#endif /* 0 */
/*
Called for opcodes which are illegal and which are known to result in a
SIGILL with a real 80486.
*/
void FPU_illegal(void)
{
math_abort(FPU_info, SIGILL);
}
void FPU_printall(void)
{
int i;
static const char *tag_desc[] = { "Valid", "Zero", "ERROR", "Empty",
"DeNorm", "Inf", "NaN"
};
u_char byte1, FPU_modrm;
unsigned long address = FPU_ORIG_EIP;
RE_ENTRANT_CHECK_OFF;
/* No need to check access_ok(), we have previously fetched these bytes. */
printk("At %p:", (void *)address);
if (FPU_CS == __USER_CS) {
#define MAX_PRINTED_BYTES 20
for (i = 0; i < MAX_PRINTED_BYTES; i++) {
FPU_get_user(byte1, (u_char __user *) address);
if ((byte1 & 0xf8) == 0xd8) {
printk(" %02x", byte1);
break;
}
printk(" [%02x]", byte1);
address++;
}
if (i == MAX_PRINTED_BYTES)
printk(" [more..]\n");
else {
FPU_get_user(FPU_modrm, 1 + (u_char __user *) address);
if (FPU_modrm >= 0300)
printk(" %02x (%02x+%d)\n", FPU_modrm,
FPU_modrm & 0xf8, FPU_modrm & 7);
else
printk(" /%d, mod=%d rm=%d\n",
(FPU_modrm >> 3) & 7,
(FPU_modrm >> 6) & 3, FPU_modrm & 7);
}
} else {
printk("%04x\n", FPU_CS);
}
partial_status = status_word();
#ifdef DEBUGGING
if (partial_status & SW_Backward)
printk("SW: backward compatibility\n");
if (partial_status & SW_C3)
printk("SW: condition bit 3\n");
if (partial_status & SW_C2)
printk("SW: condition bit 2\n");
if (partial_status & SW_C1)
printk("SW: condition bit 1\n");
if (partial_status & SW_C0)
printk("SW: condition bit 0\n");
if (partial_status & SW_Summary)
printk("SW: exception summary\n");
if (partial_status & SW_Stack_Fault)
printk("SW: stack fault\n");
if (partial_status & SW_Precision)
printk("SW: loss of precision\n");
if (partial_status & SW_Underflow)
printk("SW: underflow\n");
if (partial_status & SW_Overflow)
printk("SW: overflow\n");
if (partial_status & SW_Zero_Div)
printk("SW: divide by zero\n");
if (partial_status & SW_Denorm_Op)
printk("SW: denormalized operand\n");
if (partial_status & SW_Invalid)
printk("SW: invalid operation\n");
#endif /* DEBUGGING */
printk(" SW: b=%d st=%d es=%d sf=%d cc=%d%d%d%d ef=%d%d%d%d%d%d\n", partial_status & 0x8000 ? 1 : 0, /* busy */
(partial_status & 0x3800) >> 11, /* stack top pointer */
partial_status & 0x80 ? 1 : 0, /* Error summary status */
partial_status & 0x40 ? 1 : 0, /* Stack flag */
partial_status & SW_C3 ? 1 : 0, partial_status & SW_C2 ? 1 : 0, /* cc */
partial_status & SW_C1 ? 1 : 0, partial_status & SW_C0 ? 1 : 0, /* cc */
partial_status & SW_Precision ? 1 : 0,
partial_status & SW_Underflow ? 1 : 0,
partial_status & SW_Overflow ? 1 : 0,
partial_status & SW_Zero_Div ? 1 : 0,
partial_status & SW_Denorm_Op ? 1 : 0,
partial_status & SW_Invalid ? 1 : 0);
printk(" CW: ic=%d rc=%d%d pc=%d%d iem=%d ef=%d%d%d%d%d%d\n",
control_word & 0x1000 ? 1 : 0,
(control_word & 0x800) >> 11, (control_word & 0x400) >> 10,
(control_word & 0x200) >> 9, (control_word & 0x100) >> 8,
control_word & 0x80 ? 1 : 0,
control_word & SW_Precision ? 1 : 0,
control_word & SW_Underflow ? 1 : 0,
control_word & SW_Overflow ? 1 : 0,
control_word & SW_Zero_Div ? 1 : 0,
control_word & SW_Denorm_Op ? 1 : 0,
control_word & SW_Invalid ? 1 : 0);
for (i = 0; i < 8; i++) {
FPU_REG *r = &st(i);
u_char tagi = FPU_gettagi(i);
switch (tagi) {
case TAG_Empty:
continue;
case TAG_Zero:
case TAG_Special:
/* Update tagi for the printk below */
tagi = FPU_Special(r);
fallthrough;
case TAG_Valid:
printk("st(%d) %c .%04lx %04lx %04lx %04lx e%+-6d ", i,
getsign(r) ? '-' : '+',
(long)(r->sigh >> 16),
(long)(r->sigh & 0xFFFF),
(long)(r->sigl >> 16),
(long)(r->sigl & 0xFFFF),
exponent(r) - EXP_BIAS + 1);
break;
default:
printk("Whoops! Error in errors.c: tag%d is %d ", i,
tagi);
continue;
}
printk("%s\n", tag_desc[(int)(unsigned)tagi]);
}
RE_ENTRANT_CHECK_ON;
}
static struct {
	int type;
	const char *name;
} exception_names[] = {
	{ EX_StackOver, "stack overflow" },
	{ EX_StackUnder, "stack underflow" },
	{ EX_Precision, "loss of precision" },
	{ EX_Underflow, "underflow" },
	{ EX_Overflow, "overflow" },
	{ EX_ZeroDiv, "divide by zero" },
	{ EX_Denormal, "denormalized operand" },
	{ EX_Invalid, "invalid operation" },
	{ EX_INTERNAL, "INTERNAL BUG in " FPU_VERSION },
	{ 0, NULL }
};
/*
EX_INTERNAL is always given with a code which indicates where the
error was detected.
Internal error types:
0x14 in fpu_etc.c
0x1nn in a *.c file:
0x101 in reg_add_sub.c
0x102 in reg_mul.c
0x104 in poly_atan.c
0x105 in reg_mul.c
0x107 in fpu_trig.c
0x108 in reg_compare.c
0x109 in reg_compare.c
0x110 in reg_add_sub.c
0x111 in fpe_entry.c
0x112 in fpu_trig.c
0x113 in errors.c
0x115 in fpu_trig.c
0x116 in fpu_trig.c
0x117 in fpu_trig.c
0x118 in fpu_trig.c
0x119 in fpu_trig.c
0x120 in poly_atan.c
0x121 in reg_compare.c
0x122 in reg_compare.c
0x123 in reg_compare.c
0x125 in fpu_trig.c
0x126 in fpu_entry.c
0x127 in poly_2xm1.c
0x128 in fpu_entry.c
0x129 in fpu_entry.c
0x130 in get_address.c
0x131 in get_address.c
0x132 in get_address.c
0x133 in get_address.c
0x140 in load_store.c
0x141 in load_store.c
0x150 in poly_sin.c
0x151 in poly_sin.c
0x160 in reg_ld_str.c
0x161 in reg_ld_str.c
0x162 in reg_ld_str.c
0x163 in reg_ld_str.c
0x164 in reg_ld_str.c
0x170 in fpu_tags.c
0x171 in fpu_tags.c
0x172 in fpu_tags.c
0x180 in reg_convert.c
0x2nn in an *.S file:
0x201 in reg_u_add.S
0x202 in reg_u_div.S
0x203 in reg_u_div.S
0x204 in reg_u_div.S
0x205 in reg_u_mul.S
0x206 in reg_u_sub.S
0x207 in wm_sqrt.S
0x208 in reg_div.S
0x209 in reg_u_sub.S
0x210 in reg_u_sub.S
0x211 in reg_u_sub.S
0x212 in reg_u_sub.S
0x213 in wm_sqrt.S
0x214 in wm_sqrt.S
0x215 in wm_sqrt.S
0x220 in reg_norm.S
0x221 in reg_norm.S
0x230 in reg_round.S
0x231 in reg_round.S
0x232 in reg_round.S
0x233 in reg_round.S
0x234 in reg_round.S
0x235 in reg_round.S
0x236 in reg_round.S
0x240 in div_Xsig.S
0x241 in div_Xsig.S
0x242 in div_Xsig.S
*/
asmlinkage __visible void FPU_exception(int n)
{
int i, int_type;
int_type = 0; /* Needed only to stop compiler warnings */
if (n & EX_INTERNAL) {
int_type = n - EX_INTERNAL;
n = EX_INTERNAL;
/* Set lots of exception bits! */
partial_status |= (SW_Exc_Mask | SW_Summary | SW_Backward);
} else {
/* Extract only the bits which we use to set the status word */
n &= (SW_Exc_Mask);
/* Set the corresponding exception bit */
partial_status |= n;
/* Set summary bits iff exception isn't masked */
if (partial_status & ~control_word & CW_Exceptions)
partial_status |= (SW_Summary | SW_Backward);
if (n & (SW_Stack_Fault | EX_Precision)) {
if (!(n & SW_C1))
/* This bit distinguishes over- from underflow for a stack fault,
and roundup from round-down for precision loss. */
partial_status &= ~SW_C1;
}
}
RE_ENTRANT_CHECK_OFF;
if ((~control_word & n & CW_Exceptions) || (n == EX_INTERNAL)) {
/* Get a name string for error reporting */
for (i = 0; exception_names[i].type; i++)
if ((exception_names[i].type & n) ==
exception_names[i].type)
break;
if (exception_names[i].type) {
#ifdef PRINT_MESSAGES
printk("FP Exception: %s!\n", exception_names[i].name);
#endif /* PRINT_MESSAGES */
} else
printk("FPU emulator: Unknown Exception: 0x%04x!\n", n);
if (n == EX_INTERNAL) {
printk("FPU emulator: Internal error type 0x%04x\n",
int_type);
FPU_printall();
}
#ifdef PRINT_MESSAGES
else
FPU_printall();
#endif /* PRINT_MESSAGES */
/*
* The 80486 generates an interrupt on the next non-control FPU
* instruction. So we need some means of flagging it.
* We use the ES (Error Summary) bit for this.
*/
}
RE_ENTRANT_CHECK_ON;
#ifdef __DEBUG__
math_abort(FPU_info, SIGFPE);
#endif /* __DEBUG__ */
}
/* Real operation attempted on a NaN. */
/* Returns < 0 if the exception is unmasked */
int real_1op_NaN(FPU_REG *a)
{
int signalling, isNaN;
isNaN = (exponent(a) == EXP_OVER) && (a->sigh & 0x80000000);
/* The default result for the case of two "equal" NaNs (signs may
differ) is chosen to reproduce 80486 behaviour */
signalling = isNaN && !(a->sigh & 0x40000000);
if (!signalling) {
if (!isNaN) { /* pseudo-NaN, or other unsupported? */
if (control_word & CW_Invalid) {
/* Masked response */
reg_copy(&CONST_QNaN, a);
}
EXCEPTION(EX_Invalid);
return (!(control_word & CW_Invalid) ? FPU_Exception :
0) | TAG_Special;
}
return TAG_Special;
}
if (control_word & CW_Invalid) {
/* The masked response */
if (!(a->sigh & 0x80000000)) { /* pseudo-NaN ? */
reg_copy(&CONST_QNaN, a);
}
/* ensure a Quiet NaN */
a->sigh |= 0x40000000;
}
EXCEPTION(EX_Invalid);
return (!(control_word & CW_Invalid) ? FPU_Exception : 0) | TAG_Special;
}
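/*
 * Editor's note (illustrative, not in the original source): the emulator
 * treats bit 30 of sigh (0x40000000) as the quiet bit, so a signalling
 * NaN has a set integer bit (0x80000000) and a clear quiet bit, and the
 * masked response above quietens it by OR-ing 0x40000000 back in.
 */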
/* Real operation attempted on two operands, one a NaN. */
/* Returns < 0 if the exception is unmasked */
int real_2op_NaN(FPU_REG const *b, u_char tagb,
int deststnr, FPU_REG const *defaultNaN)
{
FPU_REG *dest = &st(deststnr);
FPU_REG const *a = dest;
u_char taga = FPU_gettagi(deststnr);
FPU_REG const *x;
int signalling, unsupported;
if (taga == TAG_Special)
taga = FPU_Special(a);
if (tagb == TAG_Special)
tagb = FPU_Special(b);
/* TW_NaN is also used for unsupported data types. */
unsupported = ((taga == TW_NaN)
&& !((exponent(a) == EXP_OVER)
&& (a->sigh & 0x80000000)))
|| ((tagb == TW_NaN)
&& !((exponent(b) == EXP_OVER) && (b->sigh & 0x80000000)));
if (unsupported) {
if (control_word & CW_Invalid) {
/* Masked response */
FPU_copy_to_regi(&CONST_QNaN, TAG_Special, deststnr);
}
EXCEPTION(EX_Invalid);
return (!(control_word & CW_Invalid) ? FPU_Exception : 0) |
TAG_Special;
}
if (taga == TW_NaN) {
x = a;
if (tagb == TW_NaN) {
signalling = !(a->sigh & b->sigh & 0x40000000);
if (significand(b) > significand(a))
x = b;
else if (significand(b) == significand(a)) {
/* The default result for the case of two "equal" NaNs (signs may
differ) is chosen to reproduce 80486 behaviour */
x = defaultNaN;
}
} else {
/* return the quiet version of the NaN in a */
signalling = !(a->sigh & 0x40000000);
}
} else
#ifdef PARANOID
if (tagb == TW_NaN)
#endif /* PARANOID */
{
signalling = !(b->sigh & 0x40000000);
x = b;
}
#ifdef PARANOID
else {
signalling = 0;
EXCEPTION(EX_INTERNAL | 0x113);
x = &CONST_QNaN;
}
#endif /* PARANOID */
if ((!signalling) || (control_word & CW_Invalid)) {
if (!x)
x = b;
if (!(x->sigh & 0x80000000)) /* pseudo-NaN ? */
x = &CONST_QNaN;
FPU_copy_to_regi(x, TAG_Special, deststnr);
if (!signalling)
return TAG_Special;
/* ensure a Quiet NaN */
dest->sigh |= 0x40000000;
}
EXCEPTION(EX_Invalid);
return (!(control_word & CW_Invalid) ? FPU_Exception : 0) | TAG_Special;
}
/* Invalid arith operation on Valid registers */
/* Returns < 0 if the exception is unmasked */
asmlinkage __visible int arith_invalid(int deststnr)
{
EXCEPTION(EX_Invalid);
if (control_word & CW_Invalid) {
/* The masked response */
FPU_copy_to_regi(&CONST_QNaN, TAG_Special, deststnr);
}
return (!(control_word & CW_Invalid) ? FPU_Exception : 0) | TAG_Valid;
}
/* Divide a finite number by zero */
asmlinkage __visible int FPU_divide_by_zero(int deststnr, u_char sign)
{
FPU_REG *dest = &st(deststnr);
int tag = TAG_Valid;
if (control_word & CW_ZeroDiv) {
/* The masked response */
FPU_copy_to_regi(&CONST_INF, TAG_Special, deststnr);
setsign(dest, sign);
tag = TAG_Special;
}
EXCEPTION(EX_ZeroDiv);
return (!(control_word & CW_ZeroDiv) ? FPU_Exception : 0) | tag;
}
/* This may be called often, so keep it lean */
int set_precision_flag(int flags)
{
if (control_word & CW_Precision) {
partial_status &= ~(SW_C1 & flags);
partial_status |= flags; /* The masked response */
return 0;
} else {
EXCEPTION(flags);
return 1;
}
}
/* This may be called often, so keep it lean */
asmlinkage __visible void set_precision_flag_up(void)
{
if (control_word & CW_Precision)
partial_status |= (SW_Precision | SW_C1); /* The masked response */
else
EXCEPTION(EX_Precision | SW_C1);
}
/* This may be called often, so keep it lean */
asmlinkage __visible void set_precision_flag_down(void)
{
if (control_word & CW_Precision) { /* The masked response */
partial_status &= ~SW_C1;
partial_status |= SW_Precision;
} else
EXCEPTION(EX_Precision);
}
asmlinkage __visible int denormal_operand(void)
{
if (control_word & CW_Denormal) { /* The masked response */
partial_status |= SW_Denorm_Op;
return TAG_Special;
} else {
EXCEPTION(EX_Denormal);
return TAG_Special | FPU_Exception;
}
}
asmlinkage __visible int arith_overflow(FPU_REG *dest)
{
int tag = TAG_Valid;
if (control_word & CW_Overflow) {
/* The masked response */
/* ###### The response here depends upon the rounding mode */
reg_copy(&CONST_INF, dest);
tag = TAG_Special;
} else {
/* Subtract the magic number from the exponent */
addexponent(dest, (-3 * (1 << 13)));
}
EXCEPTION(EX_Overflow);
if (control_word & CW_Overflow) {
/* The overflow exception is masked. */
/* By definition, precision is lost.
The roundup bit (C1) is also set because we have
"rounded" upwards to Infinity. */
EXCEPTION(EX_Precision | SW_C1);
return tag;
}
return tag;
}
asmlinkage __visible int arith_underflow(FPU_REG *dest)
{
int tag = TAG_Valid;
if (control_word & CW_Underflow) {
/* The masked response */
if (exponent16(dest) <= EXP_UNDER - 63) {
reg_copy(&CONST_Z, dest);
partial_status &= ~SW_C1; /* Round down. */
tag = TAG_Zero;
} else {
stdexp(dest);
}
} else {
/* Add the magic number to the exponent. */
addexponent(dest, (3 * (1 << 13)) + EXTENDED_Ebias);
}
EXCEPTION(EX_Underflow);
if (control_word & CW_Underflow) {
/* The underflow exception is masked. */
EXCEPTION(EX_Precision);
return tag;
}
return tag;
}
void FPU_stack_overflow(void)
{
if (control_word & CW_Invalid) {
/* The masked response */
top--;
FPU_copy_to_reg0(&CONST_QNaN, TAG_Special);
}
EXCEPTION(EX_StackOver);
return;
}
void FPU_stack_underflow(void)
{
if (control_word & CW_Invalid) {
/* The masked response */
FPU_copy_to_reg0(&CONST_QNaN, TAG_Special);
}
EXCEPTION(EX_StackUnder);
return;
}
void FPU_stack_underflow_i(int i)
{
if (control_word & CW_Invalid) {
/* The masked response */
FPU_copy_to_regi(&CONST_QNaN, TAG_Special, i);
}
EXCEPTION(EX_StackUnder);
return;
}
void FPU_stack_underflow_pop(int i)
{
if (control_word & CW_Invalid) {
/* The masked response */
FPU_copy_to_regi(&CONST_QNaN, TAG_Special, i);
FPU_pop();
}
EXCEPTION(EX_StackUnder);
return;
}
| linux-master | arch/x86/math-emu/errors.c |
// SPDX-License-Identifier: GPL-2.0
#include "relocs.h"
void die(char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
vfprintf(stderr, fmt, ap);
va_end(ap);
exit(1);
}
static void usage(void)
{
die("relocs [--abs-syms|--abs-relocs|--reloc-info|--text|--realmode]" \
" vmlinux\n");
}
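/*
 * Typical invocations (illustrative), matching the options parsed below:
 *
 *   relocs --abs-relocs vmlinux   # warn about absolute relocations
 *   relocs --reloc-info vmlinux   # print one line per relocation
 *   relocs vmlinux > relocs.bin   # emit the binary relocation stream
 */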
int main(int argc, char **argv)
{
int show_absolute_syms, show_absolute_relocs, show_reloc_info;
int as_text, use_real_mode;
const char *fname;
FILE *fp;
int i;
unsigned char e_ident[EI_NIDENT];
show_absolute_syms = 0;
show_absolute_relocs = 0;
show_reloc_info = 0;
as_text = 0;
use_real_mode = 0;
fname = NULL;
for (i = 1; i < argc; i++) {
char *arg = argv[i];
if (*arg == '-') {
if (strcmp(arg, "--abs-syms") == 0) {
show_absolute_syms = 1;
continue;
}
if (strcmp(arg, "--abs-relocs") == 0) {
show_absolute_relocs = 1;
continue;
}
if (strcmp(arg, "--reloc-info") == 0) {
show_reloc_info = 1;
continue;
}
if (strcmp(arg, "--text") == 0) {
as_text = 1;
continue;
}
if (strcmp(arg, "--realmode") == 0) {
use_real_mode = 1;
continue;
}
}
else if (!fname) {
fname = arg;
continue;
}
usage();
}
if (!fname) {
usage();
}
fp = fopen(fname, "r");
if (!fp) {
die("Cannot open %s: %s\n", fname, strerror(errno));
}
if (fread(&e_ident, 1, EI_NIDENT, fp) != EI_NIDENT) {
die("Cannot read %s: %s", fname, strerror(errno));
}
rewind(fp);
if (e_ident[EI_CLASS] == ELFCLASS64)
process_64(fp, use_real_mode, as_text,
show_absolute_syms, show_absolute_relocs,
show_reloc_info);
else
process_32(fp, use_real_mode, as_text,
show_absolute_syms, show_absolute_relocs,
show_reloc_info);
fclose(fp);
return 0;
}
| linux-master | arch/x86/tools/relocs_common.c |
// SPDX-License-Identifier: GPL-2.0
#include "relocs.h"
#define ELF_BITS 64
#define ELF_MACHINE EM_X86_64
#define ELF_MACHINE_NAME "x86_64"
#define SHT_REL_TYPE SHT_RELA
#define Elf_Rel Elf64_Rela
#define ELF_CLASS ELFCLASS64
#define ELF_R_SYM(val) ELF64_R_SYM(val)
#define ELF_R_TYPE(val) ELF64_R_TYPE(val)
#define ELF_ST_TYPE(o) ELF64_ST_TYPE(o)
#define ELF_ST_BIND(o) ELF64_ST_BIND(o)
#define ELF_ST_VISIBILITY(o) ELF64_ST_VISIBILITY(o)
#include "relocs.c"
| linux-master | arch/x86/tools/relocs_64.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* x86 decoder sanity test - based on test_get_insn.c
*
* Copyright (C) IBM Corporation, 2009
* Copyright (C) Hitachi, Ltd., 2011
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <asm/insn.h>
#include <inat.c>
#include <insn.c>
/*
* Test of instruction analysis against tampering.
* Feed random bytes to the instruction decoder and make sure it never
* reads outside the instruction buffer.
*/
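/*
 * A failing case can be replayed in two ways (see dump_stream() below):
 * pipe the dumped bytes back in, e.g.
 *   $ echo 0f ab cd ... | ./insn_sanity -i -
 * or re-run with the reported seed and iteration number, e.g.
 *   $ ./insn_sanity -s 0x12345678,42
 * (the byte values and seed above are illustrative).
 */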
#define DEFAULT_MAX_ITER 10000
#define INSN_NOP 0x90
static const char *prog; /* Program name */
static int verbose; /* Verbosity */
static int x86_64; /* x86-64 bit mode flag */
static unsigned int seed; /* Random seed */
static unsigned long iter_start; /* Start of iteration number */
static unsigned long iter_end = DEFAULT_MAX_ITER; /* End of iteration number */
static FILE *input_file; /* Input file name */
static void usage(const char *err)
{
if (err)
fprintf(stderr, "%s: Error: %s\n\n", prog, err);
fprintf(stderr, "Usage: %s [-y|-n|-v] [-s seed[,no]] [-m max] [-i input]\n", prog);
fprintf(stderr, "\t-y 64bit mode\n");
fprintf(stderr, "\t-n 32bit mode\n");
fprintf(stderr, "\t-v Verbosity(-vv dumps any decoded result)\n");
fprintf(stderr, "\t-s Give a random seed (and iteration number)\n");
fprintf(stderr, "\t-m Give a maximum iteration number\n");
fprintf(stderr, "\t-i Give an input file with decoded binary\n");
exit(1);
}
static void dump_field(FILE *fp, const char *name, const char *indent,
struct insn_field *field)
{
fprintf(fp, "%s.%s = {\n", indent, name);
fprintf(fp, "%s\t.value = %d, bytes[] = {%x, %x, %x, %x},\n",
indent, field->value, field->bytes[0], field->bytes[1],
field->bytes[2], field->bytes[3]);
fprintf(fp, "%s\t.got = %d, .nbytes = %d},\n", indent,
field->got, field->nbytes);
}
static void dump_insn(FILE *fp, struct insn *insn)
{
fprintf(fp, "Instruction = {\n");
dump_field(fp, "prefixes", "\t", &insn->prefixes);
dump_field(fp, "rex_prefix", "\t", &insn->rex_prefix);
dump_field(fp, "vex_prefix", "\t", &insn->vex_prefix);
dump_field(fp, "opcode", "\t", &insn->opcode);
dump_field(fp, "modrm", "\t", &insn->modrm);
dump_field(fp, "sib", "\t", &insn->sib);
dump_field(fp, "displacement", "\t", &insn->displacement);
dump_field(fp, "immediate1", "\t", &insn->immediate1);
dump_field(fp, "immediate2", "\t", &insn->immediate2);
fprintf(fp, "\t.attr = %x, .opnd_bytes = %d, .addr_bytes = %d,\n",
insn->attr, insn->opnd_bytes, insn->addr_bytes);
fprintf(fp, "\t.length = %d, .x86_64 = %d, .kaddr = %p}\n",
insn->length, insn->x86_64, insn->kaddr);
}
static void dump_stream(FILE *fp, const char *msg, unsigned long nr_iter,
unsigned char *insn_buff, struct insn *insn)
{
int i;
fprintf(fp, "%s:\n", msg);
dump_insn(fp, insn);
fprintf(fp, "You can reproduce this with below command(s);\n");
/* Input a decoded instruction sequence directly */
fprintf(fp, " $ echo ");
for (i = 0; i < MAX_INSN_SIZE; i++)
fprintf(fp, " %02x", insn_buff[i]);
fprintf(fp, " | %s -i -\n", prog);
if (!input_file) {
fprintf(fp, "Or \n");
/* Give a seed and iteration number */
fprintf(fp, " $ %s -s 0x%x,%lu\n", prog, seed, nr_iter);
}
}
static void init_random_seed(void)
{
int fd;
fd = open("/dev/urandom", O_RDONLY);
if (fd < 0)
goto fail;
if (read(fd, &seed, sizeof(seed)) != sizeof(seed))
goto fail;
close(fd);
return;
fail:
usage("Failed to open /dev/urandom");
}
/* Read given instruction sequence from the input file */
static int read_next_insn(unsigned char *insn_buff)
{
char buf[256] = "", *tmp;
int i;
tmp = fgets(buf, ARRAY_SIZE(buf), input_file);
if (tmp == NULL || feof(input_file))
return 0;
for (i = 0; i < MAX_INSN_SIZE; i++) {
insn_buff[i] = (unsigned char)strtoul(tmp, &tmp, 16);
if (*tmp != ' ')
break;
}
return i;
}
static int generate_insn(unsigned char *insn_buff)
{
int i;
if (input_file)
return read_next_insn(insn_buff);
/* Fills buffer with random binary up to MAX_INSN_SIZE */
for (i = 0; i < MAX_INSN_SIZE - 1; i += 2)
*(unsigned short *)(&insn_buff[i]) = random() & 0xffff;
while (i < MAX_INSN_SIZE)
insn_buff[i++] = random() & 0xff;
return i;
}
static void parse_args(int argc, char **argv)
{
int c;
char *tmp = NULL;
int set_seed = 0;
prog = argv[0];
while ((c = getopt(argc, argv, "ynvs:m:i:")) != -1) {
switch (c) {
case 'y':
x86_64 = 1;
break;
case 'n':
x86_64 = 0;
break;
case 'v':
verbose++;
break;
case 'i':
if (strcmp("-", optarg) == 0)
input_file = stdin;
else
input_file = fopen(optarg, "r");
if (!input_file)
usage("Failed to open input file");
break;
case 's':
seed = (unsigned int)strtoul(optarg, &tmp, 0);
if (*tmp == ',') {
optarg = tmp + 1;
iter_start = strtoul(optarg, &tmp, 0);
}
if (*tmp != '\0' || tmp == optarg)
usage("Failed to parse seed");
set_seed = 1;
break;
case 'm':
iter_end = strtoul(optarg, &tmp, 0);
if (*tmp != '\0' || tmp == optarg)
usage("Failed to parse max_iter");
break;
default:
usage(NULL);
}
}
/* Check errors */
if (iter_end < iter_start)
usage("Max iteration number must be bigger than iter-num");
if (set_seed && input_file)
usage("Don't use input file (-i) with random seed (-s)");
/* Initialize random seed */
if (!input_file) {
if (!set_seed) /* No seed is given */
init_random_seed();
srand(seed);
}
}
int main(int argc, char **argv)
{
int insns = 0, ret;
struct insn insn;
int errors = 0;
unsigned long i;
unsigned char insn_buff[MAX_INSN_SIZE * 2];
parse_args(argc, argv);
/* Prepare stop bytes with NOPs */
memset(insn_buff + MAX_INSN_SIZE, INSN_NOP, MAX_INSN_SIZE);
for (i = 0; i < iter_end; i++) {
if (generate_insn(insn_buff) <= 0)
break;
if (i < iter_start) /* Skip to given iteration number */
continue;
/* Decode an instruction */
ret = insn_decode(&insn, insn_buff, sizeof(insn_buff),
x86_64 ? INSN_MODE_64 : INSN_MODE_32);
if (insn.next_byte <= insn.kaddr ||
insn.kaddr + MAX_INSN_SIZE < insn.next_byte) {
/* Access out-of-range memory */
dump_stream(stderr, "Error: Found an access violation", i, insn_buff, &insn);
errors++;
} else if (verbose && ret < 0)
dump_stream(stdout, "Info: Found an undecodable input", i, insn_buff, &insn);
else if (verbose >= 2)
dump_insn(stdout, &insn);
insns++;
}
fprintf((errors) ? stderr : stdout,
"%s: %s: decoded and checked %d %s instructions with %d errors (seed:0x%x)\n",
prog,
(errors) ? "Failure" : "Success",
insns,
(input_file) ? "given" : "random",
errors,
seed);
return errors ? 1 : 0;
}
| linux-master | arch/x86/tools/insn_sanity.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
*
* Copyright (C) IBM Corporation, 2009
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <unistd.h>
#include <stdarg.h>
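/* Userspace build: stub out the kernel's unlikely() branch hint used by the included decoder sources. */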
#define unlikely(cond) (cond)
#include <asm/insn.h>
#include <inat.c>
#include <insn.c>
/*
* Test of instruction analysis in general and insn_get_length() in
* particular. See if insn_get_length() and the disassembler agree
* on the length of each instruction in an elf disassembly.
*
* Usage: objdump -d a.out | awk -f objdump_reformat.awk | ./insn_decoder_test
*/
const char *prog;
static int verbose;
static int x86_64;
static void usage(void)
{
fprintf(stderr, "Usage: objdump -d a.out | awk -f objdump_reformat.awk"
" | %s [-y|-n] [-v]\n", prog);
fprintf(stderr, "\t-y 64bit mode\n");
fprintf(stderr, "\t-n 32bit mode\n");
fprintf(stderr, "\t-v verbose mode\n");
exit(1);
}
static void malformed_line(const char *line, int line_nr)
{
fprintf(stderr, "%s: error: malformed line %d:\n%s",
prog, line_nr, line);
exit(3);
}
static void pr_warn(const char *fmt, ...)
{
va_list ap;
fprintf(stderr, "%s: warning: ", prog);
va_start(ap, fmt);
vfprintf(stderr, fmt, ap);
va_end(ap);
}
static void dump_field(FILE *fp, const char *name, const char *indent,
struct insn_field *field)
{
fprintf(fp, "%s.%s = {\n", indent, name);
fprintf(fp, "%s\t.value = %d, bytes[] = {%x, %x, %x, %x},\n",
indent, field->value, field->bytes[0], field->bytes[1],
field->bytes[2], field->bytes[3]);
fprintf(fp, "%s\t.got = %d, .nbytes = %d},\n", indent,
field->got, field->nbytes);
}
static void dump_insn(FILE *fp, struct insn *insn)
{
fprintf(fp, "Instruction = {\n");
dump_field(fp, "prefixes", "\t", &insn->prefixes);
dump_field(fp, "rex_prefix", "\t", &insn->rex_prefix);
dump_field(fp, "vex_prefix", "\t", &insn->vex_prefix);
dump_field(fp, "opcode", "\t", &insn->opcode);
dump_field(fp, "modrm", "\t", &insn->modrm);
dump_field(fp, "sib", "\t", &insn->sib);
dump_field(fp, "displacement", "\t", &insn->displacement);
dump_field(fp, "immediate1", "\t", &insn->immediate1);
dump_field(fp, "immediate2", "\t", &insn->immediate2);
fprintf(fp, "\t.attr = %x, .opnd_bytes = %d, .addr_bytes = %d,\n",
insn->attr, insn->opnd_bytes, insn->addr_bytes);
fprintf(fp, "\t.length = %d, .x86_64 = %d, .kaddr = %p}\n",
insn->length, insn->x86_64, insn->kaddr);
}
static void parse_args(int argc, char **argv)
{
int c;
prog = argv[0];
while ((c = getopt(argc, argv, "ynv")) != -1) {
switch (c) {
case 'y':
x86_64 = 1;
break;
case 'n':
x86_64 = 0;
break;
case 'v':
verbose = 1;
break;
default:
usage();
}
}
}
#define BUFSIZE 256
int main(int argc, char **argv)
{
char line[BUFSIZE], sym[BUFSIZE] = "<unknown>";
unsigned char insn_buff[16];
struct insn insn;
int insns = 0;
int warnings = 0;
parse_args(argc, argv);
while (fgets(line, BUFSIZE, stdin)) {
char copy[BUFSIZE], *s, *tab1, *tab2;
int nb = 0, ret;
unsigned int b;
if (line[0] == '<') {
/* Symbol line */
strcpy(sym, line);
continue;
}
insns++;
memset(insn_buff, 0, 16);
strcpy(copy, line);
tab1 = strchr(copy, '\t');
if (!tab1)
malformed_line(line, insns);
s = tab1 + 1;
s += strspn(s, " ");
tab2 = strchr(s, '\t');
if (!tab2)
malformed_line(line, insns);
*tab2 = '\0'; /* Characters beyond tab2 aren't examined */
while (s < tab2) {
if (sscanf(s, "%x", &b) == 1) {
insn_buff[nb++] = (unsigned char) b;
s += 3;
} else
break;
}
/* Decode an instruction */
ret = insn_decode(&insn, insn_buff, sizeof(insn_buff),
x86_64 ? INSN_MODE_64 : INSN_MODE_32);
if (ret < 0 || insn.length != nb) {
warnings++;
pr_warn("Found an x86 instruction decoder bug, "
"please report this.\n", sym);
pr_warn("%s", line);
pr_warn("objdump says %d bytes, but insn_get_length() "
"says %d\n", nb, insn.length);
if (verbose)
dump_insn(stderr, &insn);
}
}
if (warnings)
pr_warn("Decoded and checked %d instructions with %d "
"failures\n", insns, warnings);
else
fprintf(stdout, "%s: success: Decoded and checked %d"
" instructions\n", prog, insns);
return 0;
}
| linux-master | arch/x86/tools/insn_decoder_test.c |
// SPDX-License-Identifier: GPL-2.0
/* This is included from relocs_32/64.c */
#define ElfW(type) _ElfW(ELF_BITS, type)
#define _ElfW(bits, type) __ElfW(bits, type)
#define __ElfW(bits, type) Elf##bits##_##type
#define Elf_Addr ElfW(Addr)
#define Elf_Ehdr ElfW(Ehdr)
#define Elf_Phdr ElfW(Phdr)
#define Elf_Shdr ElfW(Shdr)
#define Elf_Sym ElfW(Sym)
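/*
 * Illustration: with ELF_BITS == 64 (relocs_64.c), ElfW(Sym) expands via
 * _ElfW(64, Sym) and __ElfW(64, Sym) to Elf64_Sym; the relocs_32.c build
 * gets Elf32_Sym the same way.
 */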
static Elf_Ehdr ehdr;
static unsigned long shnum;
static unsigned int shstrndx;
static unsigned int shsymtabndx;
static unsigned int shxsymtabndx;
static int sym_index(Elf_Sym *sym);
struct relocs {
uint32_t *offset;
unsigned long count;
unsigned long size;
};
static struct relocs relocs16;
static struct relocs relocs32;
#if ELF_BITS == 64
static struct relocs relocs32neg;
static struct relocs relocs64;
#define FMT PRIu64
#else
#define FMT PRIu32
#endif
struct section {
Elf_Shdr shdr;
struct section *link;
Elf_Sym *symtab;
Elf32_Word *xsymtab;
Elf_Rel *reltab;
char *strtab;
};
static struct section *secs;
static const char * const sym_regex_kernel[S_NSYMTYPES] = {
/*
* Following symbols have been audited. There values are constant and do
* not change if bzImage is loaded at a different physical address than
* the address for which it has been compiled. Don't warn user about
* absolute relocations present w.r.t these symbols.
*/
[S_ABS] =
"^(xen_irq_disable_direct_reloc$|"
"xen_save_fl_direct_reloc$|"
"VDSO|"
"__kcfi_typeid_|"
"__crc_)",
/*
* These symbols are known to be relative, even if the linker marks them
* as absolute (typically defined outside any section in the linker script.)
*/
[S_REL] =
"^(__init_(begin|end)|"
"__x86_cpu_dev_(start|end)|"
"(__parainstructions|__alt_instructions)(_end)?|"
"(__iommu_table|__apicdrivers|__smp_locks)(_end)?|"
"__(start|end)_pci_.*|"
#if CONFIG_FW_LOADER
"__(start|end)_builtin_fw|"
#endif
"__(start|stop)___ksymtab(_gpl)?|"
"__(start|stop)___kcrctab(_gpl)?|"
"__(start|stop)___param|"
"__(start|stop)___modver|"
"__(start|stop)___bug_table|"
"__tracedata_(start|end)|"
"__(start|stop)_notes|"
"__end_rodata|"
"__end_rodata_aligned|"
"__initramfs_start|"
"(jiffies|jiffies_64)|"
#if ELF_BITS == 64
"__per_cpu_load|"
"init_per_cpu__.*|"
"__end_rodata_hpage_align|"
#endif
"__vvar_page|"
"_end)$"
};
static const char * const sym_regex_realmode[S_NSYMTYPES] = {
/*
* These symbols are known to be relative, even if the linker marks them
* as absolute (typically defined outside any section in the linker script.)
*/
[S_REL] =
"^pa_",
/*
* These are 16-bit segment symbols when compiling 16-bit code.
*/
[S_SEG] =
"^real_mode_seg$",
/*
* These are offsets belonging to segments, as opposed to linear addresses,
* when compiling 16-bit code.
*/
[S_LIN] =
"^pa_",
};
static const char * const *sym_regex;
static regex_t sym_regex_c[S_NSYMTYPES];
static int is_reloc(enum symtype type, const char *sym_name)
{
return sym_regex[type] &&
!regexec(&sym_regex_c[type], sym_name, 0, NULL, 0);
}
static void regex_init(int use_real_mode)
{
char errbuf[128];
int err;
int i;
if (use_real_mode)
sym_regex = sym_regex_realmode;
else
sym_regex = sym_regex_kernel;
for (i = 0; i < S_NSYMTYPES; i++) {
if (!sym_regex[i])
continue;
err = regcomp(&sym_regex_c[i], sym_regex[i],
REG_EXTENDED|REG_NOSUB);
if (err) {
regerror(err, &sym_regex_c[i], errbuf, sizeof(errbuf));
die("%s", errbuf);
}
}
}
static const char *sym_type(unsigned type)
{
static const char *type_name[] = {
#define SYM_TYPE(X) [X] = #X
SYM_TYPE(STT_NOTYPE),
SYM_TYPE(STT_OBJECT),
SYM_TYPE(STT_FUNC),
SYM_TYPE(STT_SECTION),
SYM_TYPE(STT_FILE),
SYM_TYPE(STT_COMMON),
SYM_TYPE(STT_TLS),
#undef SYM_TYPE
};
const char *name = "unknown sym type name";
if (type < ARRAY_SIZE(type_name)) {
name = type_name[type];
}
return name;
}
static const char *sym_bind(unsigned bind)
{
static const char *bind_name[] = {
#define SYM_BIND(X) [X] = #X
SYM_BIND(STB_LOCAL),
SYM_BIND(STB_GLOBAL),
SYM_BIND(STB_WEAK),
#undef SYM_BIND
};
const char *name = "unknown sym bind name";
if (bind < ARRAY_SIZE(bind_name)) {
name = bind_name[bind];
}
return name;
}
static const char *sym_visibility(unsigned visibility)
{
static const char *visibility_name[] = {
#define SYM_VISIBILITY(X) [X] = #X
SYM_VISIBILITY(STV_DEFAULT),
SYM_VISIBILITY(STV_INTERNAL),
SYM_VISIBILITY(STV_HIDDEN),
SYM_VISIBILITY(STV_PROTECTED),
#undef SYM_VISIBILITY
};
const char *name = "unknown sym visibility name";
if (visibility < ARRAY_SIZE(visibility_name)) {
name = visibility_name[visibility];
}
return name;
}
static const char *rel_type(unsigned type)
{
static const char *type_name[] = {
#define REL_TYPE(X) [X] = #X
#if ELF_BITS == 64
REL_TYPE(R_X86_64_NONE),
REL_TYPE(R_X86_64_64),
REL_TYPE(R_X86_64_PC64),
REL_TYPE(R_X86_64_PC32),
REL_TYPE(R_X86_64_GOT32),
REL_TYPE(R_X86_64_PLT32),
REL_TYPE(R_X86_64_COPY),
REL_TYPE(R_X86_64_GLOB_DAT),
REL_TYPE(R_X86_64_JUMP_SLOT),
REL_TYPE(R_X86_64_RELATIVE),
REL_TYPE(R_X86_64_GOTPCREL),
REL_TYPE(R_X86_64_32),
REL_TYPE(R_X86_64_32S),
REL_TYPE(R_X86_64_16),
REL_TYPE(R_X86_64_PC16),
REL_TYPE(R_X86_64_8),
REL_TYPE(R_X86_64_PC8),
#else
REL_TYPE(R_386_NONE),
REL_TYPE(R_386_32),
REL_TYPE(R_386_PC32),
REL_TYPE(R_386_GOT32),
REL_TYPE(R_386_PLT32),
REL_TYPE(R_386_COPY),
REL_TYPE(R_386_GLOB_DAT),
REL_TYPE(R_386_JMP_SLOT),
REL_TYPE(R_386_RELATIVE),
REL_TYPE(R_386_GOTOFF),
REL_TYPE(R_386_GOTPC),
REL_TYPE(R_386_8),
REL_TYPE(R_386_PC8),
REL_TYPE(R_386_16),
REL_TYPE(R_386_PC16),
#endif
#undef REL_TYPE
};
const char *name = "unknown type rel type name";
if (type < ARRAY_SIZE(type_name) && type_name[type]) {
name = type_name[type];
}
return name;
}
static const char *sec_name(unsigned shndx)
{
const char *sec_strtab;
const char *name;
sec_strtab = secs[shstrndx].strtab;
name = "<noname>";
if (shndx < shnum) {
name = sec_strtab + secs[shndx].shdr.sh_name;
}
else if (shndx == SHN_ABS) {
name = "ABSOLUTE";
}
else if (shndx == SHN_COMMON) {
name = "COMMON";
}
return name;
}
static const char *sym_name(const char *sym_strtab, Elf_Sym *sym)
{
const char *name;
name = "<noname>";
if (sym->st_name) {
name = sym_strtab + sym->st_name;
}
else {
name = sec_name(sym_index(sym));
}
return name;
}
static Elf_Sym *sym_lookup(const char *symname)
{
int i;
for (i = 0; i < shnum; i++) {
struct section *sec = &secs[i];
long nsyms;
char *strtab;
Elf_Sym *symtab;
Elf_Sym *sym;
if (sec->shdr.sh_type != SHT_SYMTAB)
continue;
nsyms = sec->shdr.sh_size/sizeof(Elf_Sym);
symtab = sec->symtab;
strtab = sec->link->strtab;
for (sym = symtab; --nsyms >= 0; sym++) {
if (!sym->st_name)
continue;
if (strcmp(symname, strtab + sym->st_name) == 0)
return sym;
}
}
return NULL;
}
#if BYTE_ORDER == LITTLE_ENDIAN
#define le16_to_cpu(val) (val)
#define le32_to_cpu(val) (val)
#define le64_to_cpu(val) (val)
#endif
#if BYTE_ORDER == BIG_ENDIAN
#define le16_to_cpu(val) bswap_16(val)
#define le32_to_cpu(val) bswap_32(val)
#define le64_to_cpu(val) bswap_64(val)
#endif
static uint16_t elf16_to_cpu(uint16_t val)
{
return le16_to_cpu(val);
}
static uint32_t elf32_to_cpu(uint32_t val)
{
return le32_to_cpu(val);
}
#define elf_half_to_cpu(x) elf16_to_cpu(x)
#define elf_word_to_cpu(x) elf32_to_cpu(x)
#if ELF_BITS == 64
static uint64_t elf64_to_cpu(uint64_t val)
{
return le64_to_cpu(val);
}
#define elf_addr_to_cpu(x) elf64_to_cpu(x)
#define elf_off_to_cpu(x) elf64_to_cpu(x)
#define elf_xword_to_cpu(x) elf64_to_cpu(x)
#else
#define elf_addr_to_cpu(x) elf32_to_cpu(x)
#define elf_off_to_cpu(x) elf32_to_cpu(x)
#define elf_xword_to_cpu(x) elf32_to_cpu(x)
#endif
static int sym_index(Elf_Sym *sym)
{
Elf_Sym *symtab = secs[shsymtabndx].symtab;
Elf32_Word *xsymtab = secs[shxsymtabndx].xsymtab;
unsigned long offset;
int index;
if (sym->st_shndx != SHN_XINDEX)
return sym->st_shndx;
/* calculate offset of sym from head of table. */
offset = (unsigned long)sym - (unsigned long)symtab;
index = offset / sizeof(*sym);
return elf32_to_cpu(xsymtab[index]);
}
static void read_ehdr(FILE *fp)
{
if (fread(&ehdr, sizeof(ehdr), 1, fp) != 1) {
die("Cannot read ELF header: %s\n",
strerror(errno));
}
if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0) {
die("No ELF magic\n");
}
if (ehdr.e_ident[EI_CLASS] != ELF_CLASS) {
die("Not a %d bit executable\n", ELF_BITS);
}
if (ehdr.e_ident[EI_DATA] != ELFDATA2LSB) {
die("Not a LSB ELF executable\n");
}
if (ehdr.e_ident[EI_VERSION] != EV_CURRENT) {
die("Unknown ELF version\n");
}
/* Convert the fields to native endian */
ehdr.e_type = elf_half_to_cpu(ehdr.e_type);
ehdr.e_machine = elf_half_to_cpu(ehdr.e_machine);
ehdr.e_version = elf_word_to_cpu(ehdr.e_version);
ehdr.e_entry = elf_addr_to_cpu(ehdr.e_entry);
ehdr.e_phoff = elf_off_to_cpu(ehdr.e_phoff);
ehdr.e_shoff = elf_off_to_cpu(ehdr.e_shoff);
ehdr.e_flags = elf_word_to_cpu(ehdr.e_flags);
ehdr.e_ehsize = elf_half_to_cpu(ehdr.e_ehsize);
ehdr.e_phentsize = elf_half_to_cpu(ehdr.e_phentsize);
ehdr.e_phnum = elf_half_to_cpu(ehdr.e_phnum);
ehdr.e_shentsize = elf_half_to_cpu(ehdr.e_shentsize);
ehdr.e_shnum = elf_half_to_cpu(ehdr.e_shnum);
ehdr.e_shstrndx = elf_half_to_cpu(ehdr.e_shstrndx);
shnum = ehdr.e_shnum;
shstrndx = ehdr.e_shstrndx;
if ((ehdr.e_type != ET_EXEC) && (ehdr.e_type != ET_DYN))
die("Unsupported ELF header type\n");
if (ehdr.e_machine != ELF_MACHINE)
die("Not for %s\n", ELF_MACHINE_NAME);
if (ehdr.e_version != EV_CURRENT)
die("Unknown ELF version\n");
if (ehdr.e_ehsize != sizeof(Elf_Ehdr))
die("Bad ELF header size\n");
if (ehdr.e_phentsize != sizeof(Elf_Phdr))
die("Bad program header entry\n");
if (ehdr.e_shentsize != sizeof(Elf_Shdr))
die("Bad section header entry\n");
if (shnum == SHN_UNDEF || shstrndx == SHN_XINDEX) {
Elf_Shdr shdr;
if (fseek(fp, ehdr.e_shoff, SEEK_SET) < 0)
die("Seek to %" FMT " failed: %s\n", ehdr.e_shoff, strerror(errno));
if (fread(&shdr, sizeof(shdr), 1, fp) != 1)
die("Cannot read initial ELF section header: %s\n", strerror(errno));
if (shnum == SHN_UNDEF)
shnum = elf_xword_to_cpu(shdr.sh_size);
if (shstrndx == SHN_XINDEX)
shstrndx = elf_word_to_cpu(shdr.sh_link);
}
if (shstrndx >= shnum)
die("String table index out of bounds\n");
}
static void read_shdrs(FILE *fp)
{
int i;
Elf_Shdr shdr;
secs = calloc(shnum, sizeof(struct section));
if (!secs) {
die("Unable to allocate %ld section headers\n",
shnum);
}
if (fseek(fp, ehdr.e_shoff, SEEK_SET) < 0) {
die("Seek to %" FMT " failed: %s\n",
ehdr.e_shoff, strerror(errno));
}
for (i = 0; i < shnum; i++) {
struct section *sec = &secs[i];
if (fread(&shdr, sizeof(shdr), 1, fp) != 1)
die("Cannot read ELF section headers %d/%ld: %s\n",
i, shnum, strerror(errno));
sec->shdr.sh_name = elf_word_to_cpu(shdr.sh_name);
sec->shdr.sh_type = elf_word_to_cpu(shdr.sh_type);
sec->shdr.sh_flags = elf_xword_to_cpu(shdr.sh_flags);
sec->shdr.sh_addr = elf_addr_to_cpu(shdr.sh_addr);
sec->shdr.sh_offset = elf_off_to_cpu(shdr.sh_offset);
sec->shdr.sh_size = elf_xword_to_cpu(shdr.sh_size);
sec->shdr.sh_link = elf_word_to_cpu(shdr.sh_link);
sec->shdr.sh_info = elf_word_to_cpu(shdr.sh_info);
sec->shdr.sh_addralign = elf_xword_to_cpu(shdr.sh_addralign);
sec->shdr.sh_entsize = elf_xword_to_cpu(shdr.sh_entsize);
if (sec->shdr.sh_link < shnum)
sec->link = &secs[sec->shdr.sh_link];
}
}
static void read_strtabs(FILE *fp)
{
int i;
for (i = 0; i < shnum; i++) {
struct section *sec = &secs[i];
if (sec->shdr.sh_type != SHT_STRTAB) {
continue;
}
sec->strtab = malloc(sec->shdr.sh_size);
if (!sec->strtab) {
die("malloc of %" FMT " bytes for strtab failed\n",
sec->shdr.sh_size);
}
if (fseek(fp, sec->shdr.sh_offset, SEEK_SET) < 0) {
die("Seek to %" FMT " failed: %s\n",
sec->shdr.sh_offset, strerror(errno));
}
if (fread(sec->strtab, 1, sec->shdr.sh_size, fp)
!= sec->shdr.sh_size) {
die("Cannot read symbol table: %s\n",
strerror(errno));
}
}
}
static void read_symtabs(FILE *fp)
{
int i,j;
for (i = 0; i < shnum; i++) {
struct section *sec = &secs[i];
int num_syms;
switch (sec->shdr.sh_type) {
case SHT_SYMTAB_SHNDX:
sec->xsymtab = malloc(sec->shdr.sh_size);
if (!sec->xsymtab) {
die("malloc of %" FMT " bytes for xsymtab failed\n",
sec->shdr.sh_size);
}
if (fseek(fp, sec->shdr.sh_offset, SEEK_SET) < 0) {
die("Seek to %" FMT " failed: %s\n",
sec->shdr.sh_offset, strerror(errno));
}
if (fread(sec->xsymtab, 1, sec->shdr.sh_size, fp)
!= sec->shdr.sh_size) {
die("Cannot read extended symbol table: %s\n",
strerror(errno));
}
shxsymtabndx = i;
continue;
case SHT_SYMTAB:
num_syms = sec->shdr.sh_size / sizeof(Elf_Sym);
sec->symtab = malloc(sec->shdr.sh_size);
if (!sec->symtab) {
die("malloc of %" FMT " bytes for symtab failed\n",
sec->shdr.sh_size);
}
if (fseek(fp, sec->shdr.sh_offset, SEEK_SET) < 0) {
die("Seek to %" FMT " failed: %s\n",
sec->shdr.sh_offset, strerror(errno));
}
if (fread(sec->symtab, 1, sec->shdr.sh_size, fp)
!= sec->shdr.sh_size) {
die("Cannot read symbol table: %s\n",
strerror(errno));
}
for (j = 0; j < num_syms; j++) {
Elf_Sym *sym = &sec->symtab[j];
sym->st_name = elf_word_to_cpu(sym->st_name);
sym->st_value = elf_addr_to_cpu(sym->st_value);
sym->st_size = elf_xword_to_cpu(sym->st_size);
sym->st_shndx = elf_half_to_cpu(sym->st_shndx);
}
shsymtabndx = i;
continue;
default:
continue;
}
}
}
static void read_relocs(FILE *fp)
{
int i,j;
for (i = 0; i < shnum; i++) {
struct section *sec = &secs[i];
if (sec->shdr.sh_type != SHT_REL_TYPE) {
continue;
}
sec->reltab = malloc(sec->shdr.sh_size);
if (!sec->reltab) {
die("malloc of %" FMT " bytes for relocs failed\n",
sec->shdr.sh_size);
}
if (fseek(fp, sec->shdr.sh_offset, SEEK_SET) < 0) {
die("Seek to %" FMT " failed: %s\n",
sec->shdr.sh_offset, strerror(errno));
}
if (fread(sec->reltab, 1, sec->shdr.sh_size, fp)
!= sec->shdr.sh_size) {
die("Cannot read symbol table: %s\n",
strerror(errno));
}
for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
Elf_Rel *rel = &sec->reltab[j];
rel->r_offset = elf_addr_to_cpu(rel->r_offset);
rel->r_info = elf_xword_to_cpu(rel->r_info);
#if (SHT_REL_TYPE == SHT_RELA)
rel->r_addend = elf_xword_to_cpu(rel->r_addend);
#endif
}
}
}
static void print_absolute_symbols(void)
{
int i;
const char *format;
if (ELF_BITS == 64)
format = "%5d %016"PRIx64" %5"PRId64" %10s %10s %12s %s\n";
else
format = "%5d %08"PRIx32" %5"PRId32" %10s %10s %12s %s\n";
printf("Absolute symbols\n");
printf(" Num: Value Size Type Bind Visibility Name\n");
for (i = 0; i < shnum; i++) {
struct section *sec = &secs[i];
char *sym_strtab;
int j;
if (sec->shdr.sh_type != SHT_SYMTAB) {
continue;
}
sym_strtab = sec->link->strtab;
for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Sym); j++) {
Elf_Sym *sym;
const char *name;
sym = &sec->symtab[j];
name = sym_name(sym_strtab, sym);
if (sym->st_shndx != SHN_ABS) {
continue;
}
printf(format,
j, sym->st_value, sym->st_size,
sym_type(ELF_ST_TYPE(sym->st_info)),
sym_bind(ELF_ST_BIND(sym->st_info)),
sym_visibility(ELF_ST_VISIBILITY(sym->st_other)),
name);
}
}
printf("\n");
}
static void print_absolute_relocs(void)
{
int i, printed = 0;
const char *format;
if (ELF_BITS == 64)
format = "%016"PRIx64" %016"PRIx64" %10s %016"PRIx64" %s\n";
else
format = "%08"PRIx32" %08"PRIx32" %10s %08"PRIx32" %s\n";
for (i = 0; i < shnum; i++) {
struct section *sec = &secs[i];
struct section *sec_applies, *sec_symtab;
char *sym_strtab;
Elf_Sym *sh_symtab;
int j;
if (sec->shdr.sh_type != SHT_REL_TYPE) {
continue;
}
sec_symtab = sec->link;
sec_applies = &secs[sec->shdr.sh_info];
if (!(sec_applies->shdr.sh_flags & SHF_ALLOC)) {
continue;
}
sh_symtab = sec_symtab->symtab;
sym_strtab = sec_symtab->link->strtab;
for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
Elf_Rel *rel;
Elf_Sym *sym;
const char *name;
rel = &sec->reltab[j];
sym = &sh_symtab[ELF_R_SYM(rel->r_info)];
name = sym_name(sym_strtab, sym);
if (sym->st_shndx != SHN_ABS) {
continue;
}
/* Absolute symbols are not relocated if bzImage is
* loaded at a non-compiled address. Display a warning
* to the user at compile time about the absolute
* relocations present.
*
* Users need to audit the code to make sure
* some symbols which should have been section
* relative have not become absolute because of some
* linker optimization or wrong programming usage.
*
* Before warning check if this absolute symbol
* relocation is harmless.
*/
if (is_reloc(S_ABS, name) || is_reloc(S_REL, name))
continue;
if (!printed) {
printf("WARNING: Absolute relocations"
" present\n");
printf("Offset Info Type Sym.Value "
"Sym.Name\n");
printed = 1;
}
printf(format,
rel->r_offset,
rel->r_info,
rel_type(ELF_R_TYPE(rel->r_info)),
sym->st_value,
name);
}
}
if (printed)
printf("\n");
}
static void add_reloc(struct relocs *r, uint32_t offset)
{
if (r->count == r->size) {
unsigned long newsize = r->size + 50000;
void *mem = realloc(r->offset, newsize * sizeof(r->offset[0]));
if (!mem)
die("realloc of %ld entries for relocs failed\n",
newsize);
r->offset = mem;
r->size = newsize;
}
r->offset[r->count++] = offset;
}
static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
Elf_Sym *sym, const char *symname))
{
int i;
/* Walk through the relocations */
for (i = 0; i < shnum; i++) {
char *sym_strtab;
Elf_Sym *sh_symtab;
struct section *sec_applies, *sec_symtab;
int j;
struct section *sec = &secs[i];
if (sec->shdr.sh_type != SHT_REL_TYPE) {
continue;
}
sec_symtab = sec->link;
sec_applies = &secs[sec->shdr.sh_info];
if (!(sec_applies->shdr.sh_flags & SHF_ALLOC)) {
continue;
}
sh_symtab = sec_symtab->symtab;
sym_strtab = sec_symtab->link->strtab;
for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
Elf_Rel *rel = &sec->reltab[j];
Elf_Sym *sym = &sh_symtab[ELF_R_SYM(rel->r_info)];
const char *symname = sym_name(sym_strtab, sym);
process(sec, rel, sym, symname);
}
}
}
/*
* The .data..percpu section is a special case for x86_64 SMP kernels.
* It is used to initialize the actual per_cpu areas and to provide
* definitions for the per_cpu variables that correspond to their offsets
* within the percpu area. Since the values of all of the symbols need
* to be offsets from the start of the per_cpu area the virtual address
* (sh_addr) of .data..percpu is 0 in SMP kernels.
*
* This means that:
*
* Relocations that reference symbols in the per_cpu area do not
* need further relocation (since the value is an offset relative
* to the start of the per_cpu area that does not change).
*
* Relocations that apply to the per_cpu area need to have their
* offset adjusted by the value of __per_cpu_load to make them
* point to the correct place in the loaded image (because the
* virtual address of .data..percpu is 0).
*
* For non SMP kernels .data..percpu is linked as part of the normal
* kernel data and does not require special treatment.
*
*/
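/*
 * Concretely, in the do_reloc64() handling below: 32/64-bit references
 * to a per-CPU symbol are skipped entirely (the value is already the
 * final offset), while any relocation that applies to the .data..percpu
 * section itself has its recorded offset biased by __per_cpu_load.
 */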
static int per_cpu_shndx = -1;
static Elf_Addr per_cpu_load_addr;
static void percpu_init(void)
{
int i;
for (i = 0; i < shnum; i++) {
ElfW(Sym) *sym;
if (strcmp(sec_name(i), ".data..percpu"))
continue;
if (secs[i].shdr.sh_addr != 0) /* non SMP kernel */
return;
sym = sym_lookup("__per_cpu_load");
if (!sym)
die("can't find __per_cpu_load\n");
per_cpu_shndx = i;
per_cpu_load_addr = sym->st_value;
return;
}
}
#if ELF_BITS == 64
/*
* Check to see if a symbol lies in the .data..percpu section.
*
* The linker incorrectly associates some symbols with the
* .data..percpu section so we also need to check the symbol
* name to make sure that we classify the symbol correctly.
*
* The GNU linker incorrectly associates:
* __init_begin
* __per_cpu_load
*
* The "gold" linker incorrectly associates:
* init_per_cpu__fixed_percpu_data
* init_per_cpu__gdt_page
*/
static int is_percpu_sym(ElfW(Sym) *sym, const char *symname)
{
int shndx = sym_index(sym);
return (shndx == per_cpu_shndx) &&
strcmp(symname, "__init_begin") &&
strcmp(symname, "__per_cpu_load") &&
strncmp(symname, "init_per_cpu_", 13);
}
static int do_reloc64(struct section *sec, Elf_Rel *rel, ElfW(Sym) *sym,
const char *symname)
{
unsigned r_type = ELF64_R_TYPE(rel->r_info);
ElfW(Addr) offset = rel->r_offset;
int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname);
if (sym->st_shndx == SHN_UNDEF)
return 0;
/*
* Adjust the offset if this reloc applies to the percpu section.
*/
if (sec->shdr.sh_info == per_cpu_shndx)
offset += per_cpu_load_addr;
switch (r_type) {
case R_X86_64_NONE:
/* NONE can be ignored. */
break;
case R_X86_64_PC32:
case R_X86_64_PLT32:
/*
* PC relative relocations don't need to be adjusted unless
* referencing a percpu symbol.
*
* NB: R_X86_64_PLT32 can be treated as R_X86_64_PC32.
*/
if (is_percpu_sym(sym, symname))
add_reloc(&relocs32neg, offset);
break;
case R_X86_64_PC64:
/*
* Only used by jump labels
*/
if (is_percpu_sym(sym, symname))
die("Invalid R_X86_64_PC64 relocation against per-CPU symbol %s\n",
symname);
break;
case R_X86_64_32:
case R_X86_64_32S:
case R_X86_64_64:
/*
* References to the percpu area don't need to be adjusted.
*/
if (is_percpu_sym(sym, symname))
break;
if (shn_abs) {
/*
* Whitelisted absolute symbols do not require
* relocation.
*/
if (is_reloc(S_ABS, symname))
break;
die("Invalid absolute %s relocation: %s\n",
rel_type(r_type), symname);
break;
}
/*
* Relocation offsets for 64 bit kernels are output
* as 32 bits and sign extended back to 64 bits when
* the relocations are processed.
* Make sure that the offset will fit.
*/
if ((int32_t)offset != (int64_t)offset)
die("Relocation offset doesn't fit in 32 bits\n");
if (r_type == R_X86_64_64)
add_reloc(&relocs64, offset);
else
add_reloc(&relocs32, offset);
break;
default:
die("Unsupported relocation type: %s (%d)\n",
rel_type(r_type), r_type);
break;
}
return 0;
}
#else
static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
const char *symname)
{
unsigned r_type = ELF32_R_TYPE(rel->r_info);
int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname);
switch (r_type) {
case R_386_NONE:
case R_386_PC32:
case R_386_PC16:
case R_386_PC8:
case R_386_PLT32:
/*
* NONE can be ignored and PC relative relocations don't need
* to be adjusted. Because sym must be defined, R_386_PLT32 can
* be treated the same way as R_386_PC32.
*/
break;
case R_386_32:
if (shn_abs) {
/*
* Whitelisted absolute symbols do not require
* relocation.
*/
if (is_reloc(S_ABS, symname))
break;
die("Invalid absolute %s relocation: %s\n",
rel_type(r_type), symname);
break;
}
add_reloc(&relocs32, rel->r_offset);
break;
default:
die("Unsupported relocation type: %s (%d)\n",
rel_type(r_type), r_type);
break;
}
return 0;
}
static int do_reloc_real(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
const char *symname)
{
unsigned r_type = ELF32_R_TYPE(rel->r_info);
int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname);
switch (r_type) {
case R_386_NONE:
case R_386_PC32:
case R_386_PC16:
case R_386_PC8:
case R_386_PLT32:
/*
* NONE can be ignored and PC relative relocations don't need
* to be adjusted. Because sym must be defined, R_386_PLT32 can
* be treated the same way as R_386_PC32.
*/
break;
case R_386_16:
if (shn_abs) {
/*
* Whitelisted absolute symbols do not require
* relocation.
*/
if (is_reloc(S_ABS, symname))
break;
if (is_reloc(S_SEG, symname)) {
add_reloc(&relocs16, rel->r_offset);
break;
}
} else {
if (!is_reloc(S_LIN, symname))
break;
}
die("Invalid %s %s relocation: %s\n",
shn_abs ? "absolute" : "relative",
rel_type(r_type), symname);
break;
case R_386_32:
if (shn_abs) {
/*
* Whitelisted absolute symbols do not require
* relocation.
*/
if (is_reloc(S_ABS, symname))
break;
if (is_reloc(S_REL, symname)) {
add_reloc(&relocs32, rel->r_offset);
break;
}
} else {
if (is_reloc(S_LIN, symname))
add_reloc(&relocs32, rel->r_offset);
break;
}
die("Invalid %s %s relocation: %s\n",
shn_abs ? "absolute" : "relative",
rel_type(r_type), symname);
break;
default:
die("Unsupported relocation type: %s (%d)\n",
rel_type(r_type), r_type);
break;
}
return 0;
}
#endif
static int cmp_relocs(const void *va, const void *vb)
{
const uint32_t *a, *b;
a = va; b = vb;
return (*a == *b)? 0 : (*a > *b)? 1 : -1;
}
static void sort_relocs(struct relocs *r)
{
qsort(r->offset, r->count, sizeof(r->offset[0]), cmp_relocs);
}
static int write32(uint32_t v, FILE *f)
{
unsigned char buf[4];
put_unaligned_le32(v, buf);
return fwrite(buf, 1, 4, f) == 4 ? 0 : -1;
}
static int write32_as_text(uint32_t v, FILE *f)
{
return fprintf(f, "\t.long 0x%08"PRIx32"\n", v) > 0 ? 0 : -1;
}
static void emit_relocs(int as_text, int use_real_mode)
{
int i;
int (*write_reloc)(uint32_t, FILE *) = write32;
int (*do_reloc)(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
const char *symname);
#if ELF_BITS == 64
if (!use_real_mode)
do_reloc = do_reloc64;
else
die("--realmode not valid for a 64-bit ELF file");
#else
if (!use_real_mode)
do_reloc = do_reloc32;
else
do_reloc = do_reloc_real;
#endif
/* Collect up the relocations */
walk_relocs(do_reloc);
if (relocs16.count && !use_real_mode)
die("Segment relocations found but --realmode not specified\n");
/* Order the relocations for more efficient processing */
sort_relocs(&relocs32);
#if ELF_BITS == 64
sort_relocs(&relocs32neg);
sort_relocs(&relocs64);
#else
sort_relocs(&relocs16);
#endif
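/*
 * Stream layout written below: with --realmode, a 32-bit count followed
 * by the 16-bit segment reloc offsets (each written as 32 bits), then a
 * count and the 32-bit reloc offsets. Otherwise every offset list (the
 * 64-bit and inverse 32-bit lists on ELF_BITS == 64, then the plain
 * 32-bit list) is preceded by a 32-bit zero marker.
 */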
/* Print the relocations */
if (as_text) {
/* Print the relocations in a form that
* gas will accept.
*/
printf(".section \".data.reloc\",\"a\"\n");
printf(".balign 4\n");
write_reloc = write32_as_text;
}
if (use_real_mode) {
write_reloc(relocs16.count, stdout);
for (i = 0; i < relocs16.count; i++)
write_reloc(relocs16.offset[i], stdout);
write_reloc(relocs32.count, stdout);
for (i = 0; i < relocs32.count; i++)
write_reloc(relocs32.offset[i], stdout);
} else {
#if ELF_BITS == 64
/* Print a stop */
write_reloc(0, stdout);
/* Now print each relocation */
for (i = 0; i < relocs64.count; i++)
write_reloc(relocs64.offset[i], stdout);
/* Print a stop */
write_reloc(0, stdout);
/* Now print each inverse 32-bit relocation */
for (i = 0; i < relocs32neg.count; i++)
write_reloc(relocs32neg.offset[i], stdout);
#endif
/* Print a stop */
write_reloc(0, stdout);
/* Now print each relocation */
for (i = 0; i < relocs32.count; i++)
write_reloc(relocs32.offset[i], stdout);
}
}
/*
* As an aid to debugging problems with different linkers
* print summary information about the relocs.
* Since different linkers tend to emit the sections in
* different orders we use the section names in the output.
*/
static int do_reloc_info(struct section *sec, Elf_Rel *rel, ElfW(Sym) *sym,
const char *symname)
{
printf("%s\t%s\t%s\t%s\n",
sec_name(sec->shdr.sh_info),
rel_type(ELF_R_TYPE(rel->r_info)),
symname,
sec_name(sym_index(sym)));
return 0;
}
static void print_reloc_info(void)
{
printf("reloc section\treloc type\tsymbol\tsymbol section\n");
walk_relocs(do_reloc_info);
}
#if ELF_BITS == 64
# define process process_64
#else
# define process process_32
#endif
void process(FILE *fp, int use_real_mode, int as_text,
int show_absolute_syms, int show_absolute_relocs,
int show_reloc_info)
{
regex_init(use_real_mode);
read_ehdr(fp);
read_shdrs(fp);
read_strtabs(fp);
read_symtabs(fp);
read_relocs(fp);
if (ELF_BITS == 64)
percpu_init();
if (show_absolute_syms) {
print_absolute_symbols();
return;
}
if (show_absolute_relocs) {
print_absolute_relocs();
return;
}
if (show_reloc_info) {
print_reloc_info();
return;
}
emit_relocs(as_text, use_real_mode);
}
| linux-master | arch/x86/tools/relocs.c |
// SPDX-License-Identifier: GPL-2.0
#include "relocs.h"
#define ELF_BITS 32
#define ELF_MACHINE EM_386
#define ELF_MACHINE_NAME "i386"
#define SHT_REL_TYPE SHT_REL
#define Elf_Rel ElfW(Rel)
#define ELF_CLASS ELFCLASS32
#define ELF_R_SYM(val) ELF32_R_SYM(val)
#define ELF_R_TYPE(val) ELF32_R_TYPE(val)
#define ELF_ST_TYPE(o) ELF32_ST_TYPE(o)
#define ELF_ST_BIND(o) ELF32_ST_BIND(o)
#define ELF_ST_VISIBILITY(o) ELF32_ST_VISIBILITY(o)
#include "relocs.c"
| linux-master | arch/x86/tools/relocs_32.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* BPF JIT compiler
*
* Copyright (C) 2011-2013 Eric Dumazet ([email protected])
* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
*/
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <linux/memory.h>
#include <linux/sort.h>
#include <asm/extable.h>
#include <asm/ftrace.h>
#include <asm/set_memory.h>
#include <asm/nospec-branch.h>
#include <asm/text-patching.h>
static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
{
if (len == 1)
*ptr = bytes;
else if (len == 2)
*(u16 *)ptr = bytes;
else {
*(u32 *)ptr = bytes;
barrier();
}
return ptr + len;
}
#define EMIT(bytes, len) \
do { prog = emit_code(prog, bytes, len); } while (0)
#define EMIT1(b1) EMIT(b1, 1)
#define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
#define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
#define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
#define EMIT1_off32(b1, off) \
do { EMIT1(b1); EMIT(off, 4); } while (0)
#define EMIT2_off32(b1, b2, off) \
do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
#define EMIT3_off32(b1, b2, b3, off) \
do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
#define EMIT4_off32(b1, b2, b3, b4, off) \
do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
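/*
 * Example: EMIT2(0x31, 0xC0) packs the bytes as 0x31 + (0xC0 << 8) and
 * emit_code() stores them little-endian, so the emitted byte stream is
 * 31 c0 ("xor eax, eax").
 */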
#ifdef CONFIG_X86_KERNEL_IBT
#define EMIT_ENDBR() EMIT(gen_endbr(), 4)
#else
#define EMIT_ENDBR()
#endif
static bool is_imm8(int value)
{
return value <= 127 && value >= -128;
}
static bool is_simm32(s64 value)
{
return value == (s64)(s32)value;
}
static bool is_uimm32(u64 value)
{
return value == (u64)(u32)value;
}
/* mov dst, src */
#define EMIT_mov(DST, SRC) \
do { \
if (DST != SRC) \
EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
} while (0)
static int bpf_size_to_x86_bytes(int bpf_size)
{
if (bpf_size == BPF_W)
return 4;
else if (bpf_size == BPF_H)
return 2;
else if (bpf_size == BPF_B)
return 1;
else if (bpf_size == BPF_DW)
return 4; /* imm32 */
else
return 0;
}
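/*
 * Note: BPF_DW maps to 4 because the x86-64 mov used for a 64-bit store
 * takes a 32-bit immediate which the CPU sign-extends.
 */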
/*
* List of x86 cond jumps opcodes (. + s8)
* Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
*/
#define X86_JB 0x72
#define X86_JAE 0x73
#define X86_JE 0x74
#define X86_JNE 0x75
#define X86_JBE 0x76
#define X86_JA 0x77
#define X86_JL 0x7C
#define X86_JGE 0x7D
#define X86_JLE 0x7E
#define X86_JG 0x7F
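/*
 * Example: X86_JE is the short form "74 rel8"; following the rule above,
 * the near form is "0f 84 rel32" (0x74 + 0x10 = 0x84, plus the 0x0f
 * prefix).
 */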
/* Pick a register outside of BPF range for JIT internal work */
#define AUX_REG (MAX_BPF_JIT_REG + 1)
#define X86_REG_R9 (MAX_BPF_JIT_REG + 2)
/*
* The following table maps BPF registers to x86-64 registers.
*
* x86-64 register R12 is unused: when used as the base address
* register in load/store instructions it always needs an
* extra byte of encoding, and it is callee-saved.
*
* x86-64 register R9 is not used by BPF programs, but can be used by BPF
* trampoline. x86-64 register R10 is used for blinding (if enabled).
*/
static const int reg2hex[] = {
[BPF_REG_0] = 0, /* RAX */
[BPF_REG_1] = 7, /* RDI */
[BPF_REG_2] = 6, /* RSI */
[BPF_REG_3] = 2, /* RDX */
[BPF_REG_4] = 1, /* RCX */
[BPF_REG_5] = 0, /* R8 */
[BPF_REG_6] = 3, /* RBX callee saved */
[BPF_REG_7] = 5, /* R13 callee saved */
[BPF_REG_8] = 6, /* R14 callee saved */
[BPF_REG_9] = 7, /* R15 callee saved */
[BPF_REG_FP] = 5, /* RBP readonly */
[BPF_REG_AX] = 2, /* R10 temp register */
[AUX_REG] = 3, /* R11 temp register */
[X86_REG_R9] = 1, /* R9 register, 6th function argument */
};
static const int reg2pt_regs[] = {
[BPF_REG_0] = offsetof(struct pt_regs, ax),
[BPF_REG_1] = offsetof(struct pt_regs, di),
[BPF_REG_2] = offsetof(struct pt_regs, si),
[BPF_REG_3] = offsetof(struct pt_regs, dx),
[BPF_REG_4] = offsetof(struct pt_regs, cx),
[BPF_REG_5] = offsetof(struct pt_regs, r8),
[BPF_REG_6] = offsetof(struct pt_regs, bx),
[BPF_REG_7] = offsetof(struct pt_regs, r13),
[BPF_REG_8] = offsetof(struct pt_regs, r14),
[BPF_REG_9] = offsetof(struct pt_regs, r15),
};
/*
* is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15
* which need extra byte of encoding.
* rax,rcx,...,rbp have simpler encoding
*/
static bool is_ereg(u32 reg)
{
return (1 << reg) & (BIT(BPF_REG_5) |
BIT(AUX_REG) |
BIT(BPF_REG_7) |
BIT(BPF_REG_8) |
BIT(BPF_REG_9) |
BIT(X86_REG_R9) |
BIT(BPF_REG_AX));
}
/*
* is_ereg_8l() == true if BPF register 'reg' is mapped to access x86-64
* lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need extra byte
* of encoding. al,cl,dl,bl have simpler encoding.
*/
static bool is_ereg_8l(u32 reg)
{
return is_ereg(reg) ||
(1 << reg) & (BIT(BPF_REG_1) |
BIT(BPF_REG_2) |
BIT(BPF_REG_FP));
}
static bool is_axreg(u32 reg)
{
return reg == BPF_REG_0;
}
/* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */
static u8 add_1mod(u8 byte, u32 reg)
{
if (is_ereg(reg))
byte |= 1;
return byte;
}
static u8 add_2mod(u8 byte, u32 r1, u32 r2)
{
if (is_ereg(r1))
byte |= 1;
if (is_ereg(r2))
byte |= 4;
return byte;
}
/* Encode 'dst_reg' register into x86-64 opcode 'byte' */
static u8 add_1reg(u8 byte, u32 dst_reg)
{
return byte + reg2hex[dst_reg];
}
/* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */
static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
{
return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
}
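/*
 * Example: EMIT_mov(BPF_REG_0, BPF_REG_1) emits 48 89 f8, i.e.
 * "mov rax, rdi": REX.W from add_2mod(0x48, ...), opcode 0x89, and ModRM
 * 0xC0 + reg2hex[BPF_REG_0] + (reg2hex[BPF_REG_1] << 3) = 0xf8.
 */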
/* Some 1-byte opcodes for binary ALU operations */
static u8 simple_alu_opcodes[] = {
[BPF_ADD] = 0x01,
[BPF_SUB] = 0x29,
[BPF_AND] = 0x21,
[BPF_OR] = 0x09,
[BPF_XOR] = 0x31,
[BPF_LSH] = 0xE0,
[BPF_RSH] = 0xE8,
[BPF_ARSH] = 0xF8,
};
static void jit_fill_hole(void *area, unsigned int size)
{
/* Fill whole space with INT3 instructions */
memset(area, 0xcc, size);
}
int bpf_arch_text_invalidate(void *dst, size_t len)
{
return IS_ERR_OR_NULL(text_poke_set(dst, 0xcc, len));
}
struct jit_context {
int cleanup_addr; /* Epilogue code offset */
/*
* Program specific offsets of labels in the code; these rely on the
* JIT doing at least 2 passes, recording the position on the first
* pass, only to generate the correct offset on the second pass.
*/
int tail_call_direct_label;
int tail_call_indirect_label;
};
/* Maximum number of bytes emitted while JITing one eBPF insn */
#define BPF_MAX_INSN_SIZE 128
#define BPF_INSN_SAFETY 64
/* Number of bytes emit_patch() needs to generate instructions */
#define X86_PATCH_SIZE 5
/* Number of bytes that will be skipped on tailcall */
#define X86_TAIL_CALL_OFFSET (11 + ENDBR_INSN_SIZE)
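/*
 * The 11 skipped bytes are emitted by emit_prologue() below: the 5-byte
 * patchable nop area, the 2-byte "xor eax, eax" (or nop2), "push rbp"
 * (1 byte) and "mov rbp, rsp" (3 bytes); ENDBR_INSN_SIZE accounts for
 * the leading ENDBR on IBT builds.
 */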
static void push_callee_regs(u8 **pprog, bool *callee_regs_used)
{
u8 *prog = *pprog;
if (callee_regs_used[0])
EMIT1(0x53); /* push rbx */
if (callee_regs_used[1])
EMIT2(0x41, 0x55); /* push r13 */
if (callee_regs_used[2])
EMIT2(0x41, 0x56); /* push r14 */
if (callee_regs_used[3])
EMIT2(0x41, 0x57); /* push r15 */
*pprog = prog;
}
static void pop_callee_regs(u8 **pprog, bool *callee_regs_used)
{
u8 *prog = *pprog;
if (callee_regs_used[3])
EMIT2(0x41, 0x5F); /* pop r15 */
if (callee_regs_used[2])
EMIT2(0x41, 0x5E); /* pop r14 */
if (callee_regs_used[1])
EMIT2(0x41, 0x5D); /* pop r13 */
if (callee_regs_used[0])
EMIT1(0x5B); /* pop rbx */
*pprog = prog;
}
/*
* Emit x86-64 prologue code for BPF program.
* bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes
* while jumping to another program
*/
static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
bool tail_call_reachable, bool is_subprog)
{
u8 *prog = *pprog;
/* BPF trampoline can be made to work without these nops,
* but let's waste 5 bytes for now and optimize later
*/
EMIT_ENDBR();
memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
prog += X86_PATCH_SIZE;
if (!ebpf_from_cbpf) {
if (tail_call_reachable && !is_subprog)
EMIT2(0x31, 0xC0); /* xor eax, eax */
else
EMIT2(0x66, 0x90); /* nop2 */
}
EMIT1(0x55); /* push rbp */
EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
/* X86_TAIL_CALL_OFFSET is here */
EMIT_ENDBR();
/* sub rsp, rounded_stack_depth */
if (stack_depth)
EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
if (tail_call_reachable)
EMIT1(0x50); /* push rax */
*pprog = prog;
}
static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode)
{
u8 *prog = *pprog;
s64 offset;
offset = func - (ip + X86_PATCH_SIZE);
if (!is_simm32(offset)) {
pr_err("Target call %p is out of range\n", func);
return -ERANGE;
}
EMIT1_off32(opcode, offset);
*pprog = prog;
return 0;
}
static int emit_call(u8 **pprog, void *func, void *ip)
{
return emit_patch(pprog, func, ip, 0xE8);
}
static int emit_rsb_call(u8 **pprog, void *func, void *ip)
{
OPTIMIZER_HIDE_VAR(func);
x86_call_depth_emit_accounting(pprog, func);
return emit_patch(pprog, func, ip, 0xE8);
}
static int emit_jump(u8 **pprog, void *func, void *ip)
{
return emit_patch(pprog, func, ip, 0xE9);
}
static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
void *old_addr, void *new_addr)
{
const u8 *nop_insn = x86_nops[5];
u8 old_insn[X86_PATCH_SIZE];
u8 new_insn[X86_PATCH_SIZE];
u8 *prog;
int ret;
memcpy(old_insn, nop_insn, X86_PATCH_SIZE);
if (old_addr) {
prog = old_insn;
ret = t == BPF_MOD_CALL ?
emit_call(&prog, old_addr, ip) :
emit_jump(&prog, old_addr, ip);
if (ret)
return ret;
}
memcpy(new_insn, nop_insn, X86_PATCH_SIZE);
if (new_addr) {
prog = new_insn;
ret = t == BPF_MOD_CALL ?
emit_call(&prog, new_addr, ip) :
emit_jump(&prog, new_addr, ip);
if (ret)
return ret;
}
ret = -EBUSY;
mutex_lock(&text_mutex);
if (memcmp(ip, old_insn, X86_PATCH_SIZE))
goto out;
ret = 1;
if (memcmp(ip, new_insn, X86_PATCH_SIZE)) {
text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL);
ret = 0;
}
out:
mutex_unlock(&text_mutex);
return ret;
}
int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
void *old_addr, void *new_addr)
{
if (!is_kernel_text((long)ip) &&
!is_bpf_text_address((long)ip))
/* BPF poking in modules is not supported */
return -EINVAL;
/*
* See emit_prologue(), for IBT builds the trampoline hook is preceded
* with an ENDBR instruction.
*/
if (is_endbr(*(u32 *)ip))
ip += ENDBR_INSN_SIZE;
return __bpf_arch_text_poke(ip, t, old_addr, new_addr);
}
#define EMIT_LFENCE() EMIT3(0x0F, 0xAE, 0xE8)
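/* 0f ae e8 is the encoding of LFENCE, used here to fence speculation. */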
static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip)
{
u8 *prog = *pprog;
if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
EMIT_LFENCE();
EMIT2(0xFF, 0xE0 + reg);
} else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
OPTIMIZER_HIDE_VAR(reg);
if (cpu_feature_enabled(X86_FEATURE_CALL_DEPTH))
emit_jump(&prog, &__x86_indirect_jump_thunk_array[reg], ip);
else
emit_jump(&prog, &__x86_indirect_thunk_array[reg], ip);
} else {
EMIT2(0xFF, 0xE0 + reg); /* jmp *%\reg */
if (IS_ENABLED(CONFIG_RETPOLINE) || IS_ENABLED(CONFIG_SLS))
EMIT1(0xCC); /* int3 */
}
*pprog = prog;
}
static void emit_return(u8 **pprog, u8 *ip)
{
u8 *prog = *pprog;
if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
emit_jump(&prog, x86_return_thunk, ip);
} else {
EMIT1(0xC3); /* ret */
if (IS_ENABLED(CONFIG_SLS))
EMIT1(0xCC); /* int3 */
}
*pprog = prog;
}
/*
* Generate the following code:
*
* ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
* if (index >= array->map.max_entries)
* goto out;
* if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
* goto out;
* prog = array->ptrs[index];
* if (prog == NULL)
* goto out;
* goto *(prog->bpf_func + prologue_size);
* out:
*/
static void emit_bpf_tail_call_indirect(u8 **pprog, bool *callee_regs_used,
u32 stack_depth, u8 *ip,
struct jit_context *ctx)
{
int tcc_off = -4 - round_up(stack_depth, 8);
u8 *prog = *pprog, *start = *pprog;
int offset;
/*
* rdi - pointer to ctx
* rsi - pointer to bpf_array
* rdx - index in bpf_array
*/
/*
* if (index >= array->map.max_entries)
* goto out;
*/
EMIT2(0x89, 0xD2); /* mov edx, edx */
EMIT3(0x39, 0x56, /* cmp dword ptr [rsi + 16], edx */
offsetof(struct bpf_array, map.max_entries));
offset = ctx->tail_call_indirect_label - (prog + 2 - start);
EMIT2(X86_JBE, offset); /* jbe out */
/*
* if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
* goto out;
*/
EMIT2_off32(0x8B, 0x85, tcc_off); /* mov eax, dword ptr [rbp - tcc_off] */
EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */
offset = ctx->tail_call_indirect_label - (prog + 2 - start);
EMIT2(X86_JAE, offset); /* jae out */
EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */
EMIT2_off32(0x89, 0x85, tcc_off); /* mov dword ptr [rbp - tcc_off], eax */
/* prog = array->ptrs[index]; */
EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6, /* mov rcx, [rsi + rdx * 8 + offsetof(...)] */
offsetof(struct bpf_array, ptrs));
/*
* if (prog == NULL)
* goto out;
*/
EMIT3(0x48, 0x85, 0xC9); /* test rcx,rcx */
offset = ctx->tail_call_indirect_label - (prog + 2 - start);
EMIT2(X86_JE, offset); /* je out */
pop_callee_regs(&prog, callee_regs_used);
EMIT1(0x58); /* pop rax */
if (stack_depth)
EMIT3_off32(0x48, 0x81, 0xC4, /* add rsp, sd */
round_up(stack_depth, 8));
/* goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET); */
EMIT4(0x48, 0x8B, 0x49, /* mov rcx, qword ptr [rcx + 32] */
offsetof(struct bpf_prog, bpf_func));
EMIT4(0x48, 0x83, 0xC1, /* add rcx, X86_TAIL_CALL_OFFSET */
X86_TAIL_CALL_OFFSET);
/*
* Now we're ready to jump into next BPF program
* rdi == ctx (1st arg)
* rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET
*/
emit_indirect_jump(&prog, 1 /* rcx */, ip + (prog - start));
/* out: */
ctx->tail_call_indirect_label = prog - start;
*pprog = prog;
}
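/*
 * Direct tail call: the target program is known via the poke descriptor.
 * Emit the tail-call-count check, a bypass jump over the patch site (live
 * until a target is installed) and a 5-byte NOP placeholder that
 * bpf_tail_call_direct_fixup() later turns into a direct jump to the
 * target program.
 */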
static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke,
u8 **pprog, u8 *ip,
bool *callee_regs_used, u32 stack_depth,
struct jit_context *ctx)
{
int tcc_off = -4 - round_up(stack_depth, 8);
u8 *prog = *pprog, *start = *pprog;
int offset;
/*
* if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
* goto out;
*/
	EMIT2_off32(0x8B, 0x85, tcc_off);         /* mov eax, dword ptr [rbp + tcc_off] */
EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */
offset = ctx->tail_call_direct_label - (prog + 2 - start);
EMIT2(X86_JAE, offset); /* jae out */
EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */
	EMIT2_off32(0x89, 0x85, tcc_off);         /* mov dword ptr [rbp + tcc_off], eax */
poke->tailcall_bypass = ip + (prog - start);
poke->adj_off = X86_TAIL_CALL_OFFSET;
poke->tailcall_target = ip + ctx->tail_call_direct_label - X86_PATCH_SIZE;
poke->bypass_addr = (u8 *)poke->tailcall_target + X86_PATCH_SIZE;
emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE,
poke->tailcall_bypass);
pop_callee_regs(&prog, callee_regs_used);
EMIT1(0x58); /* pop rax */
if (stack_depth)
EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8));
memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
prog += X86_PATCH_SIZE;
/* out: */
ctx->tail_call_direct_label = prog - start;
*pprog = prog;
}
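/*
 * Once the image is finalized, walk the poke table and patch each direct
 * tail-call site: turn the NOP placeholder into a jump to the target
 * program (skipping its prologue via adj_off), NOP out the bypass jump,
 * and mark the descriptor stable so map updates can poke it directly.
 */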
static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
{
struct bpf_jit_poke_descriptor *poke;
struct bpf_array *array;
struct bpf_prog *target;
int i, ret;
for (i = 0; i < prog->aux->size_poke_tab; i++) {
poke = &prog->aux->poke_tab[i];
if (poke->aux && poke->aux != prog->aux)
continue;
WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable));
if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
continue;
array = container_of(poke->tail_call.map, struct bpf_array, map);
mutex_lock(&array->aux->poke_mutex);
target = array->ptrs[poke->tail_call.key];
if (target) {
ret = __bpf_arch_text_poke(poke->tailcall_target,
BPF_MOD_JUMP, NULL,
(u8 *)target->bpf_func +
poke->adj_off);
BUG_ON(ret < 0);
ret = __bpf_arch_text_poke(poke->tailcall_bypass,
BPF_MOD_JUMP,
(u8 *)poke->tailcall_target +
X86_PATCH_SIZE, NULL);
BUG_ON(ret < 0);
}
WRITE_ONCE(poke->tailcall_target_stable, true);
mutex_unlock(&array->aux->poke_mutex);
}
}
static void emit_mov_imm32(u8 **pprog, bool sign_propagate,
u32 dst_reg, const u32 imm32)
{
u8 *prog = *pprog;
u8 b1, b2, b3;
/*
* Optimization: if imm32 is positive, use 'mov %eax, imm32'
* (which zero-extends imm32) to save 2 bytes.
*/
if (sign_propagate && (s32)imm32 < 0) {
/* 'mov %rax, imm32' sign extends imm32 */
b1 = add_1mod(0x48, dst_reg);
b2 = 0xC7;
b3 = 0xC0;
EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
goto done;
}
/*
* Optimization: if imm32 is zero, use 'xor %eax, %eax'
* to save 3 bytes.
*/
if (imm32 == 0) {
if (is_ereg(dst_reg))
EMIT1(add_2mod(0x40, dst_reg, dst_reg));
b2 = 0x31; /* xor */
b3 = 0xC0;
EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
goto done;
}
/* mov %eax, imm32 */
if (is_ereg(dst_reg))
EMIT1(add_1mod(0x40, dst_reg));
EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
done:
*pprog = prog;
}
static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
const u32 imm32_hi, const u32 imm32_lo)
{
u8 *prog = *pprog;
if (is_uimm32(((u64)imm32_hi << 32) | (u32)imm32_lo)) {
/*
		 * For emitting a plain u32, where the sign bit must not be
		 * propagated, LLVM tends to load imm64 over mov32
		 * directly, so save a couple of bytes by just doing
		 * 'mov %eax, imm32' instead.
*/
emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
} else {
/* movabsq rax, imm64 */
EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
EMIT(imm32_lo, 4);
EMIT(imm32_hi, 4);
}
*pprog = prog;
}
static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
{
u8 *prog = *pprog;
if (is64) {
/* mov dst, src */
EMIT_mov(dst_reg, src_reg);
} else {
/* mov32 dst, src */
if (is_ereg(dst_reg) || is_ereg(src_reg))
EMIT1(add_2mod(0x40, dst_reg, src_reg));
EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
}
*pprog = prog;
}
static void emit_movsx_reg(u8 **pprog, int num_bits, bool is64, u32 dst_reg,
u32 src_reg)
{
u8 *prog = *pprog;
if (is64) {
/* movs[b,w,l]q dst, src */
if (num_bits == 8)
EMIT4(add_2mod(0x48, src_reg, dst_reg), 0x0f, 0xbe,
add_2reg(0xC0, src_reg, dst_reg));
else if (num_bits == 16)
EMIT4(add_2mod(0x48, src_reg, dst_reg), 0x0f, 0xbf,
add_2reg(0xC0, src_reg, dst_reg));
else if (num_bits == 32)
EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x63,
add_2reg(0xC0, src_reg, dst_reg));
} else {
/* movs[b,w]l dst, src */
if (num_bits == 8) {
EMIT4(add_2mod(0x40, src_reg, dst_reg), 0x0f, 0xbe,
add_2reg(0xC0, src_reg, dst_reg));
} else if (num_bits == 16) {
if (is_ereg(dst_reg) || is_ereg(src_reg))
EMIT1(add_2mod(0x40, src_reg, dst_reg));
EMIT3(add_2mod(0x0f, src_reg, dst_reg), 0xbf,
add_2reg(0xC0, src_reg, dst_reg));
}
}
*pprog = prog;
}
/* Emit the suffix (ModR/M etc) for addressing *(ptr_reg + off) and val_reg */
static void emit_insn_suffix(u8 **pprog, u32 ptr_reg, u32 val_reg, int off)
{
u8 *prog = *pprog;
if (is_imm8(off)) {
/* 1-byte signed displacement.
*
		 * If off == 0 we could skip this and save one extra byte, but
		 * the special case of x86 R13, which always needs an offset,
		 * is not worth the hassle.
*/
EMIT2(add_2reg(0x40, ptr_reg, val_reg), off);
} else {
/* 4-byte signed displacement */
EMIT1_off32(add_2reg(0x80, ptr_reg, val_reg), off);
}
*pprog = prog;
}
/*
* Emit a REX byte if it will be necessary to address these registers
*/
static void maybe_emit_mod(u8 **pprog, u32 dst_reg, u32 src_reg, bool is64)
{
u8 *prog = *pprog;
if (is64)
EMIT1(add_2mod(0x48, dst_reg, src_reg));
else if (is_ereg(dst_reg) || is_ereg(src_reg))
EMIT1(add_2mod(0x40, dst_reg, src_reg));
*pprog = prog;
}
/*
* Similar version of maybe_emit_mod() for a single register
*/
static void maybe_emit_1mod(u8 **pprog, u32 reg, bool is64)
{
u8 *prog = *pprog;
if (is64)
EMIT1(add_1mod(0x48, reg));
else if (is_ereg(reg))
EMIT1(add_1mod(0x40, reg));
*pprog = prog;
}
/* LDX: dst_reg = *(u8*)(src_reg + off) */
static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
u8 *prog = *pprog;
switch (size) {
case BPF_B:
/* Emit 'movzx rax, byte ptr [rax + off]' */
EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
break;
case BPF_H:
/* Emit 'movzx rax, word ptr [rax + off]' */
EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
break;
case BPF_W:
/* Emit 'mov eax, dword ptr [rax+0x14]' */
if (is_ereg(dst_reg) || is_ereg(src_reg))
EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
else
EMIT1(0x8B);
break;
case BPF_DW:
/* Emit 'mov rax, qword ptr [rax+0x14]' */
EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
break;
}
emit_insn_suffix(&prog, src_reg, dst_reg, off);
*pprog = prog;
}
/* LDSX: dst_reg = *(s8*)(src_reg + off) */
static void emit_ldsx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
u8 *prog = *pprog;
switch (size) {
case BPF_B:
/* Emit 'movsx rax, byte ptr [rax + off]' */
EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xBE);
break;
case BPF_H:
/* Emit 'movsx rax, word ptr [rax + off]' */
EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xBF);
break;
case BPF_W:
/* Emit 'movsx rax, dword ptr [rax+0x14]' */
EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x63);
break;
}
emit_insn_suffix(&prog, src_reg, dst_reg, off);
*pprog = prog;
}
/* STX: *(u8*)(dst_reg + off) = src_reg */
static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
u8 *prog = *pprog;
switch (size) {
case BPF_B:
/* Emit 'mov byte ptr [rax + off], al' */
if (is_ereg(dst_reg) || is_ereg_8l(src_reg))
/* Add extra byte for eregs or SIL,DIL,BPL in src_reg */
EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
else
EMIT1(0x88);
break;
case BPF_H:
if (is_ereg(dst_reg) || is_ereg(src_reg))
EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
else
EMIT2(0x66, 0x89);
break;
case BPF_W:
if (is_ereg(dst_reg) || is_ereg(src_reg))
EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
else
EMIT1(0x89);
break;
case BPF_DW:
EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
break;
}
emit_insn_suffix(&prog, dst_reg, src_reg, off);
*pprog = prog;
}
static int emit_atomic(u8 **pprog, u8 atomic_op,
u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
{
u8 *prog = *pprog;
EMIT1(0xF0); /* lock prefix */
maybe_emit_mod(&prog, dst_reg, src_reg, bpf_size == BPF_DW);
/* emit opcode */
switch (atomic_op) {
case BPF_ADD:
case BPF_AND:
case BPF_OR:
case BPF_XOR:
/* lock *(u32/u64*)(dst_reg + off) <op>= src_reg */
EMIT1(simple_alu_opcodes[atomic_op]);
break;
case BPF_ADD | BPF_FETCH:
/* src_reg = atomic_fetch_add(dst_reg + off, src_reg); */
EMIT2(0x0F, 0xC1);
break;
case BPF_XCHG:
/* src_reg = atomic_xchg(dst_reg + off, src_reg); */
EMIT1(0x87);
break;
case BPF_CMPXCHG:
/* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */
EMIT2(0x0F, 0xB1);
break;
default:
pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op);
return -EFAULT;
}
emit_insn_suffix(&prog, dst_reg, src_reg, off);
*pprog = prog;
return 0;
}
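/*
 * Exception handler for PROBE_MEM loads: the JIT encodes the length of the
 * faulting x86 insn in the low 8 bits of ex->fixup and the pt_regs offset
 * of the destination register in the upper bits (see the LDX PROBE_MEM
 * path in do_jit()). On a fault, zero the destination register and skip
 * over the faulting load.
 */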
bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs)
{
u32 reg = x->fixup >> 8;
/* jump over faulting load and clear dest register */
*(unsigned long *)((void *)regs + reg) = 0;
regs->ip += x->fixup & 0xff;
return true;
}
static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt,
bool *regs_used, bool *tail_call_seen)
{
int i;
for (i = 1; i <= insn_cnt; i++, insn++) {
if (insn->code == (BPF_JMP | BPF_TAIL_CALL))
*tail_call_seen = true;
if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6)
regs_used[0] = true;
if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7)
regs_used[1] = true;
if (insn->dst_reg == BPF_REG_8 || insn->src_reg == BPF_REG_8)
regs_used[2] = true;
if (insn->dst_reg == BPF_REG_9 || insn->src_reg == BPF_REG_9)
regs_used[3] = true;
}
}
static void emit_nops(u8 **pprog, int len)
{
u8 *prog = *pprog;
int i, noplen;
while (len > 0) {
noplen = len;
if (noplen > ASM_NOP_MAX)
noplen = ASM_NOP_MAX;
for (i = 0; i < noplen; i++)
EMIT1(x86_nops[noplen][i]);
len -= noplen;
}
*pprog = prog;
}
/* emit the 3-byte VEX prefix
*
* r: same as rex.r, extra bit for ModRM reg field
* x: same as rex.x, extra bit for SIB index field
* b: same as rex.b, extra bit for ModRM r/m, or SIB base
* m: opcode map select, encoding escape bytes e.g. 0x0f38
* w: same as rex.w (32 bit or 64 bit) or opcode specific
* src_reg2: additional source reg (encoded as BPF reg)
* l: vector length (128 bit or 256 bit) or reserved
* pp: opcode prefix (none, 0x66, 0xf2 or 0xf3)
*/
static void emit_3vex(u8 **pprog, bool r, bool x, bool b, u8 m,
bool w, u8 src_reg2, bool l, u8 pp)
{
u8 *prog = *pprog;
const u8 b0 = 0xc4; /* first byte of 3-byte VEX prefix */
u8 b1, b2;
u8 vvvv = reg2hex[src_reg2];
	/* reg2hex gives only the lower 3 bits of vvvv */
if (is_ereg(src_reg2))
vvvv |= 1 << 3;
/*
* 2nd byte of 3-byte VEX prefix
* ~ means bit inverted encoding
*
* 7 0
* +---+---+---+---+---+---+---+---+
* |~R |~X |~B | m |
* +---+---+---+---+---+---+---+---+
*/
b1 = (!r << 7) | (!x << 6) | (!b << 5) | (m & 0x1f);
/*
* 3rd byte of 3-byte VEX prefix
*
* 7 0
* +---+---+---+---+---+---+---+---+
* | W | ~vvvv | L | pp |
* +---+---+---+---+---+---+---+---+
*/
b2 = (w << 7) | ((~vvvv & 0xf) << 3) | (l << 2) | (pp & 3);
EMIT3(b0, b1, b2);
*pprog = prog;
}
/* emit BMI2 shift instruction */
static void emit_shiftx(u8 **pprog, u32 dst_reg, u8 src_reg, bool is64, u8 op)
{
u8 *prog = *pprog;
bool r = is_ereg(dst_reg);
u8 m = 2; /* escape code 0f38 */
emit_3vex(&prog, r, false, r, m, is64, src_reg, false, op);
EMIT2(0xf7, add_2reg(0xC0, dst_reg, dst_reg));
*pprog = prog;
}
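/*
 * Number of bytes the current insn shrank by compared to the previous JIT
 * pass: addrs[] holds the end offsets computed by the previous pass, while
 * (prog - temp) is what has been emitted for this insn in the current
 * pass. Used to pad with NOPs when jmp_padding is enabled so the image
 * stops changing size once it has converged.
 */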
#define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))
static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image,
int oldproglen, struct jit_context *ctx, bool jmp_padding)
{
bool tail_call_reachable = bpf_prog->aux->tail_call_reachable;
struct bpf_insn *insn = bpf_prog->insnsi;
bool callee_regs_used[4] = {};
int insn_cnt = bpf_prog->len;
bool tail_call_seen = false;
bool seen_exit = false;
u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
int i, excnt = 0;
int ilen, proglen = 0;
u8 *prog = temp;
int err;
detect_reg_usage(insn, insn_cnt, callee_regs_used,
&tail_call_seen);
/* tail call's presence in current prog implies it is reachable */
tail_call_reachable |= tail_call_seen;
emit_prologue(&prog, bpf_prog->aux->stack_depth,
bpf_prog_was_classic(bpf_prog), tail_call_reachable,
bpf_prog->aux->func_idx != 0);
push_callee_regs(&prog, callee_regs_used);
ilen = prog - temp;
if (rw_image)
memcpy(rw_image + proglen, temp, ilen);
proglen += ilen;
addrs[0] = proglen;
prog = temp;
for (i = 1; i <= insn_cnt; i++, insn++) {
const s32 imm32 = insn->imm;
u32 dst_reg = insn->dst_reg;
u32 src_reg = insn->src_reg;
u8 b2 = 0, b3 = 0;
u8 *start_of_ldx;
s64 jmp_offset;
s16 insn_off;
u8 jmp_cond;
u8 *func;
int nops;
switch (insn->code) {
/* ALU */
case BPF_ALU | BPF_ADD | BPF_X:
case BPF_ALU | BPF_SUB | BPF_X:
case BPF_ALU | BPF_AND | BPF_X:
case BPF_ALU | BPF_OR | BPF_X:
case BPF_ALU | BPF_XOR | BPF_X:
case BPF_ALU64 | BPF_ADD | BPF_X:
case BPF_ALU64 | BPF_SUB | BPF_X:
case BPF_ALU64 | BPF_AND | BPF_X:
case BPF_ALU64 | BPF_OR | BPF_X:
case BPF_ALU64 | BPF_XOR | BPF_X:
maybe_emit_mod(&prog, dst_reg, src_reg,
BPF_CLASS(insn->code) == BPF_ALU64);
b2 = simple_alu_opcodes[BPF_OP(insn->code)];
EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
break;
case BPF_ALU64 | BPF_MOV | BPF_X:
case BPF_ALU | BPF_MOV | BPF_X:
if (insn->off == 0)
emit_mov_reg(&prog,
BPF_CLASS(insn->code) == BPF_ALU64,
dst_reg, src_reg);
else
emit_movsx_reg(&prog, insn->off,
BPF_CLASS(insn->code) == BPF_ALU64,
dst_reg, src_reg);
break;
/* neg dst */
case BPF_ALU | BPF_NEG:
case BPF_ALU64 | BPF_NEG:
maybe_emit_1mod(&prog, dst_reg,
BPF_CLASS(insn->code) == BPF_ALU64);
EMIT2(0xF7, add_1reg(0xD8, dst_reg));
break;
case BPF_ALU | BPF_ADD | BPF_K:
case BPF_ALU | BPF_SUB | BPF_K:
case BPF_ALU | BPF_AND | BPF_K:
case BPF_ALU | BPF_OR | BPF_K:
case BPF_ALU | BPF_XOR | BPF_K:
case BPF_ALU64 | BPF_ADD | BPF_K:
case BPF_ALU64 | BPF_SUB | BPF_K:
case BPF_ALU64 | BPF_AND | BPF_K:
case BPF_ALU64 | BPF_OR | BPF_K:
case BPF_ALU64 | BPF_XOR | BPF_K:
maybe_emit_1mod(&prog, dst_reg,
BPF_CLASS(insn->code) == BPF_ALU64);
/*
			 * b3 holds the 'normal' opcode; b2 is the short form,
			 * only valid when dst is eax/rax.
*/
switch (BPF_OP(insn->code)) {
case BPF_ADD:
b3 = 0xC0;
b2 = 0x05;
break;
case BPF_SUB:
b3 = 0xE8;
b2 = 0x2D;
break;
case BPF_AND:
b3 = 0xE0;
b2 = 0x25;
break;
case BPF_OR:
b3 = 0xC8;
b2 = 0x0D;
break;
case BPF_XOR:
b3 = 0xF0;
b2 = 0x35;
break;
}
if (is_imm8(imm32))
EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
else if (is_axreg(dst_reg))
EMIT1_off32(b2, imm32);
else
EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
break;
case BPF_ALU64 | BPF_MOV | BPF_K:
case BPF_ALU | BPF_MOV | BPF_K:
emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64,
dst_reg, imm32);
break;
case BPF_LD | BPF_IMM | BPF_DW:
emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm);
insn++;
i++;
break;
/* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
case BPF_ALU | BPF_MOD | BPF_X:
case BPF_ALU | BPF_DIV | BPF_X:
case BPF_ALU | BPF_MOD | BPF_K:
case BPF_ALU | BPF_DIV | BPF_K:
case BPF_ALU64 | BPF_MOD | BPF_X:
case BPF_ALU64 | BPF_DIV | BPF_X:
case BPF_ALU64 | BPF_MOD | BPF_K:
case BPF_ALU64 | BPF_DIV | BPF_K: {
bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
if (dst_reg != BPF_REG_0)
EMIT1(0x50); /* push rax */
if (dst_reg != BPF_REG_3)
EMIT1(0x52); /* push rdx */
if (BPF_SRC(insn->code) == BPF_X) {
if (src_reg == BPF_REG_0 ||
src_reg == BPF_REG_3) {
/* mov r11, src_reg */
EMIT_mov(AUX_REG, src_reg);
src_reg = AUX_REG;
}
} else {
/* mov r11, imm32 */
EMIT3_off32(0x49, 0xC7, 0xC3, imm32);
src_reg = AUX_REG;
}
if (dst_reg != BPF_REG_0)
/* mov rax, dst_reg */
emit_mov_reg(&prog, is64, BPF_REG_0, dst_reg);
if (insn->off == 0) {
/*
* xor edx, edx
				 * equivalent to 'xor rdx, rdx', but one byte shorter
*/
EMIT2(0x31, 0xd2);
/* div src_reg */
maybe_emit_1mod(&prog, src_reg, is64);
EMIT2(0xF7, add_1reg(0xF0, src_reg));
} else {
if (BPF_CLASS(insn->code) == BPF_ALU)
EMIT1(0x99); /* cdq */
else
EMIT2(0x48, 0x99); /* cqo */
/* idiv src_reg */
maybe_emit_1mod(&prog, src_reg, is64);
EMIT2(0xF7, add_1reg(0xF8, src_reg));
}
if (BPF_OP(insn->code) == BPF_MOD &&
dst_reg != BPF_REG_3)
/* mov dst_reg, rdx */
emit_mov_reg(&prog, is64, dst_reg, BPF_REG_3);
else if (BPF_OP(insn->code) == BPF_DIV &&
dst_reg != BPF_REG_0)
/* mov dst_reg, rax */
emit_mov_reg(&prog, is64, dst_reg, BPF_REG_0);
if (dst_reg != BPF_REG_3)
EMIT1(0x5A); /* pop rdx */
if (dst_reg != BPF_REG_0)
EMIT1(0x58); /* pop rax */
break;
}
case BPF_ALU | BPF_MUL | BPF_K:
case BPF_ALU64 | BPF_MUL | BPF_K:
maybe_emit_mod(&prog, dst_reg, dst_reg,
BPF_CLASS(insn->code) == BPF_ALU64);
if (is_imm8(imm32))
/* imul dst_reg, dst_reg, imm8 */
EMIT3(0x6B, add_2reg(0xC0, dst_reg, dst_reg),
imm32);
else
/* imul dst_reg, dst_reg, imm32 */
EMIT2_off32(0x69,
add_2reg(0xC0, dst_reg, dst_reg),
imm32);
break;
case BPF_ALU | BPF_MUL | BPF_X:
case BPF_ALU64 | BPF_MUL | BPF_X:
maybe_emit_mod(&prog, src_reg, dst_reg,
BPF_CLASS(insn->code) == BPF_ALU64);
/* imul dst_reg, src_reg */
EMIT3(0x0F, 0xAF, add_2reg(0xC0, src_reg, dst_reg));
break;
/* Shifts */
case BPF_ALU | BPF_LSH | BPF_K:
case BPF_ALU | BPF_RSH | BPF_K:
case BPF_ALU | BPF_ARSH | BPF_K:
case BPF_ALU64 | BPF_LSH | BPF_K:
case BPF_ALU64 | BPF_RSH | BPF_K:
case BPF_ALU64 | BPF_ARSH | BPF_K:
maybe_emit_1mod(&prog, dst_reg,
BPF_CLASS(insn->code) == BPF_ALU64);
b3 = simple_alu_opcodes[BPF_OP(insn->code)];
if (imm32 == 1)
EMIT2(0xD1, add_1reg(b3, dst_reg));
else
EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
break;
case BPF_ALU | BPF_LSH | BPF_X:
case BPF_ALU | BPF_RSH | BPF_X:
case BPF_ALU | BPF_ARSH | BPF_X:
case BPF_ALU64 | BPF_LSH | BPF_X:
case BPF_ALU64 | BPF_RSH | BPF_X:
case BPF_ALU64 | BPF_ARSH | BPF_X:
/* BMI2 shifts aren't better when shift count is already in rcx */
if (boot_cpu_has(X86_FEATURE_BMI2) && src_reg != BPF_REG_4) {
/* shrx/sarx/shlx dst_reg, dst_reg, src_reg */
bool w = (BPF_CLASS(insn->code) == BPF_ALU64);
u8 op;
switch (BPF_OP(insn->code)) {
case BPF_LSH:
op = 1; /* prefix 0x66 */
break;
case BPF_RSH:
op = 3; /* prefix 0xf2 */
break;
case BPF_ARSH:
op = 2; /* prefix 0xf3 */
break;
}
emit_shiftx(&prog, dst_reg, src_reg, w, op);
break;
}
if (src_reg != BPF_REG_4) { /* common case */
/* Check for bad case when dst_reg == rcx */
if (dst_reg == BPF_REG_4) {
/* mov r11, dst_reg */
EMIT_mov(AUX_REG, dst_reg);
dst_reg = AUX_REG;
} else {
EMIT1(0x51); /* push rcx */
}
/* mov rcx, src_reg */
EMIT_mov(BPF_REG_4, src_reg);
}
/* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
maybe_emit_1mod(&prog, dst_reg,
BPF_CLASS(insn->code) == BPF_ALU64);
b3 = simple_alu_opcodes[BPF_OP(insn->code)];
EMIT2(0xD3, add_1reg(b3, dst_reg));
if (src_reg != BPF_REG_4) {
if (insn->dst_reg == BPF_REG_4)
/* mov dst_reg, r11 */
EMIT_mov(insn->dst_reg, AUX_REG);
else
EMIT1(0x59); /* pop rcx */
}
break;
case BPF_ALU | BPF_END | BPF_FROM_BE:
case BPF_ALU64 | BPF_END | BPF_FROM_LE:
switch (imm32) {
case 16:
/* Emit 'ror %ax, 8' to swap lower 2 bytes */
EMIT1(0x66);
if (is_ereg(dst_reg))
EMIT1(0x41);
EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
/* Emit 'movzwl eax, ax' */
if (is_ereg(dst_reg))
EMIT3(0x45, 0x0F, 0xB7);
else
EMIT2(0x0F, 0xB7);
EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
break;
case 32:
/* Emit 'bswap eax' to swap lower 4 bytes */
if (is_ereg(dst_reg))
EMIT2(0x41, 0x0F);
else
EMIT1(0x0F);
EMIT1(add_1reg(0xC8, dst_reg));
break;
case 64:
/* Emit 'bswap rax' to swap 8 bytes */
EMIT3(add_1mod(0x48, dst_reg), 0x0F,
add_1reg(0xC8, dst_reg));
break;
}
break;
case BPF_ALU | BPF_END | BPF_FROM_LE:
switch (imm32) {
case 16:
/*
				 * Emit 'movzwl eax, ax' to zero-extend the lower
				 * 16 bits into 64 bits
*/
if (is_ereg(dst_reg))
EMIT3(0x45, 0x0F, 0xB7);
else
EMIT2(0x0F, 0xB7);
EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
break;
case 32:
/* Emit 'mov eax, eax' to clear upper 32-bits */
if (is_ereg(dst_reg))
EMIT1(0x45);
EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
break;
case 64:
/* nop */
break;
}
break;
/* speculation barrier */
case BPF_ST | BPF_NOSPEC:
EMIT_LFENCE();
break;
/* ST: *(u8*)(dst_reg + off) = imm */
case BPF_ST | BPF_MEM | BPF_B:
if (is_ereg(dst_reg))
EMIT2(0x41, 0xC6);
else
EMIT1(0xC6);
goto st;
case BPF_ST | BPF_MEM | BPF_H:
if (is_ereg(dst_reg))
EMIT3(0x66, 0x41, 0xC7);
else
EMIT2(0x66, 0xC7);
goto st;
case BPF_ST | BPF_MEM | BPF_W:
if (is_ereg(dst_reg))
EMIT2(0x41, 0xC7);
else
EMIT1(0xC7);
goto st;
case BPF_ST | BPF_MEM | BPF_DW:
EMIT2(add_1mod(0x48, dst_reg), 0xC7);
st: if (is_imm8(insn->off))
EMIT2(add_1reg(0x40, dst_reg), insn->off);
else
EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);
EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
break;
/* STX: *(u8*)(dst_reg + off) = src_reg */
case BPF_STX | BPF_MEM | BPF_B:
case BPF_STX | BPF_MEM | BPF_H:
case BPF_STX | BPF_MEM | BPF_W:
case BPF_STX | BPF_MEM | BPF_DW:
emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
break;
/* LDX: dst_reg = *(u8*)(src_reg + off) */
case BPF_LDX | BPF_MEM | BPF_B:
case BPF_LDX | BPF_PROBE_MEM | BPF_B:
case BPF_LDX | BPF_MEM | BPF_H:
case BPF_LDX | BPF_PROBE_MEM | BPF_H:
case BPF_LDX | BPF_MEM | BPF_W:
case BPF_LDX | BPF_PROBE_MEM | BPF_W:
case BPF_LDX | BPF_MEM | BPF_DW:
case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
/* LDXS: dst_reg = *(s8*)(src_reg + off) */
case BPF_LDX | BPF_MEMSX | BPF_B:
case BPF_LDX | BPF_MEMSX | BPF_H:
case BPF_LDX | BPF_MEMSX | BPF_W:
case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
insn_off = insn->off;
if (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
BPF_MODE(insn->code) == BPF_PROBE_MEMSX) {
/* Conservatively check that src_reg + insn->off is a kernel address:
* src_reg + insn->off >= TASK_SIZE_MAX + PAGE_SIZE
* src_reg is used as scratch for src_reg += insn->off and restored
* after emit_ldx if necessary
*/
u64 limit = TASK_SIZE_MAX + PAGE_SIZE;
u8 *end_of_jmp;
				/* At the end of these emitted checks, insn->off will have been added
				 * to src_reg, so there is no need to do a relative load with an insn->off offset
*/
insn_off = 0;
/* movabsq r11, limit */
EMIT2(add_1mod(0x48, AUX_REG), add_1reg(0xB8, AUX_REG));
EMIT((u32)limit, 4);
EMIT(limit >> 32, 4);
if (insn->off) {
/* add src_reg, insn->off */
maybe_emit_1mod(&prog, src_reg, true);
EMIT2_off32(0x81, add_1reg(0xC0, src_reg), insn->off);
}
/* cmp src_reg, r11 */
maybe_emit_mod(&prog, src_reg, AUX_REG, true);
EMIT2(0x39, add_2reg(0xC0, src_reg, AUX_REG));
/* if unsigned '>=', goto load */
EMIT2(X86_JAE, 0);
end_of_jmp = prog;
/* xor dst_reg, dst_reg */
emit_mov_imm32(&prog, false, dst_reg, 0);
/* jmp byte_after_ldx */
EMIT2(0xEB, 0);
/* populate jmp_offset for JAE above to jump to start_of_ldx */
start_of_ldx = prog;
end_of_jmp[-1] = start_of_ldx - end_of_jmp;
}
if (BPF_MODE(insn->code) == BPF_PROBE_MEMSX ||
BPF_MODE(insn->code) == BPF_MEMSX)
emit_ldsx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off);
else
emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off);
if (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
BPF_MODE(insn->code) == BPF_PROBE_MEMSX) {
struct exception_table_entry *ex;
u8 *_insn = image + proglen + (start_of_ldx - temp);
s64 delta;
/* populate jmp_offset for JMP above */
start_of_ldx[-1] = prog - start_of_ldx;
if (insn->off && src_reg != dst_reg) {
/* sub src_reg, insn->off
* Restore src_reg after "add src_reg, insn->off" in prev
* if statement. But if src_reg == dst_reg, emit_ldx
* above already clobbered src_reg, so no need to restore.
* If add src_reg, insn->off was unnecessary, no need to
* restore either.
*/
maybe_emit_1mod(&prog, src_reg, true);
EMIT2_off32(0x81, add_1reg(0xE8, src_reg), insn->off);
}
if (!bpf_prog->aux->extable)
break;
if (excnt >= bpf_prog->aux->num_exentries) {
pr_err("ex gen bug\n");
return -EFAULT;
}
ex = &bpf_prog->aux->extable[excnt++];
delta = _insn - (u8 *)&ex->insn;
if (!is_simm32(delta)) {
pr_err("extable->insn doesn't fit into 32-bit\n");
return -EFAULT;
}
/* switch ex to rw buffer for writes */
ex = (void *)rw_image + ((void *)ex - (void *)image);
ex->insn = delta;
ex->data = EX_TYPE_BPF;
if (dst_reg > BPF_REG_9) {
pr_err("verifier error\n");
return -EFAULT;
}
/*
				 * Compute the size of the x86 insn and its target dest x86
				 * register. ex_handler_bpf() will use the lower 8 bits to
				 * adjust pt_regs->ip so it jumps over this x86 instruction,
				 * and the upper bits to figure out which pt_regs register to
				 * zero out. End result: an x86 insn such as
				 * "mov rbx, qword ptr [rax+0x14]" of 4 bytes will be skipped
				 * and rbx will be zero-initialized.
*/
ex->fixup = (prog - start_of_ldx) | (reg2pt_regs[dst_reg] << 8);
}
break;
case BPF_STX | BPF_ATOMIC | BPF_W:
case BPF_STX | BPF_ATOMIC | BPF_DW:
if (insn->imm == (BPF_AND | BPF_FETCH) ||
insn->imm == (BPF_OR | BPF_FETCH) ||
insn->imm == (BPF_XOR | BPF_FETCH)) {
bool is64 = BPF_SIZE(insn->code) == BPF_DW;
u32 real_src_reg = src_reg;
u32 real_dst_reg = dst_reg;
u8 *branch_target;
/*
* Can't be implemented with a single x86 insn.
* Need to do a CMPXCHG loop.
*/
/* Will need RAX as a CMPXCHG operand so save R0 */
emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0);
if (src_reg == BPF_REG_0)
real_src_reg = BPF_REG_AX;
if (dst_reg == BPF_REG_0)
real_dst_reg = BPF_REG_AX;
branch_target = prog;
/* Load old value */
emit_ldx(&prog, BPF_SIZE(insn->code),
BPF_REG_0, real_dst_reg, insn->off);
/*
* Perform the (commutative) operation locally,
* put the result in the AUX_REG.
*/
emit_mov_reg(&prog, is64, AUX_REG, BPF_REG_0);
maybe_emit_mod(&prog, AUX_REG, real_src_reg, is64);
EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)],
add_2reg(0xC0, AUX_REG, real_src_reg));
/* Attempt to swap in new value */
err = emit_atomic(&prog, BPF_CMPXCHG,
real_dst_reg, AUX_REG,
insn->off,
BPF_SIZE(insn->code));
if (WARN_ON(err))
return err;
/*
* ZF tells us whether we won the race. If it's
* cleared we need to try again.
*/
EMIT2(X86_JNE, -(prog - branch_target) - 2);
/* Return the pre-modification value */
emit_mov_reg(&prog, is64, real_src_reg, BPF_REG_0);
/* Restore R0 after clobbering RAX */
emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX);
break;
}
err = emit_atomic(&prog, insn->imm, dst_reg, src_reg,
insn->off, BPF_SIZE(insn->code));
if (err)
return err;
break;
/* call */
case BPF_JMP | BPF_CALL: {
int offs;
func = (u8 *) __bpf_call_base + imm32;
if (tail_call_reachable) {
/* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
EMIT3_off32(0x48, 0x8B, 0x85,
-round_up(bpf_prog->aux->stack_depth, 8) - 8);
if (!imm32)
return -EINVAL;
offs = 7 + x86_call_depth_emit_accounting(&prog, func);
} else {
if (!imm32)
return -EINVAL;
offs = x86_call_depth_emit_accounting(&prog, func);
}
if (emit_call(&prog, func, image + addrs[i - 1] + offs))
return -EINVAL;
break;
}
case BPF_JMP | BPF_TAIL_CALL:
if (imm32)
emit_bpf_tail_call_direct(&bpf_prog->aux->poke_tab[imm32 - 1],
&prog, image + addrs[i - 1],
callee_regs_used,
bpf_prog->aux->stack_depth,
ctx);
else
emit_bpf_tail_call_indirect(&prog,
callee_regs_used,
bpf_prog->aux->stack_depth,
image + addrs[i - 1],
ctx);
break;
/* cond jump */
case BPF_JMP | BPF_JEQ | BPF_X:
case BPF_JMP | BPF_JNE | BPF_X:
case BPF_JMP | BPF_JGT | BPF_X:
case BPF_JMP | BPF_JLT | BPF_X:
case BPF_JMP | BPF_JGE | BPF_X:
case BPF_JMP | BPF_JLE | BPF_X:
case BPF_JMP | BPF_JSGT | BPF_X:
case BPF_JMP | BPF_JSLT | BPF_X:
case BPF_JMP | BPF_JSGE | BPF_X:
case BPF_JMP | BPF_JSLE | BPF_X:
case BPF_JMP32 | BPF_JEQ | BPF_X:
case BPF_JMP32 | BPF_JNE | BPF_X:
case BPF_JMP32 | BPF_JGT | BPF_X:
case BPF_JMP32 | BPF_JLT | BPF_X:
case BPF_JMP32 | BPF_JGE | BPF_X:
case BPF_JMP32 | BPF_JLE | BPF_X:
case BPF_JMP32 | BPF_JSGT | BPF_X:
case BPF_JMP32 | BPF_JSLT | BPF_X:
case BPF_JMP32 | BPF_JSGE | BPF_X:
case BPF_JMP32 | BPF_JSLE | BPF_X:
/* cmp dst_reg, src_reg */
maybe_emit_mod(&prog, dst_reg, src_reg,
BPF_CLASS(insn->code) == BPF_JMP);
EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg));
goto emit_cond_jmp;
case BPF_JMP | BPF_JSET | BPF_X:
case BPF_JMP32 | BPF_JSET | BPF_X:
/* test dst_reg, src_reg */
maybe_emit_mod(&prog, dst_reg, src_reg,
BPF_CLASS(insn->code) == BPF_JMP);
EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg));
goto emit_cond_jmp;
case BPF_JMP | BPF_JSET | BPF_K:
case BPF_JMP32 | BPF_JSET | BPF_K:
/* test dst_reg, imm32 */
maybe_emit_1mod(&prog, dst_reg,
BPF_CLASS(insn->code) == BPF_JMP);
EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
goto emit_cond_jmp;
case BPF_JMP | BPF_JEQ | BPF_K:
case BPF_JMP | BPF_JNE | BPF_K:
case BPF_JMP | BPF_JGT | BPF_K:
case BPF_JMP | BPF_JLT | BPF_K:
case BPF_JMP | BPF_JGE | BPF_K:
case BPF_JMP | BPF_JLE | BPF_K:
case BPF_JMP | BPF_JSGT | BPF_K:
case BPF_JMP | BPF_JSLT | BPF_K:
case BPF_JMP | BPF_JSGE | BPF_K:
case BPF_JMP | BPF_JSLE | BPF_K:
case BPF_JMP32 | BPF_JEQ | BPF_K:
case BPF_JMP32 | BPF_JNE | BPF_K:
case BPF_JMP32 | BPF_JGT | BPF_K:
case BPF_JMP32 | BPF_JLT | BPF_K:
case BPF_JMP32 | BPF_JGE | BPF_K:
case BPF_JMP32 | BPF_JLE | BPF_K:
case BPF_JMP32 | BPF_JSGT | BPF_K:
case BPF_JMP32 | BPF_JSLT | BPF_K:
case BPF_JMP32 | BPF_JSGE | BPF_K:
case BPF_JMP32 | BPF_JSLE | BPF_K:
/* test dst_reg, dst_reg to save one extra byte */
if (imm32 == 0) {
maybe_emit_mod(&prog, dst_reg, dst_reg,
BPF_CLASS(insn->code) == BPF_JMP);
EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));
goto emit_cond_jmp;
}
/* cmp dst_reg, imm8/32 */
maybe_emit_1mod(&prog, dst_reg,
BPF_CLASS(insn->code) == BPF_JMP);
if (is_imm8(imm32))
EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
else
EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);
emit_cond_jmp: /* Convert BPF opcode to x86 */
switch (BPF_OP(insn->code)) {
case BPF_JEQ:
jmp_cond = X86_JE;
break;
case BPF_JSET:
case BPF_JNE:
jmp_cond = X86_JNE;
break;
case BPF_JGT:
/* GT is unsigned '>', JA in x86 */
jmp_cond = X86_JA;
break;
case BPF_JLT:
/* LT is unsigned '<', JB in x86 */
jmp_cond = X86_JB;
break;
case BPF_JGE:
/* GE is unsigned '>=', JAE in x86 */
jmp_cond = X86_JAE;
break;
case BPF_JLE:
/* LE is unsigned '<=', JBE in x86 */
jmp_cond = X86_JBE;
break;
case BPF_JSGT:
/* Signed '>', GT in x86 */
jmp_cond = X86_JG;
break;
case BPF_JSLT:
/* Signed '<', LT in x86 */
jmp_cond = X86_JL;
break;
case BPF_JSGE:
/* Signed '>=', GE in x86 */
jmp_cond = X86_JGE;
break;
case BPF_JSLE:
/* Signed '<=', LE in x86 */
jmp_cond = X86_JLE;
break;
default: /* to silence GCC warning */
return -EFAULT;
}
jmp_offset = addrs[i + insn->off] - addrs[i];
if (is_imm8(jmp_offset)) {
if (jmp_padding) {
/* To keep the jmp_offset valid, the extra bytes are
* padded before the jump insn, so we subtract the
* 2 bytes of jmp_cond insn from INSN_SZ_DIFF.
*
* If the previous pass already emits an imm8
* jmp_cond, then this BPF insn won't shrink, so
* "nops" is 0.
*
* On the other hand, if the previous pass emits an
					 * imm32 jmp_cond, the extra 4 bytes(*) are padded to
* keep the image from shrinking further.
*
* (*) imm32 jmp_cond is 6 bytes, and imm8 jmp_cond
* is 2 bytes, so the size difference is 4 bytes.
*/
nops = INSN_SZ_DIFF - 2;
if (nops != 0 && nops != 4) {
pr_err("unexpected jmp_cond padding: %d bytes\n",
nops);
return -EFAULT;
}
emit_nops(&prog, nops);
}
EMIT2(jmp_cond, jmp_offset);
} else if (is_simm32(jmp_offset)) {
EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
} else {
pr_err("cond_jmp gen bug %llx\n", jmp_offset);
return -EFAULT;
}
break;
case BPF_JMP | BPF_JA:
case BPF_JMP32 | BPF_JA:
if (BPF_CLASS(insn->code) == BPF_JMP) {
if (insn->off == -1)
/* -1 jmp instructions will always jump
* backwards two bytes. Explicitly handling
* this case avoids wasting too many passes
* when there are long sequences of replaced
* dead code.
*/
jmp_offset = -2;
else
jmp_offset = addrs[i + insn->off] - addrs[i];
} else {
if (insn->imm == -1)
jmp_offset = -2;
else
jmp_offset = addrs[i + insn->imm] - addrs[i];
}
if (!jmp_offset) {
/*
* If jmp_padding is enabled, the extra nops will
* be inserted. Otherwise, optimize out nop jumps.
*/
if (jmp_padding) {
/* There are 3 possible conditions.
* (1) This BPF_JA is already optimized out in
* the previous run, so there is no need
* to pad any extra byte (0 byte).
* (2) The previous pass emits an imm8 jmp,
* so we pad 2 bytes to match the previous
* insn size.
* (3) Similarly, the previous pass emits an
* imm32 jmp, and 5 bytes is padded.
*/
nops = INSN_SZ_DIFF;
if (nops != 0 && nops != 2 && nops != 5) {
pr_err("unexpected nop jump padding: %d bytes\n",
nops);
return -EFAULT;
}
emit_nops(&prog, nops);
}
break;
}
emit_jmp:
if (is_imm8(jmp_offset)) {
if (jmp_padding) {
/* To avoid breaking jmp_offset, the extra bytes
* are padded before the actual jmp insn, so
* 2 bytes is subtracted from INSN_SZ_DIFF.
*
* If the previous pass already emits an imm8
* jmp, there is nothing to pad (0 byte).
*
					 * If it emitted an imm32 jmp (5 bytes) previously
					 * and now emits an imm8 jmp (2 bytes), then we pad
* (5 - 2 = 3) bytes to stop the image from
* shrinking further.
*/
nops = INSN_SZ_DIFF - 2;
if (nops != 0 && nops != 3) {
pr_err("unexpected jump padding: %d bytes\n",
nops);
return -EFAULT;
}
					emit_nops(&prog, nops);
}
EMIT2(0xEB, jmp_offset);
} else if (is_simm32(jmp_offset)) {
EMIT1_off32(0xE9, jmp_offset);
} else {
pr_err("jmp gen bug %llx\n", jmp_offset);
return -EFAULT;
}
break;
case BPF_JMP | BPF_EXIT:
if (seen_exit) {
jmp_offset = ctx->cleanup_addr - addrs[i];
goto emit_jmp;
}
seen_exit = true;
/* Update cleanup_addr */
ctx->cleanup_addr = proglen;
pop_callee_regs(&prog, callee_regs_used);
EMIT1(0xC9); /* leave */
emit_return(&prog, image + addrs[i - 1] + (prog - temp));
break;
default:
/*
			 * By design the x86-64 JIT should support all BPF instructions.
			 * This error will be seen if a new instruction was added
* to the interpreter, but not to the JIT, or if there is
* junk in bpf_prog.
*/
pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
return -EINVAL;
}
ilen = prog - temp;
if (ilen > BPF_MAX_INSN_SIZE) {
pr_err("bpf_jit: fatal insn size error\n");
return -EFAULT;
}
if (image) {
/*
* When populating the image, assert that:
*
* i) We do not write beyond the allocated space, and
* ii) addrs[i] did not change from the prior run, in order
* to validate assumptions made for computing branch
* displacements.
*/
if (unlikely(proglen + ilen > oldproglen ||
proglen + ilen != addrs[i])) {
pr_err("bpf_jit: fatal error\n");
return -EFAULT;
}
memcpy(rw_image + proglen, temp, ilen);
}
proglen += ilen;
addrs[i] = proglen;
prog = temp;
}
if (image && excnt != bpf_prog->aux->num_exentries) {
pr_err("extable is not populated\n");
return -EFAULT;
}
return proglen;
}
static void clean_stack_garbage(const struct btf_func_model *m,
u8 **pprog, int nr_stack_slots,
int stack_size)
{
int arg_size, off;
u8 *prog;
	/* Generally speaking, the compiler will pass the arguments
	 * on-stack with the "push" instruction, which takes 8 bytes
	 * on the stack. In this case, there won't be garbage values
	 * while we copy the arguments from the origin stack frame to
	 * the current one in BPF_DW-sized chunks.
	 *
	 * However, sometimes the compiler will only allocate 4 bytes on
	 * the stack for an argument. For now, this case only happens
	 * if there is a single on-stack argument and its size is no
	 * more than 4 bytes. In this case, there will be garbage
	 * values in the upper 4 bytes of the slot where we store the
	 * argument in the current stack frame.
	 *
	 * arguments on origin stack:
	 *
	 * stack_arg_1(4-byte) xxx(4-byte)
	 *
	 * what we copy:
	 *
	 * stack_arg_1(8-byte): stack_arg_1(origin) xxx
	 *
	 * and xxx is the garbage value that we should clean here.
	 */
if (nr_stack_slots != 1)
return;
/* the size of the last argument */
arg_size = m->arg_size[m->nr_args - 1];
if (arg_size <= 4) {
off = -(stack_size - 4);
prog = *pprog;
/* mov DWORD PTR [rbp + off], 0 */
if (!is_imm8(off))
EMIT2_off32(0xC7, 0x85, off);
else
EMIT3(0xC7, 0x45, off);
EMIT(0, 4);
*pprog = prog;
}
}
/* get the count of the regs that are used to pass arguments */
static int get_nr_used_regs(const struct btf_func_model *m)
{
int i, arg_regs, nr_used_regs = 0;
for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) {
arg_regs = (m->arg_size[i] + 7) / 8;
if (nr_used_regs + arg_regs <= 6)
nr_used_regs += arg_regs;
if (nr_used_regs >= 6)
break;
}
return nr_used_regs;
}
static void save_args(const struct btf_func_model *m, u8 **prog,
int stack_size, bool for_call_origin)
{
int arg_regs, first_off = 0, nr_regs = 0, nr_stack_slots = 0;
int i, j;
/* Store function arguments to stack.
* For a function that accepts two pointers the sequence will be:
* mov QWORD PTR [rbp-0x10],rdi
* mov QWORD PTR [rbp-0x8],rsi
*/
for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) {
arg_regs = (m->arg_size[i] + 7) / 8;
		/* According to the research of Yonghong, struct members
		 * should be either all in registers or all on the stack.
		 * Meanwhile, the compiler will pass an argument in regs
		 * if the remaining regs can hold it.
		 *
		 * Disorder of the args can happen. For example:
		 *
		 * struct foo_struct {
		 *	long a;
		 *	int b;
		 * };
		 * int foo(char, char, char, char, char, struct foo_struct,
		 *	   char);
		 *
		 * args 1-5 and arg 7 will be passed in regs, and arg 6
		 * will be passed on the stack.
		 */
if (nr_regs + arg_regs > 6) {
/* copy function arguments from origin stack frame
* into current stack frame.
*
* The starting address of the arguments on-stack
* is:
* rbp + 8(push rbp) +
* 8(return addr of origin call) +
* 8(return addr of the caller)
* which means: rbp + 24
*/
for (j = 0; j < arg_regs; j++) {
emit_ldx(prog, BPF_DW, BPF_REG_0, BPF_REG_FP,
nr_stack_slots * 8 + 0x18);
emit_stx(prog, BPF_DW, BPF_REG_FP, BPF_REG_0,
-stack_size);
if (!nr_stack_slots)
first_off = stack_size;
stack_size -= 8;
nr_stack_slots++;
}
} else {
			/* Only copy the on-stack arguments into the current
			 * 'stack_size' area and ignore the regs; this is used
			 * to prepare the on-stack arguments for the origin
			 * call.
			 */
if (for_call_origin) {
nr_regs += arg_regs;
continue;
}
/* copy the arguments from regs into stack */
for (j = 0; j < arg_regs; j++) {
emit_stx(prog, BPF_DW, BPF_REG_FP,
nr_regs == 5 ? X86_REG_R9 : BPF_REG_1 + nr_regs,
-stack_size);
stack_size -= 8;
nr_regs++;
}
}
}
clean_stack_garbage(m, prog, nr_stack_slots, first_off);
}
static void restore_regs(const struct btf_func_model *m, u8 **prog,
int stack_size)
{
int i, j, arg_regs, nr_regs = 0;
/* Restore function arguments from stack.
* For a function that accepts two pointers the sequence will be:
* EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10]
* EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8]
*
* The logic here is similar to what we do in save_args()
*/
for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) {
arg_regs = (m->arg_size[i] + 7) / 8;
if (nr_regs + arg_regs <= 6) {
for (j = 0; j < arg_regs; j++) {
emit_ldx(prog, BPF_DW,
nr_regs == 5 ? X86_REG_R9 : BPF_REG_1 + nr_regs,
BPF_REG_FP,
-stack_size);
stack_size -= 8;
nr_regs++;
}
} else {
stack_size -= 8 * arg_regs;
}
if (nr_regs >= 6)
break;
}
}
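/*
 * Emit the trampoline code that runs one BPF program: stash the BPF cookie
 * in bpf_tramp_run_ctx, call the enter helper, skip the program if the
 * helper returns 0, call the (JITed or interpreted) program, optionally
 * save its return value at [rbp - 8], then call the exit helper.
 */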
static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
struct bpf_tramp_link *l, int stack_size,
int run_ctx_off, bool save_ret)
{
u8 *prog = *pprog;
u8 *jmp_insn;
int ctx_cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
struct bpf_prog *p = l->link.prog;
u64 cookie = l->cookie;
/* mov rdi, cookie */
emit_mov_imm64(&prog, BPF_REG_1, (long) cookie >> 32, (u32) (long) cookie);
/* Prepare struct bpf_tramp_run_ctx.
*
* bpf_tramp_run_ctx is already preserved by
* arch_prepare_bpf_trampoline().
*
* mov QWORD PTR [rbp - run_ctx_off + ctx_cookie_off], rdi
*/
emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_1, -run_ctx_off + ctx_cookie_off);
/* arg1: mov rdi, progs[i] */
emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
/* arg2: lea rsi, [rbp - ctx_cookie_off] */
if (!is_imm8(-run_ctx_off))
EMIT3_off32(0x48, 0x8D, 0xB5, -run_ctx_off);
else
EMIT4(0x48, 0x8D, 0x75, -run_ctx_off);
if (emit_rsb_call(&prog, bpf_trampoline_enter(p), prog))
return -EINVAL;
/* remember prog start time returned by __bpf_prog_enter */
emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
/* if (__bpf_prog_enter*(prog) == 0)
* goto skip_exec_of_prog;
*/
EMIT3(0x48, 0x85, 0xC0); /* test rax,rax */
/* emit 2 nops that will be replaced with JE insn */
jmp_insn = prog;
emit_nops(&prog, 2);
/* arg1: lea rdi, [rbp - stack_size] */
if (!is_imm8(-stack_size))
EMIT3_off32(0x48, 0x8D, 0xBD, -stack_size);
else
EMIT4(0x48, 0x8D, 0x7D, -stack_size);
/* arg2: progs[i]->insnsi for interpreter */
if (!p->jited)
emit_mov_imm64(&prog, BPF_REG_2,
(long) p->insnsi >> 32,
(u32) (long) p->insnsi);
/* call JITed bpf program or interpreter */
if (emit_rsb_call(&prog, p->bpf_func, prog))
return -EINVAL;
/*
* BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
* of the previous call which is then passed on the stack to
* the next BPF program.
*
* BPF_TRAMP_FENTRY trampoline may need to return the return
* value of BPF_PROG_TYPE_STRUCT_OPS prog.
*/
if (save_ret)
emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
/* replace 2 nops with JE insn, since jmp target is known */
jmp_insn[0] = X86_JE;
jmp_insn[1] = prog - jmp_insn - 2;
/* arg1: mov rdi, progs[i] */
emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
/* arg2: mov rsi, rbx <- start time in nsec */
emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
/* arg3: lea rdx, [rbp - run_ctx_off] */
if (!is_imm8(-run_ctx_off))
EMIT3_off32(0x48, 0x8D, 0x95, -run_ctx_off);
else
EMIT4(0x48, 0x8D, 0x55, -run_ctx_off);
if (emit_rsb_call(&prog, bpf_trampoline_exit(p), prog))
return -EINVAL;
*pprog = prog;
return 0;
}
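/* Advance prog to the next 'align'-byte boundary by emitting NOPs. */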
static void emit_align(u8 **pprog, u32 align)
{
u8 *target, *prog = *pprog;
target = PTR_ALIGN(prog, align);
if (target != prog)
emit_nops(&prog, target - prog);
*pprog = prog;
}
static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
{
u8 *prog = *pprog;
s64 offset;
offset = func - (ip + 2 + 4);
if (!is_simm32(offset)) {
pr_err("Target %p is out of range\n", func);
return -EINVAL;
}
EMIT2_off32(0x0F, jmp_cond + 0x10, offset);
*pprog = prog;
return 0;
}
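/* Emit calls to every program attached to this hook (fentry or fexit). */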
static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
struct bpf_tramp_links *tl, int stack_size,
int run_ctx_off, bool save_ret)
{
int i;
u8 *prog = *pprog;
for (i = 0; i < tl->nr_links; i++) {
if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size,
run_ctx_off, save_ret))
return -EINVAL;
}
*pprog = prog;
return 0;
}
static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
struct bpf_tramp_links *tl, int stack_size,
int run_ctx_off, u8 **branches)
{
u8 *prog = *pprog;
int i;
/* The first fmod_ret program will receive a garbage return value.
* Set this to 0 to avoid confusing the program.
*/
emit_mov_imm32(&prog, false, BPF_REG_0, 0);
emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
for (i = 0; i < tl->nr_links; i++) {
if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size, run_ctx_off, true))
return -EINVAL;
/* mod_ret prog stored return value into [rbp - 8]. Emit:
* if (*(u64 *)(rbp - 8) != 0)
* goto do_fexit;
*/
/* cmp QWORD PTR [rbp - 0x8], 0x0 */
EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00);
		/* Save the location of the branch and generate 6 nops
		 * (4 bytes for the offset and 2 bytes for the jump). These nops
* are replaced with a conditional jump once do_fexit (i.e. the
* start of the fexit invocation) is finalized.
*/
branches[i] = prog;
emit_nops(&prog, 4 + 2);
}
*pprog = prog;
return 0;
}
/* Example:
* __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
* its 'struct btf_func_model' will be nr_args=2
* The assembly code when eth_type_trans is executing after trampoline:
*
* push rbp
* mov rbp, rsp
* sub rsp, 16 // space for skb and dev
* push rbx // temp regs to pass start time
* mov qword ptr [rbp - 16], rdi // save skb pointer to stack
* mov qword ptr [rbp - 8], rsi // save dev pointer to stack
* call __bpf_prog_enter // rcu_read_lock and preempt_disable
 * mov rbx, rax                  // remember start time if bpf stats are enabled
* lea rdi, [rbp - 16] // R1==ctx of bpf prog
* call addr_of_jited_FENTRY_prog
* movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off
* mov rsi, rbx // prog start time
* call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math
* mov rdi, qword ptr [rbp - 16] // restore skb pointer from stack
* mov rsi, qword ptr [rbp - 8] // restore dev pointer from stack
* pop rbx
* leave
* ret
*
 * eth_type_trans has a 5-byte nop at the beginning. These 5 bytes will be
* replaced with 'call generated_bpf_trampoline'. When it returns
* eth_type_trans will continue executing with original skb and dev pointers.
*
* The assembly code when eth_type_trans is called from trampoline:
*
* push rbp
* mov rbp, rsp
* sub rsp, 24 // space for skb, dev, return value
* push rbx // temp regs to pass start time
* mov qword ptr [rbp - 24], rdi // save skb pointer to stack
* mov qword ptr [rbp - 16], rsi // save dev pointer to stack
* call __bpf_prog_enter // rcu_read_lock and preempt_disable
* mov rbx, rax // remember start time if bpf stats are enabled
* lea rdi, [rbp - 24] // R1==ctx of bpf prog
* call addr_of_jited_FENTRY_prog // bpf prog can access skb and dev
* movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off
* mov rsi, rbx // prog start time
* call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math
* mov rdi, qword ptr [rbp - 24] // restore skb pointer from stack
* mov rsi, qword ptr [rbp - 16] // restore dev pointer from stack
* call eth_type_trans+5 // execute body of eth_type_trans
* mov qword ptr [rbp - 8], rax // save return value
* call __bpf_prog_enter // rcu_read_lock and preempt_disable
 * mov rbx, rax                  // remember start time if bpf stats are enabled
* lea rdi, [rbp - 24] // R1==ctx of bpf prog
* call addr_of_jited_FEXIT_prog // bpf prog can access skb, dev, return value
* movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off
* mov rsi, rbx // prog start time
* call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math
* mov rax, qword ptr [rbp - 8] // restore eth_type_trans's return value
* pop rbx
* leave
* add rsp, 8 // skip eth_type_trans's frame
* ret // return to its caller
*/
int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
const struct btf_func_model *m, u32 flags,
struct bpf_tramp_links *tlinks,
void *func_addr)
{
int i, ret, nr_regs = m->nr_args, stack_size = 0;
int regs_off, nregs_off, ip_off, run_ctx_off, arg_stack_off, rbx_off;
struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
void *orig_call = func_addr;
u8 **branches = NULL;
u8 *prog;
bool save_ret;
/* extra registers for struct arguments */
for (i = 0; i < m->nr_args; i++)
if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG)
nr_regs += (m->arg_size[i] + 7) / 8 - 1;
	/* x86-64 supports up to MAX_BPF_FUNC_ARGS arguments. The first 6
	 * are passed through regs, the rest are passed through the stack.
*/
if (nr_regs > MAX_BPF_FUNC_ARGS)
return -ENOTSUPP;
/* Generated trampoline stack layout:
*
* RBP + 8 [ return address ]
* RBP + 0 [ RBP ]
*
* RBP - 8 [ return value ] BPF_TRAMP_F_CALL_ORIG or
* BPF_TRAMP_F_RET_FENTRY_RET flags
*
* [ reg_argN ] always
* [ ... ]
* RBP - regs_off [ reg_arg1 ] program's ctx pointer
*
* RBP - nregs_off [ regs count ] always
*
* RBP - ip_off [ traced function ] BPF_TRAMP_F_IP_ARG flag
*
* RBP - rbx_off [ rbx value ] always
*
* RBP - run_ctx_off [ bpf_tramp_run_ctx ]
*
* [ stack_argN ] BPF_TRAMP_F_CALL_ORIG
* [ ... ]
* [ stack_arg2 ]
* RBP - arg_stack_off [ stack_arg1 ]
*/
/* room for return value of orig_call or fentry prog */
save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
if (save_ret)
stack_size += 8;
stack_size += nr_regs * 8;
regs_off = stack_size;
/* regs count */
stack_size += 8;
nregs_off = stack_size;
if (flags & BPF_TRAMP_F_IP_ARG)
stack_size += 8; /* room for IP address argument */
ip_off = stack_size;
stack_size += 8;
rbx_off = stack_size;
stack_size += (sizeof(struct bpf_tramp_run_ctx) + 7) & ~0x7;
run_ctx_off = stack_size;
if (nr_regs > 6 && (flags & BPF_TRAMP_F_CALL_ORIG)) {
		/* the space used to pass arguments on the stack */
stack_size += (nr_regs - get_nr_used_regs(m)) * 8;
		/* make sure the stack pointer is 16-byte aligned if we
		 * need to pass arguments on the stack, which means
		 * [stack_size + 8(rbp) + 8(rip) + 8(origin rip)]
		 * should be 16-byte aligned. The following code depends
		 * on stack_size already being 8-byte aligned.
*/
stack_size += (stack_size % 16) ? 0 : 8;
}
arg_stack_off = stack_size;
if (flags & BPF_TRAMP_F_SKIP_FRAME) {
/* skip patched call instruction and point orig_call to actual
* body of the kernel function.
*/
if (is_endbr(*(u32 *)orig_call))
orig_call += ENDBR_INSN_SIZE;
orig_call += X86_PATCH_SIZE;
}
prog = image;
EMIT_ENDBR();
/*
* This is the direct-call trampoline, as such it needs accounting
* for the __fentry__ call.
*/
x86_call_depth_emit_accounting(&prog, NULL);
EMIT1(0x55); /* push rbp */
EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
if (!is_imm8(stack_size))
/* sub rsp, stack_size */
EMIT3_off32(0x48, 0x81, 0xEC, stack_size);
else
/* sub rsp, stack_size */
EMIT4(0x48, 0x83, 0xEC, stack_size);
/* mov QWORD PTR [rbp - rbx_off], rbx */
emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_6, -rbx_off);
/* Store number of argument registers of the traced function:
* mov rax, nr_regs
* mov QWORD PTR [rbp - nregs_off], rax
*/
emit_mov_imm64(&prog, BPF_REG_0, 0, (u32) nr_regs);
emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -nregs_off);
if (flags & BPF_TRAMP_F_IP_ARG) {
/* Store IP address of the traced function:
* movabsq rax, func_addr
* mov QWORD PTR [rbp - ip_off], rax
*/
emit_mov_imm64(&prog, BPF_REG_0, (long) func_addr >> 32, (u32) (long) func_addr);
emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -ip_off);
}
save_args(m, &prog, regs_off, false);
if (flags & BPF_TRAMP_F_CALL_ORIG) {
/* arg1: mov rdi, im */
emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
if (emit_rsb_call(&prog, __bpf_tramp_enter, prog)) {
ret = -EINVAL;
goto cleanup;
}
}
if (fentry->nr_links)
if (invoke_bpf(m, &prog, fentry, regs_off, run_ctx_off,
flags & BPF_TRAMP_F_RET_FENTRY_RET))
return -EINVAL;
if (fmod_ret->nr_links) {
branches = kcalloc(fmod_ret->nr_links, sizeof(u8 *),
GFP_KERNEL);
if (!branches)
return -ENOMEM;
if (invoke_bpf_mod_ret(m, &prog, fmod_ret, regs_off,
run_ctx_off, branches)) {
ret = -EINVAL;
goto cleanup;
}
}
if (flags & BPF_TRAMP_F_CALL_ORIG) {
restore_regs(m, &prog, regs_off);
save_args(m, &prog, arg_stack_off, true);
if (flags & BPF_TRAMP_F_ORIG_STACK) {
emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, 8);
EMIT2(0xff, 0xd0); /* call *rax */
} else {
/* call original function */
if (emit_rsb_call(&prog, orig_call, prog)) {
ret = -EINVAL;
goto cleanup;
}
}
		/* remember the return value on the stack for the bpf prog to access */
emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
im->ip_after_call = prog;
memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
prog += X86_PATCH_SIZE;
}
if (fmod_ret->nr_links) {
/* From Intel 64 and IA-32 Architectures Optimization
* Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
* Coding Rule 11: All branch targets should be 16-byte
* aligned.
*/
emit_align(&prog, 16);
/* Update the branches saved in invoke_bpf_mod_ret with the
* aligned address of do_fexit.
*/
for (i = 0; i < fmod_ret->nr_links; i++)
emit_cond_near_jump(&branches[i], prog, branches[i],
X86_JNE);
}
if (fexit->nr_links)
if (invoke_bpf(m, &prog, fexit, regs_off, run_ctx_off, false)) {
ret = -EINVAL;
goto cleanup;
}
if (flags & BPF_TRAMP_F_RESTORE_REGS)
restore_regs(m, &prog, regs_off);
/* This needs to be done regardless. If there were fmod_ret programs,
* the return value is only updated on the stack and still needs to be
* restored to R0.
*/
if (flags & BPF_TRAMP_F_CALL_ORIG) {
im->ip_epilogue = prog;
/* arg1: mov rdi, im */
emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
if (emit_rsb_call(&prog, __bpf_tramp_exit, prog)) {
ret = -EINVAL;
goto cleanup;
}
}
/* restore return value of orig_call or fentry prog back into RAX */
if (save_ret)
emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, -rbx_off);
EMIT1(0xC9); /* leave */
if (flags & BPF_TRAMP_F_SKIP_FRAME)
/* skip our return address and return to parent */
EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */
emit_return(&prog, prog);
/* Make sure the trampoline generation logic doesn't overflow */
if (WARN_ON_ONCE(prog > (u8 *)image_end - BPF_INSN_SAFETY)) {
ret = -EFAULT;
goto cleanup;
}
ret = prog - (u8 *)image;
cleanup:
kfree(branches);
return ret;
}
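/*
 * Emit a balanced binary search over the sorted program addresses in
 * progs[a..b]: compare rdx (the target address) against the pivot entry
 * and branch into the matching half. A leaf emits 'je prog' for an exact
 * match, followed by a retpoline-safe indirect jump as the fallback.
 */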
static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs, u8 *image, u8 *buf)
{
u8 *jg_reloc, *prog = *pprog;
int pivot, err, jg_bytes = 1;
s64 jg_offset;
if (a == b) {
/* Leaf node of recursion, i.e. not a range of indices
* anymore.
*/
EMIT1(add_1mod(0x48, BPF_REG_3)); /* cmp rdx,func */
if (!is_simm32(progs[a]))
return -1;
EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3),
progs[a]);
err = emit_cond_near_jump(&prog, /* je func */
(void *)progs[a], image + (prog - buf),
X86_JE);
if (err)
return err;
emit_indirect_jump(&prog, 2 /* rdx */, image + (prog - buf));
*pprog = prog;
return 0;
}
/* Not a leaf node, so we pivot, and recursively descend into
* the lower and upper ranges.
*/
pivot = (b - a) / 2;
EMIT1(add_1mod(0x48, BPF_REG_3)); /* cmp rdx,func */
if (!is_simm32(progs[a + pivot]))
return -1;
EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), progs[a + pivot]);
if (pivot > 2) { /* jg upper_part */
/* Require near jump. */
jg_bytes = 4;
EMIT2_off32(0x0F, X86_JG + 0x10, 0);
} else {
EMIT2(X86_JG, 0);
}
jg_reloc = prog;
err = emit_bpf_dispatcher(&prog, a, a + pivot, /* emit lower_part */
progs, image, buf);
if (err)
return err;
/* From Intel 64 and IA-32 Architectures Optimization
* Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
* Coding Rule 11: All branch targets should be 16-byte
* aligned.
*/
emit_align(&prog, 16);
jg_offset = prog - jg_reloc;
emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes);
err = emit_bpf_dispatcher(&prog, a + pivot + 1, /* emit upper_part */
b, progs, image, buf);
if (err)
return err;
*pprog = prog;
return 0;
}
static int cmp_ips(const void *a, const void *b)
{
const s64 *ipa = a;
const s64 *ipb = b;
if (*ipa > *ipb)
return 1;
if (*ipa < *ipb)
return -1;
return 0;
}
int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs)
{
u8 *prog = buf;
sort(funcs, num_funcs, sizeof(funcs[0]), cmp_ips, NULL);
return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs, image, buf);
}
struct x64_jit_data {
struct bpf_binary_header *rw_header;
struct bpf_binary_header *header;
int *addrs;
u8 *image;
int proglen;
struct jit_context ctx;
};
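/*
 * do_jit() is run repeatedly until the image stops shrinking; cap the
 * number of passes and switch on NOP padding for the final passes so that
 * even pathological programs are guaranteed to converge.
 */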
#define MAX_PASSES 20
#define PADDING_PASSES (MAX_PASSES - 5)
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
struct bpf_binary_header *rw_header = NULL;
struct bpf_binary_header *header = NULL;
struct bpf_prog *tmp, *orig_prog = prog;
struct x64_jit_data *jit_data;
int proglen, oldproglen = 0;
struct jit_context ctx = {};
bool tmp_blinded = false;
bool extra_pass = false;
bool padding = false;
u8 *rw_image = NULL;
u8 *image = NULL;
int *addrs;
int pass;
int i;
if (!prog->jit_requested)
return orig_prog;
tmp = bpf_jit_blind_constants(prog);
/*
* If blinding was requested and we failed during blinding,
* we must fall back to the interpreter.
*/
if (IS_ERR(tmp))
return orig_prog;
if (tmp != prog) {
tmp_blinded = true;
prog = tmp;
}
jit_data = prog->aux->jit_data;
if (!jit_data) {
jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
if (!jit_data) {
prog = orig_prog;
goto out;
}
prog->aux->jit_data = jit_data;
}
addrs = jit_data->addrs;
if (addrs) {
ctx = jit_data->ctx;
oldproglen = jit_data->proglen;
image = jit_data->image;
header = jit_data->header;
rw_header = jit_data->rw_header;
rw_image = (void *)rw_header + ((void *)image - (void *)header);
extra_pass = true;
padding = true;
goto skip_init_addrs;
}
addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
if (!addrs) {
prog = orig_prog;
goto out_addrs;
}
/*
	 * Before the first pass, make a rough estimate of addrs[]:
	 * each BPF instruction is translated to less than 64 bytes.
*/
for (proglen = 0, i = 0; i <= prog->len; i++) {
proglen += 64;
addrs[i] = proglen;
}
ctx.cleanup_addr = proglen;
skip_init_addrs:
/*
* JITed image shrinks with every pass and the loop iterates
* until the image stops shrinking. Very large BPF programs
	 * may converge on the last pass. In such a case, do one more
	 * pass to emit the final image.
*/
for (pass = 0; pass < MAX_PASSES || image; pass++) {
if (!padding && pass >= PADDING_PASSES)
padding = true;
proglen = do_jit(prog, addrs, image, rw_image, oldproglen, &ctx, padding);
if (proglen <= 0) {
out_image:
image = NULL;
if (header) {
bpf_arch_text_copy(&header->size, &rw_header->size,
sizeof(rw_header->size));
bpf_jit_binary_pack_free(header, rw_header);
}
/* Fall back to interpreter mode */
prog = orig_prog;
if (extra_pass) {
prog->bpf_func = NULL;
prog->jited = 0;
prog->jited_len = 0;
}
goto out_addrs;
}
if (image) {
if (proglen != oldproglen) {
pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
proglen, oldproglen);
goto out_image;
}
break;
}
if (proglen == oldproglen) {
/*
* The number of entries in extable is the number of BPF_LDX
* insns that access kernel memory via "pointer to BTF type".
* The verifier changed their opcode from LDX|MEM|size
* to LDX|PROBE_MEM|size to make JITing easier.
*/
u32 align = __alignof__(struct exception_table_entry);
u32 extable_size = prog->aux->num_exentries *
sizeof(struct exception_table_entry);
/* allocate module memory for x86 insns and extable */
header = bpf_jit_binary_pack_alloc(roundup(proglen, align) + extable_size,
&image, align, &rw_header, &rw_image,
jit_fill_hole);
if (!header) {
prog = orig_prog;
goto out_addrs;
}
prog->aux->extable = (void *) image + roundup(proglen, align);
}
oldproglen = proglen;
cond_resched();
}
if (bpf_jit_enable > 1)
bpf_jit_dump(prog->len, proglen, pass + 1, rw_image);
if (image) {
if (!prog->is_func || extra_pass) {
/*
* bpf_jit_binary_pack_finalize fails in two scenarios:
* 1) header is not pointing to proper module memory;
* 2) the arch doesn't support bpf_arch_text_copy().
*
* Both cases are serious bugs and justify WARN_ON.
*/
if (WARN_ON(bpf_jit_binary_pack_finalize(prog, header, rw_header))) {
/* header has been freed */
header = NULL;
goto out_image;
}
bpf_tail_call_direct_fixup(prog);
} else {
jit_data->addrs = addrs;
jit_data->ctx = ctx;
jit_data->proglen = proglen;
jit_data->image = image;
jit_data->header = header;
jit_data->rw_header = rw_header;
}
prog->bpf_func = (void *)image;
prog->jited = 1;
prog->jited_len = proglen;
} else {
prog = orig_prog;
}
if (!image || !prog->is_func || extra_pass) {
if (image)
bpf_prog_fill_jited_linfo(prog, addrs + 1);
out_addrs:
kvfree(addrs);
kfree(jit_data);
prog->aux->jit_data = NULL;
}
out:
if (tmp_blinded)
bpf_jit_prog_release_other(prog, prog == orig_prog ?
tmp : orig_prog);
return prog;
}
bool bpf_jit_supports_kfunc_call(void)
{
return true;
}
void *bpf_arch_text_copy(void *dst, void *src, size_t len)
{
if (text_poke_copy(dst, src, len) == NULL)
return ERR_PTR(-EINVAL);
return dst;
}
/* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */
bool bpf_jit_supports_subprog_tailcalls(void)
{
return true;
}
void bpf_jit_free(struct bpf_prog *prog)
{
if (prog->jited) {
struct x64_jit_data *jit_data = prog->aux->jit_data;
struct bpf_binary_header *hdr;
/*
* If we fail the final pass of JIT (from jit_subprogs),
* the program may not be finalized yet. Call finalize here
* before freeing it.
*/
if (jit_data) {
bpf_jit_binary_pack_finalize(prog, jit_data->header,
jit_data->rw_header);
kvfree(jit_data->addrs);
kfree(jit_data);
}
hdr = bpf_jit_binary_pack_hdr(prog);
bpf_jit_binary_pack_free(hdr, NULL);
WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(prog));
}
bpf_prog_unlock_free(prog);
}
| linux-master | arch/x86/net/bpf_jit_comp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Just-In-Time compiler for eBPF filters on IA32 (32bit x86)
*
* Author: Wang YanQing ([email protected])
 * The code is based on code and ideas from:
* Eric Dumazet ([email protected])
* and from:
* Shubham Bansal <[email protected]>
*/
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/cacheflush.h>
#include <asm/set_memory.h>
#include <asm/nospec-branch.h>
#include <asm/asm-prototypes.h>
#include <linux/bpf.h>
/*
* eBPF prog stack layout:
*
* high
* original ESP => +-----+
* | | callee saved registers
* +-----+
* | ... | eBPF JIT scratch space
* BPF_FP,IA32_EBP => +-----+
* | ... | eBPF prog stack
* +-----+
* |RSVD | JIT scratchpad
* current ESP => +-----+
* | |
* | ... | Function call stack
* | |
* +-----+
* low
*
* The callee saved registers:
*
* high
* original ESP => +------------------+ \
* | ebp | |
* current EBP => +------------------+ } callee saved registers
* | ebx,esi,edi | |
* +------------------+ /
* low
*/
static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
{
if (len == 1)
*ptr = bytes;
else if (len == 2)
*(u16 *)ptr = bytes;
else {
*(u32 *)ptr = bytes;
barrier();
}
return ptr + len;
}
#define EMIT(bytes, len) \
do { prog = emit_code(prog, bytes, len); cnt += len; } while (0)
#define EMIT1(b1) EMIT(b1, 1)
#define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
#define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
#define EMIT4(b1, b2, b3, b4) \
EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
#define EMIT1_off32(b1, off) \
do { EMIT1(b1); EMIT(off, 4); } while (0)
#define EMIT2_off32(b1, b2, off) \
do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
#define EMIT3_off32(b1, b2, b3, off) \
do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
#define EMIT4_off32(b1, b2, b3, b4, off) \
do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
#define jmp_label(label, jmp_insn_len) (label - cnt - jmp_insn_len)
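/*
 * Displacement from the end of a jmp_insn_len-byte jump being emitted at
 * byte offset 'cnt' to 'label'; both are offsets from the start of the
 * current image.
 */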
static bool is_imm8(int value)
{
return value <= 127 && value >= -128;
}
static bool is_simm32(s64 value)
{
return value == (s64) (s32) value;
}
#define STACK_OFFSET(k) (k)
#define TCALL_CNT (MAX_BPF_JIT_REG + 0) /* Tail Call Count */
#define IA32_EAX (0x0)
#define IA32_EBX (0x3)
#define IA32_ECX (0x1)
#define IA32_EDX (0x2)
#define IA32_ESI (0x6)
#define IA32_EDI (0x7)
#define IA32_EBP (0x5)
#define IA32_ESP (0x4)
/*
 * List of x86 conditional jump opcodes (. + s8)
* Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
*/
#define IA32_JB 0x72
#define IA32_JAE 0x73
#define IA32_JE 0x74
#define IA32_JNE 0x75
#define IA32_JBE 0x76
#define IA32_JA 0x77
#define IA32_JL 0x7C
#define IA32_JGE 0x7D
#define IA32_JLE 0x7E
#define IA32_JG 0x7F
#define COND_JMP_OPCODE_INVALID (0xFF)
/*
* Map eBPF registers to IA32 32bit registers or stack scratch space.
*
* 1. All the registers, R0-R10, are mapped to scratch space on stack.
* 2. We need two 64 bit temp registers to do complex operations on eBPF
* registers.
 * 3. For performance reasons, BPF_REG_AX, used for blinding constants, is
 *    mapped to the real hardware register pair IA32_ESI and IA32_EDI.
*
 * As the eBPF registers are all 64 bit registers and IA32 has only 32 bit
 * registers, we have to map each eBPF register to two IA32 32 bit regs or
 * scratch memory space, and build each eBPF 64 bit register from those.
*
* We use IA32_EAX, IA32_EDX, IA32_ECX, IA32_EBX as temporary registers.
*/
static const u8 bpf2ia32[][2] = {
/* Return value from in-kernel function, and exit value from eBPF */
[BPF_REG_0] = {STACK_OFFSET(0), STACK_OFFSET(4)},
/* The arguments from eBPF program to in-kernel function */
/* Stored on stack scratch space */
[BPF_REG_1] = {STACK_OFFSET(8), STACK_OFFSET(12)},
[BPF_REG_2] = {STACK_OFFSET(16), STACK_OFFSET(20)},
[BPF_REG_3] = {STACK_OFFSET(24), STACK_OFFSET(28)},
[BPF_REG_4] = {STACK_OFFSET(32), STACK_OFFSET(36)},
[BPF_REG_5] = {STACK_OFFSET(40), STACK_OFFSET(44)},
/* Callee saved registers that in-kernel function will preserve */
/* Stored on stack scratch space */
[BPF_REG_6] = {STACK_OFFSET(48), STACK_OFFSET(52)},
[BPF_REG_7] = {STACK_OFFSET(56), STACK_OFFSET(60)},
[BPF_REG_8] = {STACK_OFFSET(64), STACK_OFFSET(68)},
[BPF_REG_9] = {STACK_OFFSET(72), STACK_OFFSET(76)},
/* Read only Frame Pointer to access Stack */
[BPF_REG_FP] = {STACK_OFFSET(80), STACK_OFFSET(84)},
/* Temporary register for blinding constants. */
[BPF_REG_AX] = {IA32_ESI, IA32_EDI},
/* Tail call count. Stored on stack scratch space. */
[TCALL_CNT] = {STACK_OFFSET(88), STACK_OFFSET(92)},
};
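/*
 * Worked example (a sketch): with the prologue-adjusted EBP as the
 * scratch base, loading the low 32 bits of BPF_REG_1 is emitted as
 * "mov eax, dword ptr [ebp+8]" and the high 32 bits as
 * "mov edx, dword ptr [ebp+12]".
 */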
#define dst_lo dst[0]
#define dst_hi dst[1]
#define src_lo src[0]
#define src_hi src[1]
#define STACK_ALIGNMENT 8
/*
 * Stack space for BPF_REG_0 through BPF_REG_9, BPF_REG_FP and the
 * tail call count: 12 slots of 8 bytes each. (BPF_REG_AX lives in
 * IA32_ESI/IA32_EDI, not on the stack.)
*/
#define SCRATCH_SIZE 96
/* Total stack size used in JITed code */
#define _STACK_SIZE (stack_depth + SCRATCH_SIZE)
#define STACK_SIZE ALIGN(_STACK_SIZE, STACK_ALIGNMENT)
/* Get the offset of eBPF REGISTERs stored on scratch space. */
#define STACK_VAR(off) (off)
/* Encode 'dst_reg' register into IA32 opcode 'byte' */
static u8 add_1reg(u8 byte, u32 dst_reg)
{
return byte + dst_reg;
}
/* Encode 'dst_reg' and 'src_reg' registers into IA32 opcode 'byte' */
static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
{
return byte + dst_reg + (src_reg << 3);
}
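/*
 * Example: for "mov dst,src" the 0x89 opcode takes a ModRM byte of
 * 0xC0 + dst + (src << 3), so add_2reg(0xC0, IA32_EAX, IA32_EDX)
 * yields 0xD0 and EMIT2(0x89, 0xD0) encodes "mov eax,edx".
 */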
static void jit_fill_hole(void *area, unsigned int size)
{
/* Fill whole space with int3 instructions */
memset(area, 0xcc, size);
}
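/* dst = imm (4 bytes) */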
static inline void emit_ia32_mov_i(const u8 dst, const u32 val, bool dstk,
u8 **pprog)
{
u8 *prog = *pprog;
int cnt = 0;
if (dstk) {
if (val == 0) {
/* xor eax,eax */
EMIT2(0x33, add_2reg(0xC0, IA32_EAX, IA32_EAX));
/* mov dword ptr [ebp+off],eax */
EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EAX),
STACK_VAR(dst));
} else {
EMIT3_off32(0xC7, add_1reg(0x40, IA32_EBP),
STACK_VAR(dst), val);
}
} else {
if (val == 0)
EMIT2(0x33, add_2reg(0xC0, dst, dst));
else
EMIT2_off32(0xC7, add_1reg(0xC0, dst),
val);
}
*pprog = prog;
}
/* dst = src (4 bytes) */
static inline void emit_ia32_mov_r(const u8 dst, const u8 src, bool dstk,
bool sstk, u8 **pprog)
{
u8 *prog = *pprog;
int cnt = 0;
u8 sreg = sstk ? IA32_EAX : src;
if (sstk)
/* mov eax,dword ptr [ebp+off] */
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), STACK_VAR(src));
if (dstk)
/* mov dword ptr [ebp+off],eax */
EMIT3(0x89, add_2reg(0x40, IA32_EBP, sreg), STACK_VAR(dst));
else
/* mov dst,sreg */
EMIT2(0x89, add_2reg(0xC0, dst, sreg));
*pprog = prog;
}
/* dst = src (32 or 64 bit) */
static inline void emit_ia32_mov_r64(const bool is64, const u8 dst[],
const u8 src[], bool dstk,
bool sstk, u8 **pprog,
const struct bpf_prog_aux *aux)
{
emit_ia32_mov_r(dst_lo, src_lo, dstk, sstk, pprog);
if (is64)
/* complete 8 byte move */
emit_ia32_mov_r(dst_hi, src_hi, dstk, sstk, pprog);
else if (!aux->verifier_zext)
/* zero out high 4 bytes */
emit_ia32_mov_i(dst_hi, 0, dstk, pprog);
}
/* dst = imm, sign-extended to 64 bit when is64 */
static inline void emit_ia32_mov_i64(const bool is64, const u8 dst[],
const u32 val, bool dstk, u8 **pprog)
{
u32 hi = 0;
if (is64 && (val & (1<<31)))
hi = (u32)~0;
emit_ia32_mov_i(dst_lo, val, dstk, pprog);
emit_ia32_mov_i(dst_hi, hi, dstk, pprog);
}
/*
* ALU operation (32 bit)
* dst = dst * src
*/
static inline void emit_ia32_mul_r(const u8 dst, const u8 src, bool dstk,
bool sstk, u8 **pprog)
{
u8 *prog = *pprog;
int cnt = 0;
u8 sreg = sstk ? IA32_ECX : src;
if (sstk)
/* mov ecx,dword ptr [ebp+off] */
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX), STACK_VAR(src));
if (dstk)
/* mov eax,dword ptr [ebp+off] */
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), STACK_VAR(dst));
else
/* mov eax,dst */
EMIT2(0x8B, add_2reg(0xC0, dst, IA32_EAX));
EMIT2(0xF7, add_1reg(0xE0, sreg));
if (dstk)
/* mov dword ptr [ebp+off],eax */
EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EAX),
STACK_VAR(dst));
else
/* mov dst,eax */
EMIT2(0x89, add_2reg(0xC0, dst, IA32_EAX));
*pprog = prog;
}
static inline void emit_ia32_to_le_r64(const u8 dst[], s32 val,
bool dstk, u8 **pprog,
const struct bpf_prog_aux *aux)
{
u8 *prog = *pprog;
int cnt = 0;
u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
if (dstk && val != 64) {
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
STACK_VAR(dst_lo));
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX),
STACK_VAR(dst_hi));
}
switch (val) {
case 16:
/*
* Emit 'movzwl eax,ax' to zero extend 16-bit
* into 64 bit
*/
EMIT2(0x0F, 0xB7);
EMIT1(add_2reg(0xC0, dreg_lo, dreg_lo));
if (!aux->verifier_zext)
/* xor dreg_hi,dreg_hi */
EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi));
break;
case 32:
if (!aux->verifier_zext)
/* xor dreg_hi,dreg_hi */
EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi));
break;
case 64:
/* nop */
break;
}
if (dstk && val != 64) {
/* mov dword ptr [ebp+off],dreg_lo */
EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_lo),
STACK_VAR(dst_lo));
/* mov dword ptr [ebp+off],dreg_hi */
EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_hi),
STACK_VAR(dst_hi));
}
*pprog = prog;
}
static inline void emit_ia32_to_be_r64(const u8 dst[], s32 val,
bool dstk, u8 **pprog,
const struct bpf_prog_aux *aux)
{
u8 *prog = *pprog;
int cnt = 0;
u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
if (dstk) {
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
STACK_VAR(dst_lo));
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX),
STACK_VAR(dst_hi));
}
switch (val) {
case 16:
/* Emit 'ror %ax, 8' to swap lower 2 bytes */
EMIT1(0x66);
EMIT3(0xC1, add_1reg(0xC8, dreg_lo), 8);
EMIT2(0x0F, 0xB7);
EMIT1(add_2reg(0xC0, dreg_lo, dreg_lo));
if (!aux->verifier_zext)
/* xor dreg_hi,dreg_hi */
EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi));
break;
case 32:
/* Emit 'bswap eax' to swap lower 4 bytes */
EMIT1(0x0F);
EMIT1(add_1reg(0xC8, dreg_lo));
if (!aux->verifier_zext)
/* xor dreg_hi,dreg_hi */
EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi));
break;
case 64:
/* Emit 'bswap eax' to swap lower 4 bytes */
EMIT1(0x0F);
EMIT1(add_1reg(0xC8, dreg_lo));
/* Emit 'bswap edx' to swap lower 4 bytes */
EMIT1(0x0F);
EMIT1(add_1reg(0xC8, dreg_hi));
/* mov ecx,dreg_hi */
EMIT2(0x89, add_2reg(0xC0, IA32_ECX, dreg_hi));
/* mov dreg_hi,dreg_lo */
EMIT2(0x89, add_2reg(0xC0, dreg_hi, dreg_lo));
/* mov dreg_lo,ecx */
EMIT2(0x89, add_2reg(0xC0, dreg_lo, IA32_ECX));
break;
}
if (dstk) {
/* mov dword ptr [ebp+off],dreg_lo */
EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_lo),
STACK_VAR(dst_lo));
/* mov dword ptr [ebp+off],dreg_hi */
EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_hi),
STACK_VAR(dst_hi));
}
*pprog = prog;
}
/*
* ALU operation (32 bit)
* dst = dst (div|mod) src
*/
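/*
 * Note: 32 bit BPF_DIV/BPF_MOD are unsigned, and "div ecx" divides the
 * 64 bit value edx:eax, so edx must be zeroed first. The quotient lands
 * in eax and the remainder in edx.
 */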
static inline void emit_ia32_div_mod_r(const u8 op, const u8 dst, const u8 src,
bool dstk, bool sstk, u8 **pprog)
{
u8 *prog = *pprog;
int cnt = 0;
if (sstk)
/* mov ecx,dword ptr [ebp+off] */
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX),
STACK_VAR(src));
else if (src != IA32_ECX)
/* mov ecx,src */
EMIT2(0x8B, add_2reg(0xC0, src, IA32_ECX));
if (dstk)
/* mov eax,dword ptr [ebp+off] */
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
STACK_VAR(dst));
else
/* mov eax,dst */
EMIT2(0x8B, add_2reg(0xC0, dst, IA32_EAX));
/* xor edx,edx */
EMIT2(0x31, add_2reg(0xC0, IA32_EDX, IA32_EDX));
/* div ecx */
EMIT2(0xF7, add_1reg(0xF0, IA32_ECX));
if (op == BPF_MOD) {
if (dstk)
EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EDX),
STACK_VAR(dst));
else
EMIT2(0x89, add_2reg(0xC0, dst, IA32_EDX));
} else {
if (dstk)
EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EAX),
STACK_VAR(dst));
else
EMIT2(0x89, add_2reg(0xC0, dst, IA32_EAX));
}
*pprog = prog;
}
/*
* ALU operation (32 bit)
* dst = dst (shift) src
*/
static inline void emit_ia32_shift_r(const u8 op, const u8 dst, const u8 src,
bool dstk, bool sstk, u8 **pprog)
{
u8 *prog = *pprog;
int cnt = 0;
u8 dreg = dstk ? IA32_EAX : dst;
u8 b2;
if (dstk)
/* mov eax,dword ptr [ebp+off] */
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), STACK_VAR(dst));
if (sstk)
/* mov ecx,dword ptr [ebp+off] */
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX), STACK_VAR(src));
else if (src != IA32_ECX)
/* mov ecx,src */
EMIT2(0x8B, add_2reg(0xC0, src, IA32_ECX));
switch (op) {
case BPF_LSH:
b2 = 0xE0; break;
case BPF_RSH:
b2 = 0xE8; break;
case BPF_ARSH:
b2 = 0xF8; break;
default:
return;
}
EMIT2(0xD3, add_1reg(b2, dreg));
if (dstk)
/* mov dword ptr [ebp+off],dreg */
EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg), STACK_VAR(dst));
*pprog = prog;
}
/*
* ALU operation (32 bit)
* dst = dst (op) src
*/
static inline void emit_ia32_alu_r(const bool is64, const bool hi, const u8 op,
const u8 dst, const u8 src, bool dstk,
bool sstk, u8 **pprog)
{
u8 *prog = *pprog;
int cnt = 0;
u8 sreg = sstk ? IA32_EAX : src;
u8 dreg = dstk ? IA32_EDX : dst;
if (sstk)
/* mov eax,dword ptr [ebp+off] */
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), STACK_VAR(src));
if (dstk)
/* mov eax,dword ptr [ebp+off] */
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX), STACK_VAR(dst));
switch (BPF_OP(op)) {
/* dst = dst + src */
case BPF_ADD:
if (hi && is64)
EMIT2(0x11, add_2reg(0xC0, dreg, sreg));
else
EMIT2(0x01, add_2reg(0xC0, dreg, sreg));
break;
/* dst = dst - src */
case BPF_SUB:
if (hi && is64)
EMIT2(0x19, add_2reg(0xC0, dreg, sreg));
else
EMIT2(0x29, add_2reg(0xC0, dreg, sreg));
break;
/* dst = dst | src */
case BPF_OR:
EMIT2(0x09, add_2reg(0xC0, dreg, sreg));
break;
/* dst = dst & src */
case BPF_AND:
EMIT2(0x21, add_2reg(0xC0, dreg, sreg));
break;
/* dst = dst ^ src */
case BPF_XOR:
EMIT2(0x31, add_2reg(0xC0, dreg, sreg));
break;
}
if (dstk)
/* mov dword ptr [ebp+off],dreg */
EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg),
STACK_VAR(dst));
*pprog = prog;
}
/* ALU operation (64 bit) */
static inline void emit_ia32_alu_r64(const bool is64, const u8 op,
const u8 dst[], const u8 src[],
bool dstk, bool sstk,
u8 **pprog, const struct bpf_prog_aux *aux)
{
u8 *prog = *pprog;
emit_ia32_alu_r(is64, false, op, dst_lo, src_lo, dstk, sstk, &prog);
if (is64)
emit_ia32_alu_r(is64, true, op, dst_hi, src_hi, dstk, sstk,
&prog);
else if (!aux->verifier_zext)
emit_ia32_mov_i(dst_hi, 0, dstk, &prog);
*pprog = prog;
}
/*
* ALU operation (32 bit)
* dst = dst (op) val
*/
static inline void emit_ia32_alu_i(const bool is64, const bool hi, const u8 op,
const u8 dst, const s32 val, bool dstk,
u8 **pprog)
{
u8 *prog = *pprog;
int cnt = 0;
u8 dreg = dstk ? IA32_EAX : dst;
u8 sreg = IA32_EDX;
if (dstk)
/* mov eax,dword ptr [ebp+off] */
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), STACK_VAR(dst));
if (!is_imm8(val))
/* mov edx,imm32*/
EMIT2_off32(0xC7, add_1reg(0xC0, IA32_EDX), val);
switch (op) {
/* dst = dst + val */
case BPF_ADD:
if (hi && is64) {
if (is_imm8(val))
EMIT3(0x83, add_1reg(0xD0, dreg), val);
else
EMIT2(0x11, add_2reg(0xC0, dreg, sreg));
} else {
if (is_imm8(val))
EMIT3(0x83, add_1reg(0xC0, dreg), val);
else
EMIT2(0x01, add_2reg(0xC0, dreg, sreg));
}
break;
/* dst = dst - val */
case BPF_SUB:
if (hi && is64) {
if (is_imm8(val))
EMIT3(0x83, add_1reg(0xD8, dreg), val);
else
EMIT2(0x19, add_2reg(0xC0, dreg, sreg));
} else {
if (is_imm8(val))
EMIT3(0x83, add_1reg(0xE8, dreg), val);
else
EMIT2(0x29, add_2reg(0xC0, dreg, sreg));
}
break;
/* dst = dst | val */
case BPF_OR:
if (is_imm8(val))
EMIT3(0x83, add_1reg(0xC8, dreg), val);
else
EMIT2(0x09, add_2reg(0xC0, dreg, sreg));
break;
/* dst = dst & val */
case BPF_AND:
if (is_imm8(val))
EMIT3(0x83, add_1reg(0xE0, dreg), val);
else
EMIT2(0x21, add_2reg(0xC0, dreg, sreg));
break;
/* dst = dst ^ val */
case BPF_XOR:
if (is_imm8(val))
EMIT3(0x83, add_1reg(0xF0, dreg), val);
else
EMIT2(0x31, add_2reg(0xC0, dreg, sreg));
break;
case BPF_NEG:
EMIT2(0xF7, add_1reg(0xD8, dreg));
break;
}
if (dstk)
/* mov dword ptr [ebp+off],dreg */
EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg),
STACK_VAR(dst));
*pprog = prog;
}
/* ALU operation (64 bit) */
static inline void emit_ia32_alu_i64(const bool is64, const u8 op,
const u8 dst[], const u32 val,
bool dstk, u8 **pprog,
const struct bpf_prog_aux *aux)
{
u8 *prog = *pprog;
u32 hi = 0;
if (is64 && (val & (1<<31)))
hi = (u32)~0;
emit_ia32_alu_i(is64, false, op, dst_lo, val, dstk, &prog);
if (is64)
emit_ia32_alu_i(is64, true, op, dst_hi, hi, dstk, &prog);
else if (!aux->verifier_zext)
emit_ia32_mov_i(dst_hi, 0, dstk, &prog);
*pprog = prog;
}
/* dst = -dst (64 bit) */
static inline void emit_ia32_neg64(const u8 dst[], bool dstk, u8 **pprog)
{
u8 *prog = *pprog;
int cnt = 0;
u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
if (dstk) {
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
STACK_VAR(dst_lo));
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX),
STACK_VAR(dst_hi));
}
/* neg dreg_lo */
EMIT2(0xF7, add_1reg(0xD8, dreg_lo));
/* adc dreg_hi,0x0 */
EMIT3(0x83, add_1reg(0xD0, dreg_hi), 0x00);
/* neg dreg_hi */
EMIT2(0xF7, add_1reg(0xD8, dreg_hi));
if (dstk) {
/* mov dword ptr [ebp+off],dreg_lo */
EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_lo),
STACK_VAR(dst_lo));
/* mov dword ptr [ebp+off],dreg_hi */
EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_hi),
STACK_VAR(dst_hi));
}
*pprog = prog;
}
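/*
 * Note on the 64 bit shift helpers below: x86 masks a shift count in CL
 * to 5 bits for 32 bit operands, so shld/shrd/shl/shr/sar alone only
 * handle counts 0-31. For counts >= 32 the helpers fix up the result by
 * moving one half into the other and clearing (or sign-filling) the
 * vacated half.
 */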
/* dst = dst << src */
static inline void emit_ia32_lsh_r64(const u8 dst[], const u8 src[],
bool dstk, bool sstk, u8 **pprog)
{
u8 *prog = *pprog;
int cnt = 0;
u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
if (dstk) {
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
STACK_VAR(dst_lo));
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX),
STACK_VAR(dst_hi));
}
if (sstk)
/* mov ecx,dword ptr [ebp+off] */
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX),
STACK_VAR(src_lo));
else
/* mov ecx,src_lo */
EMIT2(0x8B, add_2reg(0xC0, src_lo, IA32_ECX));
/* shld dreg_hi,dreg_lo,cl */
EMIT3(0x0F, 0xA5, add_2reg(0xC0, dreg_hi, dreg_lo));
/* shl dreg_lo,cl */
EMIT2(0xD3, add_1reg(0xE0, dreg_lo));
/* if ecx >= 32, mov dreg_lo into dreg_hi and clear dreg_lo */
/* cmp ecx,32 */
EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 32);
/* skip the next two instructions (4 bytes) when < 32 */
EMIT2(IA32_JB, 4);
/* mov dreg_hi,dreg_lo */
EMIT2(0x89, add_2reg(0xC0, dreg_hi, dreg_lo));
/* xor dreg_lo,dreg_lo */
EMIT2(0x33, add_2reg(0xC0, dreg_lo, dreg_lo));
if (dstk) {
/* mov dword ptr [ebp+off],dreg_lo */
EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_lo),
STACK_VAR(dst_lo));
/* mov dword ptr [ebp+off],dreg_hi */
EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_hi),
STACK_VAR(dst_hi));
}
/* out: */
*pprog = prog;
}
/* dst = dst >> src (signed)*/
static inline void emit_ia32_arsh_r64(const u8 dst[], const u8 src[],
bool dstk, bool sstk, u8 **pprog)
{
u8 *prog = *pprog;
int cnt = 0;
u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
if (dstk) {
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
STACK_VAR(dst_lo));
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX),
STACK_VAR(dst_hi));
}
if (sstk)
/* mov ecx,dword ptr [ebp+off] */
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX),
STACK_VAR(src_lo));
else
/* mov ecx,src_lo */
EMIT2(0x8B, add_2reg(0xC0, src_lo, IA32_ECX));
/* shrd dreg_lo,dreg_hi,cl */
EMIT3(0x0F, 0xAD, add_2reg(0xC0, dreg_lo, dreg_hi));
/* sar dreg_hi,cl */
EMIT2(0xD3, add_1reg(0xF8, dreg_hi));
/* if ecx >= 32, mov dreg_hi to dreg_lo and set/clear dreg_hi depending on sign */
/* cmp ecx,32 */
EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 32);
/* skip the next two instructions (5 bytes) when < 32 */
EMIT2(IA32_JB, 5);
/* mov dreg_lo,dreg_hi */
EMIT2(0x89, add_2reg(0xC0, dreg_lo, dreg_hi));
/* sar dreg_hi,31 */
EMIT3(0xC1, add_1reg(0xF8, dreg_hi), 31);
if (dstk) {
/* mov dword ptr [ebp+off],dreg_lo */
EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_lo),
STACK_VAR(dst_lo));
/* mov dword ptr [ebp+off],dreg_hi */
EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_hi),
STACK_VAR(dst_hi));
}
/* out: */
*pprog = prog;
}
/* dst = dst >> src */
static inline void emit_ia32_rsh_r64(const u8 dst[], const u8 src[], bool dstk,
bool sstk, u8 **pprog)
{
u8 *prog = *pprog;
int cnt = 0;
u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
if (dstk) {
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
STACK_VAR(dst_lo));
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX),
STACK_VAR(dst_hi));
}
if (sstk)
/* mov ecx,dword ptr [ebp+off] */
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX),
STACK_VAR(src_lo));
else
/* mov ecx,src_lo */
EMIT2(0x8B, add_2reg(0xC0, src_lo, IA32_ECX));
/* shrd dreg_lo,dreg_hi,cl */
EMIT3(0x0F, 0xAD, add_2reg(0xC0, dreg_lo, dreg_hi));
/* shr dreg_hi,cl */
EMIT2(0xD3, add_1reg(0xE8, dreg_hi));
/* if ecx >= 32, mov dreg_hi to dreg_lo and clear dreg_hi */
/* cmp ecx,32 */
EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 32);
/* skip the next two instructions (4 bytes) when < 32 */
EMIT2(IA32_JB, 4);
/* mov dreg_lo,dreg_hi */
EMIT2(0x89, add_2reg(0xC0, dreg_lo, dreg_hi));
/* xor dreg_hi,dreg_hi */
EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi));
if (dstk) {
/* mov dword ptr [ebp+off],dreg_lo */
EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_lo),
STACK_VAR(dst_lo));
/* mov dword ptr [ebp+off],dreg_hi */
EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_hi),
STACK_VAR(dst_hi));
}
/* out: */
*pprog = prog;
}
/* dst = dst << val */
static inline void emit_ia32_lsh_i64(const u8 dst[], const u32 val,
bool dstk, u8 **pprog)
{
u8 *prog = *pprog;
int cnt = 0;
u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
if (dstk) {
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
STACK_VAR(dst_lo));
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX),
STACK_VAR(dst_hi));
}
/* Do LSH operation */
if (val < 32) {
/* shld dreg_hi,dreg_lo,imm8 */
EMIT4(0x0F, 0xA4, add_2reg(0xC0, dreg_hi, dreg_lo), val);
/* shl dreg_lo,imm8 */
EMIT3(0xC1, add_1reg(0xE0, dreg_lo), val);
} else if (val >= 32 && val < 64) {
u32 value = val - 32;
/* shl dreg_lo,imm8 */
EMIT3(0xC1, add_1reg(0xE0, dreg_lo), value);
/* mov dreg_hi,dreg_lo */
EMIT2(0x89, add_2reg(0xC0, dreg_hi, dreg_lo));
/* xor dreg_lo,dreg_lo */
EMIT2(0x33, add_2reg(0xC0, dreg_lo, dreg_lo));
} else {
/* xor dreg_lo,dreg_lo */
EMIT2(0x33, add_2reg(0xC0, dreg_lo, dreg_lo));
/* xor dreg_hi,dreg_hi */
EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi));
}
if (dstk) {
/* mov dword ptr [ebp+off],dreg_lo */
EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_lo),
STACK_VAR(dst_lo));
/* mov dword ptr [ebp+off],dreg_hi */
EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_hi),
STACK_VAR(dst_hi));
}
*pprog = prog;
}
/* dst = dst >> val */
static inline void emit_ia32_rsh_i64(const u8 dst[], const u32 val,
bool dstk, u8 **pprog)
{
u8 *prog = *pprog;
int cnt = 0;
u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
if (dstk) {
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
STACK_VAR(dst_lo));
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX),
STACK_VAR(dst_hi));
}
/* Do RSH operation */
if (val < 32) {
/* shrd dreg_lo,dreg_hi,imm8 */
EMIT4(0x0F, 0xAC, add_2reg(0xC0, dreg_lo, dreg_hi), val);
/* shr dreg_hi,imm8 */
EMIT3(0xC1, add_1reg(0xE8, dreg_hi), val);
} else if (val >= 32 && val < 64) {
u32 value = val - 32;
/* shr dreg_hi,imm8 */
EMIT3(0xC1, add_1reg(0xE8, dreg_hi), value);
/* mov dreg_lo,dreg_hi */
EMIT2(0x89, add_2reg(0xC0, dreg_lo, dreg_hi));
/* xor dreg_hi,dreg_hi */
EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi));
} else {
/* xor dreg_lo,dreg_lo */
EMIT2(0x33, add_2reg(0xC0, dreg_lo, dreg_lo));
/* xor dreg_hi,dreg_hi */
EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi));
}
if (dstk) {
/* mov dword ptr [ebp+off],dreg_lo */
EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_lo),
STACK_VAR(dst_lo));
/* mov dword ptr [ebp+off],dreg_hi */
EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_hi),
STACK_VAR(dst_hi));
}
*pprog = prog;
}
/* dst = dst >> val (signed) */
static inline void emit_ia32_arsh_i64(const u8 dst[], const u32 val,
bool dstk, u8 **pprog)
{
u8 *prog = *pprog;
int cnt = 0;
u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
if (dstk) {
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
STACK_VAR(dst_lo));
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX),
STACK_VAR(dst_hi));
}
/* Do RSH operation */
if (val < 32) {
/* shrd dreg_lo,dreg_hi,imm8 */
EMIT4(0x0F, 0xAC, add_2reg(0xC0, dreg_lo, dreg_hi), val);
		/* sar dreg_hi,imm8 */
EMIT3(0xC1, add_1reg(0xF8, dreg_hi), val);
} else if (val >= 32 && val < 64) {
u32 value = val - 32;
		/* sar dreg_hi,imm8 */
EMIT3(0xC1, add_1reg(0xF8, dreg_hi), value);
/* mov dreg_lo,dreg_hi */
EMIT2(0x89, add_2reg(0xC0, dreg_lo, dreg_hi));
		/* sar dreg_hi,31 */
EMIT3(0xC1, add_1reg(0xF8, dreg_hi), 31);
} else {
		/* sar dreg_hi,31 */
EMIT3(0xC1, add_1reg(0xF8, dreg_hi), 31);
/* mov dreg_lo,dreg_hi */
EMIT2(0x89, add_2reg(0xC0, dreg_lo, dreg_hi));
}
if (dstk) {
/* mov dword ptr [ebp+off],dreg_lo */
EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_lo),
STACK_VAR(dst_lo));
/* mov dword ptr [ebp+off],dreg_hi */
EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_hi),
STACK_VAR(dst_hi));
}
*pprog = prog;
}
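/*
 * 64 bit multiply via 32 bit schoolbook decomposition:
 * dst * src = (dst_lo * src_lo)
 *             + ((dst_lo * src_hi + dst_hi * src_lo) << 32)
 * The high halves of the cross products and the dst_hi * src_hi term
 * fall entirely outside the 64 bit result and are dropped.
 */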
static inline void emit_ia32_mul_r64(const u8 dst[], const u8 src[], bool dstk,
bool sstk, u8 **pprog)
{
u8 *prog = *pprog;
int cnt = 0;
if (dstk)
/* mov eax,dword ptr [ebp+off] */
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
STACK_VAR(dst_hi));
else
/* mov eax,dst_hi */
EMIT2(0x8B, add_2reg(0xC0, dst_hi, IA32_EAX));
if (sstk)
/* mul dword ptr [ebp+off] */
EMIT3(0xF7, add_1reg(0x60, IA32_EBP), STACK_VAR(src_lo));
else
/* mul src_lo */
EMIT2(0xF7, add_1reg(0xE0, src_lo));
/* mov ecx,eax */
EMIT2(0x89, add_2reg(0xC0, IA32_ECX, IA32_EAX));
if (dstk)
/* mov eax,dword ptr [ebp+off] */
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
STACK_VAR(dst_lo));
else
/* mov eax,dst_lo */
EMIT2(0x8B, add_2reg(0xC0, dst_lo, IA32_EAX));
if (sstk)
/* mul dword ptr [ebp+off] */
EMIT3(0xF7, add_1reg(0x60, IA32_EBP), STACK_VAR(src_hi));
else
/* mul src_hi */
EMIT2(0xF7, add_1reg(0xE0, src_hi));
	/* add ecx,eax */
EMIT2(0x01, add_2reg(0xC0, IA32_ECX, IA32_EAX));
if (dstk)
/* mov eax,dword ptr [ebp+off] */
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
STACK_VAR(dst_lo));
else
/* mov eax,dst_lo */
EMIT2(0x8B, add_2reg(0xC0, dst_lo, IA32_EAX));
if (sstk)
/* mul dword ptr [ebp+off] */
EMIT3(0xF7, add_1reg(0x60, IA32_EBP), STACK_VAR(src_lo));
else
/* mul src_lo */
EMIT2(0xF7, add_1reg(0xE0, src_lo));
/* add ecx,edx */
EMIT2(0x01, add_2reg(0xC0, IA32_ECX, IA32_EDX));
if (dstk) {
/* mov dword ptr [ebp+off],eax */
EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EAX),
STACK_VAR(dst_lo));
/* mov dword ptr [ebp+off],ecx */
EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_ECX),
STACK_VAR(dst_hi));
} else {
/* mov dst_lo,eax */
EMIT2(0x89, add_2reg(0xC0, dst_lo, IA32_EAX));
/* mov dst_hi,ecx */
EMIT2(0x89, add_2reg(0xC0, dst_hi, IA32_ECX));
}
*pprog = prog;
}
static inline void emit_ia32_mul_i64(const u8 dst[], const u32 val,
bool dstk, u8 **pprog)
{
u8 *prog = *pprog;
int cnt = 0;
u32 hi;
hi = val & (1<<31) ? (u32)~0 : 0;
/* movl eax,imm32 */
EMIT2_off32(0xC7, add_1reg(0xC0, IA32_EAX), val);
if (dstk)
/* mul dword ptr [ebp+off] */
EMIT3(0xF7, add_1reg(0x60, IA32_EBP), STACK_VAR(dst_hi));
else
/* mul dst_hi */
EMIT2(0xF7, add_1reg(0xE0, dst_hi));
/* mov ecx,eax */
EMIT2(0x89, add_2reg(0xC0, IA32_ECX, IA32_EAX));
/* movl eax,imm32 */
EMIT2_off32(0xC7, add_1reg(0xC0, IA32_EAX), hi);
if (dstk)
/* mul dword ptr [ebp+off] */
EMIT3(0xF7, add_1reg(0x60, IA32_EBP), STACK_VAR(dst_lo));
else
/* mul dst_lo */
EMIT2(0xF7, add_1reg(0xE0, dst_lo));
/* add ecx,eax */
EMIT2(0x01, add_2reg(0xC0, IA32_ECX, IA32_EAX));
/* movl eax,imm32 */
EMIT2_off32(0xC7, add_1reg(0xC0, IA32_EAX), val);
if (dstk)
/* mul dword ptr [ebp+off] */
EMIT3(0xF7, add_1reg(0x60, IA32_EBP), STACK_VAR(dst_lo));
else
/* mul dst_lo */
EMIT2(0xF7, add_1reg(0xE0, dst_lo));
/* add ecx,edx */
EMIT2(0x01, add_2reg(0xC0, IA32_ECX, IA32_EDX));
if (dstk) {
/* mov dword ptr [ebp+off],eax */
EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EAX),
STACK_VAR(dst_lo));
/* mov dword ptr [ebp+off],ecx */
EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_ECX),
STACK_VAR(dst_hi));
} else {
		/* mov dst_lo,eax */
		EMIT2(0x89, add_2reg(0xC0, dst_lo, IA32_EAX));
		/* mov dst_hi,ecx */
		EMIT2(0x89, add_2reg(0xC0, dst_hi, IA32_ECX));
}
*pprog = prog;
}
static int bpf_size_to_x86_bytes(int bpf_size)
{
if (bpf_size == BPF_W)
return 4;
else if (bpf_size == BPF_H)
return 2;
else if (bpf_size == BPF_B)
return 1;
else if (bpf_size == BPF_DW)
return 4; /* imm32 */
else
return 0;
}
struct jit_context {
int cleanup_addr; /* Epilogue code offset */
};
/* Maximum number of bytes emitted while JITing one eBPF insn */
#define BPF_MAX_INSN_SIZE 128
#define BPF_INSN_SAFETY 64
#define PROLOGUE_SIZE 35
/*
 * Emit prologue code for BPF program and check its size.
* bpf_tail_call helper will skip it while jumping into another program.
*/
static void emit_prologue(u8 **pprog, u32 stack_depth)
{
u8 *prog = *pprog;
int cnt = 0;
const u8 *r1 = bpf2ia32[BPF_REG_1];
const u8 fplo = bpf2ia32[BPF_REG_FP][0];
const u8 fphi = bpf2ia32[BPF_REG_FP][1];
const u8 *tcc = bpf2ia32[TCALL_CNT];
/* push ebp */
EMIT1(0x55);
/* mov ebp,esp */
EMIT2(0x89, 0xE5);
/* push edi */
EMIT1(0x57);
/* push esi */
EMIT1(0x56);
/* push ebx */
EMIT1(0x53);
/* sub esp,STACK_SIZE */
EMIT2_off32(0x81, 0xEC, STACK_SIZE);
/* sub ebp,SCRATCH_SIZE+12*/
EMIT3(0x83, add_1reg(0xE8, IA32_EBP), SCRATCH_SIZE + 12);
/* xor ebx,ebx */
EMIT2(0x31, add_2reg(0xC0, IA32_EBX, IA32_EBX));
/* Set up BPF prog stack base register */
EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EBP), STACK_VAR(fplo));
EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EBX), STACK_VAR(fphi));
/* Move BPF_CTX (EAX) to BPF_REG_R1 */
/* mov dword ptr [ebp+off],eax */
EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EAX), STACK_VAR(r1[0]));
EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EBX), STACK_VAR(r1[1]));
	/* Initialize tail call count */
EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EBX), STACK_VAR(tcc[0]));
EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EBX), STACK_VAR(tcc[1]));
BUILD_BUG_ON(cnt != PROLOGUE_SIZE);
*pprog = prog;
}
/* Emit epilogue code for BPF program */
static void emit_epilogue(u8 **pprog, u32 stack_depth)
{
u8 *prog = *pprog;
const u8 *r0 = bpf2ia32[BPF_REG_0];
int cnt = 0;
/* mov eax,dword ptr [ebp+off]*/
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), STACK_VAR(r0[0]));
/* mov edx,dword ptr [ebp+off]*/
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX), STACK_VAR(r0[1]));
/* add ebp,SCRATCH_SIZE+12*/
EMIT3(0x83, add_1reg(0xC0, IA32_EBP), SCRATCH_SIZE + 12);
/* mov ebx,dword ptr [ebp-12]*/
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EBX), -12);
/* mov esi,dword ptr [ebp-8]*/
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ESI), -8);
/* mov edi,dword ptr [ebp-4]*/
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDI), -4);
EMIT1(0xC9); /* leave */
EMIT1(0xC3); /* ret */
*pprog = prog;
}
static int emit_jmp_edx(u8 **pprog, u8 *ip)
{
u8 *prog = *pprog;
int cnt = 0;
#ifdef CONFIG_RETPOLINE
EMIT1_off32(0xE9, (u8 *)__x86_indirect_thunk_edx - (ip + 5));
#else
EMIT2(0xFF, 0xE2);
#endif
*pprog = prog;
return cnt;
}
/*
* Generate the following code:
* ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
* if (index >= array->map.max_entries)
* goto out;
* if (++tail_call_cnt > MAX_TAIL_CALL_CNT)
* goto out;
* prog = array->ptrs[index];
* if (prog == NULL)
* goto out;
* goto *(prog->bpf_func + prologue_size);
* out:
*/
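/*
 * The tail call count is kept as a 64 bit value in two 32 bit stack
 * slots (tcc[0]/tcc[1]), so the limit check below is a two-word compare
 * and the increment is an add/adc pair.
 */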
static void emit_bpf_tail_call(u8 **pprog, u8 *ip)
{
u8 *prog = *pprog;
int cnt = 0;
const u8 *r1 = bpf2ia32[BPF_REG_1];
const u8 *r2 = bpf2ia32[BPF_REG_2];
const u8 *r3 = bpf2ia32[BPF_REG_3];
const u8 *tcc = bpf2ia32[TCALL_CNT];
u32 lo, hi;
static int jmp_label1 = -1;
/*
* if (index >= array->map.max_entries)
* goto out;
*/
/* mov eax,dword ptr [ebp+off] */
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), STACK_VAR(r2[0]));
/* mov edx,dword ptr [ebp+off] */
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX), STACK_VAR(r3[0]));
/* cmp dword ptr [eax+off],edx */
EMIT3(0x39, add_2reg(0x40, IA32_EAX, IA32_EDX),
offsetof(struct bpf_array, map.max_entries));
/* jbe out */
EMIT2(IA32_JBE, jmp_label(jmp_label1, 2));
/*
* if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
* goto out;
*/
lo = (u32)MAX_TAIL_CALL_CNT;
hi = (u32)((u64)MAX_TAIL_CALL_CNT >> 32);
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX), STACK_VAR(tcc[0]));
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EBX), STACK_VAR(tcc[1]));
	/* cmp ebx,hi */
EMIT3(0x83, add_1reg(0xF8, IA32_EBX), hi);
EMIT2(IA32_JNE, 3);
/* cmp ecx,lo */
EMIT3(0x83, add_1reg(0xF8, IA32_ECX), lo);
/* jae out */
EMIT2(IA32_JAE, jmp_label(jmp_label1, 2));
	/* add ecx,0x1 */
	EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 0x01);
	/* adc ebx,0x0 */
	EMIT3(0x83, add_1reg(0xD0, IA32_EBX), 0x00);
	/* mov dword ptr [ebp+off],ecx */
	EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_ECX), STACK_VAR(tcc[0]));
	/* mov dword ptr [ebp+off],ebx */
	EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EBX), STACK_VAR(tcc[1]));
/* prog = array->ptrs[index]; */
/* mov edx, [eax + edx * 4 + offsetof(...)] */
EMIT3_off32(0x8B, 0x94, 0x90, offsetof(struct bpf_array, ptrs));
/*
* if (prog == NULL)
* goto out;
*/
/* test edx,edx */
EMIT2(0x85, add_2reg(0xC0, IA32_EDX, IA32_EDX));
/* je out */
EMIT2(IA32_JE, jmp_label(jmp_label1, 2));
/* goto *(prog->bpf_func + prologue_size); */
	/* mov edx, dword ptr [edx + offsetof(struct bpf_prog, bpf_func)] */
EMIT3(0x8B, add_2reg(0x40, IA32_EDX, IA32_EDX),
offsetof(struct bpf_prog, bpf_func));
/* add edx,prologue_size */
EMIT3(0x83, add_1reg(0xC0, IA32_EDX), PROLOGUE_SIZE);
/* mov eax,dword ptr [ebp+off] */
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), STACK_VAR(r1[0]));
/*
* Now we're ready to jump into next BPF program:
* eax == ctx (1st arg)
* edx == prog->bpf_func + prologue_size
*/
cnt += emit_jmp_edx(&prog, ip + cnt);
if (jmp_label1 == -1)
jmp_label1 = cnt;
/* out: */
*pprog = prog;
}
/* Push a 64 bit eBPF register, stored in stack scratch space, onto the CPU stack. */
static inline void emit_push_r64(const u8 src[], u8 **pprog)
{
u8 *prog = *pprog;
int cnt = 0;
/* mov ecx,dword ptr [ebp+off] */
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX), STACK_VAR(src_hi));
/* push ecx */
EMIT1(0x51);
/* mov ecx,dword ptr [ebp+off] */
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX), STACK_VAR(src_lo));
/* push ecx */
EMIT1(0x51);
*pprog = prog;
}
static void emit_push_r32(const u8 src[], u8 **pprog)
{
u8 *prog = *pprog;
int cnt = 0;
/* mov ecx,dword ptr [ebp+off] */
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX), STACK_VAR(src_lo));
/* push ecx */
EMIT1(0x51);
*pprog = prog;
}
static u8 get_cond_jmp_opcode(const u8 op, bool is_cmp_lo)
{
u8 jmp_cond;
/* Convert BPF opcode to x86 */
switch (op) {
case BPF_JEQ:
jmp_cond = IA32_JE;
break;
case BPF_JSET:
case BPF_JNE:
jmp_cond = IA32_JNE;
break;
case BPF_JGT:
/* GT is unsigned '>', JA in x86 */
jmp_cond = IA32_JA;
break;
case BPF_JLT:
/* LT is unsigned '<', JB in x86 */
jmp_cond = IA32_JB;
break;
case BPF_JGE:
/* GE is unsigned '>=', JAE in x86 */
jmp_cond = IA32_JAE;
break;
case BPF_JLE:
/* LE is unsigned '<=', JBE in x86 */
jmp_cond = IA32_JBE;
break;
case BPF_JSGT:
if (!is_cmp_lo)
/* Signed '>', GT in x86 */
jmp_cond = IA32_JG;
else
/* GT is unsigned '>', JA in x86 */
jmp_cond = IA32_JA;
break;
case BPF_JSLT:
if (!is_cmp_lo)
/* Signed '<', LT in x86 */
jmp_cond = IA32_JL;
else
/* LT is unsigned '<', JB in x86 */
jmp_cond = IA32_JB;
break;
case BPF_JSGE:
if (!is_cmp_lo)
/* Signed '>=', GE in x86 */
jmp_cond = IA32_JGE;
else
/* GE is unsigned '>=', JAE in x86 */
jmp_cond = IA32_JAE;
break;
case BPF_JSLE:
if (!is_cmp_lo)
/* Signed '<=', LE in x86 */
jmp_cond = IA32_JLE;
else
/* LE is unsigned '<=', JBE in x86 */
jmp_cond = IA32_JBE;
break;
default: /* to silence GCC warning */
jmp_cond = COND_JMP_OPCODE_INVALID;
break;
}
return jmp_cond;
}
/* The i386 kernel compiles with "-mregparm=3". From the gcc documentation:
*
* ==== snippet ====
* regparm (number)
* On x86-32 targets, the regparm attribute causes the compiler
* to pass arguments number one to (number) if they are of integral
* type in registers EAX, EDX, and ECX instead of on the stack.
* Functions that take a variable number of arguments continue
* to be passed all of their arguments on the stack.
* ==== snippet ====
*
 * The first three args of a function will be considered for
 * putting into the 32bit registers EAX, EDX, and ECX.
*
* Two 32bit registers are used to pass a 64bit arg.
*
* For example,
* void foo(u32 a, u32 b, u32 c, u32 d):
* u32 a: EAX
* u32 b: EDX
* u32 c: ECX
* u32 d: stack
*
* void foo(u64 a, u32 b, u32 c):
* u64 a: EAX (lo32) EDX (hi32)
* u32 b: ECX
* u32 c: stack
*
* void foo(u32 a, u64 b, u32 c):
* u32 a: EAX
* u64 b: EDX (lo32) ECX (hi32)
* u32 c: stack
*
* void foo(u32 a, u32 b, u64 c):
* u32 a: EAX
* u32 b: EDX
* u64 c: stack
*
* The return value will be stored in the EAX (and EDX for 64bit value).
*
* For example,
* u32 foo(u32 a, u32 b, u32 c):
* return value: EAX
*
* u64 foo(u32 a, u32 b, u32 c):
* return value: EAX (lo32) EDX (hi32)
*
* Notes:
 * The verifier only accepts functions taking integers and pointers
 * as their args and return value, so there is no
 * struct-by-value.
*
* emit_kfunc_call() finds out the btf_func_model by calling
* bpf_jit_find_kfunc_model(). A btf_func_model
* has the details about the number of args, size of each arg,
* and the size of the return value.
*
* It first decides how many args can be passed by EAX, EDX, and ECX.
* That will decide what args should be pushed to the stack:
* [first_stack_regno, last_stack_regno] are the bpf regnos
* that should be pushed to the stack.
*
 * It pushes the stack-bound args first because the push itself
 * needs ECX as a scratch register. Then, it moves
 * [BPF_REG_1, first_stack_regno) into EAX, EDX, and ECX.
*
* When emitting a call (0xE8), it needs to figure out
* the jmp_offset relative to the jit-insn address immediately
* following the call (0xE8) instruction. At this point, it knows
 * the address of the end of the jit-insns generated for the current
 * (BPF_JMP | BPF_CALL) bpf-insn; it is passed in as "end_addr" to
 * emit_kfunc_call(). Thus, it can derive the "immediately-following-call"
 * address by figuring out how many jit-insn bytes are emitted between
 * the call (0xE8) and the end_addr:
 * - 0-1 jit-insn (3 bytes) to restore the esp pointer if there
 *   are args pushed to the stack.
* - 0-2 jit-insns (3 bytes each) to handle the return value.
*/
static int emit_kfunc_call(const struct bpf_prog *bpf_prog, u8 *end_addr,
const struct bpf_insn *insn, u8 **pprog)
{
const u8 arg_regs[] = { IA32_EAX, IA32_EDX, IA32_ECX };
int i, cnt = 0, first_stack_regno, last_stack_regno;
int free_arg_regs = ARRAY_SIZE(arg_regs);
const struct btf_func_model *fm;
int bytes_in_stack = 0;
const u8 *cur_arg_reg;
u8 *prog = *pprog;
s64 jmp_offset;
fm = bpf_jit_find_kfunc_model(bpf_prog, insn);
if (!fm)
return -EINVAL;
first_stack_regno = BPF_REG_1;
for (i = 0; i < fm->nr_args; i++) {
int regs_needed = fm->arg_size[i] > sizeof(u32) ? 2 : 1;
if (regs_needed > free_arg_regs)
break;
free_arg_regs -= regs_needed;
first_stack_regno++;
}
/* Push the args to the stack */
last_stack_regno = BPF_REG_0 + fm->nr_args;
for (i = last_stack_regno; i >= first_stack_regno; i--) {
if (fm->arg_size[i - 1] > sizeof(u32)) {
emit_push_r64(bpf2ia32[i], &prog);
bytes_in_stack += 8;
} else {
emit_push_r32(bpf2ia32[i], &prog);
bytes_in_stack += 4;
}
}
cur_arg_reg = &arg_regs[0];
for (i = BPF_REG_1; i < first_stack_regno; i++) {
/* mov e[adc]x,dword ptr [ebp+off] */
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, *cur_arg_reg++),
STACK_VAR(bpf2ia32[i][0]));
if (fm->arg_size[i - 1] > sizeof(u32))
/* mov e[adc]x,dword ptr [ebp+off] */
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, *cur_arg_reg++),
STACK_VAR(bpf2ia32[i][1]));
}
if (bytes_in_stack)
/* add esp,"bytes_in_stack" */
end_addr -= 3;
/* mov dword ptr [ebp+off],edx */
if (fm->ret_size > sizeof(u32))
end_addr -= 3;
/* mov dword ptr [ebp+off],eax */
if (fm->ret_size)
end_addr -= 3;
jmp_offset = (u8 *)__bpf_call_base + insn->imm - end_addr;
if (!is_simm32(jmp_offset)) {
pr_err("unsupported BPF kernel function jmp_offset:%lld\n",
jmp_offset);
return -EINVAL;
}
EMIT1_off32(0xE8, jmp_offset);
if (fm->ret_size)
/* mov dword ptr [ebp+off],eax */
EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EAX),
STACK_VAR(bpf2ia32[BPF_REG_0][0]));
if (fm->ret_size > sizeof(u32))
/* mov dword ptr [ebp+off],edx */
EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EDX),
STACK_VAR(bpf2ia32[BPF_REG_0][1]));
if (bytes_in_stack)
/* add esp,"bytes_in_stack" */
EMIT3(0x83, add_1reg(0xC0, IA32_ESP), bytes_in_stack);
*pprog = prog;
return 0;
}
static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
int oldproglen, struct jit_context *ctx)
{
struct bpf_insn *insn = bpf_prog->insnsi;
int insn_cnt = bpf_prog->len;
bool seen_exit = false;
u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
int i, cnt = 0;
int proglen = 0;
u8 *prog = temp;
emit_prologue(&prog, bpf_prog->aux->stack_depth);
for (i = 0; i < insn_cnt; i++, insn++) {
const s32 imm32 = insn->imm;
const bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
const bool dstk = insn->dst_reg != BPF_REG_AX;
const bool sstk = insn->src_reg != BPF_REG_AX;
const u8 code = insn->code;
const u8 *dst = bpf2ia32[insn->dst_reg];
const u8 *src = bpf2ia32[insn->src_reg];
const u8 *r0 = bpf2ia32[BPF_REG_0];
s64 jmp_offset;
u8 jmp_cond;
int ilen;
u8 *func;
switch (code) {
/* ALU operations */
/* dst = src */
case BPF_ALU | BPF_MOV | BPF_K:
case BPF_ALU | BPF_MOV | BPF_X:
case BPF_ALU64 | BPF_MOV | BPF_K:
case BPF_ALU64 | BPF_MOV | BPF_X:
switch (BPF_SRC(code)) {
case BPF_X:
if (imm32 == 1) {
/* Special mov32 for zext. */
emit_ia32_mov_i(dst_hi, 0, dstk, &prog);
break;
}
emit_ia32_mov_r64(is64, dst, src, dstk, sstk,
&prog, bpf_prog->aux);
break;
case BPF_K:
/* Sign-extend immediate value to dst reg */
emit_ia32_mov_i64(is64, dst, imm32,
dstk, &prog);
break;
}
break;
/* dst = dst + src/imm */
/* dst = dst - src/imm */
/* dst = dst | src/imm */
/* dst = dst & src/imm */
/* dst = dst ^ src/imm */
/* dst = dst * src/imm */
/* dst = dst << src */
/* dst = dst >> src */
case BPF_ALU | BPF_ADD | BPF_K:
case BPF_ALU | BPF_ADD | BPF_X:
case BPF_ALU | BPF_SUB | BPF_K:
case BPF_ALU | BPF_SUB | BPF_X:
case BPF_ALU | BPF_OR | BPF_K:
case BPF_ALU | BPF_OR | BPF_X:
case BPF_ALU | BPF_AND | BPF_K:
case BPF_ALU | BPF_AND | BPF_X:
case BPF_ALU | BPF_XOR | BPF_K:
case BPF_ALU | BPF_XOR | BPF_X:
case BPF_ALU64 | BPF_ADD | BPF_K:
case BPF_ALU64 | BPF_ADD | BPF_X:
case BPF_ALU64 | BPF_SUB | BPF_K:
case BPF_ALU64 | BPF_SUB | BPF_X:
case BPF_ALU64 | BPF_OR | BPF_K:
case BPF_ALU64 | BPF_OR | BPF_X:
case BPF_ALU64 | BPF_AND | BPF_K:
case BPF_ALU64 | BPF_AND | BPF_X:
case BPF_ALU64 | BPF_XOR | BPF_K:
case BPF_ALU64 | BPF_XOR | BPF_X:
switch (BPF_SRC(code)) {
case BPF_X:
emit_ia32_alu_r64(is64, BPF_OP(code), dst,
src, dstk, sstk, &prog,
bpf_prog->aux);
break;
case BPF_K:
emit_ia32_alu_i64(is64, BPF_OP(code), dst,
imm32, dstk, &prog,
bpf_prog->aux);
break;
}
break;
case BPF_ALU | BPF_MUL | BPF_K:
case BPF_ALU | BPF_MUL | BPF_X:
switch (BPF_SRC(code)) {
case BPF_X:
emit_ia32_mul_r(dst_lo, src_lo, dstk,
sstk, &prog);
break;
case BPF_K:
/* mov ecx,imm32*/
EMIT2_off32(0xC7, add_1reg(0xC0, IA32_ECX),
imm32);
emit_ia32_mul_r(dst_lo, IA32_ECX, dstk,
false, &prog);
break;
}
if (!bpf_prog->aux->verifier_zext)
emit_ia32_mov_i(dst_hi, 0, dstk, &prog);
break;
case BPF_ALU | BPF_LSH | BPF_X:
case BPF_ALU | BPF_RSH | BPF_X:
case BPF_ALU | BPF_ARSH | BPF_K:
case BPF_ALU | BPF_ARSH | BPF_X:
switch (BPF_SRC(code)) {
case BPF_X:
emit_ia32_shift_r(BPF_OP(code), dst_lo, src_lo,
dstk, sstk, &prog);
break;
case BPF_K:
/* mov ecx,imm32*/
EMIT2_off32(0xC7, add_1reg(0xC0, IA32_ECX),
imm32);
emit_ia32_shift_r(BPF_OP(code), dst_lo,
IA32_ECX, dstk, false,
&prog);
break;
}
if (!bpf_prog->aux->verifier_zext)
emit_ia32_mov_i(dst_hi, 0, dstk, &prog);
break;
/* dst = dst / src(imm) */
/* dst = dst % src(imm) */
case BPF_ALU | BPF_DIV | BPF_K:
case BPF_ALU | BPF_DIV | BPF_X:
case BPF_ALU | BPF_MOD | BPF_K:
case BPF_ALU | BPF_MOD | BPF_X:
switch (BPF_SRC(code)) {
case BPF_X:
emit_ia32_div_mod_r(BPF_OP(code), dst_lo,
src_lo, dstk, sstk, &prog);
break;
case BPF_K:
/* mov ecx,imm32*/
EMIT2_off32(0xC7, add_1reg(0xC0, IA32_ECX),
imm32);
emit_ia32_div_mod_r(BPF_OP(code), dst_lo,
IA32_ECX, dstk, false,
&prog);
break;
}
if (!bpf_prog->aux->verifier_zext)
emit_ia32_mov_i(dst_hi, 0, dstk, &prog);
break;
case BPF_ALU64 | BPF_DIV | BPF_K:
case BPF_ALU64 | BPF_DIV | BPF_X:
case BPF_ALU64 | BPF_MOD | BPF_K:
case BPF_ALU64 | BPF_MOD | BPF_X:
goto notyet;
/* dst = dst >> imm */
/* dst = dst << imm */
case BPF_ALU | BPF_RSH | BPF_K:
case BPF_ALU | BPF_LSH | BPF_K:
if (unlikely(imm32 > 31))
return -EINVAL;
/* mov ecx,imm32*/
EMIT2_off32(0xC7, add_1reg(0xC0, IA32_ECX), imm32);
emit_ia32_shift_r(BPF_OP(code), dst_lo, IA32_ECX, dstk,
false, &prog);
if (!bpf_prog->aux->verifier_zext)
emit_ia32_mov_i(dst_hi, 0, dstk, &prog);
break;
/* dst = dst << imm */
case BPF_ALU64 | BPF_LSH | BPF_K:
if (unlikely(imm32 > 63))
return -EINVAL;
emit_ia32_lsh_i64(dst, imm32, dstk, &prog);
break;
/* dst = dst >> imm */
case BPF_ALU64 | BPF_RSH | BPF_K:
if (unlikely(imm32 > 63))
return -EINVAL;
emit_ia32_rsh_i64(dst, imm32, dstk, &prog);
break;
/* dst = dst << src */
case BPF_ALU64 | BPF_LSH | BPF_X:
emit_ia32_lsh_r64(dst, src, dstk, sstk, &prog);
break;
/* dst = dst >> src */
case BPF_ALU64 | BPF_RSH | BPF_X:
emit_ia32_rsh_r64(dst, src, dstk, sstk, &prog);
break;
/* dst = dst >> src (signed) */
case BPF_ALU64 | BPF_ARSH | BPF_X:
emit_ia32_arsh_r64(dst, src, dstk, sstk, &prog);
break;
/* dst = dst >> imm (signed) */
case BPF_ALU64 | BPF_ARSH | BPF_K:
if (unlikely(imm32 > 63))
return -EINVAL;
emit_ia32_arsh_i64(dst, imm32, dstk, &prog);
break;
		/* dst = -dst */
case BPF_ALU | BPF_NEG:
emit_ia32_alu_i(is64, false, BPF_OP(code),
dst_lo, 0, dstk, &prog);
if (!bpf_prog->aux->verifier_zext)
emit_ia32_mov_i(dst_hi, 0, dstk, &prog);
break;
		/* dst = -dst (64 bit) */
case BPF_ALU64 | BPF_NEG:
emit_ia32_neg64(dst, dstk, &prog);
break;
/* dst = dst * src/imm */
case BPF_ALU64 | BPF_MUL | BPF_X:
case BPF_ALU64 | BPF_MUL | BPF_K:
switch (BPF_SRC(code)) {
case BPF_X:
emit_ia32_mul_r64(dst, src, dstk, sstk, &prog);
break;
case BPF_K:
emit_ia32_mul_i64(dst, imm32, dstk, &prog);
break;
}
break;
/* dst = htole(dst) */
case BPF_ALU | BPF_END | BPF_FROM_LE:
emit_ia32_to_le_r64(dst, imm32, dstk, &prog,
bpf_prog->aux);
break;
/* dst = htobe(dst) */
case BPF_ALU | BPF_END | BPF_FROM_BE:
emit_ia32_to_be_r64(dst, imm32, dstk, &prog,
bpf_prog->aux);
break;
/* dst = imm64 */
case BPF_LD | BPF_IMM | BPF_DW: {
s32 hi, lo = imm32;
hi = insn[1].imm;
emit_ia32_mov_i(dst_lo, lo, dstk, &prog);
emit_ia32_mov_i(dst_hi, hi, dstk, &prog);
insn++;
i++;
break;
}
/* speculation barrier */
case BPF_ST | BPF_NOSPEC:
if (boot_cpu_has(X86_FEATURE_XMM2))
/* Emit 'lfence' */
EMIT3(0x0F, 0xAE, 0xE8);
break;
/* ST: *(u8*)(dst_reg + off) = imm */
case BPF_ST | BPF_MEM | BPF_H:
case BPF_ST | BPF_MEM | BPF_B:
case BPF_ST | BPF_MEM | BPF_W:
case BPF_ST | BPF_MEM | BPF_DW:
if (dstk)
/* mov eax,dword ptr [ebp+off] */
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
STACK_VAR(dst_lo));
else
/* mov eax,dst_lo */
EMIT2(0x8B, add_2reg(0xC0, dst_lo, IA32_EAX));
switch (BPF_SIZE(code)) {
case BPF_B:
EMIT(0xC6, 1); break;
case BPF_H:
EMIT2(0x66, 0xC7); break;
case BPF_W:
case BPF_DW:
EMIT(0xC7, 1); break;
}
if (is_imm8(insn->off))
EMIT2(add_1reg(0x40, IA32_EAX), insn->off);
else
EMIT1_off32(add_1reg(0x80, IA32_EAX),
insn->off);
EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(code)));
if (BPF_SIZE(code) == BPF_DW) {
u32 hi;
hi = imm32 & (1<<31) ? (u32)~0 : 0;
EMIT2_off32(0xC7, add_1reg(0x80, IA32_EAX),
insn->off + 4);
EMIT(hi, 4);
}
break;
/* STX: *(u8*)(dst_reg + off) = src_reg */
case BPF_STX | BPF_MEM | BPF_B:
case BPF_STX | BPF_MEM | BPF_H:
case BPF_STX | BPF_MEM | BPF_W:
case BPF_STX | BPF_MEM | BPF_DW:
if (dstk)
/* mov eax,dword ptr [ebp+off] */
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
STACK_VAR(dst_lo));
else
/* mov eax,dst_lo */
EMIT2(0x8B, add_2reg(0xC0, dst_lo, IA32_EAX));
if (sstk)
/* mov edx,dword ptr [ebp+off] */
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX),
STACK_VAR(src_lo));
else
/* mov edx,src_lo */
EMIT2(0x8B, add_2reg(0xC0, src_lo, IA32_EDX));
switch (BPF_SIZE(code)) {
case BPF_B:
EMIT(0x88, 1); break;
case BPF_H:
EMIT2(0x66, 0x89); break;
case BPF_W:
case BPF_DW:
EMIT(0x89, 1); break;
}
if (is_imm8(insn->off))
EMIT2(add_2reg(0x40, IA32_EAX, IA32_EDX),
insn->off);
else
EMIT1_off32(add_2reg(0x80, IA32_EAX, IA32_EDX),
insn->off);
if (BPF_SIZE(code) == BPF_DW) {
				if (sstk)
					/* mov edx,dword ptr [ebp+off] */
					EMIT3(0x8B, add_2reg(0x40, IA32_EBP,
							     IA32_EDX),
					      STACK_VAR(src_hi));
				else
					/* mov edx,src_hi */
					EMIT2(0x8B, add_2reg(0xC0, src_hi,
							     IA32_EDX));
EMIT1(0x89);
if (is_imm8(insn->off + 4)) {
EMIT2(add_2reg(0x40, IA32_EAX,
IA32_EDX),
insn->off + 4);
} else {
EMIT1(add_2reg(0x80, IA32_EAX,
IA32_EDX));
EMIT(insn->off + 4, 4);
}
}
break;
/* LDX: dst_reg = *(u8*)(src_reg + off) */
case BPF_LDX | BPF_MEM | BPF_B:
case BPF_LDX | BPF_MEM | BPF_H:
case BPF_LDX | BPF_MEM | BPF_W:
case BPF_LDX | BPF_MEM | BPF_DW:
if (sstk)
/* mov eax,dword ptr [ebp+off] */
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
STACK_VAR(src_lo));
			else
				/* mov eax,src_lo */
				EMIT2(0x8B, add_2reg(0xC0, src_lo, IA32_EAX));
switch (BPF_SIZE(code)) {
case BPF_B:
EMIT2(0x0F, 0xB6); break;
case BPF_H:
EMIT2(0x0F, 0xB7); break;
case BPF_W:
case BPF_DW:
EMIT(0x8B, 1); break;
}
if (is_imm8(insn->off))
EMIT2(add_2reg(0x40, IA32_EAX, IA32_EDX),
insn->off);
else
EMIT1_off32(add_2reg(0x80, IA32_EAX, IA32_EDX),
insn->off);
if (dstk)
/* mov dword ptr [ebp+off],edx */
EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EDX),
STACK_VAR(dst_lo));
else
/* mov dst_lo,edx */
EMIT2(0x89, add_2reg(0xC0, dst_lo, IA32_EDX));
switch (BPF_SIZE(code)) {
case BPF_B:
case BPF_H:
case BPF_W:
if (bpf_prog->aux->verifier_zext)
break;
if (dstk) {
EMIT3(0xC7, add_1reg(0x40, IA32_EBP),
STACK_VAR(dst_hi));
EMIT(0x0, 4);
} else {
/* xor dst_hi,dst_hi */
EMIT2(0x33,
add_2reg(0xC0, dst_hi, dst_hi));
}
break;
case BPF_DW:
EMIT2_off32(0x8B,
add_2reg(0x80, IA32_EAX, IA32_EDX),
insn->off + 4);
if (dstk)
EMIT3(0x89,
add_2reg(0x40, IA32_EBP,
IA32_EDX),
STACK_VAR(dst_hi));
else
EMIT2(0x89,
add_2reg(0xC0, dst_hi, IA32_EDX));
break;
default:
break;
}
break;
/* call */
case BPF_JMP | BPF_CALL:
{
const u8 *r1 = bpf2ia32[BPF_REG_1];
const u8 *r2 = bpf2ia32[BPF_REG_2];
const u8 *r3 = bpf2ia32[BPF_REG_3];
const u8 *r4 = bpf2ia32[BPF_REG_4];
const u8 *r5 = bpf2ia32[BPF_REG_5];
if (insn->src_reg == BPF_PSEUDO_CALL)
goto notyet;
if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
int err;
err = emit_kfunc_call(bpf_prog,
image + addrs[i],
insn, &prog);
if (err)
return err;
break;
}
func = (u8 *) __bpf_call_base + imm32;
jmp_offset = func - (image + addrs[i]);
if (!imm32 || !is_simm32(jmp_offset)) {
pr_err("unsupported BPF func %d addr %p image %p\n",
imm32, func, image);
return -EINVAL;
}
/* mov eax,dword ptr [ebp+off] */
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
STACK_VAR(r1[0]));
/* mov edx,dword ptr [ebp+off] */
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX),
STACK_VAR(r1[1]));
emit_push_r64(r5, &prog);
emit_push_r64(r4, &prog);
emit_push_r64(r3, &prog);
emit_push_r64(r2, &prog);
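			/*
			 * addrs[i] points at the end of this bpf-insn's jit
			 * code; the "+ 9" below skips back over the three
			 * 3-byte insns emitted after the call (two movs and
			 * the add esp).
			 */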
EMIT1_off32(0xE8, jmp_offset + 9);
/* mov dword ptr [ebp+off],eax */
EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EAX),
STACK_VAR(r0[0]));
/* mov dword ptr [ebp+off],edx */
EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EDX),
STACK_VAR(r0[1]));
/* add esp,32 */
EMIT3(0x83, add_1reg(0xC0, IA32_ESP), 32);
break;
}
case BPF_JMP | BPF_TAIL_CALL:
emit_bpf_tail_call(&prog, image + addrs[i - 1]);
break;
/* cond jump */
case BPF_JMP | BPF_JEQ | BPF_X:
case BPF_JMP | BPF_JNE | BPF_X:
case BPF_JMP | BPF_JGT | BPF_X:
case BPF_JMP | BPF_JLT | BPF_X:
case BPF_JMP | BPF_JGE | BPF_X:
case BPF_JMP | BPF_JLE | BPF_X:
case BPF_JMP32 | BPF_JEQ | BPF_X:
case BPF_JMP32 | BPF_JNE | BPF_X:
case BPF_JMP32 | BPF_JGT | BPF_X:
case BPF_JMP32 | BPF_JLT | BPF_X:
case BPF_JMP32 | BPF_JGE | BPF_X:
case BPF_JMP32 | BPF_JLE | BPF_X:
case BPF_JMP32 | BPF_JSGT | BPF_X:
case BPF_JMP32 | BPF_JSLE | BPF_X:
case BPF_JMP32 | BPF_JSLT | BPF_X:
case BPF_JMP32 | BPF_JSGE | BPF_X: {
bool is_jmp64 = BPF_CLASS(insn->code) == BPF_JMP;
u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
u8 sreg_lo = sstk ? IA32_ECX : src_lo;
u8 sreg_hi = sstk ? IA32_EBX : src_hi;
if (dstk) {
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
STACK_VAR(dst_lo));
if (is_jmp64)
EMIT3(0x8B,
add_2reg(0x40, IA32_EBP,
IA32_EDX),
STACK_VAR(dst_hi));
}
if (sstk) {
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX),
STACK_VAR(src_lo));
if (is_jmp64)
EMIT3(0x8B,
add_2reg(0x40, IA32_EBP,
IA32_EBX),
STACK_VAR(src_hi));
}
if (is_jmp64) {
/* cmp dreg_hi,sreg_hi */
EMIT2(0x39, add_2reg(0xC0, dreg_hi, sreg_hi));
EMIT2(IA32_JNE, 2);
}
/* cmp dreg_lo,sreg_lo */
EMIT2(0x39, add_2reg(0xC0, dreg_lo, sreg_lo));
goto emit_cond_jmp;
}
case BPF_JMP | BPF_JSGT | BPF_X:
case BPF_JMP | BPF_JSLE | BPF_X:
case BPF_JMP | BPF_JSLT | BPF_X:
case BPF_JMP | BPF_JSGE | BPF_X: {
u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
u8 sreg_lo = sstk ? IA32_ECX : src_lo;
u8 sreg_hi = sstk ? IA32_EBX : src_hi;
if (dstk) {
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
STACK_VAR(dst_lo));
EMIT3(0x8B,
add_2reg(0x40, IA32_EBP,
IA32_EDX),
STACK_VAR(dst_hi));
}
if (sstk) {
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX),
STACK_VAR(src_lo));
EMIT3(0x8B,
add_2reg(0x40, IA32_EBP,
IA32_EBX),
STACK_VAR(src_hi));
}
/* cmp dreg_hi,sreg_hi */
EMIT2(0x39, add_2reg(0xC0, dreg_hi, sreg_hi));
EMIT2(IA32_JNE, 10);
/* cmp dreg_lo,sreg_lo */
EMIT2(0x39, add_2reg(0xC0, dreg_lo, sreg_lo));
goto emit_cond_jmp_signed;
}
case BPF_JMP | BPF_JSET | BPF_X:
case BPF_JMP32 | BPF_JSET | BPF_X: {
bool is_jmp64 = BPF_CLASS(insn->code) == BPF_JMP;
u8 dreg_lo = IA32_EAX;
u8 dreg_hi = IA32_EDX;
u8 sreg_lo = sstk ? IA32_ECX : src_lo;
u8 sreg_hi = sstk ? IA32_EBX : src_hi;
if (dstk) {
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
STACK_VAR(dst_lo));
if (is_jmp64)
EMIT3(0x8B,
add_2reg(0x40, IA32_EBP,
IA32_EDX),
STACK_VAR(dst_hi));
} else {
/* mov dreg_lo,dst_lo */
EMIT2(0x89, add_2reg(0xC0, dreg_lo, dst_lo));
if (is_jmp64)
/* mov dreg_hi,dst_hi */
EMIT2(0x89,
add_2reg(0xC0, dreg_hi, dst_hi));
}
if (sstk) {
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX),
STACK_VAR(src_lo));
if (is_jmp64)
EMIT3(0x8B,
add_2reg(0x40, IA32_EBP,
IA32_EBX),
STACK_VAR(src_hi));
}
/* and dreg_lo,sreg_lo */
EMIT2(0x23, add_2reg(0xC0, sreg_lo, dreg_lo));
if (is_jmp64) {
/* and dreg_hi,sreg_hi */
EMIT2(0x23, add_2reg(0xC0, sreg_hi, dreg_hi));
/* or dreg_lo,dreg_hi */
EMIT2(0x09, add_2reg(0xC0, dreg_lo, dreg_hi));
}
goto emit_cond_jmp;
}
case BPF_JMP | BPF_JSET | BPF_K:
case BPF_JMP32 | BPF_JSET | BPF_K: {
bool is_jmp64 = BPF_CLASS(insn->code) == BPF_JMP;
u8 dreg_lo = IA32_EAX;
u8 dreg_hi = IA32_EDX;
u8 sreg_lo = IA32_ECX;
u8 sreg_hi = IA32_EBX;
u32 hi;
if (dstk) {
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
STACK_VAR(dst_lo));
if (is_jmp64)
EMIT3(0x8B,
add_2reg(0x40, IA32_EBP,
IA32_EDX),
STACK_VAR(dst_hi));
} else {
/* mov dreg_lo,dst_lo */
EMIT2(0x89, add_2reg(0xC0, dreg_lo, dst_lo));
if (is_jmp64)
/* mov dreg_hi,dst_hi */
EMIT2(0x89,
add_2reg(0xC0, dreg_hi, dst_hi));
}
/* mov ecx,imm32 */
EMIT2_off32(0xC7, add_1reg(0xC0, sreg_lo), imm32);
/* and dreg_lo,sreg_lo */
EMIT2(0x23, add_2reg(0xC0, sreg_lo, dreg_lo));
if (is_jmp64) {
hi = imm32 & (1 << 31) ? (u32)~0 : 0;
/* mov ebx,imm32 */
EMIT2_off32(0xC7, add_1reg(0xC0, sreg_hi), hi);
/* and dreg_hi,sreg_hi */
EMIT2(0x23, add_2reg(0xC0, sreg_hi, dreg_hi));
/* or dreg_lo,dreg_hi */
EMIT2(0x09, add_2reg(0xC0, dreg_lo, dreg_hi));
}
goto emit_cond_jmp;
}
case BPF_JMP | BPF_JEQ | BPF_K:
case BPF_JMP | BPF_JNE | BPF_K:
case BPF_JMP | BPF_JGT | BPF_K:
case BPF_JMP | BPF_JLT | BPF_K:
case BPF_JMP | BPF_JGE | BPF_K:
case BPF_JMP | BPF_JLE | BPF_K:
case BPF_JMP32 | BPF_JEQ | BPF_K:
case BPF_JMP32 | BPF_JNE | BPF_K:
case BPF_JMP32 | BPF_JGT | BPF_K:
case BPF_JMP32 | BPF_JLT | BPF_K:
case BPF_JMP32 | BPF_JGE | BPF_K:
case BPF_JMP32 | BPF_JLE | BPF_K:
case BPF_JMP32 | BPF_JSGT | BPF_K:
case BPF_JMP32 | BPF_JSLE | BPF_K:
case BPF_JMP32 | BPF_JSLT | BPF_K:
case BPF_JMP32 | BPF_JSGE | BPF_K: {
bool is_jmp64 = BPF_CLASS(insn->code) == BPF_JMP;
u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
u8 sreg_lo = IA32_ECX;
u8 sreg_hi = IA32_EBX;
u32 hi;
if (dstk) {
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
STACK_VAR(dst_lo));
if (is_jmp64)
EMIT3(0x8B,
add_2reg(0x40, IA32_EBP,
IA32_EDX),
STACK_VAR(dst_hi));
}
/* mov ecx,imm32 */
EMIT2_off32(0xC7, add_1reg(0xC0, IA32_ECX), imm32);
if (is_jmp64) {
hi = imm32 & (1 << 31) ? (u32)~0 : 0;
/* mov ebx,imm32 */
EMIT2_off32(0xC7, add_1reg(0xC0, IA32_EBX), hi);
/* cmp dreg_hi,sreg_hi */
EMIT2(0x39, add_2reg(0xC0, dreg_hi, sreg_hi));
EMIT2(IA32_JNE, 2);
}
/* cmp dreg_lo,sreg_lo */
EMIT2(0x39, add_2reg(0xC0, dreg_lo, sreg_lo));
emit_cond_jmp: jmp_cond = get_cond_jmp_opcode(BPF_OP(code), false);
if (jmp_cond == COND_JMP_OPCODE_INVALID)
return -EFAULT;
jmp_offset = addrs[i + insn->off] - addrs[i];
if (is_imm8(jmp_offset)) {
EMIT2(jmp_cond, jmp_offset);
} else if (is_simm32(jmp_offset)) {
EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
} else {
pr_err("cond_jmp gen bug %llx\n", jmp_offset);
return -EFAULT;
}
break;
}
case BPF_JMP | BPF_JSGT | BPF_K:
case BPF_JMP | BPF_JSLE | BPF_K:
case BPF_JMP | BPF_JSLT | BPF_K:
case BPF_JMP | BPF_JSGE | BPF_K: {
u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
u8 sreg_lo = IA32_ECX;
u8 sreg_hi = IA32_EBX;
u32 hi;
if (dstk) {
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
STACK_VAR(dst_lo));
EMIT3(0x8B,
add_2reg(0x40, IA32_EBP,
IA32_EDX),
STACK_VAR(dst_hi));
}
/* mov ecx,imm32 */
EMIT2_off32(0xC7, add_1reg(0xC0, IA32_ECX), imm32);
hi = imm32 & (1 << 31) ? (u32)~0 : 0;
/* mov ebx,imm32 */
EMIT2_off32(0xC7, add_1reg(0xC0, IA32_EBX), hi);
/* cmp dreg_hi,sreg_hi */
EMIT2(0x39, add_2reg(0xC0, dreg_hi, sreg_hi));
EMIT2(IA32_JNE, 10);
/* cmp dreg_lo,sreg_lo */
EMIT2(0x39, add_2reg(0xC0, dreg_lo, sreg_lo));
/*
* For simplicity of branch offset computation,
* let's use fixed jump coding here.
*/
emit_cond_jmp_signed: /* Check the condition for low 32-bit comparison */
jmp_cond = get_cond_jmp_opcode(BPF_OP(code), true);
if (jmp_cond == COND_JMP_OPCODE_INVALID)
return -EFAULT;
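/* +8: the 2-byte jmp and the 6-byte second jcc below still follow this jcc */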
jmp_offset = addrs[i + insn->off] - addrs[i] + 8;
if (is_simm32(jmp_offset)) {
EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
} else {
pr_err("cond_jmp gen bug %llx\n", jmp_offset);
return -EFAULT;
}
EMIT2(0xEB, 6);
/* Check the condition for high 32-bit comparison */
jmp_cond = get_cond_jmp_opcode(BPF_OP(code), false);
if (jmp_cond == COND_JMP_OPCODE_INVALID)
return -EFAULT;
jmp_offset = addrs[i + insn->off] - addrs[i];
if (is_simm32(jmp_offset)) {
EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
} else {
pr_err("cond_jmp gen bug %llx\n", jmp_offset);
return -EFAULT;
}
break;
}
case BPF_JMP | BPF_JA:
if (insn->off == -1)
/* -1 jmp instructions will always jump
* backwards two bytes. Explicitly handling
* this case avoids wasting too many passes
* when there are long sequences of replaced
* dead code.
*/
jmp_offset = -2;
else
jmp_offset = addrs[i + insn->off] - addrs[i];
if (!jmp_offset)
/* Optimize out nop jumps */
break;
emit_jmp:
if (is_imm8(jmp_offset)) {
EMIT2(0xEB, jmp_offset);
} else if (is_simm32(jmp_offset)) {
EMIT1_off32(0xE9, jmp_offset);
} else {
pr_err("jmp gen bug %llx\n", jmp_offset);
return -EFAULT;
}
break;
case BPF_STX | BPF_ATOMIC | BPF_W:
case BPF_STX | BPF_ATOMIC | BPF_DW:
goto notyet;
case BPF_JMP | BPF_EXIT:
if (seen_exit) {
jmp_offset = ctx->cleanup_addr - addrs[i];
goto emit_jmp;
}
seen_exit = true;
/* Update cleanup_addr */
ctx->cleanup_addr = proglen;
emit_epilogue(&prog, bpf_prog->aux->stack_depth);
break;
notyet:
pr_info_once("*** NOT YET: opcode %02x ***\n", code);
return -EFAULT;
default:
/*
* This error will be seen if a new instruction was added
* to the interpreter but not to the JIT, or if there is
* junk in bpf_prog.
*/
pr_err("bpf_jit: unknown opcode %02x\n", code);
return -EINVAL;
}
ilen = prog - temp;
if (ilen > BPF_MAX_INSN_SIZE) {
pr_err("bpf_jit: fatal insn size error\n");
return -EFAULT;
}
if (image) {
/*
* When populating the image, assert that:
*
* i) We do not write beyond the allocated space, and
* ii) addrs[i] did not change from the prior run, in order
* to validate assumptions made for computing branch
* displacements.
*/
if (unlikely(proglen + ilen > oldproglen ||
proglen + ilen != addrs[i])) {
pr_err("bpf_jit: fatal error\n");
return -EFAULT;
}
memcpy(image + proglen, temp, ilen);
}
proglen += ilen;
addrs[i] = proglen;
prog = temp;
}
return proglen;
}
bool bpf_jit_needs_zext(void)
{
return true;
}
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
struct bpf_binary_header *header = NULL;
struct bpf_prog *tmp, *orig_prog = prog;
int proglen, oldproglen = 0;
struct jit_context ctx = {};
bool tmp_blinded = false;
u8 *image = NULL;
int *addrs;
int pass;
int i;
if (!prog->jit_requested)
return orig_prog;
tmp = bpf_jit_blind_constants(prog);
/*
* If blinding was requested and we failed during blinding,
* we must fall back to the interpreter.
*/
if (IS_ERR(tmp))
return orig_prog;
if (tmp != prog) {
tmp_blinded = true;
prog = tmp;
}
addrs = kmalloc_array(prog->len, sizeof(*addrs), GFP_KERNEL);
if (!addrs) {
prog = orig_prog;
goto out;
}
/*
* Before the first pass, make a rough estimate of addrs[];
* each BPF instruction is translated to less than 64 bytes.
*/
for (proglen = 0, i = 0; i < prog->len; i++) {
proglen += 64;
addrs[i] = proglen;
}
ctx.cleanup_addr = proglen;
/*
* JITed image shrinks with every pass and the loop iterates
* until the image stops shrinking. Very large BPF programs
* may converge on the last pass. In such a case, do one more
* pass to emit the final image.
*/
for (pass = 0; pass < 20 || image; pass++) {
proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
if (proglen <= 0) {
out_image:
image = NULL;
if (header)
bpf_jit_binary_free(header);
prog = orig_prog;
goto out_addrs;
}
if (image) {
if (proglen != oldproglen) {
pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
proglen, oldproglen);
goto out_image;
}
break;
}
if (proglen == oldproglen) {
header = bpf_jit_binary_alloc(proglen, &image,
1, jit_fill_hole);
if (!header) {
prog = orig_prog;
goto out_addrs;
}
}
oldproglen = proglen;
cond_resched();
}
if (bpf_jit_enable > 1)
bpf_jit_dump(prog->len, proglen, pass + 1, image);
if (image) {
bpf_jit_binary_lock_ro(header);
prog->bpf_func = (void *)image;
prog->jited = 1;
prog->jited_len = proglen;
} else {
prog = orig_prog;
}
out_addrs:
kfree(addrs);
out:
if (tmp_blinded)
bpf_jit_prog_release_other(prog, prog == orig_prog ?
tmp : orig_prog);
return prog;
}
bool bpf_jit_supports_kfunc_call(void)
{
return true;
}
| linux-master | arch/x86/net/bpf_jit_comp32.c |
// SPDX-License-Identifier: GPL-2.0-only
/* -*- linux-c -*- ------------------------------------------------------- *
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright 2007 rPath, Inc. - All Rights Reserved
* Copyright 2009 Intel Corporation; author H. Peter Anvin
*
* Original APM BIOS checking by Stephen Rothwell, May 1994
* ([email protected])
*
* ----------------------------------------------------------------------- */
/*
* Get APM BIOS information
*/
#include "boot.h"
int query_apm_bios(void)
{
struct biosregs ireg, oreg;
/* APM BIOS installation check */
initregs(&ireg);
ireg.ah = 0x53;
intcall(0x15, &ireg, &oreg);
if (oreg.flags & X86_EFLAGS_CF)
return -1; /* No APM BIOS */
if (oreg.bx != 0x504d) /* "PM" signature */
return -1;
if (!(oreg.cx & 0x02)) /* 32 bits supported? */
return -1;
/* Disconnect first, just in case */
ireg.al = 0x04;
intcall(0x15, &ireg, NULL);
/* 32-bit connect */
ireg.al = 0x03;
intcall(0x15, &ireg, &oreg);
boot_params.apm_bios_info.cseg = oreg.ax;
boot_params.apm_bios_info.offset = oreg.ebx;
boot_params.apm_bios_info.cseg_16 = oreg.cx;
boot_params.apm_bios_info.dseg = oreg.dx;
boot_params.apm_bios_info.cseg_len = oreg.si;
boot_params.apm_bios_info.cseg_16_len = oreg.hsi;
boot_params.apm_bios_info.dseg_len = oreg.di;
if (oreg.flags & X86_EFLAGS_CF)
return -1;
/* Redo the installation check as the 32-bit connect;
some BIOSes return different flags this way... */
ireg.al = 0x00;
intcall(0x15, &ireg, &oreg);
if ((oreg.eflags & X86_EFLAGS_CF) || oreg.bx != 0x504d) {
/* Failure with 32-bit connect, try to disconnect and ignore */
ireg.al = 0x04;
intcall(0x15, &ireg, NULL);
return -1;
}
boot_params.apm_bios_info.version = oreg.ax;
boot_params.apm_bios_info.flags = oreg.cx;
return 0;
}
| linux-master | arch/x86/boot/apm.c |
// SPDX-License-Identifier: GPL-2.0-only
/* -*- linux-c -*- ------------------------------------------------------- *
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright 2007 rPath, Inc. - All Rights Reserved
*
* ----------------------------------------------------------------------- */
/*
* Oh, it's a waste of space, but oh-so-yummy for debugging. This
* version of printf() does not include 64-bit support. "Live with
* it."
*
*/
#include "boot.h"
static int skip_atoi(const char **s)
{
int i = 0;
while (isdigit(**s))
i = i * 10 + *((*s)++) - '0';
return i;
}
#define ZEROPAD 1 /* pad with zero */
#define SIGN 2 /* unsigned/signed long */
#define PLUS 4 /* show plus */
#define SPACE 8 /* space if plus */
#define LEFT 16 /* left justified */
#define SMALL 32 /* Must be 32 == 0x20 */
#define SPECIAL 64 /* 0x */
#define __do_div(n, base) ({ \
int __res; \
__res = ((unsigned long) n) % (unsigned) base; \
n = ((unsigned long) n) / (unsigned) base; \
__res; })
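/*
 * Illustrative note (not in the original source): __do_div(n, base)
 * updates n in place and evaluates to the remainder. E.g. with n = 255
 * and base = 16 the first expansion leaves n == 15 and yields 15 ('F'),
 * the second leaves n == 0 and yields 15 again, so number() below emits
 * "FF" once tmp[] is reversed.
 */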
static char *number(char *str, long num, int base, int size, int precision,
int type)
{
/* we are called with base 8, 10 or 16 only, thus we don't need "G..." */
static const char digits[16] = "0123456789ABCDEF"; /* "GHIJKLMNOPQRSTUVWXYZ"; */
char tmp[66];
char c, sign, locase;
int i;
/* locase = 0 or 0x20. ORing digits or letters with 'locase'
* produces same digits or (maybe lowercased) letters */
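/* e.g. 'A' | 0x20 == 'a', while a digit such as '7' | 0x20 == '7'. */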
locase = (type & SMALL);
if (type & LEFT)
type &= ~ZEROPAD;
if (base < 2 || base > 16)
return NULL;
c = (type & ZEROPAD) ? '0' : ' ';
sign = 0;
if (type & SIGN) {
if (num < 0) {
sign = '-';
num = -num;
size--;
} else if (type & PLUS) {
sign = '+';
size--;
} else if (type & SPACE) {
sign = ' ';
size--;
}
}
if (type & SPECIAL) {
if (base == 16)
size -= 2;
else if (base == 8)
size--;
}
i = 0;
if (num == 0)
tmp[i++] = '0';
else
while (num != 0)
tmp[i++] = (digits[__do_div(num, base)] | locase);
if (i > precision)
precision = i;
size -= precision;
if (!(type & (ZEROPAD + LEFT)))
while (size-- > 0)
*str++ = ' ';
if (sign)
*str++ = sign;
if (type & SPECIAL) {
if (base == 8)
*str++ = '0';
else if (base == 16) {
*str++ = '0';
*str++ = ('X' | locase);
}
}
if (!(type & LEFT))
while (size-- > 0)
*str++ = c;
while (i < precision--)
*str++ = '0';
while (i-- > 0)
*str++ = tmp[i];
while (size-- > 0)
*str++ = ' ';
return str;
}
int vsprintf(char *buf, const char *fmt, va_list args)
{
int len;
unsigned long num;
int i, base;
char *str;
const char *s;
int flags; /* flags to number() */
int field_width; /* width of output field */
int precision; /* min. # of digits for integers; max
number of chars from string */
int qualifier; /* 'h', 'l', or 'L' for integer fields */
for (str = buf; *fmt; ++fmt) {
if (*fmt != '%') {
*str++ = *fmt;
continue;
}
/* process flags */
flags = 0;
repeat:
++fmt; /* this also skips first '%' */
switch (*fmt) {
case '-':
flags |= LEFT;
goto repeat;
case '+':
flags |= PLUS;
goto repeat;
case ' ':
flags |= SPACE;
goto repeat;
case '#':
flags |= SPECIAL;
goto repeat;
case '0':
flags |= ZEROPAD;
goto repeat;
}
/* get field width */
field_width = -1;
if (isdigit(*fmt))
field_width = skip_atoi(&fmt);
else if (*fmt == '*') {
++fmt;
/* it's the next argument */
field_width = va_arg(args, int);
if (field_width < 0) {
field_width = -field_width;
flags |= LEFT;
}
}
/* get the precision */
precision = -1;
if (*fmt == '.') {
++fmt;
if (isdigit(*fmt))
precision = skip_atoi(&fmt);
else if (*fmt == '*') {
++fmt;
/* it's the next argument */
precision = va_arg(args, int);
}
if (precision < 0)
precision = 0;
}
/* get the conversion qualifier */
qualifier = -1;
if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L') {
qualifier = *fmt;
++fmt;
}
/* default base */
base = 10;
switch (*fmt) {
case 'c':
if (!(flags & LEFT))
while (--field_width > 0)
*str++ = ' ';
*str++ = (unsigned char)va_arg(args, int);
while (--field_width > 0)
*str++ = ' ';
continue;
case 's':
s = va_arg(args, char *);
len = strnlen(s, precision);
if (!(flags & LEFT))
while (len < field_width--)
*str++ = ' ';
for (i = 0; i < len; ++i)
*str++ = *s++;
while (len < field_width--)
*str++ = ' ';
continue;
case 'p':
if (field_width == -1) {
field_width = 2 * sizeof(void *);
flags |= ZEROPAD;
}
str = number(str,
(unsigned long)va_arg(args, void *), 16,
field_width, precision, flags);
continue;
case 'n':
if (qualifier == 'l') {
long *ip = va_arg(args, long *);
*ip = (str - buf);
} else {
int *ip = va_arg(args, int *);
*ip = (str - buf);
}
continue;
case '%':
*str++ = '%';
continue;
/* integer number formats - set up the flags and "break" */
case 'o':
base = 8;
break;
case 'x':
flags |= SMALL;
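/* fall through */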
case 'X':
base = 16;
break;
case 'd':
case 'i':
flags |= SIGN;
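/* fall through */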
case 'u':
break;
default:
*str++ = '%';
if (*fmt)
*str++ = *fmt;
else
--fmt;
continue;
}
if (qualifier == 'l')
num = va_arg(args, unsigned long);
else if (qualifier == 'h') {
num = (unsigned short)va_arg(args, int);
if (flags & SIGN)
num = (short)num;
} else if (flags & SIGN)
num = va_arg(args, int);
else
num = va_arg(args, unsigned int);
str = number(str, num, base, field_width, precision, flags);
}
*str = '\0';
return str - buf;
}
int sprintf(char *buf, const char *fmt, ...)
{
va_list args;
int i;
va_start(args, fmt);
i = vsprintf(buf, fmt, args);
va_end(args);
return i;
}
int printf(const char *fmt, ...)
{
char printf_buf[1024];
va_list args;
int printed;
va_start(args, fmt);
printed = vsprintf(printf_buf, fmt, args);
va_end(args);
puts(printf_buf);
return printed;
}
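/*
 * Illustrative usage sketch (not part of the original file; the function
 * name and format string are assumptions). Only the conversions handled in
 * vsprintf() above are available, and there is no 64-bit support, so wide
 * values must be split into 32-bit halves by the caller.
 */
static void printf_example(u32 start, u32 len)
{
printf("mem: %08x-%08x (%u KiB)\n", start, start + len - 1, len >> 10);
}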
| linux-master | arch/x86/boot/printf.c |
// SPDX-License-Identifier: GPL-2.0-only
/* -*- linux-c -*- ------------------------------------------------------- *
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright 2007-2008 rPath, Inc. - All Rights Reserved
*
* ----------------------------------------------------------------------- */
/*
* arch/x86/boot/video-mode.c
*
* Set the video mode. This is separated out into a different
* file in order to be shared with the ACPI wakeup code.
*/
#include "boot.h"
#include "video.h"
#include "vesa.h"
#include <uapi/asm/boot.h>
/*
* Common variables
*/
int adapter; /* 0=CGA/MDA/HGC, 1=EGA, 2=VGA+ */
int force_x, force_y; /* Don't query the BIOS for cols/rows */
int do_restore; /* Screen contents changed during mode flip */
int graphic_mode; /* Graphic mode with linear frame buffer */
/* Probe the video drivers and have them generate their mode lists. */
void probe_cards(int unsafe)
{
struct card_info *card;
static u8 probed[2];
if (probed[unsafe])
return;
probed[unsafe] = 1;
for (card = video_cards; card < video_cards_end; card++) {
if (card->unsafe == unsafe) {
if (card->probe)
card->nmodes = card->probe();
else
card->nmodes = 0;
}
}
}
/* Test if a mode is defined */
int mode_defined(u16 mode)
{
struct card_info *card;
struct mode_info *mi;
int i;
for (card = video_cards; card < video_cards_end; card++) {
mi = card->modes;
for (i = 0; i < card->nmodes; i++, mi++) {
if (mi->mode == mode)
return 1;
}
}
return 0;
}
/* Set mode (without recalc) */
static int raw_set_mode(u16 mode, u16 *real_mode)
{
int nmode, i;
struct card_info *card;
struct mode_info *mi;
/* Drop the recalc bit if set */
mode &= ~VIDEO_RECALC;
/* Scan for mode based on fixed ID, position, or resolution */
nmode = 0;
for (card = video_cards; card < video_cards_end; card++) {
mi = card->modes;
for (i = 0; i < card->nmodes; i++, mi++) {
int visible = mi->x || mi->y;
if ((mode == nmode && visible) ||
mode == mi->mode ||
mode == (mi->y << 8)+mi->x) {
*real_mode = mi->mode;
return card->set_mode(mi);
}
if (visible)
nmode++;
}
}
/* Nothing found? Is it an "exceptional" (unprobed) mode? */
for (card = video_cards; card < video_cards_end; card++) {
if (mode >= card->xmode_first &&
mode < card->xmode_first+card->xmode_n) {
struct mode_info mix;
*real_mode = mix.mode = mode;
mix.x = mix.y = 0;
return card->set_mode(&mix);
}
}
/* Otherwise, failure... */
return -1;
}
/*
* Recalculate the vertical video cutoff (hack!)
*/
static void vga_recalc_vertical(void)
{
unsigned int font_size, rows;
u16 crtc;
u8 pt, ov;
set_fs(0);
font_size = rdfs8(0x485); /* BIOS: font size (pixels) */
rows = force_y ? force_y : rdfs8(0x484)+1; /* Text rows */
rows *= font_size; /* Visible scan lines */
rows--; /* ... minus one */
crtc = vga_crtc();
pt = in_idx(crtc, 0x11);
pt &= ~0x80; /* Unlock CR0-7 */
out_idx(pt, crtc, 0x11);
out_idx((u8)rows, crtc, 0x12); /* Lower height register */
ov = in_idx(crtc, 0x07); /* Overflow register */
ov &= 0xbd;
ov |= (rows >> (8-1)) & 0x02;
ov |= (rows >> (9-6)) & 0x40;
out_idx(ov, crtc, 0x07);
}
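/*
 * Illustrative note (not in the original file): the vertical display end
 * value is 10 bits wide. Bits 0-7 go to CRTC register 0x12, bit 8 to
 * overflow register bit 1 and bit 9 to overflow register bit 6, which is
 * exactly what (rows >> (8-1)) & 0x02 and (rows >> (9-6)) & 0x40 pack.
 */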
/* Set mode (with recalc if specified) */
int set_mode(u16 mode)
{
int rv;
u16 real_mode;
/* Very special mode numbers... */
if (mode == VIDEO_CURRENT_MODE)
return 0; /* Nothing to do... */
else if (mode == NORMAL_VGA)
mode = VIDEO_80x25;
else if (mode == EXTENDED_VGA)
mode = VIDEO_8POINT;
rv = raw_set_mode(mode, &real_mode);
if (rv)
return rv;
if (mode & VIDEO_RECALC)
vga_recalc_vertical();
/* Save the canonical mode number for the kernel, not
an alias, size specification or menu position */
#ifndef _WAKEUP
boot_params.hdr.vid_mode = real_mode;
#endif
return 0;
}
| linux-master | arch/x86/boot/video-mode.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include "bitops.h"
#include <asm/processor-flags.h>
#include <asm/required-features.h>
#include <asm/msr-index.h>
#include "cpuflags.h"
struct cpu_features cpu;
u32 cpu_vendor[3];
static bool loaded_flags;
static int has_fpu(void)
{
u16 fcw = -1, fsw = -1;
unsigned long cr0;
asm volatile("mov %%cr0,%0" : "=r" (cr0));
if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
asm volatile("mov %0,%%cr0" : : "r" (cr0));
}
asm volatile("fninit ; fnstsw %0 ; fnstcw %1"
: "+m" (fsw), "+m" (fcw));
return fsw == 0 && (fcw & 0x103f) == 0x003f;
}
/*
* For building the 16-bit code we want to explicitly specify 32-bit
* push/pop operations, rather than just saying 'pushf' or 'popf' and
* letting the compiler choose. But this is also included from the
* compressed/ directory where it may be 64-bit code, and thus needs
* to be 'pushfq' or 'popfq' in that case.
*/
#ifdef __x86_64__
#define PUSHF "pushfq"
#define POPF "popfq"
#else
#define PUSHF "pushfl"
#define POPF "popfl"
#endif
int has_eflag(unsigned long mask)
{
unsigned long f0, f1;
asm volatile(PUSHF " \n\t"
PUSHF " \n\t"
"pop %0 \n\t"
"mov %0,%1 \n\t"
"xor %2,%1 \n\t"
"push %1 \n\t"
POPF " \n\t"
PUSHF " \n\t"
"pop %1 \n\t"
POPF
: "=&r" (f0), "=&r" (f1)
: "ri" (mask));
return !!((f0^f1) & mask);
}
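/*
 * Illustrative sketch (not part of the original file; the function name is
 * an assumption): the classic CPUID presence test is "can we toggle
 * EFLAGS.ID?", which reduces to a single call to the helper above;
 * get_cpuflags() below relies on exactly this.
 */
static inline int has_cpuid_example(void)
{
return has_eflag(X86_EFLAGS_ID);
}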
void cpuid_count(u32 id, u32 count, u32 *a, u32 *b, u32 *c, u32 *d)
{
asm volatile("cpuid"
: "=a" (*a), "=b" (*b), "=c" (*c), "=d" (*d)
: "0" (id), "2" (count)
);
}
#define cpuid(id, a, b, c, d) cpuid_count(id, 0, a, b, c, d)
void get_cpuflags(void)
{
u32 max_intel_level, max_amd_level;
u32 tfms;
u32 ignored;
if (loaded_flags)
return;
loaded_flags = true;
if (has_fpu())
set_bit(X86_FEATURE_FPU, cpu.flags);
if (has_eflag(X86_EFLAGS_ID)) {
cpuid(0x0, &max_intel_level, &cpu_vendor[0], &cpu_vendor[2],
&cpu_vendor[1]);
if (max_intel_level >= 0x00000001 &&
max_intel_level <= 0x0000ffff) {
cpuid(0x1, &tfms, &ignored, &cpu.flags[4],
&cpu.flags[0]);
cpu.level = (tfms >> 8) & 15;
cpu.family = cpu.level;
cpu.model = (tfms >> 4) & 15;
if (cpu.level >= 6)
cpu.model += ((tfms >> 16) & 0xf) << 4;
}
if (max_intel_level >= 0x00000007) {
cpuid_count(0x00000007, 0, &ignored, &ignored,
&cpu.flags[16], &ignored);
}
cpuid(0x80000000, &max_amd_level, &ignored, &ignored,
&ignored);
if (max_amd_level >= 0x80000001 &&
max_amd_level <= 0x8000ffff) {
cpuid(0x80000001, &ignored, &ignored, &cpu.flags[6],
&cpu.flags[1]);
}
}
}
| linux-master | arch/x86/boot/cpuflags.c |
// SPDX-License-Identifier: GPL-2.0-only
/* -*- linux-c -*- ------------------------------------------------------- *
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright 2007 rPath, Inc. - All Rights Reserved
* Copyright 2009 Intel Corporation; author H. Peter Anvin
*
* ----------------------------------------------------------------------- */
/*
* Memory detection code
*/
#include "boot.h"
#define SMAP 0x534d4150 /* ASCII "SMAP" */
static void detect_memory_e820(void)
{
int count = 0;
struct biosregs ireg, oreg;
struct boot_e820_entry *desc = boot_params.e820_table;
static struct boot_e820_entry buf; /* static so it is zeroed */
initregs(&ireg);
ireg.ax = 0xe820;
ireg.cx = sizeof(buf);
ireg.edx = SMAP;
ireg.di = (size_t)&buf;
/*
* Note: at least one BIOS is known which assumes that the
* buffer pointed to by one e820 call is the same one as
* the previous call, and only changes modified fields. Therefore,
* we use a temporary buffer and copy the results entry by entry.
*
* This routine deliberately does not try to account for
* ACPI 3+ extended attributes. This is because there are
* BIOSes in the field which report zero for the valid bit for
* all ranges, and we don't currently make any use of the
* other attribute bits. Revisit this if we see the extended
* attribute bits deployed in a meaningful way in the future.
*/
do {
intcall(0x15, &ireg, &oreg);
ireg.ebx = oreg.ebx; /* for next iteration... */
/* BIOSes which terminate the chain with CF = 1 as opposed
to %ebx = 0 don't always report the SMAP signature on
the final, failing, probe. */
if (oreg.eflags & X86_EFLAGS_CF)
break;
/* Some BIOSes stop returning SMAP in the middle of
the search loop. We don't know exactly how the BIOS
screwed up the map at that point, we might have a
partial map, the full map, or complete garbage, so
just return failure. */
if (oreg.eax != SMAP) {
count = 0;
break;
}
*desc++ = buf;
count++;
} while (ireg.ebx && count < ARRAY_SIZE(boot_params.e820_table));
boot_params.e820_entries = count;
}
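/*
 * Illustrative sketch (not part of the original file; the function name is
 * an assumption): once detect_memory_e820() has run, the copied table can
 * be walked like this. The boot printf() has no 64-bit support, so the
 * u64 fields are printed as two 32-bit halves.
 */
static void e820_dump_example(void)
{
int i;
for (i = 0; i < boot_params.e820_entries; i++) {
struct boot_e820_entry *e = &boot_params.e820_table[i];
printf("e820: %08x%08x +%08x%08x type %u\n",
(u32)(e->addr >> 32), (u32)e->addr,
(u32)(e->size >> 32), (u32)e->size, e->type);
}
}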
static void detect_memory_e801(void)
{
struct biosregs ireg, oreg;
initregs(&ireg);
ireg.ax = 0xe801;
intcall(0x15, &ireg, &oreg);
if (oreg.eflags & X86_EFLAGS_CF)
return;
/* Do we really need to do this? */
if (oreg.cx || oreg.dx) {
oreg.ax = oreg.cx;
oreg.bx = oreg.dx;
}
if (oreg.ax > 15*1024) {
return; /* Bogus! */
} else if (oreg.ax == 15*1024) {
boot_params.alt_mem_k = (oreg.bx << 6) + oreg.ax;
} else {
/*
* This ignores memory above 16MB if we have a memory
* hole there. If someone actually finds a machine
* with a memory hole at 16MB and no support for
* 0E820h they should probably generate a fake e820
* map.
*/
boot_params.alt_mem_k = oreg.ax;
}
}
static void detect_memory_88(void)
{
struct biosregs ireg, oreg;
initregs(&ireg);
ireg.ah = 0x88;
intcall(0x15, &ireg, &oreg);
boot_params.screen_info.ext_mem_k = oreg.ax;
}
void detect_memory(void)
{
detect_memory_e820();
detect_memory_e801();
detect_memory_88();
}
| linux-master | arch/x86/boot/memory.c |
// SPDX-License-Identifier: GPL-2.0-only
/* -*- linux-c -*- ------------------------------------------------------- *
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright 2007-2008 rPath, Inc. - All Rights Reserved
*
* ----------------------------------------------------------------------- */
/*
* arch/x86/boot/cpu.c
*
* Check for obligatory CPU features and abort if the features are not
* present.
*/
#include "boot.h"
#include "cpustr.h"
static char *cpu_name(int level)
{
static char buf[6];
if (level == 64) {
return "x86-64";
} else {
if (level == 15)
level = 6;
sprintf(buf, "i%d86", level);
return buf;
}
}
static void show_cap_strs(u32 *err_flags)
{
int i, j;
const unsigned char *msg_strs = (const unsigned char *)x86_cap_strs;
for (i = 0; i < NCAPINTS; i++) {
u32 e = err_flags[i];
for (j = 0; j < 32; j++) {
if (msg_strs[0] < i ||
(msg_strs[0] == i && msg_strs[1] < j)) {
/* Skip to the next string */
msg_strs += 2;
while (*msg_strs++)
;
}
if (e & 1) {
if (msg_strs[0] == i &&
msg_strs[1] == j &&
msg_strs[2])
printf("%s ", msg_strs+2);
else
printf("%d:%d ", i, j);
}
e >>= 1;
}
}
}
int validate_cpu(void)
{
u32 *err_flags;
int cpu_level, req_level;
check_cpu(&cpu_level, &req_level, &err_flags);
if (cpu_level < req_level) {
printf("This kernel requires an %s CPU, ",
cpu_name(req_level));
printf("but only detected an %s CPU.\n",
cpu_name(cpu_level));
return -1;
}
if (err_flags) {
puts("This kernel requires the following features "
"not present on the CPU:\n");
show_cap_strs(err_flags);
putchar('\n');
return -1;
} else if (check_knl_erratum()) {
return -1;
} else {
return 0;
}
}
| linux-master | arch/x86/boot/cpu.c |
// SPDX-License-Identifier: GPL-2.0-only
/* -*- linux-c -*- ------------------------------------------------------- *
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright 2007 rPath, Inc. - All Rights Reserved
* Copyright 2009 Intel Corporation; author H. Peter Anvin
*
* ----------------------------------------------------------------------- */
/*
* Select video mode
*/
#include <uapi/asm/boot.h>
#include "boot.h"
#include "video.h"
#include "vesa.h"
static u16 video_segment;
static void store_cursor_position(void)
{
struct biosregs ireg, oreg;
initregs(&ireg);
ireg.ah = 0x03;
intcall(0x10, &ireg, &oreg);
boot_params.screen_info.orig_x = oreg.dl;
boot_params.screen_info.orig_y = oreg.dh;
if (oreg.ch & 0x20)
boot_params.screen_info.flags |= VIDEO_FLAGS_NOCURSOR;
if ((oreg.ch & 0x1f) > (oreg.cl & 0x1f))
boot_params.screen_info.flags |= VIDEO_FLAGS_NOCURSOR;
}
static void store_video_mode(void)
{
struct biosregs ireg, oreg;
/* N.B.: the saving of the video page here is a bit silly,
since we pretty much assume page 0 everywhere. */
initregs(&ireg);
ireg.ah = 0x0f;
intcall(0x10, &ireg, &oreg);
/* Not all BIOSes are clean with respect to the top bit */
boot_params.screen_info.orig_video_mode = oreg.al & 0x7f;
boot_params.screen_info.orig_video_page = oreg.bh;
}
/*
* Store the video mode parameters for later usage by the kernel.
* This is done by asking the BIOS except for the rows/columns
* parameters in the default 80x25 mode -- these are set directly,
* because some very obscure BIOSes supply insane values.
*/
static void store_mode_params(void)
{
u16 font_size;
int x, y;
/* For graphics mode, it is up to the mode-setting driver
(currently only video-vesa.c) to store the parameters */
if (graphic_mode)
return;
store_cursor_position();
store_video_mode();
if (boot_params.screen_info.orig_video_mode == 0x07) {
/* MDA, HGC, or VGA in monochrome mode */
video_segment = 0xb000;
} else {
/* CGA, EGA, VGA and so forth */
video_segment = 0xb800;
}
set_fs(0);
font_size = rdfs16(0x485); /* Font size, BIOS area */
boot_params.screen_info.orig_video_points = font_size;
x = rdfs16(0x44a);
y = (adapter == ADAPTER_CGA) ? 25 : rdfs8(0x484)+1;
if (force_x)
x = force_x;
if (force_y)
y = force_y;
boot_params.screen_info.orig_video_cols = x;
boot_params.screen_info.orig_video_lines = y;
}
static unsigned int get_entry(void)
{
char entry_buf[4];
int i, len = 0;
int key;
unsigned int v;
do {
key = getchar();
if (key == '\b') {
if (len > 0) {
puts("\b \b");
len--;
}
} else if ((key >= '0' && key <= '9') ||
(key >= 'A' && key <= 'Z') ||
(key >= 'a' && key <= 'z')) {
if (len < sizeof(entry_buf)) {
entry_buf[len++] = key;
putchar(key);
}
}
} while (key != '\r');
putchar('\n');
if (len == 0)
return VIDEO_CURRENT_MODE; /* Default */
v = 0;
for (i = 0; i < len; i++) {
v <<= 4;
key = entry_buf[i] | 0x20;
v += (key > '9') ? key-'a'+10 : key-'0';
}
return v;
}
static void display_menu(void)
{
struct card_info *card;
struct mode_info *mi;
char ch;
int i;
int nmodes;
int modes_per_line;
int col;
nmodes = 0;
for (card = video_cards; card < video_cards_end; card++)
nmodes += card->nmodes;
modes_per_line = 1;
if (nmodes >= 20)
modes_per_line = 3;
for (col = 0; col < modes_per_line; col++)
puts("Mode: Resolution: Type: ");
putchar('\n');
col = 0;
ch = '0';
for (card = video_cards; card < video_cards_end; card++) {
mi = card->modes;
for (i = 0; i < card->nmodes; i++, mi++) {
char resbuf[32];
int visible = mi->x && mi->y;
u16 mode_id = mi->mode ? mi->mode :
(mi->y << 8)+mi->x;
if (!visible)
continue; /* Hidden mode */
if (mi->depth)
sprintf(resbuf, "%dx%d", mi->y, mi->depth);
else
sprintf(resbuf, "%d", mi->y);
printf("%c %03X %4dx%-7s %-6s",
ch, mode_id, mi->x, resbuf, card->card_name);
col++;
if (col >= modes_per_line) {
putchar('\n');
col = 0;
}
if (ch == '9')
ch = 'a';
else if (ch == 'z' || ch == ' ')
ch = ' '; /* Out of keys... */
else
ch++;
}
}
if (col)
putchar('\n');
}
#define H(x) ((x)-'a'+10)
#define SCAN ((H('s')<<12)+(H('c')<<8)+(H('a')<<4)+H('n'))
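/*
 * Illustrative note (not in the original file): get_entry() folds each
 * keystroke with v = (v << 4) + (key - 'a' + 10) for letters, so typing
 * "scan" produces exactly the SCAN constant built by H() above.
 */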
static unsigned int mode_menu(void)
{
int key;
unsigned int sel;
puts("Press <ENTER> to see video modes available, "
"<SPACE> to continue, or wait 30 sec\n");
kbd_flush();
while (1) {
key = getchar_timeout();
if (key == ' ' || key == 0)
return VIDEO_CURRENT_MODE; /* Default */
if (key == '\r')
break;
putchar('\a'); /* Beep! */
}
for (;;) {
display_menu();
puts("Enter a video mode or \"scan\" to scan for "
"additional modes: ");
sel = get_entry();
if (sel != SCAN)
return sel;
probe_cards(1);
}
}
/* Save screen content to the heap */
static struct saved_screen {
int x, y;
int curx, cury;
u16 *data;
} saved;
static void save_screen(void)
{
/* Should be called after store_mode_params() */
saved.x = boot_params.screen_info.orig_video_cols;
saved.y = boot_params.screen_info.orig_video_lines;
saved.curx = boot_params.screen_info.orig_x;
saved.cury = boot_params.screen_info.orig_y;
if (!heap_free(saved.x*saved.y*sizeof(u16)+512))
return; /* Not enough heap to save the screen */
saved.data = GET_HEAP(u16, saved.x*saved.y);
set_fs(video_segment);
copy_from_fs(saved.data, 0, saved.x*saved.y*sizeof(u16));
}
static void restore_screen(void)
{
/* Should be called after store_mode_params() */
int xs = boot_params.screen_info.orig_video_cols;
int ys = boot_params.screen_info.orig_video_lines;
int y;
addr_t dst = 0;
u16 *src = saved.data;
struct biosregs ireg;
if (graphic_mode)
return; /* Can't restore onto a graphic mode */
if (!src)
return; /* No saved screen contents */
/* Restore screen contents */
set_fs(video_segment);
for (y = 0; y < ys; y++) {
int npad;
if (y < saved.y) {
int copy = (xs < saved.x) ? xs : saved.x;
copy_to_fs(dst, src, copy*sizeof(u16));
dst += copy*sizeof(u16);
src += saved.x;
npad = (xs < saved.x) ? 0 : xs-saved.x;
} else {
npad = xs;
}
/* Writes "npad" blank characters to
video_segment:dst and advances dst */
asm volatile("pushw %%es ; "
"movw %2,%%es ; "
"shrw %%cx ; "
"jnc 1f ; "
"stosw \n\t"
"1: rep;stosl ; "
"popw %%es"
: "+D" (dst), "+c" (npad)
: "bdS" (video_segment),
"a" (0x07200720));
}
/* Restore cursor position */
if (saved.curx >= xs)
saved.curx = xs-1;
if (saved.cury >= ys)
saved.cury = ys-1;
initregs(&ireg);
ireg.ah = 0x02; /* Set cursor position */
ireg.dh = saved.cury;
ireg.dl = saved.curx;
intcall(0x10, &ireg, NULL);
store_cursor_position();
}
void set_video(void)
{
u16 mode = boot_params.hdr.vid_mode;
RESET_HEAP();
store_mode_params();
save_screen();
probe_cards(0);
for (;;) {
if (mode == ASK_VGA)
mode = mode_menu();
if (!set_mode(mode))
break;
printf("Undefined video mode number: %x\n", mode);
mode = ASK_VGA;
}
boot_params.hdr.vid_mode = mode;
vesa_store_edid();
store_mode_params();
if (do_restore)
restore_screen();
}
| linux-master | arch/x86/boot/video.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* -----------------------------------------------------------------------
*
* Copyright 2009 Intel Corporation; author H. Peter Anvin
*
* ----------------------------------------------------------------------- */
/*
* Simple helper function for initializing a register set.
*
* Note that this sets EFLAGS_CF in the input register set; this
* makes it easier to catch functions which do nothing but don't
* explicitly set CF.
*/
#include "boot.h"
#include "string.h"
void initregs(struct biosregs *reg)
{
memset(reg, 0, sizeof(*reg));
reg->eflags |= X86_EFLAGS_CF;
reg->ds = ds();
reg->es = ds();
reg->fs = fs();
reg->gs = gs();
}
| linux-master | arch/x86/boot/regs.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* ----------------------------------------------------------------------- *
*
* Copyright 2008 rPath, Inc. - All Rights Reserved
*
* ----------------------------------------------------------------------- */
/*
* This is a host program to preprocess the CPU strings into a
* compact format suitable for the setup code.
*/
#include <stdio.h>
#include "../include/asm/required-features.h"
#include "../include/asm/disabled-features.h"
#include "../include/asm/cpufeatures.h"
#include "../include/asm/vmxfeatures.h"
#include "../kernel/cpu/capflags.c"
int main(void)
{
int i, j;
const char *str;
printf("static const char x86_cap_strs[] =\n");
for (i = 0; i < NCAPINTS; i++) {
for (j = 0; j < 32; j++) {
str = x86_cap_flags[i*32+j];
if (i == NCAPINTS-1 && j == 31) {
/* The last entry must be unconditional; this
also consumes the compiler-added null
character */
if (!str)
str = "";
printf("\t\"\\x%02x\\x%02x\"\"%s\"\n",
i, j, str);
} else if (str) {
printf("#if REQUIRED_MASK%d & (1 << %d)\n"
"\t\"\\x%02x\\x%02x\"\"%s\\0\"\n"
"#endif\n",
i, j, i, j, str);
}
}
}
printf("\t;\n");
return 0;
}
| linux-master | arch/x86/boot/mkcpustr.c |
// SPDX-License-Identifier: GPL-2.0-only
/* -*- linux-c -*- ------------------------------------------------------- *
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright 2007 rPath, Inc. - All Rights Reserved
*
* ----------------------------------------------------------------------- */
/*
* Very basic string functions
*/
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/limits.h>
#include <asm/asm.h>
#include "ctype.h"
#include "string.h"
#define KSTRTOX_OVERFLOW (1U << 31)
/*
* Undef these macros so that the functions that we provide
* here will have the correct names regardless of how string.h
* may have chosen to #define them.
*/
#undef memcpy
#undef memset
#undef memcmp
int memcmp(const void *s1, const void *s2, size_t len)
{
bool diff;
asm("repe; cmpsb" CC_SET(nz)
: CC_OUT(nz) (diff), "+D" (s1), "+S" (s2), "+c" (len));
return diff;
}
/*
* Clang may lower `memcmp == 0` to `bcmp == 0`.
*/
int bcmp(const void *s1, const void *s2, size_t len)
{
return memcmp(s1, s2, len);
}
int strcmp(const char *str1, const char *str2)
{
const unsigned char *s1 = (const unsigned char *)str1;
const unsigned char *s2 = (const unsigned char *)str2;
int delta = 0;
while (*s1 || *s2) {
delta = *s1 - *s2;
if (delta)
return delta;
s1++;
s2++;
}
return 0;
}
int strncmp(const char *cs, const char *ct, size_t count)
{
unsigned char c1, c2;
while (count) {
c1 = *cs++;
c2 = *ct++;
if (c1 != c2)
return c1 < c2 ? -1 : 1;
if (!c1)
break;
count--;
}
return 0;
}
size_t strnlen(const char *s, size_t maxlen)
{
const char *es = s;
while (*es && maxlen) {
es++;
maxlen--;
}
return (es - s);
}
unsigned int atou(const char *s)
{
unsigned int i = 0;
while (isdigit(*s))
i = i * 10 + (*s++ - '0');
return i;
}
/* Works only for digits and letters, but small and fast */
#define TOLOWER(x) ((x) | 0x20)
static unsigned int simple_guess_base(const char *cp)
{
if (cp[0] == '0') {
if (TOLOWER(cp[1]) == 'x' && isxdigit(cp[2]))
return 16;
else
return 8;
} else {
return 10;
}
}
/**
* simple_strtoull - convert a string to an unsigned long long
* @cp: The start of the string
* @endp: A pointer to the end of the parsed string will be placed here
* @base: The number base to use
*/
unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base)
{
unsigned long long result = 0;
if (!base)
base = simple_guess_base(cp);
if (base == 16 && cp[0] == '0' && TOLOWER(cp[1]) == 'x')
cp += 2;
while (isxdigit(*cp)) {
unsigned int value;
value = isdigit(*cp) ? *cp - '0' : TOLOWER(*cp) - 'a' + 10;
if (value >= base)
break;
result = result * base + value;
cp++;
}
if (endp)
*endp = (char *)cp;
return result;
}
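/*
 * Illustrative usage (not part of the original file; the function name is
 * an assumption): with base 0 the radix is guessed from the prefix via
 * simple_guess_base(), so each of these calls yields 255.
 */
static unsigned long long strtoull_example(void)
{
char *end;
unsigned long long a = simple_strtoull("255", &end, 0); /* decimal */
unsigned long long b = simple_strtoull("0xff", &end, 0); /* hex */
unsigned long long c = simple_strtoull("0377", &end, 0); /* octal */
return a + b + c; /* 765 */
}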
long simple_strtol(const char *cp, char **endp, unsigned int base)
{
if (*cp == '-')
return -simple_strtoull(cp + 1, endp, base);
return simple_strtoull(cp, endp, base);
}
/**
* strlen - Find the length of a string
* @s: The string to be sized
*/
size_t strlen(const char *s)
{
const char *sc;
for (sc = s; *sc != '\0'; ++sc)
/* nothing */;
return sc - s;
}
/**
* strstr - Find the first substring in a %NUL terminated string
* @s1: The string to be searched
* @s2: The string to search for
*/
char *strstr(const char *s1, const char *s2)
{
size_t l1, l2;
l2 = strlen(s2);
if (!l2)
return (char *)s1;
l1 = strlen(s1);
while (l1 >= l2) {
l1--;
if (!memcmp(s1, s2, l2))
return (char *)s1;
s1++;
}
return NULL;
}
/**
* strchr - Find the first occurrence of the character c in the string s.
* @s: the string to be searched
* @c: the character to search for
*/
char *strchr(const char *s, int c)
{
while (*s != (char)c)
if (*s++ == '\0')
return NULL;
return (char *)s;
}
static inline u64 __div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
union {
u64 v64;
u32 v32[2];
} d = { dividend };
u32 upper;
upper = d.v32[1];
d.v32[1] = 0;
if (upper >= divisor) {
d.v32[1] = upper / divisor;
upper %= divisor;
}
asm ("divl %2" : "=a" (d.v32[0]), "=d" (*remainder) :
"rm" (divisor), "0" (d.v32[0]), "1" (upper));
return d.v64;
}
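/*
 * Illustrative note (not in the original file): the helper above keeps
 * each "divl" in range by pre-dividing the high 32 bits when they could
 * overflow, then dividing remainder:low. E.g. 0x100000005 / 2: the high
 * word 1 is less than 2, so the pre-division is skipped and the single
 * divl divides 0x100000005 directly, giving quotient 0x80000002 and
 * remainder 1.
 */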
static inline u64 __div_u64(u64 dividend, u32 divisor)
{
u32 remainder;
return __div_u64_rem(dividend, divisor, &remainder);
}
static inline char _tolower(const char c)
{
return c | 0x20;
}
static const char *_parse_integer_fixup_radix(const char *s, unsigned int *base)
{
if (*base == 0) {
if (s[0] == '0') {
if (_tolower(s[1]) == 'x' && isxdigit(s[2]))
*base = 16;
else
*base = 8;
} else
*base = 10;
}
if (*base == 16 && s[0] == '0' && _tolower(s[1]) == 'x')
s += 2;
return s;
}
/*
* Convert a non-negative integer string representation in an explicitly
* given radix to an integer.
* Returns the number of characters consumed, possibly or'ed with the
* KSTRTOX_OVERFLOW bit.
* If overflow occurs, the (incorrect) result integer is still returned.
*
* Don't you dare use this function.
*/
static unsigned int _parse_integer(const char *s,
unsigned int base,
unsigned long long *p)
{
unsigned long long res;
unsigned int rv;
res = 0;
rv = 0;
while (1) {
unsigned int c = *s;
unsigned int lc = c | 0x20; /* don't tolower() this line */
unsigned int val;
if ('0' <= c && c <= '9')
val = c - '0';
else if ('a' <= lc && lc <= 'f')
val = lc - 'a' + 10;
else
break;
if (val >= base)
break;
/*
* Check for overflow only if we are within range of
* it in the max base we support (16)
*/
if (unlikely(res & (~0ull << 60))) {
if (res > __div_u64(ULLONG_MAX - val, base))
rv |= KSTRTOX_OVERFLOW;
}
res = res * base + val;
rv++;
s++;
}
*p = res;
return rv;
}
static int _kstrtoull(const char *s, unsigned int base, unsigned long long *res)
{
unsigned long long _res;
unsigned int rv;
s = _parse_integer_fixup_radix(s, &base);
rv = _parse_integer(s, base, &_res);
if (rv & KSTRTOX_OVERFLOW)
return -ERANGE;
if (rv == 0)
return -EINVAL;
s += rv;
if (*s == '\n')
s++;
if (*s)
return -EINVAL;
*res = _res;
return 0;
}
/**
* kstrtoull - convert a string to an unsigned long long
* @s: The start of the string. The string must be null-terminated, and may also
* include a single newline before its terminating null. The first character
* may also be a plus sign, but not a minus sign.
* @base: The number base to use. The maximum supported base is 16. If base is
* given as 0, then the base of the string is automatically detected with the
* conventional semantics - If it begins with 0x the number will be parsed as a
* hexadecimal (case insensitive), if it otherwise begins with 0, it will be
* parsed as an octal number. Otherwise it will be parsed as a decimal.
* @res: Where to write the result of the conversion on success.
*
* Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
* Used as a replacement for the obsolete simple_strtoull. Return code must
* be checked.
*/
int kstrtoull(const char *s, unsigned int base, unsigned long long *res)
{
if (s[0] == '+')
s++;
return _kstrtoull(s, base, res);
}
static int _kstrtoul(const char *s, unsigned int base, unsigned long *res)
{
unsigned long long tmp;
int rv;
rv = kstrtoull(s, base, &tmp);
if (rv < 0)
return rv;
if (tmp != (unsigned long)tmp)
return -ERANGE;
*res = tmp;
return 0;
}
/**
* boot_kstrtoul - convert a string to an unsigned long
* @s: The start of the string. The string must be null-terminated, and may also
* include a single newline before its terminating null. The first character
* may also be a plus sign, but not a minus sign.
* @base: The number base to use. The maximum supported base is 16. If base is
* given as 0, then the base of the string is automatically detected with the
* conventional semantics - If it begins with 0x the number will be parsed as a
* hexadecimal (case insensitive), if it otherwise begins with 0, it will be
* parsed as an octal number. Otherwise it will be parsed as a decimal.
* @res: Where to write the result of the conversion on success.
*
* Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
* Used as a replacement for the obsolete simple_strtoull.
*/
int boot_kstrtoul(const char *s, unsigned int base, unsigned long *res)
{
/*
* We want to shortcut the function call, but
* __builtin_types_compatible_p(unsigned long, unsigned long long) = 0.
*/
if (sizeof(unsigned long) == sizeof(unsigned long long) &&
__alignof__(unsigned long) == __alignof__(unsigned long long))
return kstrtoull(s, base, (unsigned long long *)res);
else
return _kstrtoul(s, base, res);
}
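/*
 * Illustrative usage (not part of the original file; the function name is
 * an assumption): unlike simple_strtoull(), the kstrto* helpers reject
 * trailing junk, so "12ab" fails with -EINVAL while "12" parses cleanly.
 */
static int parse_ul_example(const char *s, unsigned long *val)
{
return boot_kstrtoul(s, 10, val); /* 0, -EINVAL or -ERANGE */
}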
| linux-master | arch/x86/boot/string.c |
// SPDX-License-Identifier: GPL-2.0-only
/* -*- linux-c -*- ------------------------------------------------------- *
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright 2007 rPath, Inc. - All Rights Reserved
* Copyright 2009 Intel Corporation; author H. Peter Anvin
*
* ----------------------------------------------------------------------- */
/*
* VESA text modes
*/
#include "boot.h"
#include "video.h"
#include "vesa.h"
#include "string.h"
/* VESA information */
static struct vesa_general_info vginfo;
static struct vesa_mode_info vminfo;
static __videocard video_vesa;
#ifndef _WAKEUP
static void vesa_store_mode_params_graphics(void);
#else /* _WAKEUP */
static inline void vesa_store_mode_params_graphics(void) {}
#endif /* _WAKEUP */
static int vesa_probe(void)
{
struct biosregs ireg, oreg;
u16 mode;
addr_t mode_ptr;
struct mode_info *mi;
int nmodes = 0;
video_vesa.modes = GET_HEAP(struct mode_info, 0);
initregs(&ireg);
ireg.ax = 0x4f00;
ireg.di = (size_t)&vginfo;
intcall(0x10, &ireg, &oreg);
if (oreg.ax != 0x004f ||
vginfo.signature != VESA_MAGIC ||
vginfo.version < 0x0102)
return 0; /* Not present */
set_fs(vginfo.video_mode_ptr.seg);
mode_ptr = vginfo.video_mode_ptr.off;
while ((mode = rdfs16(mode_ptr)) != 0xffff) {
mode_ptr += 2;
if (!heap_free(sizeof(struct mode_info)))
break; /* Heap full, can't save mode info */
if (mode & ~0x1ff)
continue;
memset(&vminfo, 0, sizeof(vminfo)); /* Just in case... */
ireg.ax = 0x4f01;
ireg.cx = mode;
ireg.di = (size_t)&vminfo;
intcall(0x10, &ireg, &oreg);
if (oreg.ax != 0x004f)
continue;
if ((vminfo.mode_attr & 0x15) == 0x05) {
/* Text Mode, TTY BIOS supported,
supported by hardware */
mi = GET_HEAP(struct mode_info, 1);
mi->mode = mode + VIDEO_FIRST_VESA;
mi->depth = 0; /* text */
mi->x = vminfo.h_res;
mi->y = vminfo.v_res;
nmodes++;
} else if ((vminfo.mode_attr & 0x99) == 0x99 &&
(vminfo.memory_layout == 4 ||
vminfo.memory_layout == 6) &&
vminfo.memory_planes == 1) {
#ifdef CONFIG_BOOT_VESA_SUPPORT
/* Graphics mode, color, linear frame buffer
supported. Only register the mode, however,
if a framebuffer is configured; otherwise
the user will be left without a screen. */
mi = GET_HEAP(struct mode_info, 1);
mi->mode = mode + VIDEO_FIRST_VESA;
mi->depth = vminfo.bpp;
mi->x = vminfo.h_res;
mi->y = vminfo.v_res;
nmodes++;
#endif
}
}
return nmodes;
}
static int vesa_set_mode(struct mode_info *mode)
{
struct biosregs ireg, oreg;
int is_graphic;
u16 vesa_mode = mode->mode - VIDEO_FIRST_VESA;
memset(&vminfo, 0, sizeof(vminfo)); /* Just in case... */
initregs(&ireg);
ireg.ax = 0x4f01;
ireg.cx = vesa_mode;
ireg.di = (size_t)&vminfo;
intcall(0x10, &ireg, &oreg);
if (oreg.ax != 0x004f)
return -1;
if ((vminfo.mode_attr & 0x15) == 0x05) {
/* It's a supported text mode */
is_graphic = 0;
#ifdef CONFIG_BOOT_VESA_SUPPORT
} else if ((vminfo.mode_attr & 0x99) == 0x99) {
/* It's a graphics mode with linear frame buffer */
is_graphic = 1;
vesa_mode |= 0x4000; /* Request linear frame buffer */
#endif
} else {
return -1; /* Invalid mode */
}
initregs(&ireg);
ireg.ax = 0x4f02;
ireg.bx = vesa_mode;
intcall(0x10, &ireg, &oreg);
if (oreg.ax != 0x004f)
return -1;
graphic_mode = is_graphic;
if (!is_graphic) {
/* Text mode */
force_x = mode->x;
force_y = mode->y;
do_restore = 1;
} else {
/* Graphics mode */
vesa_store_mode_params_graphics();
}
return 0;
}
#ifndef _WAKEUP
/* Switch DAC to 8-bit mode */
static void vesa_dac_set_8bits(void)
{
struct biosregs ireg, oreg;
u8 dac_size = 6;
/* If possible, switch the DAC to 8-bit mode */
if (vginfo.capabilities & 1) {
initregs(&ireg);
ireg.ax = 0x4f08;
ireg.bh = 0x08;
intcall(0x10, &ireg, &oreg);
if (oreg.ax == 0x004f)
dac_size = oreg.bh;
}
/* Set the color sizes to the DAC size, and offsets to 0 */
boot_params.screen_info.red_size = dac_size;
boot_params.screen_info.green_size = dac_size;
boot_params.screen_info.blue_size = dac_size;
boot_params.screen_info.rsvd_size = dac_size;
boot_params.screen_info.red_pos = 0;
boot_params.screen_info.green_pos = 0;
boot_params.screen_info.blue_pos = 0;
boot_params.screen_info.rsvd_pos = 0;
}
/* Save the VESA protected mode info */
static void vesa_store_pm_info(void)
{
struct biosregs ireg, oreg;
initregs(&ireg);
ireg.ax = 0x4f0a;
intcall(0x10, &ireg, &oreg);
if (oreg.ax != 0x004f)
return;
boot_params.screen_info.vesapm_seg = oreg.es;
boot_params.screen_info.vesapm_off = oreg.di;
}
/*
* Save video mode parameters for graphics mode
*/
static void vesa_store_mode_params_graphics(void)
{
/* Tell the kernel we're in VESA graphics mode */
boot_params.screen_info.orig_video_isVGA = VIDEO_TYPE_VLFB;
/* Mode parameters */
boot_params.screen_info.vesa_attributes = vminfo.mode_attr;
boot_params.screen_info.lfb_linelength = vminfo.logical_scan;
boot_params.screen_info.lfb_width = vminfo.h_res;
boot_params.screen_info.lfb_height = vminfo.v_res;
boot_params.screen_info.lfb_depth = vminfo.bpp;
boot_params.screen_info.pages = vminfo.image_planes;
boot_params.screen_info.lfb_base = vminfo.lfb_ptr;
memcpy(&boot_params.screen_info.red_size,
&vminfo.rmask, 8);
/* General parameters */
boot_params.screen_info.lfb_size = vginfo.total_memory;
if (vminfo.bpp <= 8)
vesa_dac_set_8bits();
vesa_store_pm_info();
}
/*
* Save EDID information for the kernel; this is invoked, separately,
* after mode-setting.
*/
void vesa_store_edid(void)
{
#ifdef CONFIG_FIRMWARE_EDID
struct biosregs ireg, oreg;
/* Apparently used as a nonsense token... */
memset(&boot_params.edid_info, 0x13, sizeof(boot_params.edid_info));
if (vginfo.version < 0x0200)
return; /* EDID requires VBE 2.0+ */
initregs(&ireg);
ireg.ax = 0x4f15; /* VBE DDC */
/* ireg.bx = 0x0000; */ /* Report DDC capabilities */
/* ireg.cx = 0; */ /* Controller 0 */
ireg.es = 0; /* ES:DI must be 0 by spec */
intcall(0x10, &ireg, &oreg);
if (oreg.ax != 0x004f)
return; /* No EDID */
/* BH = time in seconds to transfer EDID information */
/* BL = DDC level supported */
ireg.ax = 0x4f15; /* VBE DDC */
ireg.bx = 0x0001; /* Read EDID */
/* ireg.cx = 0; */ /* Controller 0 */
/* ireg.dx = 0; */ /* EDID block number */
ireg.es = ds();
ireg.di = (size_t)&boot_params.edid_info; /* (ES:)Pointer to block */
intcall(0x10, &ireg, &oreg);
#endif /* CONFIG_FIRMWARE_EDID */
}
#endif /* not _WAKEUP */
static __videocard video_vesa =
{
.card_name = "VESA",
.probe = vesa_probe,
.set_mode = vesa_set_mode,
.xmode_first = VIDEO_FIRST_VESA,
.xmode_n = 0x200,
};
| linux-master | arch/x86/boot/video-vesa.c |
// SPDX-License-Identifier: GPL-2.0-only
/* -*- linux-c -*- ------------------------------------------------------- *
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright 2007 rPath, Inc. - All Rights Reserved
* Copyright 2009 Intel Corporation; author H. Peter Anvin
*
* ----------------------------------------------------------------------- */
/*
* Common all-VGA modes
*/
#include "boot.h"
#include "video.h"
static struct mode_info vga_modes[] = {
{ VIDEO_80x25, 80, 25, 0 },
{ VIDEO_8POINT, 80, 50, 0 },
{ VIDEO_80x43, 80, 43, 0 },
{ VIDEO_80x28, 80, 28, 0 },
{ VIDEO_80x30, 80, 30, 0 },
{ VIDEO_80x34, 80, 34, 0 },
{ VIDEO_80x60, 80, 60, 0 },
};
static struct mode_info ega_modes[] = {
{ VIDEO_80x25, 80, 25, 0 },
{ VIDEO_8POINT, 80, 43, 0 },
};
static struct mode_info cga_modes[] = {
{ VIDEO_80x25, 80, 25, 0 },
};
static __videocard video_vga;
/* Set basic 80x25 mode */
static u8 vga_set_basic_mode(void)
{
struct biosregs ireg, oreg;
u8 mode;
initregs(&ireg);
/* Query current mode */
ireg.ax = 0x0f00;
intcall(0x10, &ireg, &oreg);
mode = oreg.al;
if (mode != 3 && mode != 7)
mode = 3;
/* Set the mode */
ireg.ax = mode; /* AH=0: set mode */
intcall(0x10, &ireg, NULL);
do_restore = 1;
return mode;
}
static void vga_set_8font(void)
{
/* Set 8x8 font - 80x43 on EGA, 80x50 on VGA */
struct biosregs ireg;
initregs(&ireg);
/* Set 8x8 font */
ireg.ax = 0x1112;
/* ireg.bl = 0; */
intcall(0x10, &ireg, NULL);
/* Use alternate print screen */
ireg.ax = 0x1200;
ireg.bl = 0x20;
intcall(0x10, &ireg, NULL);
/* Turn off cursor emulation */
ireg.ax = 0x1201;
ireg.bl = 0x34;
intcall(0x10, &ireg, NULL);
/* Cursor is scan lines 6-7 */
ireg.ax = 0x0100;
ireg.cx = 0x0607;
intcall(0x10, &ireg, NULL);
}
static void vga_set_14font(void)
{
/* Set 9x14 font - 80x28 on VGA */
struct biosregs ireg;
initregs(&ireg);
/* Set 9x14 font */
ireg.ax = 0x1111;
/* ireg.bl = 0; */
intcall(0x10, &ireg, NULL);
/* Turn off cursor emulation */
ireg.ax = 0x1201;
ireg.bl = 0x34;
intcall(0x10, &ireg, NULL);
/* Cursor is scan lines 11-12 */
ireg.ax = 0x0100;
ireg.cx = 0x0b0c;
intcall(0x10, &ireg, NULL);
}
static void vga_set_80x43(void)
{
/* Set 80x43 mode on VGA (not EGA) */
struct biosregs ireg;
initregs(&ireg);
/* Set 350 scans */
ireg.ax = 0x1201;
ireg.bl = 0x30;
intcall(0x10, &ireg, NULL);
/* Reset video mode */
ireg.ax = 0x0003;
intcall(0x10, &ireg, NULL);
vga_set_8font();
}
/* I/O address of the VGA CRTC */
u16 vga_crtc(void)
{
return (inb(0x3cc) & 1) ? 0x3d4 : 0x3b4;
}
static void vga_set_480_scanlines(void)
{
u16 crtc; /* CRTC base address */
u8 csel; /* CRTC miscellaneous output register */
crtc = vga_crtc();
out_idx(0x0c, crtc, 0x11); /* Vertical sync end, unlock CR0-7 */
out_idx(0x0b, crtc, 0x06); /* Vertical total */
out_idx(0x3e, crtc, 0x07); /* Vertical overflow */
out_idx(0xea, crtc, 0x10); /* Vertical sync start */
out_idx(0xdf, crtc, 0x12); /* Vertical display end */
out_idx(0xe7, crtc, 0x15); /* Vertical blank start */
out_idx(0x04, crtc, 0x16); /* Vertical blank end */
csel = inb(0x3cc);
csel &= 0x0d;
csel |= 0xe2;
outb(csel, 0x3c2);
}
static void vga_set_vertical_end(int lines)
{
u16 crtc; /* CRTC base address */
u8 ovfw; /* CRTC overflow register */
int end = lines-1;
crtc = vga_crtc();
ovfw = 0x3c | ((end >> (8-1)) & 0x02) | ((end >> (9-6)) & 0x40);
out_idx(ovfw, crtc, 0x07); /* Vertical overflow */
out_idx(end, crtc, 0x12); /* Vertical display end */
}
static void vga_set_80x30(void)
{
vga_set_480_scanlines();
vga_set_vertical_end(30*16);
}
static void vga_set_80x34(void)
{
vga_set_480_scanlines();
vga_set_14font();
vga_set_vertical_end(34*14);
}
static void vga_set_80x60(void)
{
vga_set_480_scanlines();
vga_set_8font();
vga_set_vertical_end(60*8);
}
static int vga_set_mode(struct mode_info *mode)
{
/* Set the basic mode */
vga_set_basic_mode();
/* Override a possibly broken BIOS */
force_x = mode->x;
force_y = mode->y;
switch (mode->mode) {
case VIDEO_80x25:
break;
case VIDEO_8POINT:
vga_set_8font();
break;
case VIDEO_80x43:
vga_set_80x43();
break;
case VIDEO_80x28:
vga_set_14font();
break;
case VIDEO_80x30:
vga_set_80x30();
break;
case VIDEO_80x34:
vga_set_80x34();
break;
case VIDEO_80x60:
vga_set_80x60();
break;
}
return 0;
}
/*
* Note: this probe includes basic information required by all
* systems. It should be executed first, by making sure
* video-vga.c is listed first in the Makefile.
*/
static int vga_probe(void)
{
static const char *card_name[] = {
"CGA/MDA/HGC", "EGA", "VGA"
};
static struct mode_info *mode_lists[] = {
cga_modes,
ega_modes,
vga_modes,
};
static int mode_count[] = {
ARRAY_SIZE(cga_modes),
ARRAY_SIZE(ega_modes),
ARRAY_SIZE(vga_modes),
};
struct biosregs ireg, oreg;
initregs(&ireg);
ireg.ax = 0x1200;
ireg.bl = 0x10; /* Check EGA/VGA */
intcall(0x10, &ireg, &oreg);
#ifndef _WAKEUP
boot_params.screen_info.orig_video_ega_bx = oreg.bx;
#endif
/* If we have MDA/CGA/HGC then BL will be unchanged at 0x10 */
if (oreg.bl != 0x10) {
/* EGA/VGA */
ireg.ax = 0x1a00;
intcall(0x10, &ireg, &oreg);
if (oreg.al == 0x1a) {
adapter = ADAPTER_VGA;
#ifndef _WAKEUP
boot_params.screen_info.orig_video_isVGA = 1;
#endif
} else {
adapter = ADAPTER_EGA;
}
} else {
adapter = ADAPTER_CGA;
}
video_vga.modes = mode_lists[adapter];
video_vga.card_name = card_name[adapter];
return mode_count[adapter];
}
static __videocard video_vga = {
.card_name = "VGA",
.probe = vga_probe,
.set_mode = vga_set_mode,
};
| linux-master | arch/x86/boot/video-vga.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Serial port routines for use during early boot reporting. This code is
* included from both the compressed kernel and the regular kernel.
*/
#include "boot.h"
#define DEFAULT_SERIAL_PORT 0x3f8 /* ttyS0 */
#define DLAB 0x80
#define TXR 0 /* Transmit register (WRITE) */
#define RXR 0 /* Receive register (READ) */
#define IER 1 /* Interrupt Enable */
#define IIR 2 /* Interrupt ID */
#define FCR 2 /* FIFO control */
#define LCR 3 /* Line control */
#define MCR 4 /* Modem control */
#define LSR 5 /* Line Status */
#define MSR 6 /* Modem Status */
#define DLL 0 /* Divisor Latch Low */
#define DLH 1 /* Divisor latch High */
#define DEFAULT_BAUD 9600
static void early_serial_init(int port, int baud)
{
unsigned char c;
unsigned divisor;
outb(0x3, port + LCR); /* 8n1 */
outb(0, port + IER); /* no interrupt */
outb(0, port + FCR); /* no fifo */
outb(0x3, port + MCR); /* DTR + RTS */
divisor = 115200 / baud;
c = inb(port + LCR);
outb(c | DLAB, port + LCR);
outb(divisor & 0xff, port + DLL);
outb((divisor >> 8) & 0xff, port + DLH);
outb(c & ~DLAB, port + LCR);
early_serial_base = port;
}
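/*
 * Illustrative note (not in the original file): the UART divisor is
 * computed against the standard 115200 baud base clock, e.g. a 9600 baud
 * request latches 115200 / 9600 == 12 into DLL/DLH while DLAB is set in
 * LCR.
 */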
static void parse_earlyprintk(void)
{
int baud = DEFAULT_BAUD;
char arg[32];
int pos = 0;
int port = 0;
if (cmdline_find_option("earlyprintk", arg, sizeof(arg)) > 0) {
char *e;
if (!strncmp(arg, "serial", 6)) {
port = DEFAULT_SERIAL_PORT;
pos += 6;
}
if (arg[pos] == ',')
pos++;
/*
* make sure we have
* "serial,0x3f8,115200"
* "serial,ttyS0,115200"
* "ttyS0,115200"
*/
if (pos == 7 && !strncmp(arg + pos, "0x", 2)) {
port = simple_strtoull(arg + pos, &e, 16);
if (port == 0 || arg + pos == e)
port = DEFAULT_SERIAL_PORT;
else
pos = e - arg;
} else if (!strncmp(arg + pos, "ttyS", 4)) {
static const int bases[] = { 0x3f8, 0x2f8 };
int idx = 0;
/* += strlen("ttyS"); */
pos += 4;
if (arg[pos++] == '1')
idx = 1;
port = bases[idx];
}
if (arg[pos] == ',')
pos++;
baud = simple_strtoull(arg + pos, &e, 0);
if (baud == 0 || arg + pos == e)
baud = DEFAULT_BAUD;
}
if (port)
early_serial_init(port, baud);
}
#define BASE_BAUD (1843200/16)
static unsigned int probe_baud(int port)
{
unsigned char lcr, dll, dlh;
unsigned int quot;
lcr = inb(port + LCR);
outb(lcr | DLAB, port + LCR);
dll = inb(port + DLL);
dlh = inb(port + DLH);
outb(lcr, port + LCR);
quot = (dlh << 8) | dll;
return BASE_BAUD / quot;
}
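/*
* Worked example: if the BIOS left dll = 0x0c and dlh = 0x00, then
* quot = 12 and probe_baud() reports 115200 / 12 = 9600.
*/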
static void parse_console_uart8250(void)
{
char optstr[64], *options;
int baud = DEFAULT_BAUD;
int port = 0;
/*
* Handle "console=uart8250,io,0x3f8,115200n8". Note that
* cmdline_find_option() returns the last "console=" option on
* the command line, so that is the one honored here.
*/
if (cmdline_find_option("console", optstr, sizeof(optstr)) <= 0)
return;
options = optstr;
if (!strncmp(options, "uart8250,io,", 12))
port = simple_strtoull(options + 12, &options, 0);
else if (!strncmp(options, "uart,io,", 8))
port = simple_strtoull(options + 8, &options, 0);
else
return;
if (options && (options[0] == ','))
baud = simple_strtoull(options + 1, &options, 0);
else
baud = probe_baud(port);
if (port)
early_serial_init(port, baud);
}
void console_init(void)
{
parse_earlyprintk();
if (!early_serial_base)
parse_console_uart8250();
}
| linux-master | arch/x86/boot/early_serial_console.c |
// SPDX-License-Identifier: GPL-2.0-only
/* -*- linux-c -*- ------------------------------------------------------- *
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright 2007 rPath, Inc. - All Rights Reserved
*
* ----------------------------------------------------------------------- */
/*
* Prepare the machine for transition to protected mode.
*/
#include "boot.h"
#include <asm/segment.h>
/*
* Invoke the realmode switch hook if present; otherwise
* disable all interrupts.
*/
static void realmode_switch_hook(void)
{
if (boot_params.hdr.realmode_swtch) {
asm volatile("lcallw *%0"
: : "m" (boot_params.hdr.realmode_swtch)
: "eax", "ebx", "ecx", "edx");
} else {
asm volatile("cli");
outb(0x80, 0x70); /* Disable NMI */
io_delay();
}
}
/*
* Disable all interrupts at the legacy PIC.
*/
static void mask_all_interrupts(void)
{
outb(0xff, 0xa1); /* Mask all interrupts on the secondary PIC */
io_delay();
outb(0xfb, 0x21); /* Mask all but cascade on the primary PIC */
io_delay();
}
/*
* Reset IGNNE# if asserted in the FPU.
*/
static void reset_coprocessor(void)
{
outb(0, 0xf0);
io_delay();
outb(0, 0xf1);
io_delay();
}
/*
* Set up the GDT
*/
struct gdt_ptr {
u16 len;
u32 ptr;
} __attribute__((packed));
static void setup_gdt(void)
{
/* Some machines are known not to boot when the GDT is not
8-byte aligned. Intel recommends 16-byte alignment. */
static const u64 boot_gdt[] __attribute__((aligned(16))) = {
/* CS: code, read/execute, 4 GB, base 0 */
[GDT_ENTRY_BOOT_CS] = GDT_ENTRY(0xc09b, 0, 0xfffff),
/* DS: data, read/write, 4 GB, base 0 */
[GDT_ENTRY_BOOT_DS] = GDT_ENTRY(0xc093, 0, 0xfffff),
/* TSS: 32-bit tss, 104 bytes, base 4096 */
/* We only have a TSS here to keep Intel VT happy;
we don't actually use it for anything. */
[GDT_ENTRY_BOOT_TSS] = GDT_ENTRY(0x0089, 4096, 103),
};
/* Xen HVM incorrectly stores a pointer to the gdt_ptr, instead
of the gdt_ptr contents. Thus, make it static so it will
stay in memory, at least long enough that we switch to the
proper kernel GDT. */
static struct gdt_ptr gdt;
gdt.len = sizeof(boot_gdt)-1;
gdt.ptr = (u32)&boot_gdt + (ds() << 4);
asm volatile("lgdtl %0" : : "m" (gdt));
}
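/*
* Descriptor math for the entries above: GDT_ENTRY(0xc09b, 0, 0xfffff)
* packs access byte 0x9b (present, ring 0, code, read/execute) with
* flags nibble 0xc (4K granularity, 32-bit), so a limit of 0xfffff
* pages covers 0x100000 * 4 KiB = 4 GiB from base 0; 0xc093 is the
* same with a read/write data access byte (0x93).
*/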
/*
* Set up the IDT
*/
static void setup_idt(void)
{
static const struct gdt_ptr null_idt = {0, 0};
asm volatile("lidtl %0" : : "m" (null_idt));
}
/*
* Actual invocation sequence
*/
void go_to_protected_mode(void)
{
/* Hook before leaving real mode, also disables interrupts */
realmode_switch_hook();
/* Enable the A20 gate */
if (enable_a20()) {
puts("A20 gate not responding, unable to boot...\n");
die();
}
/* Reset coprocessor (IGNNE#) */
reset_coprocessor();
/* Mask all interrupts in the PIC */
mask_all_interrupts();
/* Actual transition to protected mode... */
setup_idt();
setup_gdt();
protected_mode_jump(boot_params.hdr.code32_start,
(u32)&boot_params + (ds() << 4));
}
| linux-master | arch/x86/boot/pm.c |
// SPDX-License-Identifier: GPL-2.0-only
/* -*- linux-c -*- ------------------------------------------------------- *
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright 2007 rPath, Inc. - All Rights Reserved
* Copyright 2009 Intel Corporation; author H. Peter Anvin
*
* ----------------------------------------------------------------------- */
/*
* Main module for the real-mode kernel code
*/
#include <linux/build_bug.h>
#include "boot.h"
#include "string.h"
struct boot_params boot_params __attribute__((aligned(16)));
struct port_io_ops pio_ops;
char *HEAP = _end;
char *heap_end = _end; /* Default end of heap = no heap */
/*
* Copy the header into the boot parameter block. Since this
* screws up the old-style command line protocol, adjust by
* filling in the new-style command line pointer instead.
*/
static void copy_boot_params(void)
{
struct old_cmdline {
u16 cl_magic;
u16 cl_offset;
};
const struct old_cmdline * const oldcmd =
absolute_pointer(OLD_CL_ADDRESS);
BUILD_BUG_ON(sizeof(boot_params) != 4096);
memcpy(&boot_params.hdr, &hdr, sizeof(hdr));
if (!boot_params.hdr.cmd_line_ptr &&
oldcmd->cl_magic == OLD_CL_MAGIC) {
/* Old-style command line protocol. */
u16 cmdline_seg;
/* Figure out if the command line falls in the region
of memory that an old kernel would have copied up
to 0x90000... */
if (oldcmd->cl_offset < boot_params.hdr.setup_move_size)
cmdline_seg = ds();
else
cmdline_seg = 0x9000;
boot_params.hdr.cmd_line_ptr =
(cmdline_seg << 4) + oldcmd->cl_offset;
}
}
/*
* Query the keyboard lock status as given by the BIOS, and
* set the keyboard repeat rate to maximum. It is unclear why the
* latter is done here; it may be stale code that could be removed.
*/
static void keyboard_init(void)
{
struct biosregs ireg, oreg;
initregs(&ireg);
ireg.ah = 0x02; /* Get keyboard status */
intcall(0x16, &ireg, &oreg);
boot_params.kbd_status = oreg.al;
ireg.ax = 0x0305; /* Set keyboard repeat rate */
intcall(0x16, &ireg, NULL);
}
/*
* Get Intel SpeedStep (IST) information.
*/
static void query_ist(void)
{
struct biosregs ireg, oreg;
/* Some older BIOSes apparently crash on this call, so filter
it from machines too old to have SpeedStep at all. */
if (cpu.level < 6)
return;
initregs(&ireg);
ireg.ax = 0xe980; /* IST Support */
ireg.edx = 0x47534943; /* Request value */
intcall(0x15, &ireg, &oreg);
boot_params.ist_info.signature = oreg.eax;
boot_params.ist_info.command = oreg.ebx;
boot_params.ist_info.event = oreg.ecx;
boot_params.ist_info.perf_level = oreg.edx;
}
/*
* Tell the BIOS what CPU mode we intend to run in.
*/
static void set_bios_mode(void)
{
#ifdef CONFIG_X86_64
struct biosregs ireg;
initregs(&ireg);
ireg.ax = 0xec00;
ireg.bx = 2;
intcall(0x15, &ireg, NULL);
#endif
}
static void init_heap(void)
{
char *stack_end;
if (boot_params.hdr.loadflags & CAN_USE_HEAP) {
asm("leal %P1(%%esp),%0"
: "=r" (stack_end) : "i" (-STACK_SIZE));
heap_end = (char *)
((size_t)boot_params.hdr.heap_end_ptr + 0x200);
if (heap_end > stack_end)
heap_end = stack_end;
} else {
/* Boot protocol 2.00 only, no heap available */
puts("WARNING: Ancient bootloader, some functionality "
"may be limited!\n");
}
}
void main(void)
{
init_default_io_ops();
/* First, copy the boot header into the "zeropage" */
copy_boot_params();
/* Initialize the early-boot console */
console_init();
if (cmdline_find_option_bool("debug"))
puts("early console in setup code\n");
/* End of heap check */
init_heap();
/* Make sure we have all the proper CPU support */
if (validate_cpu()) {
puts("Unable to boot - please use a kernel appropriate "
"for your CPU.\n");
die();
}
/* Tell the BIOS what CPU mode we intend to run in. */
set_bios_mode();
/* Detect memory layout */
detect_memory();
/* Set keyboard repeat rate (why?) and query the lock flags */
keyboard_init();
/* Query Intel SpeedStep (IST) information */
query_ist();
/* Query APM information */
#if defined(CONFIG_APM) || defined(CONFIG_APM_MODULE)
query_apm_bios();
#endif
/* Query EDD information */
#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
query_edd();
#endif
/* Set the video mode */
set_video();
/* Do the last things and invoke protected mode */
go_to_protected_mode();
}
| linux-master | arch/x86/boot/main.c |
// SPDX-License-Identifier: GPL-2.0-only
/* -*- linux-c -*- ------------------------------------------------------- *
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright 2007 rPath, Inc. - All Rights Reserved
* Copyright 2009 Intel Corporation; author H. Peter Anvin
*
* ----------------------------------------------------------------------- */
/*
* Standard video BIOS modes
*
* We have two options for this; silent and scanned.
*/
#include "boot.h"
#include "video.h"
static __videocard video_bios;
/* Set a conventional BIOS mode */
static int set_bios_mode(u8 mode);
static int bios_set_mode(struct mode_info *mi)
{
return set_bios_mode(mi->mode - VIDEO_FIRST_BIOS);
}
static int set_bios_mode(u8 mode)
{
struct biosregs ireg, oreg;
u8 new_mode;
initregs(&ireg);
ireg.al = mode; /* AH=0x00 Set Video Mode */
intcall(0x10, &ireg, NULL);
ireg.ah = 0x0f; /* Get Current Video Mode */
intcall(0x10, &ireg, &oreg);
do_restore = 1; /* Assume video contents were lost */
/* Not all BIOSes are clean with the top bit */
new_mode = oreg.al & 0x7f;
if (new_mode == mode)
return 0; /* Mode change OK */
#ifndef _WAKEUP
if (new_mode != boot_params.screen_info.orig_video_mode) {
/* Mode setting failed, but we didn't end up where we
started. That's bad. Try to revert to the original
video mode. */
ireg.ax = boot_params.screen_info.orig_video_mode;
intcall(0x10, &ireg, NULL);
}
#endif
return -1;
}
static int bios_probe(void)
{
u8 mode;
#ifdef _WAKEUP
u8 saved_mode = 0x03;
#else
u8 saved_mode = boot_params.screen_info.orig_video_mode;
#endif
u16 crtc;
struct mode_info *mi;
int nmodes = 0;
if (adapter != ADAPTER_EGA && adapter != ADAPTER_VGA)
return 0;
set_fs(0);
crtc = vga_crtc();
video_bios.modes = GET_HEAP(struct mode_info, 0);
for (mode = 0x14; mode <= 0x7f; mode++) {
if (!heap_free(sizeof(struct mode_info)))
break;
if (mode_defined(VIDEO_FIRST_BIOS+mode))
continue;
if (set_bios_mode(mode))
continue;
/* Try to verify that it's a text mode. */
/* Attribute Controller: verify graphics mode is disabled */
if (in_idx(0x3c0, 0x10) & 0x01)
continue;
/* Graphics Controller: verify Alpha addressing enabled */
if (in_idx(0x3ce, 0x06) & 0x01)
continue;
/* CRTC cursor location low should be zero(?) */
if (in_idx(crtc, 0x0f))
continue;
mi = GET_HEAP(struct mode_info, 1);
mi->mode = VIDEO_FIRST_BIOS+mode;
mi->depth = 0; /* text */
mi->x = rdfs16(0x44a);
mi->y = rdfs8(0x484)+1;
nmodes++;
}
set_bios_mode(saved_mode);
return nmodes;
}
static __videocard video_bios =
{
.card_name = "BIOS",
.probe = bios_probe,
.set_mode = bios_set_mode,
.unsafe = 1,
.xmode_first = VIDEO_FIRST_BIOS,
.xmode_n = 0x80,
};
| linux-master | arch/x86/boot/video-bios.c |
// SPDX-License-Identifier: GPL-2.0-only
/* -*- linux-c -*- ------------------------------------------------------- *
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright 2007 rPath, Inc. - All Rights Reserved
* Copyright 2009 Intel Corporation; author H. Peter Anvin
*
* ----------------------------------------------------------------------- */
/*
* Very simple screen and serial I/O
*/
#include "boot.h"
int early_serial_base;
#define XMTRDY 0x20
#define TXR 0 /* Transmit register (WRITE) */
#define LSR 5 /* Line Status */
/*
* These functions are in .inittext so they can be used to signal
* error during initialization.
*/
static void __section(".inittext") serial_putchar(int ch)
{
unsigned timeout = 0xffff;
while ((inb(early_serial_base + LSR) & XMTRDY) == 0 && --timeout)
cpu_relax();
outb(ch, early_serial_base + TXR);
}
static void __section(".inittext") bios_putchar(int ch)
{
struct biosregs ireg;
initregs(&ireg);
ireg.bx = 0x0007;
ireg.cx = 0x0001;
ireg.ah = 0x0e;
ireg.al = ch;
intcall(0x10, &ireg, NULL);
}
void __section(".inittext") putchar(int ch)
{
if (ch == '\n')
putchar('\r'); /* \n -> \r\n */
bios_putchar(ch);
if (early_serial_base != 0)
serial_putchar(ch);
}
void __section(".inittext") puts(const char *str)
{
while (*str)
putchar(*str++);
}
/*
* Read the CMOS clock through the BIOS, and return the
* seconds in BCD.
*/
static u8 gettime(void)
{
struct biosregs ireg, oreg;
initregs(&ireg);
ireg.ah = 0x02;
intcall(0x1a, &ireg, &oreg);
return oreg.dh;
}
/*
* Read from the keyboard
*/
int getchar(void)
{
struct biosregs ireg, oreg;
initregs(&ireg);
/* ireg.ah = 0x00; */
intcall(0x16, &ireg, &oreg);
return oreg.al;
}
static int kbd_pending(void)
{
struct biosregs ireg, oreg;
initregs(&ireg);
ireg.ah = 0x01;
intcall(0x16, &ireg, &oreg);
return !(oreg.eflags & X86_EFLAGS_ZF);
}
void kbd_flush(void)
{
for (;;) {
if (!kbd_pending())
break;
getchar();
}
}
int getchar_timeout(void)
{
int cnt = 30;
int t0, t1;
t0 = gettime();
while (cnt) {
if (kbd_pending())
return getchar();
t1 = gettime();
if (t0 != t1) {
cnt--;
t0 = t1;
}
}
return 0; /* Timeout! */
}
| linux-master | arch/x86/boot/tty.c |
// SPDX-License-Identifier: GPL-2.0-only
/* -*- linux-c -*- ------------------------------------------------------- *
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright 2007 rPath, Inc. - All Rights Reserved
*
* ----------------------------------------------------------------------- */
/*
* Kernel version string
*/
#include "boot.h"
#include <generated/utsversion.h>
#include <generated/utsrelease.h>
#include <generated/compile.h>
const char kernel_version[] =
UTS_RELEASE " (" LINUX_COMPILE_BY "@" LINUX_COMPILE_HOST ") "
UTS_VERSION;
| linux-master | arch/x86/boot/version.c |
// SPDX-License-Identifier: GPL-2.0-only
/* -*- linux-c -*- ------------------------------------------------------- *
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright 2007 rPath, Inc. - All Rights Reserved
*
* ----------------------------------------------------------------------- */
/*
* Simple command-line parser for early boot.
*/
#include "boot.h"
static inline int myisspace(u8 c)
{
return c <= ' '; /* Close enough approximation */
}
/*
* Find a non-boolean option, that is, "option=argument". In accordance
* with standard Linux practice, if this option is repeated, this returns
* the last instance on the command line.
*
* Returns the length of the argument (regardless of whether it was
* truncated to fit in the buffer), or -1 if not found.
*/
int __cmdline_find_option(unsigned long cmdline_ptr, const char *option, char *buffer, int bufsize)
{
addr_t cptr;
char c;
int len = -1;
const char *opptr = NULL;
char *bufptr = buffer;
enum {
st_wordstart, /* Start of word/after whitespace */
st_wordcmp, /* Comparing this word */
st_wordskip, /* Miscompare, skip */
st_bufcpy /* Copying this to buffer */
} state = st_wordstart;
if (!cmdline_ptr)
return -1; /* No command line */
cptr = cmdline_ptr & 0xf;
set_fs(cmdline_ptr >> 4);
while (cptr < 0x10000 && (c = rdfs8(cptr++))) {
switch (state) {
case st_wordstart:
if (myisspace(c))
break;
/* else */
state = st_wordcmp;
opptr = option;
fallthrough;
case st_wordcmp:
if (c == '=' && !*opptr) {
len = 0;
bufptr = buffer;
state = st_bufcpy;
} else if (myisspace(c)) {
state = st_wordstart;
} else if (c != *opptr++) {
state = st_wordskip;
}
break;
case st_wordskip:
if (myisspace(c))
state = st_wordstart;
break;
case st_bufcpy:
if (myisspace(c)) {
state = st_wordstart;
} else {
if (len < bufsize-1)
*bufptr++ = c;
len++;
}
break;
}
}
if (bufsize)
*bufptr = '\0';
return len;
}
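/*
* Usage sketch: on a command line of "root=/dev/sda1 quiet root=/dev/sdb1",
* looking up "root" copies the last instance, "/dev/sdb1", into the
* buffer and returns its length, 9.
*/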
/*
* Find a boolean option (like quiet,noapic,nosmp....)
*
* Returns the position of that option (counting from 1),
* or 0 if not found.
*/
int __cmdline_find_option_bool(unsigned long cmdline_ptr, const char *option)
{
addr_t cptr;
char c;
int pos = 0, wstart = 0;
const char *opptr = NULL;
enum {
st_wordstart, /* Start of word/after whitespace */
st_wordcmp, /* Comparing this word */
st_wordskip, /* Miscompare, skip */
} state = st_wordstart;
if (!cmdline_ptr)
return -1; /* No command line */
cptr = cmdline_ptr & 0xf;
set_fs(cmdline_ptr >> 4);
while (cptr < 0x10000) {
c = rdfs8(cptr++);
pos++;
switch (state) {
case st_wordstart:
if (!c)
return 0;
else if (myisspace(c))
break;
state = st_wordcmp;
opptr = option;
wstart = pos;
fallthrough;
case st_wordcmp:
if (!*opptr) {
if (!c || myisspace(c))
return wstart;
else
state = st_wordskip;
} else if (!c) {
return 0;
} else if (c != *opptr++) {
state = st_wordskip;
}
break;
case st_wordskip:
if (!c)
return 0;
else if (myisspace(c))
state = st_wordstart;
break;
}
}
return 0; /* Buffer overrun */
}
| linux-master | arch/x86/boot/cmdline.c |
// SPDX-License-Identifier: GPL-2.0-only
/* -*- linux-c -*- ------------------------------------------------------- *
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright 2007-2008 rPath, Inc. - All Rights Reserved
* Copyright 2009 Intel Corporation; author H. Peter Anvin
*
* ----------------------------------------------------------------------- */
/*
* Enable A20 gate (return -1 on failure)
*/
#include "boot.h"
#define MAX_8042_LOOPS 100000
#define MAX_8042_FF 32
static int empty_8042(void)
{
u8 status;
int loops = MAX_8042_LOOPS;
int ffs = MAX_8042_FF;
while (loops--) {
io_delay();
status = inb(0x64);
if (status == 0xff) {
/* FF is a plausible, but very unlikely status */
if (!--ffs)
return -1; /* Assume no KBC present */
}
if (status & 1) {
/* Read and discard input data */
io_delay();
(void)inb(0x60);
} else if (!(status & 2)) {
/* Buffers empty, finished! */
return 0;
}
}
return -1;
}
/* Returns nonzero if the A20 line is enabled. The memory address
used as a test is the int $0x80 vector, which should be safe. */
#define A20_TEST_ADDR (4*0x80)
#define A20_TEST_SHORT 32
#define A20_TEST_LONG 2097152 /* 2^21 */
static int a20_test(int loops)
{
int ok = 0;
int saved, ctr;
set_fs(0x0000);
set_gs(0xffff);
saved = ctr = rdfs32(A20_TEST_ADDR);
while (loops--) {
wrfs32(++ctr, A20_TEST_ADDR);
io_delay(); /* Serialize and make delay constant */
ok = rdgs32(A20_TEST_ADDR+0x10) ^ ctr;
if (ok)
break;
}
wrfs32(saved, A20_TEST_ADDR);
return ok;
}
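/*
* Why this works: fs:A20_TEST_ADDR is linear 0x0000*16 + 0x200 = 0x000200,
* while gs:(A20_TEST_ADDR+0x10) is 0xffff*16 + 0x210 = 0x100200. The two
* differ only in address bit 20, so with A20 masked they alias the same
* physical byte and the XOR above reads back zero.
*/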
/* Quick test to see if A20 is already enabled */
static int a20_test_short(void)
{
return a20_test(A20_TEST_SHORT);
}
/* Longer test that actually waits for A20 to come on line; this
is useful when dealing with the KBC or other slow external circuitry. */
static int a20_test_long(void)
{
return a20_test(A20_TEST_LONG);
}
static void enable_a20_bios(void)
{
struct biosregs ireg;
initregs(&ireg);
ireg.ax = 0x2401;
intcall(0x15, &ireg, NULL);
}
static void enable_a20_kbc(void)
{
empty_8042();
outb(0xd1, 0x64); /* Command write */
empty_8042();
outb(0xdf, 0x60); /* A20 on */
empty_8042();
outb(0xff, 0x64); /* Null command, but UHCI wants it */
empty_8042();
}
static void enable_a20_fast(void)
{
u8 port_a;
port_a = inb(0x92); /* Configuration port A */
port_a |= 0x02; /* Enable A20 */
port_a &= ~0x01; /* Do not reset machine */
outb(port_a, 0x92);
}
/*
* Actual routine to enable A20; return 0 on ok, -1 on failure
*/
#define A20_ENABLE_LOOPS 255 /* Number of times to try */
int enable_a20(void)
{
int loops = A20_ENABLE_LOOPS;
int kbc_err;
while (loops--) {
/* First, check to see if A20 is already enabled
(legacy free, etc.) */
if (a20_test_short())
return 0;
/* Next, try the BIOS (INT 0x15, AX=0x2401) */
enable_a20_bios();
if (a20_test_short())
return 0;
/* Try enabling A20 through the keyboard controller */
kbc_err = empty_8042();
if (a20_test_short())
return 0; /* BIOS worked, but with delayed reaction */
if (!kbc_err) {
enable_a20_kbc();
if (a20_test_long())
return 0;
}
/* Finally, try enabling the "fast A20 gate" */
enable_a20_fast();
if (a20_test_long())
return 0;
}
return -1;
}
| linux-master | arch/x86/boot/a20.c |
// SPDX-License-Identifier: GPL-2.0-only
/* -*- linux-c -*- ------------------------------------------------------- *
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright 2007 rPath, Inc. - All Rights Reserved
*
* ----------------------------------------------------------------------- */
/*
* Check for obligatory CPU features and abort if the features are not
* present. This code should be compilable as 16-, 32- or 64-bit
* code, so be very careful with types and inline assembly.
*
* This code should not contain any messages; that requires an
* additional wrapper.
*
* As written, this code is not safe for inclusion into the kernel
* proper (after FPU initialization, in particular).
*/
#ifdef _SETUP
# include "boot.h"
#endif
#include <linux/types.h>
#include <asm/intel-family.h>
#include <asm/processor-flags.h>
#include <asm/required-features.h>
#include <asm/msr-index.h>
#include "string.h"
#include "msr.h"
static u32 err_flags[NCAPINTS];
static const int req_level = CONFIG_X86_MINIMUM_CPU_FAMILY;
static const u32 req_flags[NCAPINTS] =
{
REQUIRED_MASK0,
REQUIRED_MASK1,
0, /* REQUIRED_MASK2 not implemented in this file */
0, /* REQUIRED_MASK3 not implemented in this file */
REQUIRED_MASK4,
0, /* REQUIRED_MASK5 not implemented in this file */
REQUIRED_MASK6,
0, /* REQUIRED_MASK7 not implemented in this file */
0, /* REQUIRED_MASK8 not implemented in this file */
0, /* REQUIRED_MASK9 not implemented in this file */
0, /* REQUIRED_MASK10 not implemented in this file */
0, /* REQUIRED_MASK11 not implemented in this file */
0, /* REQUIRED_MASK12 not implemented in this file */
0, /* REQUIRED_MASK13 not implemented in this file */
0, /* REQUIRED_MASK14 not implemented in this file */
0, /* REQUIRED_MASK15 not implemented in this file */
REQUIRED_MASK16,
};
#define A32(a, b, c, d) (((d) << 24)+((c) << 16)+((b) << 8)+(a))
static int is_amd(void)
{
return cpu_vendor[0] == A32('A', 'u', 't', 'h') &&
cpu_vendor[1] == A32('e', 'n', 't', 'i') &&
cpu_vendor[2] == A32('c', 'A', 'M', 'D');
}
static int is_centaur(void)
{
return cpu_vendor[0] == A32('C', 'e', 'n', 't') &&
cpu_vendor[1] == A32('a', 'u', 'r', 'H') &&
cpu_vendor[2] == A32('a', 'u', 'l', 's');
}
static int is_transmeta(void)
{
return cpu_vendor[0] == A32('G', 'e', 'n', 'u') &&
cpu_vendor[1] == A32('i', 'n', 'e', 'T') &&
cpu_vendor[2] == A32('M', 'x', '8', '6');
}
static int is_intel(void)
{
return cpu_vendor[0] == A32('G', 'e', 'n', 'u') &&
cpu_vendor[1] == A32('i', 'n', 'e', 'I') &&
cpu_vendor[2] == A32('n', 't', 'e', 'l');
}
/* Returns a bitmask of the feature words that have error bits set */
static int check_cpuflags(void)
{
u32 err;
int i;
err = 0;
for (i = 0; i < NCAPINTS; i++) {
err_flags[i] = req_flags[i] & ~cpu.flags[i];
if (err_flags[i])
err |= 1 << i;
}
return err;
}
/*
* Returns -1 on error.
*
* *cpu_level is set to the current CPU level; *req_level to the required
* level. x86-64 is considered level 64 for this purpose.
*
* *err_flags_ptr is set to the flags error array if there are flags missing.
*/
int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
{
int err;
memset(&cpu.flags, 0, sizeof(cpu.flags));
cpu.level = 3;
if (has_eflag(X86_EFLAGS_AC))
cpu.level = 4;
get_cpuflags();
err = check_cpuflags();
if (test_bit(X86_FEATURE_LM, cpu.flags))
cpu.level = 64;
if (err == 0x01 &&
!(err_flags[0] &
~((1 << X86_FEATURE_XMM)|(1 << X86_FEATURE_XMM2))) &&
is_amd()) {
/* If this is an AMD and we're only missing SSE+SSE2, try to
turn them on */
struct msr m;
boot_rdmsr(MSR_K7_HWCR, &m);
m.l &= ~(1 << 15);
boot_wrmsr(MSR_K7_HWCR, &m);
get_cpuflags(); /* Make sure it really did something */
err = check_cpuflags();
} else if (err == 0x01 &&
!(err_flags[0] & ~(1 << X86_FEATURE_CX8)) &&
is_centaur() && cpu.model >= 6) {
/* If this is a VIA C3, we might have to enable CX8
explicitly */
struct msr m;
boot_rdmsr(MSR_VIA_FCR, &m);
m.l |= (1 << 1) | (1 << 7);
boot_wrmsr(MSR_VIA_FCR, &m);
set_bit(X86_FEATURE_CX8, cpu.flags);
err = check_cpuflags();
} else if (err == 0x01 && is_transmeta()) {
/* Transmeta might have masked feature bits in word 0 */
struct msr m, m_tmp;
u32 level = 1;
boot_rdmsr(0x80860004, &m);
m_tmp = m;
m_tmp.l = ~0;
boot_wrmsr(0x80860004, &m_tmp);
asm("cpuid"
: "+a" (level), "=d" (cpu.flags[0])
: : "ecx", "ebx");
boot_wrmsr(0x80860004, &m);
err = check_cpuflags();
} else if (err == 0x01 &&
!(err_flags[0] & ~(1 << X86_FEATURE_PAE)) &&
is_intel() && cpu.level == 6 &&
(cpu.model == 9 || cpu.model == 13)) {
/* PAE is disabled on this Pentium M but can be forced */
if (cmdline_find_option_bool("forcepae")) {
puts("WARNING: Forcing PAE in CPU flags\n");
set_bit(X86_FEATURE_PAE, cpu.flags);
err = check_cpuflags();
}
else {
puts("WARNING: PAE disabled. Use parameter 'forcepae' to enable at your own risk!\n");
}
}
if (!err)
err = check_knl_erratum();
if (err_flags_ptr)
*err_flags_ptr = err ? err_flags : NULL;
if (cpu_level_ptr)
*cpu_level_ptr = cpu.level;
if (req_level_ptr)
*req_level_ptr = req_level;
return (cpu.level < req_level || err) ? -1 : 0;
}
int check_knl_erratum(void)
{
/*
* First check for the affected model/family:
*/
if (!is_intel() ||
cpu.family != 6 ||
cpu.model != INTEL_FAM6_XEON_PHI_KNL)
return 0;
/*
* This erratum affects the Accessed/Dirty bits, and can
* cause stray bits to be set in !Present PTEs. We have
* enough bits in our 64-bit PTEs (which we have on real
* 64-bit mode or PAE) to avoid using these troublesome
* bits. But, we do not have enough space in our 32-bit
* PTEs. So, refuse to run on 32-bit non-PAE kernels.
*/
if (IS_ENABLED(CONFIG_X86_64) || IS_ENABLED(CONFIG_X86_PAE))
return 0;
puts("This 32-bit kernel can not run on this Xeon Phi x200\n"
"processor due to a processor erratum. Use a 64-bit\n"
"kernel, or enable PAE in this 32-bit kernel.\n\n");
return -1;
}
| linux-master | arch/x86/boot/cpucheck.c |
// SPDX-License-Identifier: GPL-2.0-only
/* -*- linux-c -*- ------------------------------------------------------- *
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright 2007 rPath, Inc. - All Rights Reserved
* Copyright 2009 Intel Corporation; author H. Peter Anvin
*
* ----------------------------------------------------------------------- */
/*
* Get EDD BIOS disk information
*/
#include "boot.h"
#include <linux/edd.h>
#include "string.h"
#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
/*
* Read the MBR (first sector) from a specific device.
*/
static int read_mbr(u8 devno, void *buf)
{
struct biosregs ireg, oreg;
initregs(&ireg);
ireg.ax = 0x0201; /* Legacy Read, one sector */
ireg.cx = 0x0001; /* Sector 0-0-1 */
ireg.dl = devno;
ireg.bx = (size_t)buf;
intcall(0x13, &ireg, &oreg);
return -(oreg.eflags & X86_EFLAGS_CF); /* 0 or -1 */
}
static int read_mbr_sig(u8 devno, struct edd_info *ei, u32 *mbrsig)
{
int sector_size;
char *mbrbuf_ptr, *mbrbuf_end;
u32 buf_base, mbr_base;
extern char _end[];
u16 mbr_magic;
sector_size = ei->params.bytes_per_sector;
if (!sector_size)
sector_size = 512; /* Best available guess */
/* Produce a naturally aligned buffer on the heap */
buf_base = (ds() << 4) + (u32)&_end;
mbr_base = (buf_base+sector_size-1) & ~(sector_size-1);
mbrbuf_ptr = _end + (mbr_base-buf_base);
mbrbuf_end = mbrbuf_ptr + sector_size;
/* Make sure we actually have space on the heap... */
if (!(boot_params.hdr.loadflags & CAN_USE_HEAP))
return -1;
if (mbrbuf_end > (char *)(size_t)boot_params.hdr.heap_end_ptr)
return -1;
memset(mbrbuf_ptr, 0, sector_size);
if (read_mbr(devno, mbrbuf_ptr))
return -1;
*mbrsig = *(u32 *)&mbrbuf_ptr[EDD_MBR_SIG_OFFSET];
mbr_magic = *(u16 *)&mbrbuf_ptr[510];
/* check for valid MBR magic */
return mbr_magic == 0xAA55 ? 0 : -1;
}
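/*
* Layout note for the offsets above: EDD_MBR_SIG_OFFSET is 0x1b8 (440),
* where the 32-bit NT disk signature lives; the 0xAA55 boot-sector
* magic occupies the last two bytes of the 512-byte MBR, at offset 510.
*/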
static int get_edd_info(u8 devno, struct edd_info *ei)
{
struct biosregs ireg, oreg;
memset(ei, 0, sizeof(*ei));
/* Check Extensions Present */
initregs(&ireg);
ireg.ah = 0x41;
ireg.bx = EDDMAGIC1;
ireg.dl = devno;
intcall(0x13, &ireg, &oreg);
if (oreg.eflags & X86_EFLAGS_CF)
return -1; /* No extended information */
if (oreg.bx != EDDMAGIC2)
return -1;
ei->device = devno;
ei->version = oreg.ah; /* EDD version number */
ei->interface_support = oreg.cx; /* EDD functionality subsets */
/* Extended Get Device Parameters */
ei->params.length = sizeof(ei->params);
ireg.ah = 0x48;
ireg.si = (size_t)&ei->params;
intcall(0x13, &ireg, &oreg);
/* Get legacy CHS parameters */
/* Ralf Brown recommends setting ES:DI to 0:0 */
ireg.ah = 0x08;
ireg.es = 0;
intcall(0x13, &ireg, &oreg);
if (!(oreg.eflags & X86_EFLAGS_CF)) {
ei->legacy_max_cylinder = oreg.ch + ((oreg.cl & 0xc0) << 2);
ei->legacy_max_head = oreg.dh;
ei->legacy_sectors_per_track = oreg.cl & 0x3f;
}
return 0;
}
void query_edd(void)
{
char eddarg[8];
int do_mbr = 1;
#ifdef CONFIG_EDD_OFF
int do_edd = 0;
#else
int do_edd = 1;
#endif
int be_quiet;
int devno;
struct edd_info ei, *edp;
u32 *mbrptr;
if (cmdline_find_option("edd", eddarg, sizeof(eddarg)) > 0) {
if (!strcmp(eddarg, "skipmbr") || !strcmp(eddarg, "skip")) {
do_edd = 1;
do_mbr = 0;
}
else if (!strcmp(eddarg, "off"))
do_edd = 0;
else if (!strcmp(eddarg, "on"))
do_edd = 1;
}
be_quiet = cmdline_find_option_bool("quiet");
edp = boot_params.eddbuf;
mbrptr = boot_params.edd_mbr_sig_buffer;
if (!do_edd)
return;
/* Bugs in some on-board or add-on card BIOSes may hang the EDD
* probe, so print a hint in case that happens.
*/
if (!be_quiet)
printf("Probing EDD (edd=off to disable)... ");
for (devno = 0x80; devno < 0x80+EDD_MBR_SIG_MAX; devno++) {
/*
* Scan the BIOS-supported hard disks and query EDD
* information...
*/
if (!get_edd_info(devno, &ei)
&& boot_params.eddbuf_entries < EDDMAXNR) {
memcpy(edp, &ei, sizeof(ei));
edp++;
boot_params.eddbuf_entries++;
}
if (do_mbr && !read_mbr_sig(devno, &ei, mbrptr++))
boot_params.edd_mbr_sig_buf_entries = devno-0x80+1;
}
if (!be_quiet)
printf("ok\n");
}
#endif
| linux-master | arch/x86/boot/edd.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright (C) 1997 Martin Mares
* Copyright (C) 2007 H. Peter Anvin
*/
/*
* This file builds a disk-image from three different files:
*
* - setup: 8086 machine code, sets up system parameters
* - system: 80386 code for actual system
* - zoffset.h: header with ZO_* defines
*
* It does some checking that all files are of the correct type, and writes
* the result to the specified destination, removing headers and padding to
* the right amount. It also writes some system data to stdout.
*/
/*
* Changes by tytso to allow root device specification
* High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
* Cross compiling fixes by Gertjan van Wingerde, July 1996
* Rewritten by Martin Mares, April 1997
* Substantially overhauled by H. Peter Anvin, April 2007
*/
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <stdarg.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <tools/le_byteshift.h>
typedef unsigned char u8;
typedef unsigned short u16;
typedef unsigned int u32;
#define DEFAULT_MAJOR_ROOT 0
#define DEFAULT_MINOR_ROOT 0
#define DEFAULT_ROOT_DEV (DEFAULT_MAJOR_ROOT << 8 | DEFAULT_MINOR_ROOT)
/* Minimal number of setup sectors */
#define SETUP_SECT_MIN 5
#define SETUP_SECT_MAX 64
/* This must be large enough to hold the entire setup */
u8 buf[SETUP_SECT_MAX*512];
#define PECOFF_RELOC_RESERVE 0x20
#ifdef CONFIG_EFI_MIXED
#define PECOFF_COMPAT_RESERVE 0x20
#else
#define PECOFF_COMPAT_RESERVE 0x0
#endif
static unsigned long efi32_stub_entry;
static unsigned long efi64_stub_entry;
static unsigned long efi_pe_entry;
static unsigned long efi32_pe_entry;
static unsigned long kernel_info;
static unsigned long startup_64;
static unsigned long _ehead;
static unsigned long _end;
/*----------------------------------------------------------------------*/
static const u32 crctab32[] = {
0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419,
0x706af48f, 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4,
0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07,
0x90bf1d91, 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, 0x136c9856,
0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9,
0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4,
0xa2677172, 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3,
0x45df5c75, 0xdcd60dcf, 0xabd13d59, 0x26d930ac, 0x51de003a,
0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, 0xcfba9599,
0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190,
0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f,
0x9fbfe4a5, 0xe8b8d433, 0x7807c9a2, 0x0f00f934, 0x9609a88e,
0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, 0x6c0695ed,
0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950,
0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3,
0xfbd44c65, 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a,
0x346ed9fc, 0xad678846, 0xda60b8d0, 0x44042d73, 0x33031de5,
0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa, 0xbe0b1010,
0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17,
0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6,
0x03b6e20c, 0x74b1d29a, 0xead54739, 0x9dd277af, 0x04db2615,
0x73dc1683, 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, 0xf00f9344,
0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb,
0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a,
0x67dd4acc, 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1,
0xa6bc5767, 0x3fb506dd, 0x48b2364b, 0xd80d2bda, 0xaf0a1b4c,
0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, 0x316e8eef,
0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe,
0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31,
0x2cd99e8b, 0x5bdeae1d, 0x9b64c2b0, 0xec63f226, 0x756aa39c,
0x026d930a, 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, 0x92d28e9b,
0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242,
0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1,
0x18b74777, 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, 0xa00ae278,
0xd70dd2ee, 0x4e048354, 0x3903b3c2, 0xa7672661, 0xd06016f7,
0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc, 0x40df0b66,
0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605,
0xcdd70693, 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8,
0x5d681b02, 0x2a6f2b94, 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b,
0x2d02ef8d
};
static u32 partial_crc32_one(u8 c, u32 crc)
{
return crctab32[(crc ^ c) & 0xff] ^ (crc >> 8);
}
static u32 partial_crc32(const u8 *s, int len, u32 crc)
{
while (len--)
crc = partial_crc32_one(*s++, crc);
return crc;
}
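/*
* The table implements the standard reflected CRC-32 polynomial
* 0xedb88320; main() below seeds with 0xffffffff and never applies
* the usual final XOR, so the stored value is the raw register state.
* A self-contained bitwise sketch of the same update (compiled out):
*/
#if 0
#include <stdio.h>
static unsigned int crc32_bitwise(const unsigned char *s, int len)
{
	unsigned int crc = 0xffffffff;
	while (len--) {
		crc ^= *s++;
		for (int k = 0; k < 8; k++)
			crc = (crc >> 1) ^ (crc & 1 ? 0xedb88320 : 0);
	}
	return crc;	/* same as partial_crc32() with seed 0xffffffff */
}
int main(void)
{
	printf("%08x\n", crc32_bitwise((const unsigned char *)"123456789", 9));
	return 0;
}
#endif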
static void die(const char * str, ...)
{
va_list args;
va_start(args, str);
vfprintf(stderr, str, args);
va_end(args);
fputc('\n', stderr);
exit(1);
}
static void usage(void)
{
die("Usage: build setup system zoffset.h image");
}
#ifdef CONFIG_EFI_STUB
static void update_pecoff_section_header_fields(char *section_name, u32 vma, u32 size, u32 datasz, u32 offset)
{
unsigned int pe_header;
unsigned short num_sections;
u8 *section;
pe_header = get_unaligned_le32(&buf[0x3c]);
num_sections = get_unaligned_le16(&buf[pe_header + 6]);
#ifdef CONFIG_X86_32
section = &buf[pe_header + 0xa8];
#else
section = &buf[pe_header + 0xb8];
#endif
while (num_sections > 0) {
if (strncmp((char*)section, section_name, 8) == 0) {
/* section header size field */
put_unaligned_le32(size, section + 0x8);
/* section header vma field */
put_unaligned_le32(vma, section + 0xc);
/* section header 'size of initialised data' field */
put_unaligned_le32(datasz, section + 0x10);
/* section header 'file offset' field */
put_unaligned_le32(offset, section + 0x14);
break;
}
section += 0x28;
num_sections--;
}
}
static void update_pecoff_section_header(char *section_name, u32 offset, u32 size)
{
update_pecoff_section_header_fields(section_name, offset, size, size, offset);
}
static void update_pecoff_setup_and_reloc(unsigned int size)
{
u32 setup_offset = 0x200;
u32 reloc_offset = size - PECOFF_RELOC_RESERVE - PECOFF_COMPAT_RESERVE;
#ifdef CONFIG_EFI_MIXED
u32 compat_offset = reloc_offset + PECOFF_RELOC_RESERVE;
#endif
u32 setup_size = reloc_offset - setup_offset;
update_pecoff_section_header(".setup", setup_offset, setup_size);
update_pecoff_section_header(".reloc", reloc_offset, PECOFF_RELOC_RESERVE);
/*
* Modify .reloc section contents with a single entry. The
* relocation is applied to offset 10 of the relocation section.
*/
put_unaligned_le32(reloc_offset + 10, &buf[reloc_offset]);
put_unaligned_le32(10, &buf[reloc_offset + 4]);
#ifdef CONFIG_EFI_MIXED
update_pecoff_section_header(".compat", compat_offset, PECOFF_COMPAT_RESERVE);
/*
* Put the IA-32 machine type (0x14c) and the associated entry point
* address in the .compat section, so loaders can figure out which other
* execution modes this image supports.
*/
buf[compat_offset] = 0x1;
buf[compat_offset + 1] = 0x8;
put_unaligned_le16(0x14c, &buf[compat_offset + 2]);
put_unaligned_le32(efi32_pe_entry + size, &buf[compat_offset + 4]);
#endif
}
static void update_pecoff_text(unsigned int text_start, unsigned int file_sz,
unsigned int init_sz)
{
unsigned int pe_header;
unsigned int text_sz = file_sz - text_start;
unsigned int bss_sz = init_sz - file_sz;
pe_header = get_unaligned_le32(&buf[0x3c]);
/*
* The PE/COFF loader may load the image at an address which is
* misaligned with respect to the kernel_alignment field in the setup
* header.
*
* In order to avoid relocating the kernel to correct the misalignment,
* add slack to allow the buffer to be aligned within the declared size
* of the image.
*/
bss_sz += CONFIG_PHYSICAL_ALIGN;
init_sz += CONFIG_PHYSICAL_ALIGN;
/*
* Size of code: Subtract the size of the first sector (512 bytes)
* which includes the header.
*/
put_unaligned_le32(file_sz - 512 + bss_sz, &buf[pe_header + 0x1c]);
/* Size of image */
put_unaligned_le32(init_sz, &buf[pe_header + 0x50]);
/*
* Address of entry point for PE/COFF executable
*/
put_unaligned_le32(text_start + efi_pe_entry, &buf[pe_header + 0x28]);
update_pecoff_section_header_fields(".text", text_start, text_sz + bss_sz,
text_sz, text_start);
}
static int reserve_pecoff_reloc_section(int c)
{
/* Reserve 0x20 bytes for .reloc section */
memset(buf+c, 0, PECOFF_RELOC_RESERVE);
return PECOFF_RELOC_RESERVE;
}
static void efi_stub_defaults(void)
{
/* Defaults for old kernel */
#ifdef CONFIG_X86_32
efi_pe_entry = 0x10;
#else
efi_pe_entry = 0x210;
startup_64 = 0x200;
#endif
}
static void efi_stub_entry_update(void)
{
unsigned long addr = efi32_stub_entry;
#ifdef CONFIG_EFI_HANDOVER_PROTOCOL
#ifdef CONFIG_X86_64
/* Yes, this is really how we defined it :( */
addr = efi64_stub_entry - 0x200;
#endif
#ifdef CONFIG_EFI_MIXED
if (efi32_stub_entry != addr)
die("32-bit and 64-bit EFI entry points do not match\n");
#endif
#endif
put_unaligned_le32(addr, &buf[0x264]);
}
#else
static inline void update_pecoff_setup_and_reloc(unsigned int size) {}
static inline void update_pecoff_text(unsigned int text_start,
unsigned int file_sz,
unsigned int init_sz) {}
static inline void efi_stub_defaults(void) {}
static inline void efi_stub_entry_update(void) {}
static inline int reserve_pecoff_reloc_section(int c)
{
return 0;
}
#endif /* CONFIG_EFI_STUB */
static int reserve_pecoff_compat_section(int c)
{
/* Reserve 0x20 bytes for .compat section */
memset(buf+c, 0, PECOFF_COMPAT_RESERVE);
return PECOFF_COMPAT_RESERVE;
}
/*
* Parse zoffset.h and find the entry points. We could just #include zoffset.h
* but that would mean tools/build would have to be rebuilt every time. It's
* not as if parsing it is hard...
*/
#define PARSE_ZOFS(p, sym) do { \
if (!strncmp(p, "#define ZO_" #sym " ", 11+sizeof(#sym))) \
sym = strtoul(p + 11 + sizeof(#sym), NULL, 16); \
} while (0)
static void parse_zoffset(char *fname)
{
FILE *file;
char *p;
int c;
file = fopen(fname, "r");
if (!file)
die("Unable to open `%s': %m", fname);
c = fread(buf, 1, sizeof(buf) - 1, file);
if (ferror(file))
die("read-error on `zoffset.h'");
fclose(file);
buf[c] = 0;
p = (char *)buf;
while (p && *p) {
PARSE_ZOFS(p, efi32_stub_entry);
PARSE_ZOFS(p, efi64_stub_entry);
PARSE_ZOFS(p, efi_pe_entry);
PARSE_ZOFS(p, efi32_pe_entry);
PARSE_ZOFS(p, kernel_info);
PARSE_ZOFS(p, startup_64);
PARSE_ZOFS(p, _ehead);
PARSE_ZOFS(p, _end);
p = strchr(p, '\n');
while (p && (*p == '\r' || *p == '\n'))
p++;
}
}
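/*
* For reference, zoffset.h consists of lines shaped like the following
* (values illustrative, not the real offsets):
*
*	#define ZO_startup_64 0x0000000000000200
*	#define ZO__end 0x0000000000b2d000
*
* which PARSE_ZOFS() matches by symbol name and parses as hex.
*/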
int main(int argc, char ** argv)
{
unsigned int i, sz, setup_sectors, init_sz;
int c;
u32 sys_size;
struct stat sb;
FILE *file, *dest;
int fd;
void *kernel;
u32 crc = 0xffffffffUL;
efi_stub_defaults();
if (argc != 5)
usage();
parse_zoffset(argv[3]);
dest = fopen(argv[4], "w");
if (!dest)
die("Unable to write `%s': %m", argv[4]);
/* Copy the setup code */
file = fopen(argv[1], "r");
if (!file)
die("Unable to open `%s': %m", argv[1]);
c = fread(buf, 1, sizeof(buf), file);
if (ferror(file))
die("read-error on `setup'");
if (c < 1024)
die("The setup must be at least 1024 bytes");
if (get_unaligned_le16(&buf[510]) != 0xAA55)
die("Boot block hasn't got boot flag (0xAA55)");
fclose(file);
c += reserve_pecoff_compat_section(c);
c += reserve_pecoff_reloc_section(c);
/* Pad unused space with zeros */
setup_sectors = (c + 511) / 512;
if (setup_sectors < SETUP_SECT_MIN)
setup_sectors = SETUP_SECT_MIN;
i = setup_sectors*512;
memset(buf+c, 0, i-c);
update_pecoff_setup_and_reloc(i);
/* Set the default root device */
put_unaligned_le16(DEFAULT_ROOT_DEV, &buf[508]);
/* Open and stat the kernel file */
fd = open(argv[2], O_RDONLY);
if (fd < 0)
die("Unable to open `%s': %m", argv[2]);
if (fstat(fd, &sb))
die("Unable to stat `%s': %m", argv[2]);
sz = sb.st_size;
kernel = mmap(NULL, sz, PROT_READ, MAP_SHARED, fd, 0);
if (kernel == MAP_FAILED)
die("Unable to mmap '%s': %m", argv[2]);
/* Number of 16-byte paragraphs, including space for a 4-byte CRC */
sys_size = (sz + 15 + 4) / 16;
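/* e.g. an 8,000,000-byte image: (8000000 + 15 + 4) / 16 = 500001 paragraphs */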
#ifdef CONFIG_EFI_STUB
/*
* COFF requires minimum 32-byte alignment of sections, and
* adding a signature is problematic without that alignment.
*/
sys_size = (sys_size + 1) & ~1;
#endif
/* Patch the setup code with the appropriate size parameters */
buf[0x1f1] = setup_sectors-1;
put_unaligned_le32(sys_size, &buf[0x1f4]);
init_sz = get_unaligned_le32(&buf[0x260]);
#ifdef CONFIG_EFI_STUB
/*
* The decompression buffer will start at ImageBase. When relocating
* the compressed kernel to its end, we must ensure that the head
* section does not get overwritten. The head section occupies
* [i, i + _ehead), and the destination is [init_sz - _end, init_sz).
*
* At present these should never overlap: 'i' is at most 32k due to
* SETUP_SECT_MAX, '_ehead' is less than 1k, and the
* calculation of INIT_SIZE in boot/header.S ensures that
* 'init_sz - _end' is at least 64k.
*
* For future-proofing, increase init_sz if necessary.
*/
if (init_sz - _end < i + _ehead) {
init_sz = (i + _ehead + _end + 4095) & ~4095;
put_unaligned_le32(init_sz, &buf[0x260]);
}
#endif
update_pecoff_text(setup_sectors * 512, i + (sys_size * 16), init_sz);
efi_stub_entry_update();
/* Update kernel_info offset. */
put_unaligned_le32(kernel_info, &buf[0x268]);
crc = partial_crc32(buf, i, crc);
if (fwrite(buf, 1, i, dest) != i)
die("Writing setup failed");
/* Copy the kernel code */
crc = partial_crc32(kernel, sz, crc);
if (fwrite(kernel, 1, sz, dest) != sz)
die("Writing kernel failed");
/* Add padding leaving 4 bytes for the checksum */
while (sz++ < (sys_size*16) - 4) {
crc = partial_crc32_one('\0', crc);
if (fwrite("\0", 1, 1, dest) != 1)
die("Writing padding failed");
}
/* Write the CRC */
put_unaligned_le32(crc, buf);
if (fwrite(buf, 1, 4, dest) != 4)
die("Writing CRC failed");
/* Catch any delayed write failures */
if (fclose(dest))
die("Writing image failed");
close(fd);
/* Everything is OK */
return 0;
}
| linux-master | arch/x86/boot/tools/build.c |
// SPDX-License-Identifier: GPL-2.0
/*
* This code is used on x86_64 to create page table identity mappings on
* demand by building up a new set of page tables (or appending to the
* existing ones), and then switching over to them when ready.
*
* Copyright (C) 2015-2016 Yinghai Lu
* Copyright (C) 2016 Kees Cook
*/
/* No PAGE_TABLE_ISOLATION support needed either: */
#undef CONFIG_PAGE_TABLE_ISOLATION
#include "error.h"
#include "misc.h"
/* These actually do the work of building the kernel identity maps. */
#include <linux/pgtable.h>
#include <asm/cmpxchg.h>
#include <asm/trap_pf.h>
#include <asm/trapnr.h>
#include <asm/init.h>
/* Use the static base for this part of the boot process */
#undef __PAGE_OFFSET
#define __PAGE_OFFSET __PAGE_OFFSET_BASE
#include "../../mm/ident_map.c"
#define _SETUP
#include <asm/setup.h> /* For COMMAND_LINE_SIZE */
#undef _SETUP
extern unsigned long get_cmd_line_ptr(void);
/* Used by PAGE_KERN* macros: */
pteval_t __default_kernel_pte_mask __read_mostly = ~0;
/* Used to track our page table allocation area. */
struct alloc_pgt_data {
unsigned char *pgt_buf;
unsigned long pgt_buf_size;
unsigned long pgt_buf_offset;
};
/*
* Allocates space for a page table entry, using struct alloc_pgt_data
* above. Besides the local callers, this is used as the allocation
* callback in mapping_info below.
*/
static void *alloc_pgt_page(void *context)
{
struct alloc_pgt_data *pages = (struct alloc_pgt_data *)context;
unsigned char *entry;
/* Validate there is space available for a new page. */
if (pages->pgt_buf_offset >= pages->pgt_buf_size) {
debug_putstr("out of pgt_buf in " __FILE__ "!?\n");
debug_putaddr(pages->pgt_buf_offset);
debug_putaddr(pages->pgt_buf_size);
return NULL;
}
/* Consumed more tables than expected? */
if (pages->pgt_buf_offset == BOOT_PGT_SIZE_WARN) {
debug_putstr("pgt_buf running low in " __FILE__ "\n");
debug_putstr("Need to raise BOOT_PGT_SIZE?\n");
debug_putaddr(pages->pgt_buf_offset);
debug_putaddr(pages->pgt_buf_size);
}
entry = pages->pgt_buf + pages->pgt_buf_offset;
pages->pgt_buf_offset += PAGE_SIZE;
return entry;
}
/* Used to track our allocated page tables. */
static struct alloc_pgt_data pgt_data;
/* The top level page table entry pointer. */
static unsigned long top_level_pgt;
phys_addr_t physical_mask = (1ULL << __PHYSICAL_MASK_SHIFT) - 1;
/*
* Mapping information structure passed to kernel_ident_mapping_init().
* Due to relocation, pointers must be assigned at run time not build time.
*/
static struct x86_mapping_info mapping_info;
/*
* Adds the specified range to the identity mappings.
*/
void kernel_add_identity_map(unsigned long start, unsigned long end)
{
int ret;
/* Align boundary to 2M. */
start = round_down(start, PMD_SIZE);
end = round_up(end, PMD_SIZE);
if (start >= end)
return;
/* Build the mapping. */
ret = kernel_ident_mapping_init(&mapping_info, (pgd_t *)top_level_pgt, start, end);
if (ret)
error("Error: kernel_ident_mapping_init() failed\n");
}
/* Locates and clears a region for a new top level page table. */
void initialize_identity_maps(void *rmode)
{
unsigned long cmdline;
struct setup_data *sd;
/* Exclude the encryption mask from __PHYSICAL_MASK */
physical_mask &= ~sme_me_mask;
/* Init mapping_info with run-time function/buffer pointers. */
mapping_info.alloc_pgt_page = alloc_pgt_page;
mapping_info.context = &pgt_data;
mapping_info.page_flag = __PAGE_KERNEL_LARGE_EXEC | sme_me_mask;
mapping_info.kernpg_flag = _KERNPG_TABLE;
/*
* It should be impossible for this not to already be true,
* but since calling this a second time would rewind the other
* counters, let's just make sure this is reset too.
*/
pgt_data.pgt_buf_offset = 0;
/*
* If we came here via startup_32(), cr3 will be _pgtable already
* and we must append to the existing area instead of entirely
* overwriting it.
*
* With 5-level paging, we use '_pgtable' to allocate the p4d page table,
* the top-level page table is allocated separately.
*
* p4d_offset(top_level_pgt, 0) would cover both the 4- and 5-level
* cases. On 4-level paging it's equal to 'top_level_pgt'.
*/
top_level_pgt = read_cr3_pa();
if (p4d_offset((pgd_t *)top_level_pgt, 0) == (p4d_t *)_pgtable) {
pgt_data.pgt_buf = _pgtable + BOOT_INIT_PGT_SIZE;
pgt_data.pgt_buf_size = BOOT_PGT_SIZE - BOOT_INIT_PGT_SIZE;
memset(pgt_data.pgt_buf, 0, pgt_data.pgt_buf_size);
} else {
pgt_data.pgt_buf = _pgtable;
pgt_data.pgt_buf_size = BOOT_PGT_SIZE;
memset(pgt_data.pgt_buf, 0, pgt_data.pgt_buf_size);
top_level_pgt = (unsigned long)alloc_pgt_page(&pgt_data);
}
/*
* New page-table is set up - map the kernel image, boot_params and the
* command line. The uncompressed kernel requires boot_params and the
* command line to be mapped in the identity mapping. Map them
* explicitly here in case the compressed kernel does not touch them,
* or does not touch all the pages covering them.
*/
kernel_add_identity_map((unsigned long)_head, (unsigned long)_end);
boot_params = rmode;
kernel_add_identity_map((unsigned long)boot_params, (unsigned long)(boot_params + 1));
cmdline = get_cmd_line_ptr();
kernel_add_identity_map(cmdline, cmdline + COMMAND_LINE_SIZE);
/*
* Also map the setup_data entries passed via boot_params in case they
* need to be accessed by uncompressed kernel via the identity mapping.
*/
sd = (struct setup_data *)boot_params->hdr.setup_data;
while (sd) {
unsigned long sd_addr = (unsigned long)sd;
kernel_add_identity_map(sd_addr, sd_addr + sizeof(*sd) + sd->len);
sd = (struct setup_data *)sd->next;
}
sev_prep_identity_maps(top_level_pgt);
/* Load the new page-table. */
write_cr3(top_level_pgt);
/*
* Now that the required page table mappings are established and a
* GHCB can be used, check for SNP guest/HV feature compatibility.
*/
snp_check_features();
}
static pte_t *split_large_pmd(struct x86_mapping_info *info,
pmd_t *pmdp, unsigned long __address)
{
unsigned long page_flags;
unsigned long address;
pte_t *pte;
pmd_t pmd;
int i;
pte = (pte_t *)info->alloc_pgt_page(info->context);
if (!pte)
return NULL;
address = __address & PMD_MASK;
/* No large page - clear PSE flag */
page_flags = info->page_flag & ~_PAGE_PSE;
/* Populate the PTEs */
for (i = 0; i < PTRS_PER_PMD; i++) {
set_pte(&pte[i], __pte(address | page_flags));
address += PAGE_SIZE;
}
/*
* Ideally we need to clear the large PMD first and do a TLB
* flush before we write the new PMD. But the 2M range of the
* PMD might contain the code we execute and/or the stack
* we are on, so we can't do that. But that should be safe here
* because we are going from large to small mappings and we are
* also the only user of the page-table, so there is no chance
* of a TLB multihit.
*/
pmd = __pmd((unsigned long)pte | info->kernpg_flag);
set_pmd(pmdp, pmd);
/* Flush TLB to establish the new PMD */
write_cr3(top_level_pgt);
return pte + pte_index(__address);
}
static void clflush_page(unsigned long address)
{
unsigned int flush_size;
char *cl, *start, *end;
/*
* Hardcode cl-size to 64 - CPUID can't be used here because that might
* cause another #VC exception and the GHCB is not ready to use yet.
*/
flush_size = 64;
start = (char *)(address & PAGE_MASK);
end = start + PAGE_SIZE;
/*
* First make sure there are no pending writes on the cache-lines to
* flush.
*/
asm volatile("mfence" : : : "memory");
for (cl = start; cl != end; cl += flush_size)
clflush(cl);
}
static int set_clr_page_flags(struct x86_mapping_info *info,
unsigned long address,
pteval_t set, pteval_t clr)
{
pgd_t *pgdp = (pgd_t *)top_level_pgt;
p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep, pte;
/*
* First make sure there is a PMD mapping for 'address'.
* It should already exist, but keep things generic.
*
* To map the page just read from it and fault it in if there is no
* mapping yet. kernel_add_identity_map() can't be called here because
* that would unconditionally map the address on PMD level, destroying
* any PTE-level mappings that might already exist. Use assembly here
* so the access won't be optimized away.
*/
asm volatile("mov %[address], %%r9"
:: [address] "g" (*(unsigned long *)address)
: "r9", "memory");
/*
* The page is mapped at least with PMD size - so skip checks and walk
* directly to the PMD.
*/
p4dp = p4d_offset(pgdp, address);
pudp = pud_offset(p4dp, address);
pmdp = pmd_offset(pudp, address);
if (pmd_large(*pmdp))
ptep = split_large_pmd(info, pmdp, address);
else
ptep = pte_offset_kernel(pmdp, address);
if (!ptep)
return -ENOMEM;
/*
* Changing encryption attributes of a page requires to flush it from
* the caches.
*/
if ((set | clr) & _PAGE_ENC) {
clflush_page(address);
/*
* If the encryption attribute is being cleared, change the page state
* to shared in the RMP table.
*/
if (clr)
snp_set_page_shared(__pa(address & PAGE_MASK));
}
/* Update PTE */
pte = *ptep;
pte = pte_set_flags(pte, set);
pte = pte_clear_flags(pte, clr);
set_pte(ptep, pte);
/*
* If the encryption attribute is being set, then change the page state to
* private in the RMP entry. The page state change must be done after the PTE
* is updated.
*/
if (set & _PAGE_ENC)
snp_set_page_private(__pa(address & PAGE_MASK));
/* Flush TLB after changing encryption attribute */
write_cr3(top_level_pgt);
return 0;
}
int set_page_decrypted(unsigned long address)
{
return set_clr_page_flags(&mapping_info, address, 0, _PAGE_ENC);
}
int set_page_encrypted(unsigned long address)
{
return set_clr_page_flags(&mapping_info, address, _PAGE_ENC, 0);
}
int set_page_non_present(unsigned long address)
{
return set_clr_page_flags(&mapping_info, address, 0, _PAGE_PRESENT);
}
static void do_pf_error(const char *msg, unsigned long error_code,
unsigned long address, unsigned long ip)
{
error_putstr(msg);
error_putstr("\nError Code: ");
error_puthex(error_code);
error_putstr("\nCR2: 0x");
error_puthex(address);
error_putstr("\nRIP relative to _head: 0x");
error_puthex(ip - (unsigned long)_head);
error_putstr("\n");
error("Stopping.\n");
}
void do_boot_page_fault(struct pt_regs *regs, unsigned long error_code)
{
unsigned long address = native_read_cr2();
unsigned long end;
bool ghcb_fault;
ghcb_fault = sev_es_check_ghcb_fault(address);
address &= PMD_MASK;
end = address + PMD_SIZE;
/*
* Check for unexpected error codes. Unexpected are:
* - Faults on present pages
* - User faults
* - Reserved bits set
*/
if (error_code & (X86_PF_PROT | X86_PF_USER | X86_PF_RSVD))
do_pf_error("Unexpected page-fault:", error_code, address, regs->ip);
else if (ghcb_fault)
do_pf_error("Page-fault on GHCB page:", error_code, address, regs->ip);
/*
* Error code is sane - now identity map the 2M region around
* the faulting address.
*/
kernel_add_identity_map(address, end);
}
| linux-master | arch/x86/boot/compressed/ident_map_64.c |
#include "error.h"
#include "../../coco/tdx/tdx-shared.c"
| linux-master | arch/x86/boot/compressed/tdx-shared.c |
// SPDX-License-Identifier: GPL-2.0
#include "../cpuflags.c"
bool has_cpuflag(int flag)
{
get_cpuflags();
return test_bit(flag, cpu.flags);
}
| linux-master | arch/x86/boot/compressed/cpuflags.c |
// SPDX-License-Identifier: GPL-2.0-only
#include "error.h"
#include "misc.h"
#include "tdx.h"
#include "sev.h"
#include <asm/shared/tdx.h>
/*
* accept_memory() and process_unaccepted_memory() are called from the
* EFI stub, which runs before the decompressor and its early_tdx_detect().
*
* Enumerate TDX directly for these early users.
*/
static bool early_is_tdx_guest(void)
{
static bool once;
static bool is_tdx;
if (!IS_ENABLED(CONFIG_INTEL_TDX_GUEST))
return false;
if (!once) {
u32 eax, sig[3];
cpuid_count(TDX_CPUID_LEAF_ID, 0, &eax,
&sig[0], &sig[2], &sig[1]);
is_tdx = !memcmp(TDX_IDENT, sig, sizeof(sig));
once = true;
}
return is_tdx;
}
void arch_accept_memory(phys_addr_t start, phys_addr_t end)
{
/* Platform-specific memory-acceptance call goes here */
if (early_is_tdx_guest()) {
if (!tdx_accept_memory(start, end))
panic("TDX: Failed to accept memory\n");
} else if (sev_snp_enabled()) {
snp_accept_memory(start, end);
} else {
error("Cannot accept memory: unknown platform\n");
}
}
bool init_unaccepted_memory(void)
{
guid_t guid = LINUX_EFI_UNACCEPTED_MEM_TABLE_GUID;
struct efi_unaccepted_memory *table;
unsigned long cfg_table_pa;
unsigned int cfg_table_len;
enum efi_type et;
int ret;
et = efi_get_type(boot_params);
if (et == EFI_TYPE_NONE)
return false;
ret = efi_get_conf_table(boot_params, &cfg_table_pa, &cfg_table_len);
if (ret) {
warn("EFI config table not found.");
return false;
}
table = (void *)efi_find_vendor_table(boot_params, cfg_table_pa,
cfg_table_len, guid);
if (!table)
return false;
if (table->version != 1)
error("Unknown version of unaccepted memory table\n");
/*
* In many cases unaccepted_table is already set by the EFI stub, but it
* has to be initialized again to cover the cases where the table is not
* allocated by the EFI stub, or where the EFI stub copied the kernel image
* with efi_relocate_kernel() before the variable was set.
*
* It must be initialized before the first usage of accept_memory().
*/
unaccepted_table = table;
return true;
}
| linux-master | arch/x86/boot/compressed/mem.c |
// SPDX-License-Identifier: GPL-2.0
/*
* This provides an optimized implementation of memcpy, and a simplified
* implementation of memset and memmove. These are used here because the
* standard kernel runtime versions are not yet available and we don't
* trust the gcc built-in implementations as they may do unexpected things
* (e.g. FPU ops) in the minimal decompression stub execution environment.
*/
#include "error.h"
#include "../string.c"
#ifdef CONFIG_X86_32
static void *____memcpy(void *dest, const void *src, size_t n)
{
int d0, d1, d2;
asm volatile(
"rep ; movsl\n\t"
"movl %4,%%ecx\n\t"
"rep ; movsb\n\t"
: "=&c" (d0), "=&D" (d1), "=&S" (d2)
: "0" (n >> 2), "g" (n & 3), "1" (dest), "2" (src)
: "memory");
return dest;
}
#else
static void *____memcpy(void *dest, const void *src, size_t n)
{
long d0, d1, d2;
asm volatile(
"rep ; movsq\n\t"
"movq %4,%%rcx\n\t"
"rep ; movsb\n\t"
: "=&c" (d0), "=&D" (d1), "=&S" (d2)
: "0" (n >> 3), "g" (n & 7), "1" (dest), "2" (src)
: "memory");
return dest;
}
#endif
void *memset(void *s, int c, size_t n)
{
int i;
char *ss = s;
for (i = 0; i < n; i++)
ss[i] = c;
return s;
}
void *memmove(void *dest, const void *src, size_t n)
{
unsigned char *d = dest;
const unsigned char *s = src;
if (d <= s || d - s >= n)
return ____memcpy(dest, src, n);
while (n-- > 0)
d[n] = s[n];
return dest;
}
/* Detect and warn about potential overlaps, but handle them with memmove. */
void *memcpy(void *dest, const void *src, size_t n)
{
if (dest > src && dest - src < n) {
warn("Avoiding potentially unsafe overlapping memcpy()!");
return memmove(dest, src, n);
}
return ____memcpy(dest, src, n);
}
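/*
 * Illustrative example (not part of the kernel source): the forward
 * copy above is only unsafe when dest starts inside the source buffer.
 * With src = 100 and n = 10, dest = 105 gives dest - src = 5 < n, so
 * bytes would be overwritten before they are read and memmove() must
 * copy backwards; dest = 110 gives dest - src = 10 == n, the regions
 * are disjoint, and the plain forward copy is fine.
 */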
#ifdef CONFIG_KASAN
extern void *__memset(void *s, int c, size_t n) __alias(memset);
extern void *__memmove(void *dest, const void *src, size_t n) __alias(memmove);
extern void *__memcpy(void *dest, const void *src, size_t n) __alias(memcpy);
#endif
| linux-master | arch/x86/boot/compressed/string.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Helpers for early access to EFI configuration table.
*
* Originally derived from arch/x86/boot/compressed/acpi.c
*/
#include "misc.h"
/**
* efi_get_type - Given a pointer to boot_params, determine the type of EFI environment.
*
* @bp: pointer to boot_params
*
* Return: EFI_TYPE_{32,64} for valid EFI environments, EFI_TYPE_NONE otherwise.
*/
enum efi_type efi_get_type(struct boot_params *bp)
{
struct efi_info *ei;
enum efi_type et;
const char *sig;
ei = &bp->efi_info;
sig = (char *)&ei->efi_loader_signature;
if (!strncmp(sig, EFI64_LOADER_SIGNATURE, 4)) {
et = EFI_TYPE_64;
} else if (!strncmp(sig, EFI32_LOADER_SIGNATURE, 4)) {
et = EFI_TYPE_32;
} else {
debug_putstr("No EFI environment detected.\n");
et = EFI_TYPE_NONE;
}
#ifndef CONFIG_X86_64
/*
* Existing callers like acpi.c treat this case as an indicator to
* fall through to non-EFI handling rather than as an error, so
* maintain that behavior here as well.
*/
if (ei->efi_systab_hi || ei->efi_memmap_hi) {
debug_putstr("EFI system table is located above 4GB and cannot be accessed.\n");
et = EFI_TYPE_NONE;
}
#endif
return et;
}
/**
* efi_get_system_table - Given a pointer to boot_params, retrieve the physical address
* of the EFI system table.
*
* @bp: pointer to boot_params
*
* Return: EFI system table address on success. On error, return 0.
*/
unsigned long efi_get_system_table(struct boot_params *bp)
{
unsigned long sys_tbl_pa;
struct efi_info *ei;
enum efi_type et;
/* Get systab from boot params. */
ei = &bp->efi_info;
#ifdef CONFIG_X86_64
sys_tbl_pa = ei->efi_systab | ((__u64)ei->efi_systab_hi << 32);
#else
sys_tbl_pa = ei->efi_systab;
#endif
if (!sys_tbl_pa) {
debug_putstr("EFI system table not found.");
return 0;
}
return sys_tbl_pa;
}
/*
* The EFI config table address is switched to a virtual address after boot,
* which may not be accessible to the kexec'd kernel. To address this, kexec
* provides the initial physical address via a struct setup_data entry, which
* is checked for here, along with some sanity checks.
*/
static struct efi_setup_data *get_kexec_setup_data(struct boot_params *bp,
enum efi_type et)
{
#ifdef CONFIG_X86_64
struct efi_setup_data *esd = NULL;
struct setup_data *data;
u64 pa_data;
pa_data = bp->hdr.setup_data;
while (pa_data) {
data = (struct setup_data *)pa_data;
if (data->type == SETUP_EFI) {
esd = (struct efi_setup_data *)(pa_data + sizeof(struct setup_data));
break;
}
pa_data = data->next;
}
/*
* The original ACPI code falls back to attempting a normal EFI boot in these
* cases, so maintain the existing behavior by indicating a non-kexec
* environment to the caller, but print a message for debugging.
*/
if (esd && !esd->tables) {
debug_putstr("kexec EFI environment missing valid configuration table.\n");
return NULL;
}
return esd;
#endif
return NULL;
}
/**
* efi_get_conf_table - Given a pointer to boot_params, locate and return the physical
* address of EFI configuration table.
*
* @bp: pointer to boot_params
* @cfg_tbl_pa: location to store physical address of config table
* @cfg_tbl_len: location to store number of config table entries
*
* Return: 0 on success. On error, return params are left unchanged.
*/
int efi_get_conf_table(struct boot_params *bp, unsigned long *cfg_tbl_pa,
unsigned int *cfg_tbl_len)
{
unsigned long sys_tbl_pa;
enum efi_type et;
int ret;
if (!cfg_tbl_pa || !cfg_tbl_len)
return -EINVAL;
sys_tbl_pa = efi_get_system_table(bp);
if (!sys_tbl_pa)
return -EINVAL;
/* Handle EFI bitness properly */
et = efi_get_type(bp);
if (et == EFI_TYPE_64) {
efi_system_table_64_t *stbl = (efi_system_table_64_t *)sys_tbl_pa;
struct efi_setup_data *esd;
/* kexec provides an alternative EFI conf table, check for it. */
esd = get_kexec_setup_data(bp, et);
*cfg_tbl_pa = esd ? esd->tables : stbl->tables;
*cfg_tbl_len = stbl->nr_tables;
} else if (et == EFI_TYPE_32) {
efi_system_table_32_t *stbl = (efi_system_table_32_t *)sys_tbl_pa;
*cfg_tbl_pa = stbl->tables;
*cfg_tbl_len = stbl->nr_tables;
} else {
return -EINVAL;
}
return 0;
}
/* Get vendor table address/guid from EFI config table at the given index */
static int get_vendor_table(void *cfg_tbl, unsigned int idx,
unsigned long *vendor_tbl_pa,
efi_guid_t *vendor_tbl_guid,
enum efi_type et)
{
if (et == EFI_TYPE_64) {
efi_config_table_64_t *tbl_entry = (efi_config_table_64_t *)cfg_tbl + idx;
if (!IS_ENABLED(CONFIG_X86_64) && tbl_entry->table >> 32) {
debug_putstr("Error: EFI config table entry located above 4GB.\n");
return -EINVAL;
}
*vendor_tbl_pa = tbl_entry->table;
*vendor_tbl_guid = tbl_entry->guid;
} else if (et == EFI_TYPE_32) {
efi_config_table_32_t *tbl_entry = (efi_config_table_32_t *)cfg_tbl + idx;
*vendor_tbl_pa = tbl_entry->table;
*vendor_tbl_guid = tbl_entry->guid;
} else {
return -EINVAL;
}
return 0;
}
/**
* efi_find_vendor_table - Given EFI config table, search it for the physical
* address of the vendor table associated with GUID.
*
* @bp: pointer to boot_params
* @cfg_tbl_pa: pointer to EFI configuration table
* @cfg_tbl_len: number of entries in EFI configuration table
* @guid: GUID of vendor table
*
* Return: vendor table address on success. On error, return 0.
*/
unsigned long efi_find_vendor_table(struct boot_params *bp,
unsigned long cfg_tbl_pa,
unsigned int cfg_tbl_len,
efi_guid_t guid)
{
enum efi_type et;
unsigned int i;
et = efi_get_type(bp);
if (et == EFI_TYPE_NONE)
return 0;
for (i = 0; i < cfg_tbl_len; i++) {
unsigned long vendor_tbl_pa;
efi_guid_t vendor_tbl_guid;
int ret;
ret = get_vendor_table((void *)cfg_tbl_pa, i,
&vendor_tbl_pa,
&vendor_tbl_guid, et);
if (ret)
return 0;
if (!efi_guidcmp(guid, vendor_tbl_guid))
return vendor_tbl_pa;
}
return 0;
}
| linux-master | arch/x86/boot/compressed/efi.c |
// SPDX-License-Identifier: GPL-2.0-only
/* ----------------------------------------------------------------------- *
*
* Copyright (C) 2009 Intel Corporation. All rights reserved.
*
* H. Peter Anvin <[email protected]>
*
* -----------------------------------------------------------------------
*
* Outputs a small assembly wrapper with the appropriate symbols defined.
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <tools/le_byteshift.h>
int main(int argc, char *argv[])
{
uint32_t olen;
long ilen;
FILE *f = NULL;
int retval = 1;
if (argc < 2) {
fprintf(stderr, "Usage: %s compressed_file\n", argv[0]);
goto bail;
}
/* Get the information for the compressed kernel image first */
f = fopen(argv[1], "r");
if (!f) {
perror(argv[1]);
goto bail;
}
if (fseek(f, -4L, SEEK_END)) {
perror(argv[1]);
goto bail;
}
if (fread(&olen, sizeof(olen), 1, f) != 1) {
perror(argv[1]);
goto bail;
}
ilen = ftell(f);
olen = get_unaligned_le32(&olen);
printf(".section \".rodata..compressed\",\"a\",@progbits\n");
printf(".globl z_input_len\n");
printf("z_input_len = %lu\n", ilen);
printf(".globl z_output_len\n");
printf("z_output_len = %lu\n", (unsigned long)olen);
printf(".globl input_data, input_data_end\n");
printf("input_data:\n");
printf(".incbin \"%s\"\n", argv[1]);
printf("input_data_end:\n");
printf(".section \".rodata\",\"a\",@progbits\n");
printf(".globl input_len\n");
printf("input_len:\n\t.long %lu\n", ilen);
printf(".globl output_len\n");
printf("output_len:\n\t.long %lu\n", (unsigned long)olen);
retval = 0;
bail:
if (f)
fclose(f);
return retval;
}
| linux-master | arch/x86/boot/compressed/mkpiggy.c |
#include "misc.h"
/* This might be accessed before .bss is cleared, so use .data instead. */
int early_serial_base __section(".data");
#include "../early_serial_console.c"
| linux-master | arch/x86/boot/compressed/early_serial_console.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Callers outside of misc.c need access to the error reporting routines,
* but the *_putstr() functions need to stay in misc.c because of how
* memcpy() and memmove() are defined for the compressed boot environment.
*/
#include "misc.h"
#include "error.h"
void warn(const char *m)
{
error_putstr("\n\n");
error_putstr(m);
error_putstr("\n\n");
}
void error(char *m)
{
warn(m);
error_putstr(" -- System halted");
while (1)
asm("hlt");
}
/* EFI libstub provides vsnprintf() */
#ifdef CONFIG_EFI_STUB
void panic(const char *fmt, ...)
{
static char buf[1024];
va_list args;
int len;
va_start(args, fmt);
len = vsnprintf(buf, sizeof(buf), fmt, args);
va_end(args);
if (len && buf[len - 1] == '\n')
buf[len - 1] = '\0';
error(buf);
}
#endif
| linux-master | arch/x86/boot/compressed/error.c |
// SPDX-License-Identifier: GPL-2.0
#include "misc.h"
static unsigned long fs;
static inline void set_fs(unsigned long seg)
{
fs = seg << 4; /* shift it back */
}
typedef unsigned long addr_t;
static inline char rdfs8(addr_t addr)
{
return *((char *)(fs + addr));
}
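/*
 * Illustrative sketch (not part of the kernel source): real-mode
 * segment:offset addressing as used by set_fs()/rdfs8() above. A
 * 16-bit segment selects a 16-byte paragraph, so the linear address
 * is (seg << 4) + offset. example_linear() is a hypothetical helper.
 */
static inline unsigned long example_linear(unsigned short seg,
unsigned short off)
{
/* e.g. 0x1000:0x0020 -> 0x10020 */
return ((unsigned long)seg << 4) + off;
}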
#include "../cmdline.c"
unsigned long get_cmd_line_ptr(void)
{
unsigned long cmd_line_ptr = boot_params->hdr.cmd_line_ptr;
cmd_line_ptr |= (u64)boot_params->ext_cmd_line_ptr << 32;
return cmd_line_ptr;
}
int cmdline_find_option(const char *option, char *buffer, int bufsize)
{
return __cmdline_find_option(get_cmd_line_ptr(), option, buffer, bufsize);
}
int cmdline_find_option_bool(const char *option)
{
return __cmdline_find_option_bool(get_cmd_line_ptr(), option);
}
| linux-master | arch/x86/boot/compressed/cmdline.c |
// SPDX-License-Identifier: GPL-2.0
/*
* kaslr.c
*
* This contains the routines needed to generate a reasonable level of
* entropy to choose a randomized kernel base address offset in support
* of Kernel Address Space Layout Randomization (KASLR). Additionally
* handles walking the physical memory maps (and tracking memory regions
* to avoid) in order to select a physical memory location that can
* contain the entire properly aligned running kernel image.
*
*/
/*
* isspace() from linux/ctype.h is needed by next_arg() to filter out
* "space/lf/tab". However, boot/ctype.h conflicts with linux/ctype.h
* because isdigit() is implemented in both of them, so disable
* boot/ctype.h here by pre-defining its include guard.
*/
#define BOOT_CTYPE_H
#include "misc.h"
#include "error.h"
#include "../string.h"
#include "efi.h"
#include <generated/compile.h>
#include <linux/module.h>
#include <linux/uts.h>
#include <linux/utsname.h>
#include <linux/ctype.h>
#include <generated/utsversion.h>
#include <generated/utsrelease.h>
#define _SETUP
#include <asm/setup.h> /* For COMMAND_LINE_SIZE */
#undef _SETUP
extern unsigned long get_cmd_line_ptr(void);
/* Simplified build-specific string for starting entropy. */
static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@"
LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION;
static unsigned long rotate_xor(unsigned long hash, const void *area,
size_t size)
{
size_t i;
unsigned long *ptr = (unsigned long *)area;
for (i = 0; i < size / sizeof(hash); i++) {
/* Rotate by odd number of bits and XOR. */
hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7);
hash ^= ptr[i];
}
return hash;
}
/* Attempt to create a simple but unpredictable starting entropy. */
static unsigned long get_boot_seed(void)
{
unsigned long hash = 0;
hash = rotate_xor(hash, build_str, sizeof(build_str));
hash = rotate_xor(hash, boot_params, sizeof(*boot_params));
return hash;
}
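/*
 * Illustrative sketch (not part of the kernel source): one rotate_xor()
 * step on a 64-bit hash. The shift pair above is a rotate right by 7;
 * an odd rotate count spreads every input word across all bit positions
 * of the hash over successive iterations. example_mix_step() is a
 * hypothetical helper and assumes a 64-bit unsigned long.
 */
static inline unsigned long example_mix_step(unsigned long hash,
unsigned long word)
{
hash = (hash << 57) | (hash >> 7); /* rotate right by 7 */
return hash ^ word;
}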
#define KASLR_COMPRESSED_BOOT
#include "../../lib/kaslr.c"
/* Only supporting at most 4 unusable memmap regions with kaslr */
#define MAX_MEMMAP_REGIONS 4
static bool memmap_too_large;
/*
* Store memory limit: MAXMEM on 64-bit and KERNEL_IMAGE_SIZE on 32-bit.
* It may be reduced by "mem=nn[KMG]" or "memmap=nn[KMG]" command line options.
*/
static u64 mem_limit;
/* Number of immovable memory regions */
static int num_immovable_mem;
enum mem_avoid_index {
MEM_AVOID_ZO_RANGE = 0,
MEM_AVOID_INITRD,
MEM_AVOID_CMDLINE,
MEM_AVOID_BOOTPARAMS,
MEM_AVOID_MEMMAP_BEGIN,
MEM_AVOID_MEMMAP_END = MEM_AVOID_MEMMAP_BEGIN + MAX_MEMMAP_REGIONS - 1,
MEM_AVOID_MAX,
};
static struct mem_vector mem_avoid[MEM_AVOID_MAX];
static bool mem_overlaps(struct mem_vector *one, struct mem_vector *two)
{
/* Item one is entirely before item two. */
if (one->start + one->size <= two->start)
return false;
/* Item one is entirely after item two. */
if (one->start >= two->start + two->size)
return false;
return true;
}
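/*
 * Illustrative example (not part of the kernel source): for
 * one = [0x1000, 0x2000) and two = [0x1800, 0x2800), neither range is
 * entirely before or after the other, so they overlap. Because the
 * intervals are half-open, touching ranges such as [0x1000, 0x2000)
 * and [0x2000, 0x3000) do not overlap.
 */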
char *skip_spaces(const char *str)
{
while (isspace(*str))
++str;
return (char *)str;
}
#include "../../../../lib/ctype.c"
#include "../../../../lib/cmdline.c"
enum parse_mode {
PARSE_MEMMAP,
PARSE_EFI,
};
static int
parse_memmap(char *p, u64 *start, u64 *size, enum parse_mode mode)
{
char *oldp;
if (!p)
return -EINVAL;
/* We don't care about this option here */
if (!strncmp(p, "exactmap", 8))
return -EINVAL;
oldp = p;
*size = memparse(p, &p);
if (p == oldp)
return -EINVAL;
switch (*p) {
case '#':
case '$':
case '!':
*start = memparse(p + 1, &p);
return 0;
case '@':
if (mode == PARSE_MEMMAP) {
/*
* memmap=nn@ss specifies a usable region and
* should be skipped.
*/
*size = 0;
} else {
u64 flags;
/*
* For efi_fake_mem=nn@ss:attr, the attr specifies
* flags that might imply a soft reservation.
*/
*start = memparse(p + 1, &p);
if (p && *p == ':') {
p++;
if (kstrtoull(p, 0, &flags) < 0)
*size = 0;
else if (flags & EFI_MEMORY_SP)
return 0;
}
*size = 0;
}
fallthrough;
default:
/*
* If w/o offset, only size specified, memmap=nn[KMG] has the
* same behaviour as mem=nn[KMG]. It limits the max address
* system can use. Region above the limit should be avoided.
*/
*start = 0;
return 0;
}
return -EINVAL;
}
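/*
 * Illustrative examples (not part of the kernel source) of how
 * parse_memmap() handles typical command-line fragments:
 *
 *   "512M$0x80000000" -> *size = 512M, *start = 0x80000000 (avoid it)
 *   "1G@0x40000000"   -> *start = 0, *size = 0 in PARSE_MEMMAP mode
 *                        (usable region, skipped by the caller)
 *   "2G" (no offset)  -> *start = 0, *size = 2G acts as a memory limit
 */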
static void mem_avoid_memmap(enum parse_mode mode, char *str)
{
static int i;
if (i >= MAX_MEMMAP_REGIONS)
return;
while (str && (i < MAX_MEMMAP_REGIONS)) {
int rc;
u64 start, size;
char *k = strchr(str, ',');
if (k)
*k++ = 0;
rc = parse_memmap(str, &start, &size, mode);
if (rc < 0)
break;
str = k;
if (start == 0) {
/* Store the specified memory limit if size > 0 */
if (size > 0 && size < mem_limit)
mem_limit = size;
continue;
}
mem_avoid[MEM_AVOID_MEMMAP_BEGIN + i].start = start;
mem_avoid[MEM_AVOID_MEMMAP_BEGIN + i].size = size;
i++;
}
/* More than 4 memmaps, fail kaslr */
if ((i >= MAX_MEMMAP_REGIONS) && str)
memmap_too_large = true;
}
/* Store the number of 1GB huge pages which users specified: */
static unsigned long max_gb_huge_pages;
static void parse_gb_huge_pages(char *param, char *val)
{
static bool gbpage_sz;
char *p;
if (!strcmp(param, "hugepagesz")) {
p = val;
if (memparse(p, &p) != PUD_SIZE) {
gbpage_sz = false;
return;
}
if (gbpage_sz)
warn("Repeatedly set hugeTLB page size of 1G!\n");
gbpage_sz = true;
return;
}
if (!strcmp(param, "hugepages") && gbpage_sz) {
p = val;
max_gb_huge_pages = simple_strtoull(p, &p, 0);
return;
}
}
static void handle_mem_options(void)
{
char *args = (char *)get_cmd_line_ptr();
size_t len;
char *tmp_cmdline;
char *param, *val;
u64 mem_size;
if (!args)
return;
len = strnlen(args, COMMAND_LINE_SIZE-1);
tmp_cmdline = malloc(len + 1);
if (!tmp_cmdline)
error("Failed to allocate space for tmp_cmdline");
memcpy(tmp_cmdline, args, len);
tmp_cmdline[len] = 0;
args = tmp_cmdline;
/* Chew leading spaces */
args = skip_spaces(args);
while (*args) {
args = next_arg(args, &param, &val);
/* Stop at -- */
if (!val && strcmp(param, "--") == 0)
break;
if (!strcmp(param, "memmap")) {
mem_avoid_memmap(PARSE_MEMMAP, val);
} else if (IS_ENABLED(CONFIG_X86_64) && strstr(param, "hugepages")) {
parse_gb_huge_pages(param, val);
} else if (!strcmp(param, "mem")) {
char *p = val;
if (!strcmp(p, "nopentium"))
continue;
mem_size = memparse(p, &p);
if (mem_size == 0)
break;
if (mem_size < mem_limit)
mem_limit = mem_size;
} else if (!strcmp(param, "efi_fake_mem")) {
mem_avoid_memmap(PARSE_EFI, val);
}
}
free(tmp_cmdline);
return;
}
/*
* In theory, KASLR can put the kernel anywhere in the range of [16M, MAXMEM)
* on 64-bit, and [16M, KERNEL_IMAGE_SIZE) on 32-bit.
*
* The mem_avoid array is used to store the ranges that need to be avoided
* when KASLR searches for an appropriate random address. We must avoid any
* regions that are unsafe to overlap with during decompression, and other
* things like the initrd, cmdline and boot_params. This comment seeks to
* explain mem_avoid as clearly as possible since incorrect mem_avoid
* memory ranges lead to really hard to debug boot failures.
*
* The initrd, cmdline, and boot_params are trivial to identify for
* avoiding. They are MEM_AVOID_INITRD, MEM_AVOID_CMDLINE, and
* MEM_AVOID_BOOTPARAMS respectively below.
*
* What is less obvious is how to avoid the range of memory that is used
* during decompression (MEM_AVOID_ZO_RANGE below). This range must cover
* the compressed kernel (ZO) and its run space, which is used to extract
* the uncompressed kernel (VO) and relocs.
*
* ZO's full run size sits against the end of the decompression buffer, so
* we can calculate where text, data, bss, etc of ZO are positioned more
* easily.
*
* For additional background, the decompression calculations can be found
* in header.S, and the memory diagram is based on the one found in misc.c.
*
* The following conditions are already enforced by the image layouts and
* associated code:
* - input + input_size >= output + output_size
* - kernel_total_size <= init_size
* - kernel_total_size <= output_size (see Note below)
* - output + init_size >= output + output_size
*
* (Note that kernel_total_size and output_size have no fundamental
* relationship, but output_size is passed to choose_random_location
* as a maximum of the two. The diagram is showing a case where
* kernel_total_size is larger than output_size, but this case is
* handled by bumping output_size.)
*
* The above conditions can be illustrated by a diagram:
*
* 0     output            input                      input+input_size output+init_size
*       |                 |                          |                |
*       |                 |                          |                |
* |-----|--------|--------|--------------|-----------|--|-------------|
*                |                       |              |
*                |                       |              |
* output+init_size-ZO_INIT_SIZE  output+output_size  output+kernel_total_size
*
* [output, output+init_size) is the entire memory range used for
* extracting the compressed image.
*
* [output, output+kernel_total_size) is the range needed for the
* uncompressed kernel (VO) and its run size (bss, brk, etc).
*
* [output, output+output_size) is VO plus relocs (i.e. the entire
* uncompressed payload contained by ZO). This is the area of the buffer
* written to during decompression.
*
* [output+init_size-ZO_INIT_SIZE, output+init_size) is the worst-case
* range of the copied ZO and decompression code. (i.e. the range
* covered backwards of size ZO_INIT_SIZE, starting from output+init_size.)
*
* [input, input+input_size) is the original copied compressed image (ZO)
* (i.e. it does not include its run size). This range must be avoided
* because it contains the data used for decompression.
*
* [input+input_size, output+init_size) is [_text, _end) for ZO. This
* range includes ZO's heap and stack, and must be avoided since it
* performs the decompression.
*
* Since the above two ranges need to be avoided and they are adjacent,
* they can be merged, resulting in: [input, output+init_size) which
* becomes the MEM_AVOID_ZO_RANGE below.
*/
static void mem_avoid_init(unsigned long input, unsigned long input_size,
unsigned long output)
{
unsigned long init_size = boot_params->hdr.init_size;
u64 initrd_start, initrd_size;
unsigned long cmd_line, cmd_line_size;
/*
* Avoid the region that is unsafe to overlap during
* decompression.
*/
mem_avoid[MEM_AVOID_ZO_RANGE].start = input;
mem_avoid[MEM_AVOID_ZO_RANGE].size = (output + init_size) - input;
/* Avoid initrd. */
initrd_start = (u64)boot_params->ext_ramdisk_image << 32;
initrd_start |= boot_params->hdr.ramdisk_image;
initrd_size = (u64)boot_params->ext_ramdisk_size << 32;
initrd_size |= boot_params->hdr.ramdisk_size;
mem_avoid[MEM_AVOID_INITRD].start = initrd_start;
mem_avoid[MEM_AVOID_INITRD].size = initrd_size;
/* No need to set mapping for initrd, it will be handled in VO. */
/* Avoid kernel command line. */
cmd_line = get_cmd_line_ptr();
/* Calculate size of cmd_line. */
if (cmd_line) {
cmd_line_size = strnlen((char *)cmd_line, COMMAND_LINE_SIZE-1) + 1;
mem_avoid[MEM_AVOID_CMDLINE].start = cmd_line;
mem_avoid[MEM_AVOID_CMDLINE].size = cmd_line_size;
}
/* Avoid boot parameters. */
mem_avoid[MEM_AVOID_BOOTPARAMS].start = (unsigned long)boot_params;
mem_avoid[MEM_AVOID_BOOTPARAMS].size = sizeof(*boot_params);
/* We don't need to set a mapping for setup_data. */
/* Mark the memmap regions we need to avoid */
handle_mem_options();
/* Enumerate the immovable memory regions */
num_immovable_mem = count_immovable_mem_regions();
}
/*
* Does this memory vector overlap a known avoided area? If so, record the
* overlap region with the lowest address.
*/
static bool mem_avoid_overlap(struct mem_vector *img,
struct mem_vector *overlap)
{
int i;
struct setup_data *ptr;
u64 earliest = img->start + img->size;
bool is_overlapping = false;
for (i = 0; i < MEM_AVOID_MAX; i++) {
if (mem_overlaps(img, &mem_avoid[i]) &&
mem_avoid[i].start < earliest) {
*overlap = mem_avoid[i];
earliest = overlap->start;
is_overlapping = true;
}
}
/* Avoid all entries in the setup_data linked list. */
ptr = (struct setup_data *)(unsigned long)boot_params->hdr.setup_data;
while (ptr) {
struct mem_vector avoid;
avoid.start = (unsigned long)ptr;
avoid.size = sizeof(*ptr) + ptr->len;
if (mem_overlaps(img, &avoid) && (avoid.start < earliest)) {
*overlap = avoid;
earliest = overlap->start;
is_overlapping = true;
}
if (ptr->type == SETUP_INDIRECT &&
((struct setup_indirect *)ptr->data)->type != SETUP_INDIRECT) {
avoid.start = ((struct setup_indirect *)ptr->data)->addr;
avoid.size = ((struct setup_indirect *)ptr->data)->len;
if (mem_overlaps(img, &avoid) && (avoid.start < earliest)) {
*overlap = avoid;
earliest = overlap->start;
is_overlapping = true;
}
}
ptr = (struct setup_data *)(unsigned long)ptr->next;
}
return is_overlapping;
}
struct slot_area {
u64 addr;
unsigned long num;
};
#define MAX_SLOT_AREA 100
static struct slot_area slot_areas[MAX_SLOT_AREA];
static unsigned int slot_area_index;
static unsigned long slot_max;
static void store_slot_info(struct mem_vector *region, unsigned long image_size)
{
struct slot_area slot_area;
if (slot_area_index == MAX_SLOT_AREA)
return;
slot_area.addr = region->start;
slot_area.num = 1 + (region->size - image_size) / CONFIG_PHYSICAL_ALIGN;
slot_areas[slot_area_index++] = slot_area;
slot_max += slot_area.num;
}
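/*
 * Illustrative example (not part of the kernel source): with
 * CONFIG_PHYSICAL_ALIGN = 2M, a 100M region and a 30M image give
 * 1 + (100M - 30M) / 2M = 36 slots, i.e. the image may start at each
 * 2M step from the region base while still fitting entirely inside.
 */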
/*
* Skip as many 1GB huge pages as possible in the passed region
* according to the number which users specified:
*/
static void
process_gb_huge_pages(struct mem_vector *region, unsigned long image_size)
{
u64 pud_start, pud_end;
unsigned long gb_huge_pages;
struct mem_vector tmp;
if (!IS_ENABLED(CONFIG_X86_64) || !max_gb_huge_pages) {
store_slot_info(region, image_size);
return;
}
/* Are there any 1GB pages in the region? */
pud_start = ALIGN(region->start, PUD_SIZE);
pud_end = ALIGN_DOWN(region->start + region->size, PUD_SIZE);
/* No good 1GB huge pages found: */
if (pud_start >= pud_end) {
store_slot_info(region, image_size);
return;
}
/* Check if the head part of the region is usable. */
if (pud_start >= region->start + image_size) {
tmp.start = region->start;
tmp.size = pud_start - region->start;
store_slot_info(&tmp, image_size);
}
/* Skip the good 1GB pages. */
gb_huge_pages = (pud_end - pud_start) >> PUD_SHIFT;
if (gb_huge_pages > max_gb_huge_pages) {
pud_end = pud_start + (max_gb_huge_pages << PUD_SHIFT);
max_gb_huge_pages = 0;
} else {
max_gb_huge_pages -= gb_huge_pages;
}
/* Check if the tail part of the region is usable. */
if (region->start + region->size >= pud_end + image_size) {
tmp.start = pud_end;
tmp.size = region->start + region->size - pud_end;
store_slot_info(&tmp, image_size);
}
}
static u64 slots_fetch_random(void)
{
unsigned long slot;
unsigned int i;
/* Handle case of no slots stored. */
if (slot_max == 0)
return 0;
slot = kaslr_get_random_long("Physical") % slot_max;
for (i = 0; i < slot_area_index; i++) {
if (slot >= slot_areas[i].num) {
slot -= slot_areas[i].num;
continue;
}
return slot_areas[i].addr + ((u64)slot * CONFIG_PHYSICAL_ALIGN);
}
if (i == slot_area_index)
debug_putstr("slots_fetch_random() failed!?\n");
return 0;
}
static void __process_mem_region(struct mem_vector *entry,
unsigned long minimum,
unsigned long image_size)
{
struct mem_vector region, overlap;
u64 region_end;
/* Enforce minimum and memory limit. */
region.start = max_t(u64, entry->start, minimum);
region_end = min(entry->start + entry->size, mem_limit);
/* Give up if slot area array is full. */
while (slot_area_index < MAX_SLOT_AREA) {
/* Potentially raise address to meet alignment needs. */
region.start = ALIGN(region.start, CONFIG_PHYSICAL_ALIGN);
/* Did we raise the address above the passed in memory entry? */
if (region.start > region_end)
return;
/* Reduce size by any delta from the original address. */
region.size = region_end - region.start;
/* Return if region can't contain decompressed kernel */
if (region.size < image_size)
return;
/* If nothing overlaps, store the region and return. */
if (!mem_avoid_overlap(&region, &overlap)) {
process_gb_huge_pages(&region, image_size);
return;
}
/* Store beginning of region if holds at least image_size. */
if (overlap.start >= region.start + image_size) {
region.size = overlap.start - region.start;
process_gb_huge_pages(&region, image_size);
}
/* Clip off the overlapping region and start over. */
region.start = overlap.start + overlap.size;
}
}
static bool process_mem_region(struct mem_vector *region,
unsigned long minimum,
unsigned long image_size)
{
int i;
/*
* If no immovable memory found, or MEMORY_HOTREMOVE disabled,
* use @region directly.
*/
if (!num_immovable_mem) {
__process_mem_region(region, minimum, image_size);
if (slot_area_index == MAX_SLOT_AREA) {
debug_putstr("Aborted e820/efi memmap scan (slot_areas full)!\n");
return true;
}
return false;
}
#if defined(CONFIG_MEMORY_HOTREMOVE) && defined(CONFIG_ACPI)
/*
* If immovable memory found, filter the intersection between
* immovable memory and @region.
*/
for (i = 0; i < num_immovable_mem; i++) {
u64 start, end, entry_end, region_end;
struct mem_vector entry;
if (!mem_overlaps(region, &immovable_mem[i]))
continue;
start = immovable_mem[i].start;
end = start + immovable_mem[i].size;
region_end = region->start + region->size;
entry.start = clamp(region->start, start, end);
entry_end = clamp(region_end, start, end);
entry.size = entry_end - entry.start;
__process_mem_region(&entry, minimum, image_size);
if (slot_area_index == MAX_SLOT_AREA) {
debug_putstr("Aborted e820/efi memmap scan when walking immovable regions(slot_areas full)!\n");
return true;
}
}
#endif
return false;
}
#ifdef CONFIG_EFI
/*
* Only EFI_CONVENTIONAL_MEMORY and EFI_UNACCEPTED_MEMORY (if supported) are
* guaranteed to be free.
*
* Pick free memory more conservatively than the EFI spec allows: according to
* the spec, EFI_BOOT_SERVICES_{CODE|DATA} are also free memory and thus
* available to place the kernel image into, but in practice there's firmware
* where using that memory leads to crashes. Buggy vendor EFI code registers
* for an event that triggers on SetVirtualAddressMap(). The handler assumes
* that EFI_BOOT_SERVICES_DATA memory has not been touched by the loader yet,
* which is probably true for Windows.
*
* Preserve EFI_BOOT_SERVICES_* regions until after SetVirtualAddressMap().
*/
static inline bool memory_type_is_free(efi_memory_desc_t *md)
{
if (md->type == EFI_CONVENTIONAL_MEMORY)
return true;
if (IS_ENABLED(CONFIG_UNACCEPTED_MEMORY) &&
md->type == EFI_UNACCEPTED_MEMORY)
return true;
return false;
}
/*
* Returns true if we processed the EFI memmap, which we prefer over the E820
* table if it is available.
*/
static bool
process_efi_entries(unsigned long minimum, unsigned long image_size)
{
struct efi_info *e = &boot_params->efi_info;
bool efi_mirror_found = false;
struct mem_vector region;
efi_memory_desc_t *md;
unsigned long pmap;
char *signature;
u32 nr_desc;
int i;
signature = (char *)&e->efi_loader_signature;
if (strncmp(signature, EFI32_LOADER_SIGNATURE, 4) &&
strncmp(signature, EFI64_LOADER_SIGNATURE, 4))
return false;
#ifdef CONFIG_X86_32
/* Can't handle data above 4GB at this time */
if (e->efi_memmap_hi) {
warn("EFI memmap is above 4GB, can't be handled now on x86_32. EFI should be disabled.\n");
return false;
}
pmap = e->efi_memmap;
#else
pmap = (e->efi_memmap | ((__u64)e->efi_memmap_hi << 32));
#endif
nr_desc = e->efi_memmap_size / e->efi_memdesc_size;
for (i = 0; i < nr_desc; i++) {
md = efi_early_memdesc_ptr(pmap, e->efi_memdesc_size, i);
if (md->attribute & EFI_MEMORY_MORE_RELIABLE) {
efi_mirror_found = true;
break;
}
}
for (i = 0; i < nr_desc; i++) {
md = efi_early_memdesc_ptr(pmap, e->efi_memdesc_size, i);
if (!memory_type_is_free(md))
continue;
if (efi_soft_reserve_enabled() &&
(md->attribute & EFI_MEMORY_SP))
continue;
if (efi_mirror_found &&
!(md->attribute & EFI_MEMORY_MORE_RELIABLE))
continue;
region.start = md->phys_addr;
region.size = md->num_pages << EFI_PAGE_SHIFT;
if (process_mem_region(&region, minimum, image_size))
break;
}
return true;
}
#else
static inline bool
process_efi_entries(unsigned long minimum, unsigned long image_size)
{
return false;
}
#endif
static void process_e820_entries(unsigned long minimum,
unsigned long image_size)
{
int i;
struct mem_vector region;
struct boot_e820_entry *entry;
/* Verify potential e820 positions, appending to slots list. */
for (i = 0; i < boot_params->e820_entries; i++) {
entry = &boot_params->e820_table[i];
/* Skip non-RAM entries. */
if (entry->type != E820_TYPE_RAM)
continue;
region.start = entry->addr;
region.size = entry->size;
if (process_mem_region(&region, minimum, image_size))
break;
}
}
static unsigned long find_random_phys_addr(unsigned long minimum,
unsigned long image_size)
{
u64 phys_addr;
/* Bail out early if it's impossible to succeed. */
if (minimum + image_size > mem_limit)
return 0;
/* Check if we had too many memmaps. */
if (memmap_too_large) {
debug_putstr("Aborted memory entries scan (more than 4 memmap= args)!\n");
return 0;
}
if (!process_efi_entries(minimum, image_size))
process_e820_entries(minimum, image_size);
phys_addr = slots_fetch_random();
/* Perform a final check to make sure the address is in range. */
if (phys_addr < minimum || phys_addr + image_size > mem_limit) {
warn("Invalid physical address chosen!\n");
return 0;
}
return (unsigned long)phys_addr;
}
static unsigned long find_random_virt_addr(unsigned long minimum,
unsigned long image_size)
{
unsigned long slots, random_addr;
/*
* How many CONFIG_PHYSICAL_ALIGN-sized slots can hold image_size
* within the range from minimum to KERNEL_IMAGE_SIZE?
*/
slots = 1 + (KERNEL_IMAGE_SIZE - minimum - image_size) / CONFIG_PHYSICAL_ALIGN;
random_addr = kaslr_get_random_long("Virtual") % slots;
return random_addr * CONFIG_PHYSICAL_ALIGN + minimum;
}
/*
* Since this function works with addresses numerically rather than
* through pointers, it takes the input and output as 'unsigned long'.
*/
void choose_random_location(unsigned long input,
unsigned long input_size,
unsigned long *output,
unsigned long output_size,
unsigned long *virt_addr)
{
unsigned long random_addr, min_addr;
if (cmdline_find_option_bool("nokaslr")) {
warn("KASLR disabled: 'nokaslr' on cmdline.");
return;
}
boot_params->hdr.loadflags |= KASLR_FLAG;
if (IS_ENABLED(CONFIG_X86_32))
mem_limit = KERNEL_IMAGE_SIZE;
else
mem_limit = MAXMEM;
/* Record the various known unsafe memory ranges. */
mem_avoid_init(input, input_size, *output);
/*
* Low end of the randomization range should be the
* smaller of 512M or the initial kernel image
* location:
*/
min_addr = min(*output, 512UL << 20);
/* Make sure minimum is aligned. */
min_addr = ALIGN(min_addr, CONFIG_PHYSICAL_ALIGN);
/* Walk available memory entries to find a random address. */
random_addr = find_random_phys_addr(min_addr, output_size);
if (!random_addr) {
warn("Physical KASLR disabled: no suitable memory region!");
} else {
/* Update the new physical address location. */
if (*output != random_addr)
*output = random_addr;
}
/* Pick random virtual address starting from LOAD_PHYSICAL_ADDR. */
if (IS_ENABLED(CONFIG_X86_64))
random_addr = find_random_virt_addr(LOAD_PHYSICAL_ADDR, output_size);
*virt_addr = random_addr;
}
| linux-master | arch/x86/boot/compressed/kaslr.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <asm/trap_pf.h>
#include <asm/segment.h>
#include <asm/trapnr.h>
#include "misc.h"
static void set_idt_entry(int vector, void (*handler)(void))
{
unsigned long address = (unsigned long)handler;
gate_desc entry;
memset(&entry, 0, sizeof(entry));
entry.offset_low = (u16)(address & 0xffff);
entry.segment = __KERNEL_CS;
entry.bits.type = GATE_TRAP;
entry.bits.p = 1;
entry.offset_middle = (u16)((address >> 16) & 0xffff);
entry.offset_high = (u32)(address >> 32);
memcpy(&boot_idt[vector], &entry, sizeof(entry));
}
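/*
 * Illustrative example (not part of the kernel source): a handler at
 * 0xffffffff81234567 is split across the gate above as
 * offset_low = 0x4567, offset_middle = 0x8123 and
 * offset_high = 0xffffffff.
 */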
/* Have this here so we don't need to include <asm/desc.h> */
static void load_boot_idt(const struct desc_ptr *dtr)
{
asm volatile("lidt %0"::"m" (*dtr));
}
/* Setup IDT before kernel jumping to .Lrelocated */
void load_stage1_idt(void)
{
boot_idt_desc.address = (unsigned long)boot_idt;
if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT))
set_idt_entry(X86_TRAP_VC, boot_stage1_vc);
load_boot_idt(&boot_idt_desc);
}
/*
* Setup IDT after kernel jumping to .Lrelocated.
*
* initialize_identity_maps() needs a #PF handler to be setup
* in order to be able to fault-in identity mapping ranges; see
* do_boot_page_fault().
*
* This #PF handler setup needs to happen in load_stage2_idt() where the
* IDT is loaded and there the #VC IDT entry gets setup too.
*
* In order to be able to handle #VCs, one needs a GHCB which
* gets setup with an already set up pagetable, which is done in
* initialize_identity_maps(). And there's the catch 22: the boot #VC
* handler do_boot_stage2_vc() needs to call early_setup_ghcb() itself
* (and, especially set_page_decrypted()) because the SEV-ES setup code
* cannot initialize a GHCB as there's no #PF handler yet...
*/
void load_stage2_idt(void)
{
boot_idt_desc.address = (unsigned long)boot_idt;
set_idt_entry(X86_TRAP_PF, boot_page_fault);
#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
* Clear the second stage #VC handler in case guest types
* needing #VC have not been detected.
*/
if (sev_status & BIT(1))
set_idt_entry(X86_TRAP_VC, boot_stage2_vc);
else
set_idt_entry(X86_TRAP_VC, NULL);
#endif
load_boot_idt(&boot_idt_desc);
}
void cleanup_exception_handling(void)
{
/*
* Flush GHCB from cache and map it encrypted again when running as
* SEV-ES guest.
*/
sev_es_shutdown_ghcb();
/* Set a null-idt, disabling #PF and #VC handling */
boot_idt_desc.size = 0;
boot_idt_desc.address = 0;
load_boot_idt(&boot_idt_desc);
}
| linux-master | arch/x86/boot/compressed/idt_64.c |
// SPDX-License-Identifier: GPL-2.0
/*
* misc.c
*
* This is a collection of several routines used to extract the kernel
* which includes KASLR relocation, decompression, ELF parsing, and
* relocation processing. Additionally included are the screen and serial
* output functions and related debugging support functions.
*
* malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
* puts by Nick Holloway 1993, better puts by Martin Mares 1995
* High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
*/
#include "misc.h"
#include "error.h"
#include "pgtable.h"
#include "../string.h"
#include "../voffset.h"
#include <asm/bootparam_utils.h>
/*
* WARNING!!
* This code is compiled with -fPIC and it is relocated dynamically at
* run time, but no relocation processing is performed. This means that
* it is not safe to place pointers in static structures.
*/
/* Macros used by the included decompressor code below. */
#define STATIC static
/* Define an externally visible malloc()/free(). */
#define MALLOC_VISIBLE
#include <linux/decompress/mm.h>
/*
* Provide definitions of memzero and memmove as some of the decompressors will
* try to define their own functions if these are not defined as macros.
*/
#define memzero(s, n) memset((s), 0, (n))
#ifndef memmove
#define memmove memmove
/* Functions used by the included decompressor code below. */
void *memmove(void *dest, const void *src, size_t n);
#endif
/*
* This is set up by the setup-routine at boot-time
*/
struct boot_params *boot_params;
struct port_io_ops pio_ops;
memptr free_mem_ptr;
memptr free_mem_end_ptr;
static char *vidmem;
static int vidport;
/* These might be accessed before .bss is cleared, so use .data instead. */
static int lines __section(".data");
static int cols __section(".data");
#ifdef CONFIG_KERNEL_GZIP
#include "../../../../lib/decompress_inflate.c"
#endif
#ifdef CONFIG_KERNEL_BZIP2
#include "../../../../lib/decompress_bunzip2.c"
#endif
#ifdef CONFIG_KERNEL_LZMA
#include "../../../../lib/decompress_unlzma.c"
#endif
#ifdef CONFIG_KERNEL_XZ
#include "../../../../lib/decompress_unxz.c"
#endif
#ifdef CONFIG_KERNEL_LZO
#include "../../../../lib/decompress_unlzo.c"
#endif
#ifdef CONFIG_KERNEL_LZ4
#include "../../../../lib/decompress_unlz4.c"
#endif
#ifdef CONFIG_KERNEL_ZSTD
#include "../../../../lib/decompress_unzstd.c"
#endif
/*
* NOTE: When adding a new decompressor, please update the analysis in
* ../header.S.
*/
static void scroll(void)
{
int i;
memmove(vidmem, vidmem + cols * 2, (lines - 1) * cols * 2);
for (i = (lines - 1) * cols * 2; i < lines * cols * 2; i += 2)
vidmem[i] = ' ';
}
#define XMTRDY 0x20
#define TXR 0 /* Transmit register (WRITE) */
#define LSR 5 /* Line Status */
static void serial_putchar(int ch)
{
unsigned timeout = 0xffff;
while ((inb(early_serial_base + LSR) & XMTRDY) == 0 && --timeout)
cpu_relax();
outb(ch, early_serial_base + TXR);
}
void __putstr(const char *s)
{
int x, y, pos;
char c;
if (early_serial_base) {
const char *str = s;
while (*str) {
if (*str == '\n')
serial_putchar('\r');
serial_putchar(*str++);
}
}
if (lines == 0 || cols == 0)
return;
x = boot_params->screen_info.orig_x;
y = boot_params->screen_info.orig_y;
while ((c = *s++) != '\0') {
if (c == '\n') {
x = 0;
if (++y >= lines) {
scroll();
y--;
}
} else {
vidmem[(x + cols * y) * 2] = c;
if (++x >= cols) {
x = 0;
if (++y >= lines) {
scroll();
y--;
}
}
}
}
boot_params->screen_info.orig_x = x;
boot_params->screen_info.orig_y = y;
pos = (x + cols * y) * 2; /* Update cursor position */
outb(14, vidport);
outb(0xff & (pos >> 9), vidport+1);
outb(15, vidport);
outb(0xff & (pos >> 1), vidport+1);
}
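/*
 * Illustrative note (not part of the kernel source): pos above is a
 * byte offset into text-mode video memory (2 bytes per character
 * cell), while CRTC registers 14/15 take a character offset. pos >> 1
 * is that character offset, and pos >> 9 is its high byte, i.e.
 * (pos >> 1) >> 8.
 */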
void __puthex(unsigned long value)
{
char alpha[2] = "0";
int bits;
for (bits = sizeof(value) * 8 - 4; bits >= 0; bits -= 4) {
unsigned long digit = (value >> bits) & 0xf;
if (digit < 0xA)
alpha[0] = '0' + digit;
else
alpha[0] = 'a' + (digit - 0xA);
__putstr(alpha);
}
}
#ifdef CONFIG_X86_NEED_RELOCS
static void handle_relocations(void *output, unsigned long output_len,
unsigned long virt_addr)
{
int *reloc;
unsigned long delta, map, ptr;
unsigned long min_addr = (unsigned long)output;
unsigned long max_addr = min_addr + (VO___bss_start - VO__text);
/*
* Calculate the delta between where vmlinux was linked to load
* and where it was actually loaded.
*/
delta = min_addr - LOAD_PHYSICAL_ADDR;
/*
* The kernel contains a table of relocation addresses. Those
* addresses have the final load address of the kernel in virtual
* memory. We are currently working in the self map. So we need to
* create an adjustment for kernel memory addresses to the self map.
* This will involve subtracting out the base address of the kernel.
*/
map = delta - __START_KERNEL_map;
/*
* 32-bit always performs relocations. 64-bit relocations are only
* needed if KASLR has chosen a different starting address offset
* from __START_KERNEL_map.
*/
if (IS_ENABLED(CONFIG_X86_64))
delta = virt_addr - LOAD_PHYSICAL_ADDR;
if (!delta) {
debug_putstr("No relocation needed... ");
return;
}
debug_putstr("Performing relocations... ");
/*
* Process relocations: 32 bit relocations first then 64 bit after.
* Three sets of binary relocations are added to the end of the kernel
* before compression. Each relocation table entry is the kernel
* address of the location which needs to be updated stored as a
* 32-bit value which is sign extended to 64 bits.
*
* Format is:
*
* kernel bits...
* 0 - zero terminator for 64 bit relocations
* 64 bit relocation repeated
* 0 - zero terminator for inverse 32 bit relocations
* 32 bit inverse relocation repeated
* 0 - zero terminator for 32 bit relocations
* 32 bit relocation repeated
*
* So we work backwards from the end of the decompressed image.
*/
for (reloc = output + output_len - sizeof(*reloc); *reloc; reloc--) {
long extended = *reloc;
extended += map;
ptr = (unsigned long)extended;
if (ptr < min_addr || ptr > max_addr)
error("32-bit relocation outside of kernel!\n");
*(uint32_t *)ptr += delta;
}
#ifdef CONFIG_X86_64
while (*--reloc) {
long extended = *reloc;
extended += map;
ptr = (unsigned long)extended;
if (ptr < min_addr || ptr > max_addr)
error("inverse 32-bit relocation outside of kernel!\n");
*(int32_t *)ptr -= delta;
}
for (reloc--; *reloc; reloc--) {
long extended = *reloc;
extended += map;
ptr = (unsigned long)extended;
if (ptr < min_addr || ptr > max_addr)
error("64-bit relocation outside of kernel!\n");
*(uint64_t *)ptr += delta;
}
#endif
}
#else
static inline void handle_relocations(void *output, unsigned long output_len,
unsigned long virt_addr)
{ }
#endif
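/*
 * Illustrative sketch (not part of the kernel source): each group of
 * relocations above is a zero-terminated array of 32-bit entries
 * walked backwards from the end of the decompressed image.
 * example_walk_group() is a hypothetical helper.
 */
static inline int *example_walk_group(int *reloc)
{
while (*reloc) {
/* ...adjust the word at the address encoded in *reloc... */
reloc--;
}
return reloc; /* left pointing at the zero terminator */
}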
static size_t parse_elf(void *output)
{
#ifdef CONFIG_X86_64
Elf64_Ehdr ehdr;
Elf64_Phdr *phdrs, *phdr;
#else
Elf32_Ehdr ehdr;
Elf32_Phdr *phdrs, *phdr;
#endif
void *dest;
int i;
memcpy(&ehdr, output, sizeof(ehdr));
if (ehdr.e_ident[EI_MAG0] != ELFMAG0 ||
ehdr.e_ident[EI_MAG1] != ELFMAG1 ||
ehdr.e_ident[EI_MAG2] != ELFMAG2 ||
ehdr.e_ident[EI_MAG3] != ELFMAG3)
error("Kernel is not a valid ELF file");
debug_putstr("Parsing ELF... ");
phdrs = malloc(sizeof(*phdrs) * ehdr.e_phnum);
if (!phdrs)
error("Failed to allocate space for phdrs");
memcpy(phdrs, output + ehdr.e_phoff, sizeof(*phdrs) * ehdr.e_phnum);
for (i = 0; i < ehdr.e_phnum; i++) {
phdr = &phdrs[i];
switch (phdr->p_type) {
case PT_LOAD:
#ifdef CONFIG_X86_64
if ((phdr->p_align % 0x200000) != 0)
error("Alignment of LOAD segment isn't multiple of 2MB");
#endif
#ifdef CONFIG_RELOCATABLE
dest = output;
dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
#else
dest = (void *)(phdr->p_paddr);
#endif
memmove(dest, output + phdr->p_offset, phdr->p_filesz);
break;
default: /* Ignore other PT_* */ break;
}
}
free(phdrs);
return ehdr.e_entry - LOAD_PHYSICAL_ADDR;
}
const unsigned long kernel_total_size = VO__end - VO__text;
static u8 boot_heap[BOOT_HEAP_SIZE] __aligned(4);
extern unsigned char input_data[];
extern unsigned int input_len, output_len;
unsigned long decompress_kernel(unsigned char *outbuf, unsigned long virt_addr,
void (*error)(char *x))
{
unsigned long entry;
if (!free_mem_ptr) {
free_mem_ptr = (unsigned long)boot_heap;
free_mem_end_ptr = (unsigned long)boot_heap + sizeof(boot_heap);
}
if (__decompress(input_data, input_len, NULL, NULL, outbuf, output_len,
NULL, error) < 0)
return ULONG_MAX;
entry = parse_elf(outbuf);
handle_relocations(outbuf, output_len, virt_addr);
return entry;
}
/*
* The compressed kernel image (ZO) has been moved so that its position
* is against the end of the buffer used to hold the uncompressed kernel
* image (VO) and the execution environment (.bss, .brk), which makes sure
* there is room to do the in-place decompression. (See header.S for the
* calculations.)
*
*                             |-----compressed kernel image------|
*                             V                                  V
* 0                    extract_offset              +INIT_SIZE
* |-----------|---------------|-------------------------|--------|
* |                           |                         |        |
* VO__text    startup_32 of ZO               VO__end  ZO__end
* ^                                                      ^
* |-------uncompressed kernel image---------------------|
*
*/
asmlinkage __visible void *extract_kernel(void *rmode, unsigned char *output)
{
unsigned long virt_addr = LOAD_PHYSICAL_ADDR;
memptr heap = (memptr)boot_heap;
unsigned long needed_size;
size_t entry_offset;
/* Retain x86 boot parameters pointer passed from startup_32/64. */
boot_params = rmode;
/* Clear flags intended for solely in-kernel use. */
boot_params->hdr.loadflags &= ~KASLR_FLAG;
sanitize_boot_params(boot_params);
if (boot_params->screen_info.orig_video_mode == 7) {
vidmem = (char *) 0xb0000;
vidport = 0x3b4;
} else {
vidmem = (char *) 0xb8000;
vidport = 0x3d4;
}
lines = boot_params->screen_info.orig_video_lines;
cols = boot_params->screen_info.orig_video_cols;
init_default_io_ops();
/*
* Detect TDX guest environment.
*
* It has to be done before console_init() in order to use
* paravirtualized port I/O operations if needed.
*/
early_tdx_detect();
console_init();
/*
* Save RSDP address for later use. Have this after console_init()
* so that early debugging output from the RSDP parsing code can be
* collected.
*/
boot_params->acpi_rsdp_addr = get_rsdp_addr();
debug_putstr("early console in extract_kernel\n");
free_mem_ptr = heap; /* Heap */
free_mem_end_ptr = heap + BOOT_HEAP_SIZE;
/*
* The memory hole needed for the kernel is the larger of either
* the entire decompressed kernel plus relocation table, or the
* entire decompressed kernel plus .bss and .brk sections.
*
* On X86_64, the memory is mapped with PMD pages. Round the
* size up so that the full extent of PMD pages mapped is
* included in the check against the valid memory table
* entries. This ensures the full mapped area is usable RAM
* and doesn't include any reserved areas.
*/
needed_size = max_t(unsigned long, output_len, kernel_total_size);
#ifdef CONFIG_X86_64
needed_size = ALIGN(needed_size, MIN_KERNEL_ALIGN);
#endif
/* Report initial kernel position details. */
debug_putaddr(input_data);
debug_putaddr(input_len);
debug_putaddr(output);
debug_putaddr(output_len);
debug_putaddr(kernel_total_size);
debug_putaddr(needed_size);
#ifdef CONFIG_X86_64
/* Report address of 32-bit trampoline */
debug_putaddr(trampoline_32bit);
#endif
choose_random_location((unsigned long)input_data, input_len,
(unsigned long *)&output,
needed_size,
&virt_addr);
/* Validate memory location choices. */
if ((unsigned long)output & (MIN_KERNEL_ALIGN - 1))
error("Destination physical address inappropriately aligned");
if (virt_addr & (MIN_KERNEL_ALIGN - 1))
error("Destination virtual address inappropriately aligned");
#ifdef CONFIG_X86_64
if (heap > 0x3fffffffffffUL)
error("Destination address too large");
if (virt_addr + needed_size > KERNEL_IMAGE_SIZE)
error("Destination virtual address is beyond the kernel mapping area");
#else
if (heap > ((-__PAGE_OFFSET-(128<<20)-1) & 0x7fffffff))
error("Destination address too large");
#endif
#ifndef CONFIG_RELOCATABLE
if (virt_addr != LOAD_PHYSICAL_ADDR)
error("Destination virtual address changed when not relocatable");
#endif
debug_putstr("\nDecompressing Linux... ");
if (init_unaccepted_memory()) {
debug_putstr("Accepting memory... ");
accept_memory(__pa(output), __pa(output) + needed_size);
}
entry_offset = decompress_kernel(output, virt_addr, error);
debug_putstr("done.\nBooting the kernel (entry_offset: 0x");
debug_puthex(entry_offset);
debug_putstr(").\n");
/* Disable exception handling before booting the kernel */
cleanup_exception_handling();
return output + entry_offset;
}
void fortify_panic(const char *name)
{
error("detected buffer overflow");
}
| linux-master | arch/x86/boot/compressed/misc.c |
// SPDX-License-Identifier: GPL-2.0
#include "../cpuflags.h"
#include "../string.h"
#include "../io.h"
#include "error.h"
#include <vdso/limits.h>
#include <uapi/asm/vmx.h>
#include <asm/shared/tdx.h>
/* Called from __tdx_hypercall() for unrecoverable failure */
void __tdx_hypercall_failed(void)
{
error("TDVMCALL failed. TDX module bug?");
}
static inline unsigned int tdx_io_in(int size, u16 port)
{
struct tdx_hypercall_args args = {
.r10 = TDX_HYPERCALL_STANDARD,
.r11 = hcall_func(EXIT_REASON_IO_INSTRUCTION),
.r12 = size,
.r13 = 0,
.r14 = port,
};
if (__tdx_hypercall_ret(&args))
return UINT_MAX;
return args.r11;
}
static inline void tdx_io_out(int size, u16 port, u32 value)
{
struct tdx_hypercall_args args = {
.r10 = TDX_HYPERCALL_STANDARD,
.r11 = hcall_func(EXIT_REASON_IO_INSTRUCTION),
.r12 = size,
.r13 = 1,
.r14 = port,
.r15 = value,
};
__tdx_hypercall(&args);
}
static inline u8 tdx_inb(u16 port)
{
return tdx_io_in(1, port);
}
static inline void tdx_outb(u8 value, u16 port)
{
tdx_io_out(1, port, value);
}
static inline void tdx_outw(u16 value, u16 port)
{
tdx_io_out(2, port, value);
}
void early_tdx_detect(void)
{
u32 eax, sig[3];
cpuid_count(TDX_CPUID_LEAF_ID, 0, &eax, &sig[0], &sig[2], &sig[1]);
if (memcmp(TDX_IDENT, sig, sizeof(sig)))
return;
/* Use hypercalls instead of I/O instructions */
pio_ops.f_inb = tdx_inb;
pio_ops.f_outb = tdx_outb;
pio_ops.f_outw = tdx_outw;
}
| linux-master | arch/x86/boot/compressed/tdx.c |
// SPDX-License-Identifier: GPL-2.0
/*
* AMD Encrypted Register State Support
*
* Author: Joerg Roedel <[email protected]>
*/
/*
* misc.h needs to be first because it knows how to include the other kernel
* headers in the pre-decompression code in a way that does not break
* compilation.
*/
#include "misc.h"
#include <asm/pgtable_types.h>
#include <asm/sev.h>
#include <asm/trapnr.h>
#include <asm/trap_pf.h>
#include <asm/msr-index.h>
#include <asm/fpu/xcr.h>
#include <asm/ptrace.h>
#include <asm/svm.h>
#include <asm/cpuid.h>
#include "error.h"
#include "../msr.h"
struct ghcb boot_ghcb_page __aligned(PAGE_SIZE);
struct ghcb *boot_ghcb;
/*
* Copy a version of this function here - insn-eval.c can't be used in
* pre-decompression code.
*/
static bool insn_has_rep_prefix(struct insn *insn)
{
insn_byte_t p;
int i;
insn_get_prefixes(insn);
for_each_insn_prefix(insn, i, p) {
if (p == 0xf2 || p == 0xf3)
return true;
}
return false;
}
/*
* Only a dummy for insn_get_seg_base() - Early boot-code is 64bit only and
* doesn't use segments.
*/
static unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx)
{
return 0UL;
}
static inline u64 sev_es_rd_ghcb_msr(void)
{
struct msr m;
boot_rdmsr(MSR_AMD64_SEV_ES_GHCB, &m);
return m.q;
}
static inline void sev_es_wr_ghcb_msr(u64 val)
{
struct msr m;
m.q = val;
boot_wrmsr(MSR_AMD64_SEV_ES_GHCB, &m);
}
static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt)
{
char buffer[MAX_INSN_SIZE];
int ret;
memcpy(buffer, (unsigned char *)ctxt->regs->ip, MAX_INSN_SIZE);
ret = insn_decode(&ctxt->insn, buffer, MAX_INSN_SIZE, INSN_MODE_64);
if (ret < 0)
return ES_DECODE_FAILED;
return ES_OK;
}
static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
void *dst, char *buf, size_t size)
{
memcpy(dst, buf, size);
return ES_OK;
}
static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
void *src, char *buf, size_t size)
{
memcpy(buf, src, size);
return ES_OK;
}
#undef __init
#define __init
#define __BOOT_COMPRESSED
/* Basic instruction decoding support needed */
#include "../../lib/inat.c"
#include "../../lib/insn.c"
/* Include code for early handlers */
#include "../../kernel/sev-shared.c"
bool sev_snp_enabled(void)
{
return sev_status & MSR_AMD64_SEV_SNP_ENABLED;
}
static void __page_state_change(unsigned long paddr, enum psc_op op)
{
u64 val;
if (!sev_snp_enabled())
return;
/*
* If private -> shared then invalidate the page before requesting the
* state change in the RMP table.
*/
if (op == SNP_PAGE_STATE_SHARED && pvalidate(paddr, RMP_PG_SIZE_4K, 0))
sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
/* Issue VMGEXIT to change the page state in RMP table. */
sev_es_wr_ghcb_msr(GHCB_MSR_PSC_REQ_GFN(paddr >> PAGE_SHIFT, op));
VMGEXIT();
/* Read the response of the VMGEXIT. */
val = sev_es_rd_ghcb_msr();
if ((GHCB_RESP_CODE(val) != GHCB_MSR_PSC_RESP) || GHCB_MSR_PSC_RESP_VAL(val))
sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
/*
* Now that page state is changed in the RMP table, validate it so that it is
* consistent with the RMP entry.
*/
if (op == SNP_PAGE_STATE_PRIVATE && pvalidate(paddr, RMP_PG_SIZE_4K, 1))
sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
}
void snp_set_page_private(unsigned long paddr)
{
__page_state_change(paddr, SNP_PAGE_STATE_PRIVATE);
}
void snp_set_page_shared(unsigned long paddr)
{
__page_state_change(paddr, SNP_PAGE_STATE_SHARED);
}
static bool early_setup_ghcb(void)
{
if (set_page_decrypted((unsigned long)&boot_ghcb_page))
return false;
/* Page is now mapped decrypted, clear it */
memset(&boot_ghcb_page, 0, sizeof(boot_ghcb_page));
boot_ghcb = &boot_ghcb_page;
/* Initialize lookup tables for the instruction decoder */
inat_init_tables();
/* SNP guest requires the GHCB GPA must be registered */
if (sev_snp_enabled())
snp_register_ghcb_early(__pa(&boot_ghcb_page));
return true;
}
static phys_addr_t __snp_accept_memory(struct snp_psc_desc *desc,
phys_addr_t pa, phys_addr_t pa_end)
{
struct psc_hdr *hdr;
struct psc_entry *e;
unsigned int i;
hdr = &desc->hdr;
memset(hdr, 0, sizeof(*hdr));
e = desc->entries;
i = 0;
while (pa < pa_end && i < VMGEXIT_PSC_MAX_ENTRY) {
hdr->end_entry = i;
e->gfn = pa >> PAGE_SHIFT;
e->operation = SNP_PAGE_STATE_PRIVATE;
if (IS_ALIGNED(pa, PMD_SIZE) && (pa_end - pa) >= PMD_SIZE) {
e->pagesize = RMP_PG_SIZE_2M;
pa += PMD_SIZE;
} else {
e->pagesize = RMP_PG_SIZE_4K;
pa += PAGE_SIZE;
}
e++;
i++;
}
if (vmgexit_psc(boot_ghcb, desc))
sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
pvalidate_pages(desc);
return pa;
}
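/*
 * Illustrative example (not part of the kernel source): accepting
 * [0x1ff000, 0x600000) above emits one 4K entry at 0x1ff000, then 2M
 * entries at 0x200000 and 0x400000 once the cursor is 2M-aligned and
 * at least 2M remains before pa_end.
 */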
void snp_accept_memory(phys_addr_t start, phys_addr_t end)
{
struct snp_psc_desc desc = {};
unsigned int i;
phys_addr_t pa;
if (!boot_ghcb && !early_setup_ghcb())
sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
pa = start;
while (pa < end)
pa = __snp_accept_memory(&desc, pa, end);
}
void sev_es_shutdown_ghcb(void)
{
if (!boot_ghcb)
return;
if (!sev_es_check_cpu_features())
error("SEV-ES CPU Features missing.");
/*
* GHCB Page must be flushed from the cache and mapped encrypted again.
* Otherwise the running kernel will see strange cache effects when
* trying to use that page.
*/
if (set_page_encrypted((unsigned long)&boot_ghcb_page))
error("Can't map GHCB page encrypted");
/*
* GHCB page is mapped encrypted again and flushed from the cache.
* Mark it non-present now to catch bugs when #VC exceptions trigger
* after this point.
*/
if (set_page_non_present((unsigned long)&boot_ghcb_page))
error("Can't unmap GHCB page");
}
static void __noreturn sev_es_ghcb_terminate(struct ghcb *ghcb, unsigned int set,
unsigned int reason, u64 exit_info_2)
{
u64 exit_info_1 = SVM_VMGEXIT_TERM_REASON(set, reason);
vc_ghcb_invalidate(ghcb);
ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_TERM_REQUEST);
ghcb_set_sw_exit_info_1(ghcb, exit_info_1);
ghcb_set_sw_exit_info_2(ghcb, exit_info_2);
sev_es_wr_ghcb_msr(__pa(ghcb));
VMGEXIT();
while (true)
asm volatile("hlt\n" : : : "memory");
}
bool sev_es_check_ghcb_fault(unsigned long address)
{
/* Check whether the fault was on the GHCB page */
return ((address & PAGE_MASK) == (unsigned long)&boot_ghcb_page);
}
void do_boot_stage2_vc(struct pt_regs *regs, unsigned long exit_code)
{
struct es_em_ctxt ctxt;
enum es_result result;
if (!boot_ghcb && !early_setup_ghcb())
sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
vc_ghcb_invalidate(boot_ghcb);
result = vc_init_em_ctxt(&ctxt, regs, exit_code);
if (result != ES_OK)
goto finish;
switch (exit_code) {
case SVM_EXIT_RDTSC:
case SVM_EXIT_RDTSCP:
result = vc_handle_rdtsc(boot_ghcb, &ctxt, exit_code);
break;
case SVM_EXIT_IOIO:
result = vc_handle_ioio(boot_ghcb, &ctxt);
break;
case SVM_EXIT_CPUID:
result = vc_handle_cpuid(boot_ghcb, &ctxt);
break;
default:
result = ES_UNSUPPORTED;
break;
}
finish:
if (result == ES_OK)
vc_finish_insn(&ctxt);
else if (result != ES_RETRY)
sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
}
static void enforce_vmpl0(void)
{
u64 attrs;
int err;
/*
* RMPADJUST modifies RMP permissions of a lesser-privileged (numerically
* higher) privilege level. Here, clear the VMPL1 permission mask of the
* GHCB page. If the guest is not running at VMPL0, this will fail.
*
* If the guest is running at VMPL0, it will succeed. Even though the
* operation modifies permission bits, that is currently fine: Linux SNP
* guests are supported only at VMPL0, so changes to the permission masks of
* VMPL1 or higher are a don't-care.
*/
attrs = 1;
if (rmpadjust((unsigned long)&boot_ghcb_page, RMP_PG_SIZE_4K, attrs))
sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_NOT_VMPL0);
}
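/*
* For reference, a sketch of the rmpadjust() helper used above; the in-tree
* version lives in arch/x86/include/asm/sev.h and this rendering is
* illustrative. RMPADJUST (encoded F3 0F 01 FE) takes the target virtual
* address in rAX, the page size in rCX, and the attribute word (target VMPL
* plus permission mask) in rDX, returning 0 on success.
*/
static inline int rmpadjust_sketch(unsigned long vaddr, bool rmp_psize, unsigned long attrs)
{
int rc;
asm volatile(".byte 0xF3, 0x0F, 0x01, 0xFE\n\t"
: "=a" (rc)
: "a" (vaddr), "c" (rmp_psize), "d" (attrs)
: "memory", "cc");
return rc;
}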
/*
* SNP_FEATURES_IMPL_REQ is the mask of SNP features that need a guest-side
* implementation for the guest to function properly. If any of these
* features is enabled in the hypervisor but lacks a guest-side
* implementation, the behavior of the guest is undefined. The guest could
* fail in a non-obvious way, making it difficult to debug.
*
* As the behavior of reserved feature bits is unknown, add them to the
* required features mask to be on the safe side.
*/
#define SNP_FEATURES_IMPL_REQ (MSR_AMD64_SNP_VTOM | \
MSR_AMD64_SNP_REFLECT_VC | \
MSR_AMD64_SNP_RESTRICTED_INJ | \
MSR_AMD64_SNP_ALT_INJ | \
MSR_AMD64_SNP_DEBUG_SWAP | \
MSR_AMD64_SNP_VMPL_SSS | \
MSR_AMD64_SNP_SECURE_TSC | \
MSR_AMD64_SNP_VMGEXIT_PARAM | \
MSR_AMD64_SNP_VMSA_REG_PROTECTION | \
MSR_AMD64_SNP_RESERVED_BIT13 | \
MSR_AMD64_SNP_RESERVED_BIT15 | \
MSR_AMD64_SNP_RESERVED_MASK)
/*
* SNP_FEATURES_PRESENT is the mask of SNP features that are implemented
* by the guest kernel. As and when a new feature is implemented in the
* guest kernel, a corresponding bit should be added to the mask.
*/
#define SNP_FEATURES_PRESENT MSR_AMD64_SNP_DEBUG_SWAP
u64 snp_get_unsupported_features(u64 status)
{
if (!(status & MSR_AMD64_SEV_SNP_ENABLED))
return 0;
return status & SNP_FEATURES_IMPL_REQ & ~SNP_FEATURES_PRESENT;
}
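/*
* Worked example of the mask logic above, with a hypothetical status value:
* if the hypervisor reports SECURE_TSC and DEBUG_SWAP enabled, only
* SECURE_TSC comes back as unsupported, since DEBUG_SWAP is already in
* SNP_FEATURES_PRESENT.
*/
static u64 __maybe_unused snp_features_example(void)
{
u64 status = MSR_AMD64_SEV_SNP_ENABLED |
MSR_AMD64_SNP_SECURE_TSC |
MSR_AMD64_SNP_DEBUG_SWAP;
return snp_get_unsupported_features(status); /* == MSR_AMD64_SNP_SECURE_TSC */
}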
void snp_check_features(void)
{
u64 unsupported;
/*
* Terminate the boot if the hypervisor has enabled any feature lacking a
* guest-side implementation. Pass the unsupported features mask through
* EXIT_INFO_2 of the GHCB protocol so that those features can be reported
* as part of the guest boot failure.
*/
unsupported = snp_get_unsupported_features(sev_status);
if (unsupported) {
if (ghcb_version < 2 || (!boot_ghcb && !early_setup_ghcb()))
sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
sev_es_ghcb_terminate(boot_ghcb, SEV_TERM_SET_GEN,
GHCB_SNP_UNSUPPORTED, unsupported);
}
}
/*
* sev_check_cpu_support - Check for SEV support in the CPU capabilities
*
* Returns < 0 if SEV is not supported, otherwise the position of the
* encryption bit in the page table descriptors.
*/
static int sev_check_cpu_support(void)
{
unsigned int eax, ebx, ecx, edx;
/* Check for the SME/SEV support leaf */
eax = 0x80000000;
ecx = 0;
native_cpuid(&eax, &ebx, &ecx, &edx);
if (eax < 0x8000001f)
return -ENODEV;
/*
* Check for the SME/SEV feature:
* CPUID Fn8000_001F[EAX]
* - Bit 0 - Secure Memory Encryption support
* - Bit 1 - Secure Encrypted Virtualization support
* CPUID Fn8000_001F[EBX]
* - Bits 5:0 - Pagetable bit position used to indicate encryption
*/
eax = 0x8000001f;
ecx = 0;
native_cpuid(&eax, &ebx, &ecx, &edx);
/* Check whether SEV is supported */
if (!(eax & BIT(1)))
return -ENODEV;
return ebx & 0x3f;
}
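/*
* Worked example (hypothetical part): if Fn8000_001F[EBX] bits 5:0 report
* 51, sev_check_cpu_support() returns 51 and sev_enable() below ends up
* setting sme_me_mask = BIT_ULL(51), i.e. the C-bit is page-table bit 51.
*/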
void sev_enable(struct boot_params *bp)
{
struct msr m;
int bitpos;
bool snp;
/*
* bp->cc_blob_address should only be set by boot/compressed kernel.
* Initialize it to 0 to ensure that uninitialized values from
* buggy bootloaders aren't propagated.
*/
if (bp)
bp->cc_blob_address = 0;
/*
* Do an initial SEV capability check before snp_init(), which
* loads the CPUID page; the same checks are repeated afterwards
* against the SNP CPUID table, where they no longer rely on the
* hypervisor and are therefore trustworthy.
*
* If the HV fakes SEV support, the guest will crash'n'burn
* which is good enough.
*/
if (sev_check_cpu_support() < 0)
return;
/*
* Setup/preliminary detection of SNP. This will be sanity-checked
* against CPUID/MSR values later.
*/
snp = snp_init(bp);
/* Now repeat the checks with the SNP CPUID table. */
bitpos = sev_check_cpu_support();
if (bitpos < 0) {
if (snp)
error("SEV-SNP support indicated by CC blob, but not CPUID.");
return;
}
/* Set the SME mask if this is an SEV guest. */
boot_rdmsr(MSR_AMD64_SEV, &m);
sev_status = m.q;
if (!(sev_status & MSR_AMD64_SEV_ENABLED))
return;
/* Negotiate the GHCB protocol version. */
if (sev_status & MSR_AMD64_SEV_ES_ENABLED) {
if (!sev_es_negotiate_protocol())
sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_PROT_UNSUPPORTED);
}
/*
* SNP is supported in v2 of the GHCB spec, which mandates support for HV
* features.
*/
if (sev_status & MSR_AMD64_SEV_SNP_ENABLED) {
if (!(get_hv_features() & GHCB_HV_FT_SNP))
sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
enforce_vmpl0();
}
if (snp && !(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
error("SEV-SNP supported indicated by CC blob, but not SEV status MSR.");
sme_me_mask = BIT_ULL(bitpos);
}
/*
* sev_get_status - Retrieve the SEV status mask
*
* Returns 0 if the CPU is not SEV capable, otherwise the value of the
* AMD64_SEV MSR.
*/
u64 sev_get_status(void)
{
struct msr m;
if (sev_check_cpu_support() < 0)
return 0;
boot_rdmsr(MSR_AMD64_SEV, &m);
return m.q;
}
/* Search for Confidential Computing blob in the EFI config table. */
static struct cc_blob_sev_info *find_cc_blob_efi(struct boot_params *bp)
{
unsigned long cfg_table_pa;
unsigned int cfg_table_len;
int ret;
ret = efi_get_conf_table(bp, &cfg_table_pa, &cfg_table_len);
if (ret)
return NULL;
return (struct cc_blob_sev_info *)efi_find_vendor_table(bp, cfg_table_pa,
cfg_table_len,
EFI_CC_BLOB_GUID);
}
/*
* Initial setup of SNP relies on information provided by the
* Confidential Computing blob, which can be passed to the boot kernel
* by the firmware/bootloader in the following ways:
*
* - via an entry in the EFI config table
* - via a setup_data structure, as defined by the Linux Boot Protocol
*
* Scan for the blob in that order.
*/
static struct cc_blob_sev_info *find_cc_blob(struct boot_params *bp)
{
struct cc_blob_sev_info *cc_info;
cc_info = find_cc_blob_efi(bp);
if (cc_info)
goto found_cc_info;
cc_info = find_cc_blob_setup_data(bp);
if (!cc_info)
return NULL;
found_cc_info:
if (cc_info->magic != CC_BLOB_SEV_HDR_MAGIC)
sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
return cc_info;
}
/*
* Indicate SNP based on presence of SNP-specific CC blob. Subsequent checks
* will verify the SNP CPUID/MSR bits.
*/
bool snp_init(struct boot_params *bp)
{
struct cc_blob_sev_info *cc_info;
if (!bp)
return false;
cc_info = find_cc_blob(bp);
if (!cc_info)
return false;
/*
* If an SNP-specific Confidential Computing blob is present, then the
* firmware/bootloader has indicated SNP support. Verifying this
* involves CPUID checks which will be more reliable if the SNP
* CPUID table is used. See the comments over setup_cpuid_table() for
* more details.
*/
setup_cpuid_table(cc_info);
/*
* Pass the run-time kernel a pointer to the CC info via boot_params so
* the EFI config table doesn't need to be searched again during the
* early startup phase.
*/
bp->cc_blob_address = (u32)(unsigned long)cc_info;
return true;
}
void sev_prep_identity_maps(unsigned long top_level_pgt)
{
/*
* The Confidential Computing blob is used very early in the uncompressed
* kernel to find the in-memory CPUID table used to handle CPUID
* instructions. Make sure an identity-mapping exists so it can be
* accessed after switchover.
*/
if (sev_snp_enabled()) {
unsigned long cc_info_pa = boot_params->cc_blob_address;
struct cc_blob_sev_info *cc_info;
kernel_add_identity_map(cc_info_pa, cc_info_pa + sizeof(*cc_info));
cc_info = (struct cc_blob_sev_info *)cc_info_pa;
kernel_add_identity_map(cc_info->cpuid_phys, cc_info->cpuid_phys + cc_info->cpuid_len);
}
sev_verify_cbit(top_level_pgt);
}
| linux-master | arch/x86/boot/compressed/sev.c |
// SPDX-License-Identifier: GPL-2.0
#include "misc.h"
#include <asm/e820/types.h>
#include <asm/processor.h>
#include "pgtable.h"
#include "../string.h"
#include "efi.h"
#define BIOS_START_MIN 0x20000U /* 128K, less than this is insane */
#define BIOS_START_MAX 0x9f000U /* 640K, absolute maximum */
#ifdef CONFIG_X86_5LEVEL
/* __pgtable_l5_enabled needs to be in .data to avoid being cleared along with .bss */
unsigned int __section(".data") __pgtable_l5_enabled;
unsigned int __section(".data") pgdir_shift = 39;
unsigned int __section(".data") ptrs_per_p4d = 1;
#endif
/* Buffer to preserve trampoline memory */
static char trampoline_save[TRAMPOLINE_32BIT_SIZE];
/*
* Trampoline address will be printed by extract_kernel() for debugging
* purposes.
*
* Avoid putting the pointer into .bss as it will be cleared between
* configure_5level_paging() and extract_kernel().
*/
unsigned long *trampoline_32bit __section(".data");
extern struct boot_params *boot_params;
int cmdline_find_option_bool(const char *option);
static unsigned long find_trampoline_placement(void)
{
unsigned long bios_start = 0, ebda_start = 0;
struct boot_e820_entry *entry;
char *signature;
int i;
/*
* Find a suitable spot for the trampoline.
* This code is based on reserve_bios_regions().
*/
/*
* EFI systems may not provide legacy ROM. The memory may not be mapped
* at all.
*
* Only look for values in the legacy ROM for non-EFI systems.
*/
signature = (char *)&boot_params->efi_info.efi_loader_signature;
if (strncmp(signature, EFI32_LOADER_SIGNATURE, 4) &&
strncmp(signature, EFI64_LOADER_SIGNATURE, 4)) {
ebda_start = *(unsigned short *)0x40e << 4;
bios_start = *(unsigned short *)0x413 << 10;
}
if (bios_start < BIOS_START_MIN || bios_start > BIOS_START_MAX)
bios_start = BIOS_START_MAX;
if (ebda_start > BIOS_START_MIN && ebda_start < bios_start)
bios_start = ebda_start;
bios_start = round_down(bios_start, PAGE_SIZE);
/* Find the first usable memory region under bios_start. */
for (i = boot_params->e820_entries - 1; i >= 0; i--) {
unsigned long new = bios_start;
entry = &boot_params->e820_table[i];
/* Skip all entries above bios_start. */
if (bios_start <= entry->addr)
continue;
/* Skip non-RAM entries. */
if (entry->type != E820_TYPE_RAM)
continue;
/* Adjust bios_start to the end of the entry if needed. */
if (bios_start > entry->addr + entry->size)
new = entry->addr + entry->size;
/* Keep bios_start page-aligned. */
new = round_down(new, PAGE_SIZE);
/* Skip the entry if it's too small. */
if (new - TRAMPOLINE_32BIT_SIZE < entry->addr)
continue;
/* Protect against underflow. */
if (new - TRAMPOLINE_32BIT_SIZE > bios_start)
break;
bios_start = new;
break;
}
/* Place the trampoline just below the end of low memory */
return bios_start - TRAMPOLINE_32BIT_SIZE;
}
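/*
* Worked example for the BDA reads above, with assumed values: a word of
* 0x9fc0 at 0x40e (EBDA segment) and 639 at 0x413 (base memory in KiB) both
* decode to 0x9fc00, so the trampoline would be placed immediately below
* the EBDA.
*/
static unsigned long __maybe_unused bda_example(void)
{
unsigned short ebda_seg = 0x9fc0; /* assumed word at 0x40e */
unsigned short base_kb = 639; /* assumed word at 0x413 */
unsigned long ebda_start = (unsigned long)ebda_seg << 4; /* 0x9fc00 */
unsigned long bios_start = (unsigned long)base_kb << 10; /* 0x9fc00 */
unsigned long floor = ebda_start < bios_start ? ebda_start : bios_start;
return floor - TRAMPOLINE_32BIT_SIZE;
}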
asmlinkage void configure_5level_paging(struct boot_params *bp, void *pgtable)
{
void (*toggle_la57)(void *cr3);
bool l5_required = false;
/* Initialize boot_params. Required for cmdline_find_option_bool(). */
boot_params = bp;
/*
* Check if LA57 is desired and supported.
*
* There are several parts to the check:
* - if the kernel supports 5-level paging: CONFIG_X86_5LEVEL=y
* - if user asked to disable 5-level paging: no5lvl in cmdline
* - if the machine supports 5-level paging:
* + CPUID leaf 7 is supported
* + the leaf has the feature bit set
*
* That's a substitute for boot_cpu_has() in early boot code.
*/
if (IS_ENABLED(CONFIG_X86_5LEVEL) &&
!cmdline_find_option_bool("no5lvl") &&
native_cpuid_eax(0) >= 7 &&
(native_cpuid_ecx(7) & (1 << (X86_FEATURE_LA57 & 31)))) {
l5_required = true;
/* Initialize variables for 5-level paging */
__pgtable_l5_enabled = 1;
pgdir_shift = 48;
ptrs_per_p4d = 512;
}
/*
* The trampoline will not be used if the paging mode is already set to
* the desired one.
*/
if (l5_required == !!(native_read_cr4() & X86_CR4_LA57))
return;
trampoline_32bit = (unsigned long *)find_trampoline_placement();
/* Preserve trampoline memory */
memcpy(trampoline_save, trampoline_32bit, TRAMPOLINE_32BIT_SIZE);
/* Clear trampoline memory first */
memset(trampoline_32bit, 0, TRAMPOLINE_32BIT_SIZE);
/* Copy trampoline code in place */
toggle_la57 = memcpy(trampoline_32bit +
TRAMPOLINE_32BIT_CODE_OFFSET / sizeof(unsigned long),
&trampoline_32bit_src, TRAMPOLINE_32BIT_CODE_SIZE);
/*
* Avoid the need for a stack in the 32-bit trampoline code, by using
* LJMP rather than LRET to return back to long mode. LJMP takes an
* immediate absolute address, which needs to be adjusted based on the
* placement of the trampoline.
*/
*(u32 *)((u8 *)toggle_la57 + trampoline_ljmp_imm_offset) +=
(unsigned long)toggle_la57;
/*
* The code below prepares a page table in the trampoline memory.
*
* The new page table will be used by trampoline code for switching
* from 4- to 5-level paging or vice versa.
*/
if (l5_required) {
/*
* For a 4- to 5-level paging transition, set up the current CR3 as
* the first and only entry in a new top-level page table.
*/
*trampoline_32bit = __native_read_cr3() | _PAGE_TABLE_NOENC;
} else {
unsigned long src;
/*
* For a 5- to 4-level paging transition, copy the page table pointed
* to by the first entry of the current top-level page table and use
* it as our new top-level page table.
*
* We cannot just point to the page table from trampoline as it
* may be above 4G.
*/
src = *(unsigned long *)__native_read_cr3() & PAGE_MASK;
memcpy(trampoline_32bit, (void *)src, PAGE_SIZE);
}
toggle_la57(trampoline_32bit);
/*
* Move the top level page table out of trampoline memory.
*/
memcpy(pgtable, trampoline_32bit, PAGE_SIZE);
native_write_cr3((unsigned long)pgtable);
/* Restore trampoline memory */
memcpy(trampoline_32bit, trampoline_save, TRAMPOLINE_32BIT_SIZE);
}
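/*
* Standalone sketch of the LA57 capability test used above: CPUID leaf 7,
* sub-leaf 0, ECX bit 16 (X86_FEATURE_LA57 & 31 == 16).
*/
static bool __maybe_unused cpu_has_la57(void)
{
if (native_cpuid_eax(0) < 7)
return false;
return native_cpuid_ecx(7) & (1 << (X86_FEATURE_LA57 & 31));
}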
| linux-master | arch/x86/boot/compressed/pgtable_64.c |
// SPDX-License-Identifier: GPL-2.0
#define BOOT_CTYPE_H
#include "misc.h"
#include "error.h"
#include "../string.h"
#include "efi.h"
#include <linux/numa.h>
/*
* Longest parameter of 'acpi=' is 'copy_dsdt', plus an extra '\0'
* for termination.
*/
#define MAX_ACPI_ARG_LENGTH 10
/*
* Immovable memory regions representation. The max number of memory regions is
* MAX_NUMNODES*2.
*/
struct mem_vector immovable_mem[MAX_NUMNODES*2];
static acpi_physical_address
__efi_get_rsdp_addr(unsigned long cfg_tbl_pa, unsigned int cfg_tbl_len)
{
#ifdef CONFIG_EFI
unsigned long rsdp_addr;
/*
* Search the EFI system tables for the RSDP. ACPI_20_TABLE_GUID is
* preferred over ACPI_TABLE_GUID because it has more features.
*/
rsdp_addr = efi_find_vendor_table(boot_params, cfg_tbl_pa, cfg_tbl_len,
ACPI_20_TABLE_GUID);
if (rsdp_addr)
return (acpi_physical_address)rsdp_addr;
/* No ACPI_20_TABLE_GUID found, fall back to ACPI_TABLE_GUID. */
rsdp_addr = efi_find_vendor_table(boot_params, cfg_tbl_pa, cfg_tbl_len,
ACPI_TABLE_GUID);
if (rsdp_addr)
return (acpi_physical_address)rsdp_addr;
debug_putstr("Error getting RSDP address.\n");
#endif
return 0;
}
static acpi_physical_address efi_get_rsdp_addr(void)
{
#ifdef CONFIG_EFI
unsigned long cfg_tbl_pa = 0;
unsigned int cfg_tbl_len;
unsigned long systab_pa;
enum efi_type et;
int ret;
et = efi_get_type(boot_params);
if (et == EFI_TYPE_NONE)
return 0;
systab_pa = efi_get_system_table(boot_params);
if (!systab_pa)
error("EFI support advertised, but unable to locate system table.");
ret = efi_get_conf_table(boot_params, &cfg_tbl_pa, &cfg_tbl_len);
if (ret || !cfg_tbl_pa)
error("EFI config table not found.");
return __efi_get_rsdp_addr(cfg_tbl_pa, cfg_tbl_len);
#else
return 0;
#endif
}
static u8 compute_checksum(u8 *buffer, u32 length)
{
u8 *end = buffer + length;
u8 sum = 0;
while (buffer < end)
sum += *(buffer++);
return sum;
}
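/*
* Usage note: an ACPI table is valid when the byte sum over its length is
* zero mod 256 (the stored checksum byte is included in the sum), so callers
* compare the result against 0, e.g.:
*/
static bool __maybe_unused rsdp_checksum_ok(struct acpi_table_rsdp *rsdp)
{
return compute_checksum((u8 *)rsdp, ACPI_RSDP_CHECKSUM_LENGTH) == 0;
}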
/* Search a block of memory for the RSDP signature. */
static u8 *scan_mem_for_rsdp(u8 *start, u32 length)
{
struct acpi_table_rsdp *rsdp;
u8 *address, *end;
end = start + length;
/* Search from given start address for the requested length */
for (address = start; address < end; address += ACPI_RSDP_SCAN_STEP) {
/*
* Both RSDP signature and checksum must be correct.
* Note: Sometimes there exists more than one RSDP in memory;
* the valid RSDP has a valid checksum, all others have an
* invalid checksum.
*/
rsdp = (struct acpi_table_rsdp *)address;
/* BAD Signature */
if (!ACPI_VALIDATE_RSDP_SIG(rsdp->signature))
continue;
/* Check the standard checksum */
if (compute_checksum((u8 *)rsdp, ACPI_RSDP_CHECKSUM_LENGTH))
continue;
/* Check extended checksum if table version >= 2 */
if ((rsdp->revision >= 2) &&
(compute_checksum((u8 *)rsdp, ACPI_RSDP_XCHECKSUM_LENGTH)))
continue;
/* Signature and checksum valid, we have found a real RSDP */
return address;
}
return NULL;
}
/* Search RSDP address in EBDA. */
static acpi_physical_address bios_get_rsdp_addr(void)
{
unsigned long address;
u8 *rsdp;
/* Get the location of the Extended BIOS Data Area (EBDA) */
address = *(u16 *)ACPI_EBDA_PTR_LOCATION;
address <<= 4;
/*
* Search the EBDA paragraphs (the EBDA is required to be at least
* 1K in length)
*/
if (address > 0x400) {
rsdp = scan_mem_for_rsdp((u8 *)address, ACPI_EBDA_WINDOW_SIZE);
if (rsdp)
return (acpi_physical_address)(unsigned long)rsdp;
}
/* Search upper memory: 16-byte boundaries in E0000h-FFFFFh */
rsdp = scan_mem_for_rsdp((u8 *) ACPI_HI_RSDP_WINDOW_BASE,
ACPI_HI_RSDP_WINDOW_SIZE);
if (rsdp)
return (acpi_physical_address)(unsigned long)rsdp;
return 0;
}
/* Return RSDP address on success, otherwise 0. */
acpi_physical_address get_rsdp_addr(void)
{
acpi_physical_address pa;
pa = boot_params->acpi_rsdp_addr;
if (!pa)
pa = efi_get_rsdp_addr();
if (!pa)
pa = bios_get_rsdp_addr();
return pa;
}
#if defined(CONFIG_RANDOMIZE_BASE) && defined(CONFIG_MEMORY_HOTREMOVE)
/*
* Max length of 64-bit hex address string is 19, prefix "0x" + 16 hex
* digits, and '\0' for termination.
*/
#define MAX_ADDR_LEN 19
static unsigned long get_cmdline_acpi_rsdp(void)
{
unsigned long addr = 0;
#ifdef CONFIG_KEXEC
char val[MAX_ADDR_LEN] = { };
int ret;
ret = cmdline_find_option("acpi_rsdp", val, MAX_ADDR_LEN);
if (ret < 0)
return 0;
if (boot_kstrtoul(val, 16, &addr))
return 0;
#endif
return addr;
}
/* Compute SRAT address from RSDP. */
static unsigned long get_acpi_srat_table(void)
{
unsigned long root_table, acpi_table;
struct acpi_table_header *header;
struct acpi_table_rsdp *rsdp;
u32 num_entries, size, len;
char arg[10];
u8 *entry;
/*
* Check whether we were given an RSDP on the command line. We don't
* stash this in boot params because the kernel itself may have
* different ideas about whether to trust a command-line parameter.
*/
rsdp = (struct acpi_table_rsdp *)get_cmdline_acpi_rsdp();
if (!rsdp)
rsdp = (struct acpi_table_rsdp *)(long)
boot_params->acpi_rsdp_addr;
if (!rsdp)
return 0;
/* Get ACPI root table from RSDP.*/
if (!(cmdline_find_option("acpi", arg, sizeof(arg)) == 4 &&
!strncmp(arg, "rsdt", 4)) &&
rsdp->xsdt_physical_address &&
rsdp->revision > 1) {
root_table = rsdp->xsdt_physical_address;
size = ACPI_XSDT_ENTRY_SIZE;
} else {
root_table = rsdp->rsdt_physical_address;
size = ACPI_RSDT_ENTRY_SIZE;
}
if (!root_table)
return 0;
header = (struct acpi_table_header *)root_table;
len = header->length;
if (len < sizeof(struct acpi_table_header) + size)
return 0;
num_entries = (len - sizeof(struct acpi_table_header)) / size;
entry = (u8 *)(root_table + sizeof(struct acpi_table_header));
while (num_entries--) {
if (size == ACPI_RSDT_ENTRY_SIZE)
acpi_table = *(u32 *)entry;
else
acpi_table = *(u64 *)entry;
if (acpi_table) {
header = (struct acpi_table_header *)acpi_table;
if (ACPI_COMPARE_NAMESEG(header->signature, ACPI_SIG_SRAT))
return acpi_table;
}
entry += size;
}
return 0;
}
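/*
* Note on the entry sizes used above: RSDT entries are 32-bit physical
* addresses (ACPI_RSDT_ENTRY_SIZE == 4) while XSDT entries are 64-bit
* (ACPI_XSDT_ENTRY_SIZE == 8), which is why the walk dereferences u32 or
* u64 per entry depending on the chosen root table.
*/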
/**
* count_immovable_mem_regions - Parse SRAT and cache the immovable
* memory regions into the immovable_mem array.
*
* Return the number of immovable memory regions on success, 0 on failure:
*
* - Too many immovable memory regions
* - ACPI off or no SRAT found
* - No immovable memory region found.
*/
int count_immovable_mem_regions(void)
{
unsigned long table_addr, table_end, table;
struct acpi_subtable_header *sub_table;
struct acpi_table_header *table_header;
char arg[MAX_ACPI_ARG_LENGTH];
int num = 0;
if (cmdline_find_option("acpi", arg, sizeof(arg)) == 3 &&
!strncmp(arg, "off", 3))
return 0;
table_addr = get_acpi_srat_table();
if (!table_addr)
return 0;
table_header = (struct acpi_table_header *)table_addr;
table_end = table_addr + table_header->length;
table = table_addr + sizeof(struct acpi_table_srat);
while (table + sizeof(struct acpi_subtable_header) < table_end) {
sub_table = (struct acpi_subtable_header *)table;
if (!sub_table->length) {
debug_putstr("Invalid zero length SRAT subtable.\n");
return 0;
}
if (sub_table->type == ACPI_SRAT_TYPE_MEMORY_AFFINITY) {
struct acpi_srat_mem_affinity *ma;
ma = (struct acpi_srat_mem_affinity *)sub_table;
if (!(ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && ma->length) {
immovable_mem[num].start = ma->base_address;
immovable_mem[num].size = ma->length;
num++;
}
if (num >= MAX_NUMNODES*2) {
debug_putstr("Too many immovable memory regions, aborting.\n");
return 0;
}
}
table += sub_table->length;
}
return num;
}
#endif /* CONFIG_RANDOMIZE_BASE && CONFIG_MEMORY_HOTREMOVE */
| linux-master | arch/x86/boot/compressed/acpi.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <linux/cc_platform.h>
#include <linux/pgtable.h>
#include <asm/set_memory.h>
#include <asm/realmode.h>
#include <asm/tlbflush.h>
#include <asm/crash.h>
#include <asm/sev.h>
struct real_mode_header *real_mode_header;
u32 *trampoline_cr4_features;
/* Hold the pgd entry used on booting additional CPUs */
pgd_t trampoline_pgd_entry;
void load_trampoline_pgtable(void)
{
#ifdef CONFIG_X86_32
load_cr3(initial_page_table);
#else
/*
* This function is called before exiting to real-mode and that will
* fail with CR4.PCIDE still set.
*/
if (boot_cpu_has(X86_FEATURE_PCID))
cr4_clear_bits(X86_CR4_PCIDE);
write_cr3(real_mode_header->trampoline_pgd);
#endif
/*
* The CR3 write above will not flush global TLB entries.
* Stale, global entries from previous page tables may still be
* present. Flush those stale entries.
*
* This ensures that memory accessed while running with
* trampoline_pgd is *actually* mapped into trampoline_pgd.
*/
__flush_tlb_all();
}
void __init reserve_real_mode(void)
{
phys_addr_t mem;
size_t size = real_mode_size_needed();
if (!size)
return;
WARN_ON(slab_is_available());
/* Has to be under 1M so we can execute real-mode AP code. */
mem = memblock_phys_alloc_range(size, PAGE_SIZE, 0, 1<<20);
if (!mem)
pr_info("No sub-1M memory is available for the trampoline\n");
else
set_real_mode_mem(mem);
/*
* Unconditionally reserve the entire first 1M, see comment in
* setup_arch().
*/
memblock_reserve(0, SZ_1M);
}
static void __init sme_sev_setup_real_mode(struct trampoline_header *th)
{
#ifdef CONFIG_AMD_MEM_ENCRYPT
if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
th->flags |= TH_FLAGS_SME_ACTIVE;
if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT)) {
/*
* Skip the call to verify_cpu() in secondary_startup_64 as it
* will cause #VC exceptions when the AP can't handle them yet.
*/
th->start = (u64) secondary_startup_64_no_verify;
if (sev_es_setup_ap_jump_table(real_mode_header))
panic("Failed to get/update SEV-ES AP Jump Table");
}
#endif
}
static void __init setup_real_mode(void)
{
u16 real_mode_seg;
const u32 *rel;
u32 count;
unsigned char *base;
unsigned long phys_base;
struct trampoline_header *trampoline_header;
size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);
#ifdef CONFIG_X86_64
u64 *trampoline_pgd;
u64 efer;
int i;
#endif
base = (unsigned char *)real_mode_header;
/*
* If SME is active, the trampoline area will need to be in
* decrypted memory in order to bring up other processors
* successfully. This is not needed for SEV.
*/
if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
set_memory_decrypted((unsigned long)base, size >> PAGE_SHIFT);
memcpy(base, real_mode_blob, size);
phys_base = __pa(base);
real_mode_seg = phys_base >> 4;
rel = (u32 *) real_mode_relocs;
/* 16-bit segment relocations. */
count = *rel++;
while (count--) {
u16 *seg = (u16 *) (base + *rel++);
*seg = real_mode_seg;
}
/* 32-bit linear relocations. */
count = *rel++;
while (count--) {
u32 *ptr = (u32 *) (base + *rel++);
*ptr += phys_base;
}
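/*
* For reference, the relocation stream parsed above is generated at build
* time (by the arch/x86/tools/relocs tool, it is believed, in its
* real-mode mode) and has this layout, each offset relative to the blob:
*
*   u32 count16; u32 off16[count16];   16-bit segment fixups
*   u32 count32; u32 off32[count32];   32-bit linear fixups
*/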
/* Must be performed *after* relocation. */
trampoline_header = (struct trampoline_header *)
__va(real_mode_header->trampoline_header);
#ifdef CONFIG_X86_32
trampoline_header->start = __pa_symbol(startup_32_smp);
trampoline_header->gdt_limit = __BOOT_DS + 7;
trampoline_header->gdt_base = __pa_symbol(boot_gdt);
#else
/*
* Some AMD processors will #GP(0) if EFER.LMA is set in WRMSR
* so we need to mask it out.
*/
rdmsrl(MSR_EFER, efer);
trampoline_header->efer = efer & ~EFER_LMA;
trampoline_header->start = (u64) secondary_startup_64;
trampoline_cr4_features = &trampoline_header->cr4;
*trampoline_cr4_features = mmu_cr4_features;
trampoline_header->flags = 0;
trampoline_lock = &trampoline_header->lock;
*trampoline_lock = 0;
trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
/* Map the real mode stub as virtual == physical */
trampoline_pgd[0] = trampoline_pgd_entry.pgd;
/*
* Include the entirety of the kernel mapping into the trampoline
* PGD. This way, all mappings present in the normal kernel page
* tables are usable while running on trampoline_pgd.
*/
for (i = pgd_index(__PAGE_OFFSET); i < PTRS_PER_PGD; i++)
trampoline_pgd[i] = init_top_pgt[i].pgd;
#endif
sme_sev_setup_real_mode(trampoline_header);
}
/*
* reserve_real_mode() gets called very early, to guarantee the
* availability of low memory. This is before the proper kernel page
* tables are set up, so we cannot set page permissions in that
* function. Also, the trampoline code will be executed by APs, so we
* need to mark it executable by do_pre_smp_initcalls() at the latest;
* thus it is run as an early_initcall().
*/
static void __init set_real_mode_permissions(void)
{
unsigned char *base = (unsigned char *) real_mode_header;
size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);
size_t ro_size =
PAGE_ALIGN(real_mode_header->ro_end) -
__pa(base);
size_t text_size =
PAGE_ALIGN(real_mode_header->ro_end) -
real_mode_header->text_start;
unsigned long text_start =
(unsigned long) __va(real_mode_header->text_start);
set_memory_nx((unsigned long) base, size >> PAGE_SHIFT);
set_memory_ro((unsigned long) base, ro_size >> PAGE_SHIFT);
set_memory_x((unsigned long) text_start, text_size >> PAGE_SHIFT);
}
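/*
* Resulting permissions, as an illustrative summary of the calls above: the
* whole blob becomes non-executable, everything up to ro_end is additionally
* read-only, and only [text_start, ro_end) is re-marked executable so the
* APs can run the trampoline code.
*/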
void __init init_real_mode(void)
{
if (!real_mode_header)
panic("Real mode trampoline was not allocated");
setup_real_mode();
set_real_mode_permissions();
}
static int __init do_init_real_mode(void)
{
x86_platform.realmode_init();
return 0;
}
early_initcall(do_init_real_mode);
| linux-master | arch/x86/realmode/init.c |