// SPDX-License-Identifier: GPL-2.0
/*
* Perf PMU sysfs events attributes for available CPU-measurement counters
*
*/
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <asm/cpu_mf.h>
/* BEGIN: CPUM_CF COUNTER DEFINITIONS =================================== */
CPUMF_EVENT_ATTR(cf_fvn1, CPU_CYCLES, 0x0000);
CPUMF_EVENT_ATTR(cf_fvn1, INSTRUCTIONS, 0x0001);
CPUMF_EVENT_ATTR(cf_fvn1, L1I_DIR_WRITES, 0x0002);
CPUMF_EVENT_ATTR(cf_fvn1, L1I_PENALTY_CYCLES, 0x0003);
CPUMF_EVENT_ATTR(cf_fvn1, PROBLEM_STATE_CPU_CYCLES, 0x0020);
CPUMF_EVENT_ATTR(cf_fvn1, PROBLEM_STATE_INSTRUCTIONS, 0x0021);
CPUMF_EVENT_ATTR(cf_fvn1, PROBLEM_STATE_L1I_DIR_WRITES, 0x0022);
CPUMF_EVENT_ATTR(cf_fvn1, PROBLEM_STATE_L1I_PENALTY_CYCLES, 0x0023);
CPUMF_EVENT_ATTR(cf_fvn1, PROBLEM_STATE_L1D_DIR_WRITES, 0x0024);
CPUMF_EVENT_ATTR(cf_fvn1, PROBLEM_STATE_L1D_PENALTY_CYCLES, 0x0025);
CPUMF_EVENT_ATTR(cf_fvn1, L1D_DIR_WRITES, 0x0004);
CPUMF_EVENT_ATTR(cf_fvn1, L1D_PENALTY_CYCLES, 0x0005);
CPUMF_EVENT_ATTR(cf_fvn3, CPU_CYCLES, 0x0000);
CPUMF_EVENT_ATTR(cf_fvn3, INSTRUCTIONS, 0x0001);
CPUMF_EVENT_ATTR(cf_fvn3, L1I_DIR_WRITES, 0x0002);
CPUMF_EVENT_ATTR(cf_fvn3, L1I_PENALTY_CYCLES, 0x0003);
CPUMF_EVENT_ATTR(cf_fvn3, PROBLEM_STATE_CPU_CYCLES, 0x0020);
CPUMF_EVENT_ATTR(cf_fvn3, PROBLEM_STATE_INSTRUCTIONS, 0x0021);
CPUMF_EVENT_ATTR(cf_fvn3, L1D_DIR_WRITES, 0x0004);
CPUMF_EVENT_ATTR(cf_fvn3, L1D_PENALTY_CYCLES, 0x0005);
CPUMF_EVENT_ATTR(cf_svn_12345, PRNG_FUNCTIONS, 0x0040);
CPUMF_EVENT_ATTR(cf_svn_12345, PRNG_CYCLES, 0x0041);
CPUMF_EVENT_ATTR(cf_svn_12345, PRNG_BLOCKED_FUNCTIONS, 0x0042);
CPUMF_EVENT_ATTR(cf_svn_12345, PRNG_BLOCKED_CYCLES, 0x0043);
CPUMF_EVENT_ATTR(cf_svn_12345, SHA_FUNCTIONS, 0x0044);
CPUMF_EVENT_ATTR(cf_svn_12345, SHA_CYCLES, 0x0045);
CPUMF_EVENT_ATTR(cf_svn_12345, SHA_BLOCKED_FUNCTIONS, 0x0046);
CPUMF_EVENT_ATTR(cf_svn_12345, SHA_BLOCKED_CYCLES, 0x0047);
CPUMF_EVENT_ATTR(cf_svn_12345, DEA_FUNCTIONS, 0x0048);
CPUMF_EVENT_ATTR(cf_svn_12345, DEA_CYCLES, 0x0049);
CPUMF_EVENT_ATTR(cf_svn_12345, DEA_BLOCKED_FUNCTIONS, 0x004a);
CPUMF_EVENT_ATTR(cf_svn_12345, DEA_BLOCKED_CYCLES, 0x004b);
CPUMF_EVENT_ATTR(cf_svn_12345, AES_FUNCTIONS, 0x004c);
CPUMF_EVENT_ATTR(cf_svn_12345, AES_CYCLES, 0x004d);
CPUMF_EVENT_ATTR(cf_svn_12345, AES_BLOCKED_FUNCTIONS, 0x004e);
CPUMF_EVENT_ATTR(cf_svn_12345, AES_BLOCKED_CYCLES, 0x004f);
CPUMF_EVENT_ATTR(cf_svn_6, ECC_FUNCTION_COUNT, 0x0050);
CPUMF_EVENT_ATTR(cf_svn_6, ECC_CYCLES_COUNT, 0x0051);
CPUMF_EVENT_ATTR(cf_svn_6, ECC_BLOCKED_FUNCTION_COUNT, 0x0052);
CPUMF_EVENT_ATTR(cf_svn_6, ECC_BLOCKED_CYCLES_COUNT, 0x0053);
CPUMF_EVENT_ATTR(cf_z10, L1I_L2_SOURCED_WRITES, 0x0080);
CPUMF_EVENT_ATTR(cf_z10, L1D_L2_SOURCED_WRITES, 0x0081);
CPUMF_EVENT_ATTR(cf_z10, L1I_L3_LOCAL_WRITES, 0x0082);
CPUMF_EVENT_ATTR(cf_z10, L1D_L3_LOCAL_WRITES, 0x0083);
CPUMF_EVENT_ATTR(cf_z10, L1I_L3_REMOTE_WRITES, 0x0084);
CPUMF_EVENT_ATTR(cf_z10, L1D_L3_REMOTE_WRITES, 0x0085);
CPUMF_EVENT_ATTR(cf_z10, L1D_LMEM_SOURCED_WRITES, 0x0086);
CPUMF_EVENT_ATTR(cf_z10, L1I_LMEM_SOURCED_WRITES, 0x0087);
CPUMF_EVENT_ATTR(cf_z10, L1D_RO_EXCL_WRITES, 0x0088);
CPUMF_EVENT_ATTR(cf_z10, L1I_CACHELINE_INVALIDATES, 0x0089);
CPUMF_EVENT_ATTR(cf_z10, ITLB1_WRITES, 0x008a);
CPUMF_EVENT_ATTR(cf_z10, DTLB1_WRITES, 0x008b);
CPUMF_EVENT_ATTR(cf_z10, TLB2_PTE_WRITES, 0x008c);
CPUMF_EVENT_ATTR(cf_z10, TLB2_CRSTE_WRITES, 0x008d);
CPUMF_EVENT_ATTR(cf_z10, TLB2_CRSTE_HPAGE_WRITES, 0x008e);
CPUMF_EVENT_ATTR(cf_z10, ITLB1_MISSES, 0x0091);
CPUMF_EVENT_ATTR(cf_z10, DTLB1_MISSES, 0x0092);
CPUMF_EVENT_ATTR(cf_z10, L2C_STORES_SENT, 0x0093);
CPUMF_EVENT_ATTR(cf_z196, L1D_L2_SOURCED_WRITES, 0x0080);
CPUMF_EVENT_ATTR(cf_z196, L1I_L2_SOURCED_WRITES, 0x0081);
CPUMF_EVENT_ATTR(cf_z196, DTLB1_MISSES, 0x0082);
CPUMF_EVENT_ATTR(cf_z196, ITLB1_MISSES, 0x0083);
CPUMF_EVENT_ATTR(cf_z196, L2C_STORES_SENT, 0x0085);
CPUMF_EVENT_ATTR(cf_z196, L1D_OFFBOOK_L3_SOURCED_WRITES, 0x0086);
CPUMF_EVENT_ATTR(cf_z196, L1D_ONBOOK_L4_SOURCED_WRITES, 0x0087);
CPUMF_EVENT_ATTR(cf_z196, L1I_ONBOOK_L4_SOURCED_WRITES, 0x0088);
CPUMF_EVENT_ATTR(cf_z196, L1D_RO_EXCL_WRITES, 0x0089);
CPUMF_EVENT_ATTR(cf_z196, L1D_OFFBOOK_L4_SOURCED_WRITES, 0x008a);
CPUMF_EVENT_ATTR(cf_z196, L1I_OFFBOOK_L4_SOURCED_WRITES, 0x008b);
CPUMF_EVENT_ATTR(cf_z196, DTLB1_HPAGE_WRITES, 0x008c);
CPUMF_EVENT_ATTR(cf_z196, L1D_LMEM_SOURCED_WRITES, 0x008d);
CPUMF_EVENT_ATTR(cf_z196, L1I_LMEM_SOURCED_WRITES, 0x008e);
CPUMF_EVENT_ATTR(cf_z196, L1I_OFFBOOK_L3_SOURCED_WRITES, 0x008f);
CPUMF_EVENT_ATTR(cf_z196, DTLB1_WRITES, 0x0090);
CPUMF_EVENT_ATTR(cf_z196, ITLB1_WRITES, 0x0091);
CPUMF_EVENT_ATTR(cf_z196, TLB2_PTE_WRITES, 0x0092);
CPUMF_EVENT_ATTR(cf_z196, TLB2_CRSTE_HPAGE_WRITES, 0x0093);
CPUMF_EVENT_ATTR(cf_z196, TLB2_CRSTE_WRITES, 0x0094);
CPUMF_EVENT_ATTR(cf_z196, L1D_ONCHIP_L3_SOURCED_WRITES, 0x0096);
CPUMF_EVENT_ATTR(cf_z196, L1D_OFFCHIP_L3_SOURCED_WRITES, 0x0098);
CPUMF_EVENT_ATTR(cf_z196, L1I_ONCHIP_L3_SOURCED_WRITES, 0x0099);
CPUMF_EVENT_ATTR(cf_z196, L1I_OFFCHIP_L3_SOURCED_WRITES, 0x009b);
CPUMF_EVENT_ATTR(cf_zec12, DTLB1_MISSES, 0x0080);
CPUMF_EVENT_ATTR(cf_zec12, ITLB1_MISSES, 0x0081);
CPUMF_EVENT_ATTR(cf_zec12, L1D_L2I_SOURCED_WRITES, 0x0082);
CPUMF_EVENT_ATTR(cf_zec12, L1I_L2I_SOURCED_WRITES, 0x0083);
CPUMF_EVENT_ATTR(cf_zec12, L1D_L2D_SOURCED_WRITES, 0x0084);
CPUMF_EVENT_ATTR(cf_zec12, DTLB1_WRITES, 0x0085);
CPUMF_EVENT_ATTR(cf_zec12, L1D_LMEM_SOURCED_WRITES, 0x0087);
CPUMF_EVENT_ATTR(cf_zec12, L1I_LMEM_SOURCED_WRITES, 0x0089);
CPUMF_EVENT_ATTR(cf_zec12, L1D_RO_EXCL_WRITES, 0x008a);
CPUMF_EVENT_ATTR(cf_zec12, DTLB1_HPAGE_WRITES, 0x008b);
CPUMF_EVENT_ATTR(cf_zec12, ITLB1_WRITES, 0x008c);
CPUMF_EVENT_ATTR(cf_zec12, TLB2_PTE_WRITES, 0x008d);
CPUMF_EVENT_ATTR(cf_zec12, TLB2_CRSTE_HPAGE_WRITES, 0x008e);
CPUMF_EVENT_ATTR(cf_zec12, TLB2_CRSTE_WRITES, 0x008f);
CPUMF_EVENT_ATTR(cf_zec12, L1D_ONCHIP_L3_SOURCED_WRITES, 0x0090);
CPUMF_EVENT_ATTR(cf_zec12, L1D_OFFCHIP_L3_SOURCED_WRITES, 0x0091);
CPUMF_EVENT_ATTR(cf_zec12, L1D_OFFBOOK_L3_SOURCED_WRITES, 0x0092);
CPUMF_EVENT_ATTR(cf_zec12, L1D_ONBOOK_L4_SOURCED_WRITES, 0x0093);
CPUMF_EVENT_ATTR(cf_zec12, L1D_OFFBOOK_L4_SOURCED_WRITES, 0x0094);
CPUMF_EVENT_ATTR(cf_zec12, TX_NC_TEND, 0x0095);
CPUMF_EVENT_ATTR(cf_zec12, L1D_ONCHIP_L3_SOURCED_WRITES_IV, 0x0096);
CPUMF_EVENT_ATTR(cf_zec12, L1D_OFFCHIP_L3_SOURCED_WRITES_IV, 0x0097);
CPUMF_EVENT_ATTR(cf_zec12, L1D_OFFBOOK_L3_SOURCED_WRITES_IV, 0x0098);
CPUMF_EVENT_ATTR(cf_zec12, L1I_ONCHIP_L3_SOURCED_WRITES, 0x0099);
CPUMF_EVENT_ATTR(cf_zec12, L1I_OFFCHIP_L3_SOURCED_WRITES, 0x009a);
CPUMF_EVENT_ATTR(cf_zec12, L1I_OFFBOOK_L3_SOURCED_WRITES, 0x009b);
CPUMF_EVENT_ATTR(cf_zec12, L1I_ONBOOK_L4_SOURCED_WRITES, 0x009c);
CPUMF_EVENT_ATTR(cf_zec12, L1I_OFFBOOK_L4_SOURCED_WRITES, 0x009d);
CPUMF_EVENT_ATTR(cf_zec12, TX_C_TEND, 0x009e);
CPUMF_EVENT_ATTR(cf_zec12, L1I_ONCHIP_L3_SOURCED_WRITES_IV, 0x009f);
CPUMF_EVENT_ATTR(cf_zec12, L1I_OFFCHIP_L3_SOURCED_WRITES_IV, 0x00a0);
CPUMF_EVENT_ATTR(cf_zec12, L1I_OFFBOOK_L3_SOURCED_WRITES_IV, 0x00a1);
CPUMF_EVENT_ATTR(cf_zec12, TX_NC_TABORT, 0x00b1);
CPUMF_EVENT_ATTR(cf_zec12, TX_C_TABORT_NO_SPECIAL, 0x00b2);
CPUMF_EVENT_ATTR(cf_zec12, TX_C_TABORT_SPECIAL, 0x00b3);
CPUMF_EVENT_ATTR(cf_z13, L1D_RO_EXCL_WRITES, 0x0080);
CPUMF_EVENT_ATTR(cf_z13, DTLB1_WRITES, 0x0081);
CPUMF_EVENT_ATTR(cf_z13, DTLB1_MISSES, 0x0082);
CPUMF_EVENT_ATTR(cf_z13, DTLB1_HPAGE_WRITES, 0x0083);
CPUMF_EVENT_ATTR(cf_z13, DTLB1_GPAGE_WRITES, 0x0084);
CPUMF_EVENT_ATTR(cf_z13, L1D_L2D_SOURCED_WRITES, 0x0085);
CPUMF_EVENT_ATTR(cf_z13, ITLB1_WRITES, 0x0086);
CPUMF_EVENT_ATTR(cf_z13, ITLB1_MISSES, 0x0087);
CPUMF_EVENT_ATTR(cf_z13, L1I_L2I_SOURCED_WRITES, 0x0088);
CPUMF_EVENT_ATTR(cf_z13, TLB2_PTE_WRITES, 0x0089);
CPUMF_EVENT_ATTR(cf_z13, TLB2_CRSTE_HPAGE_WRITES, 0x008a);
CPUMF_EVENT_ATTR(cf_z13, TLB2_CRSTE_WRITES, 0x008b);
CPUMF_EVENT_ATTR(cf_z13, TX_C_TEND, 0x008c);
CPUMF_EVENT_ATTR(cf_z13, TX_NC_TEND, 0x008d);
CPUMF_EVENT_ATTR(cf_z13, L1C_TLB1_MISSES, 0x008f);
CPUMF_EVENT_ATTR(cf_z13, L1D_ONCHIP_L3_SOURCED_WRITES, 0x0090);
CPUMF_EVENT_ATTR(cf_z13, L1D_ONCHIP_L3_SOURCED_WRITES_IV, 0x0091);
CPUMF_EVENT_ATTR(cf_z13, L1D_ONNODE_L4_SOURCED_WRITES, 0x0092);
CPUMF_EVENT_ATTR(cf_z13, L1D_ONNODE_L3_SOURCED_WRITES_IV, 0x0093);
CPUMF_EVENT_ATTR(cf_z13, L1D_ONNODE_L3_SOURCED_WRITES, 0x0094);
CPUMF_EVENT_ATTR(cf_z13, L1D_ONDRAWER_L4_SOURCED_WRITES, 0x0095);
CPUMF_EVENT_ATTR(cf_z13, L1D_ONDRAWER_L3_SOURCED_WRITES_IV, 0x0096);
CPUMF_EVENT_ATTR(cf_z13, L1D_ONDRAWER_L3_SOURCED_WRITES, 0x0097);
CPUMF_EVENT_ATTR(cf_z13, L1D_OFFDRAWER_SCOL_L4_SOURCED_WRITES, 0x0098);
CPUMF_EVENT_ATTR(cf_z13, L1D_OFFDRAWER_SCOL_L3_SOURCED_WRITES_IV, 0x0099);
CPUMF_EVENT_ATTR(cf_z13, L1D_OFFDRAWER_SCOL_L3_SOURCED_WRITES, 0x009a);
CPUMF_EVENT_ATTR(cf_z13, L1D_OFFDRAWER_FCOL_L4_SOURCED_WRITES, 0x009b);
CPUMF_EVENT_ATTR(cf_z13, L1D_OFFDRAWER_FCOL_L3_SOURCED_WRITES_IV, 0x009c);
CPUMF_EVENT_ATTR(cf_z13, L1D_OFFDRAWER_FCOL_L3_SOURCED_WRITES, 0x009d);
CPUMF_EVENT_ATTR(cf_z13, L1D_ONNODE_MEM_SOURCED_WRITES, 0x009e);
CPUMF_EVENT_ATTR(cf_z13, L1D_ONDRAWER_MEM_SOURCED_WRITES, 0x009f);
CPUMF_EVENT_ATTR(cf_z13, L1D_OFFDRAWER_MEM_SOURCED_WRITES, 0x00a0);
CPUMF_EVENT_ATTR(cf_z13, L1D_ONCHIP_MEM_SOURCED_WRITES, 0x00a1);
CPUMF_EVENT_ATTR(cf_z13, L1I_ONCHIP_L3_SOURCED_WRITES, 0x00a2);
CPUMF_EVENT_ATTR(cf_z13, L1I_ONCHIP_L3_SOURCED_WRITES_IV, 0x00a3);
CPUMF_EVENT_ATTR(cf_z13, L1I_ONNODE_L4_SOURCED_WRITES, 0x00a4);
CPUMF_EVENT_ATTR(cf_z13, L1I_ONNODE_L3_SOURCED_WRITES_IV, 0x00a5);
CPUMF_EVENT_ATTR(cf_z13, L1I_ONNODE_L3_SOURCED_WRITES, 0x00a6);
CPUMF_EVENT_ATTR(cf_z13, L1I_ONDRAWER_L4_SOURCED_WRITES, 0x00a7);
CPUMF_EVENT_ATTR(cf_z13, L1I_ONDRAWER_L3_SOURCED_WRITES_IV, 0x00a8);
CPUMF_EVENT_ATTR(cf_z13, L1I_ONDRAWER_L3_SOURCED_WRITES, 0x00a9);
CPUMF_EVENT_ATTR(cf_z13, L1I_OFFDRAWER_SCOL_L4_SOURCED_WRITES, 0x00aa);
CPUMF_EVENT_ATTR(cf_z13, L1I_OFFDRAWER_SCOL_L3_SOURCED_WRITES_IV, 0x00ab);
CPUMF_EVENT_ATTR(cf_z13, L1I_OFFDRAWER_SCOL_L3_SOURCED_WRITES, 0x00ac);
CPUMF_EVENT_ATTR(cf_z13, L1I_OFFDRAWER_FCOL_L4_SOURCED_WRITES, 0x00ad);
CPUMF_EVENT_ATTR(cf_z13, L1I_OFFDRAWER_FCOL_L3_SOURCED_WRITES_IV, 0x00ae);
CPUMF_EVENT_ATTR(cf_z13, L1I_OFFDRAWER_FCOL_L3_SOURCED_WRITES, 0x00af);
CPUMF_EVENT_ATTR(cf_z13, L1I_ONNODE_MEM_SOURCED_WRITES, 0x00b0);
CPUMF_EVENT_ATTR(cf_z13, L1I_ONDRAWER_MEM_SOURCED_WRITES, 0x00b1);
CPUMF_EVENT_ATTR(cf_z13, L1I_OFFDRAWER_MEM_SOURCED_WRITES, 0x00b2);
CPUMF_EVENT_ATTR(cf_z13, L1I_ONCHIP_MEM_SOURCED_WRITES, 0x00b3);
CPUMF_EVENT_ATTR(cf_z13, TX_NC_TABORT, 0x00da);
CPUMF_EVENT_ATTR(cf_z13, TX_C_TABORT_NO_SPECIAL, 0x00db);
CPUMF_EVENT_ATTR(cf_z13, TX_C_TABORT_SPECIAL, 0x00dc);
CPUMF_EVENT_ATTR(cf_z13, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0);
CPUMF_EVENT_ATTR(cf_z13, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1);
CPUMF_EVENT_ATTR(cf_z14, L1D_RO_EXCL_WRITES, 0x0080);
CPUMF_EVENT_ATTR(cf_z14, DTLB2_WRITES, 0x0081);
CPUMF_EVENT_ATTR(cf_z14, DTLB2_MISSES, 0x0082);
CPUMF_EVENT_ATTR(cf_z14, DTLB2_HPAGE_WRITES, 0x0083);
CPUMF_EVENT_ATTR(cf_z14, DTLB2_GPAGE_WRITES, 0x0084);
CPUMF_EVENT_ATTR(cf_z14, L1D_L2D_SOURCED_WRITES, 0x0085);
CPUMF_EVENT_ATTR(cf_z14, ITLB2_WRITES, 0x0086);
CPUMF_EVENT_ATTR(cf_z14, ITLB2_MISSES, 0x0087);
CPUMF_EVENT_ATTR(cf_z14, L1I_L2I_SOURCED_WRITES, 0x0088);
CPUMF_EVENT_ATTR(cf_z14, TLB2_PTE_WRITES, 0x0089);
CPUMF_EVENT_ATTR(cf_z14, TLB2_CRSTE_WRITES, 0x008a);
CPUMF_EVENT_ATTR(cf_z14, TLB2_ENGINES_BUSY, 0x008b);
CPUMF_EVENT_ATTR(cf_z14, TX_C_TEND, 0x008c);
CPUMF_EVENT_ATTR(cf_z14, TX_NC_TEND, 0x008d);
CPUMF_EVENT_ATTR(cf_z14, L1C_TLB2_MISSES, 0x008f);
CPUMF_EVENT_ATTR(cf_z14, L1D_ONCHIP_L3_SOURCED_WRITES, 0x0090);
CPUMF_EVENT_ATTR(cf_z14, L1D_ONCHIP_MEMORY_SOURCED_WRITES, 0x0091);
CPUMF_EVENT_ATTR(cf_z14, L1D_ONCHIP_L3_SOURCED_WRITES_IV, 0x0092);
CPUMF_EVENT_ATTR(cf_z14, L1D_ONCLUSTER_L3_SOURCED_WRITES, 0x0093);
CPUMF_EVENT_ATTR(cf_z14, L1D_ONCLUSTER_MEMORY_SOURCED_WRITES, 0x0094);
CPUMF_EVENT_ATTR(cf_z14, L1D_ONCLUSTER_L3_SOURCED_WRITES_IV, 0x0095);
CPUMF_EVENT_ATTR(cf_z14, L1D_OFFCLUSTER_L3_SOURCED_WRITES, 0x0096);
CPUMF_EVENT_ATTR(cf_z14, L1D_OFFCLUSTER_MEMORY_SOURCED_WRITES, 0x0097);
CPUMF_EVENT_ATTR(cf_z14, L1D_OFFCLUSTER_L3_SOURCED_WRITES_IV, 0x0098);
CPUMF_EVENT_ATTR(cf_z14, L1D_OFFDRAWER_L3_SOURCED_WRITES, 0x0099);
CPUMF_EVENT_ATTR(cf_z14, L1D_OFFDRAWER_MEMORY_SOURCED_WRITES, 0x009a);
CPUMF_EVENT_ATTR(cf_z14, L1D_OFFDRAWER_L3_SOURCED_WRITES_IV, 0x009b);
CPUMF_EVENT_ATTR(cf_z14, L1D_ONDRAWER_L4_SOURCED_WRITES, 0x009c);
CPUMF_EVENT_ATTR(cf_z14, L1D_OFFDRAWER_L4_SOURCED_WRITES, 0x009d);
CPUMF_EVENT_ATTR(cf_z14, L1D_ONCHIP_L3_SOURCED_WRITES_RO, 0x009e);
CPUMF_EVENT_ATTR(cf_z14, L1I_ONCHIP_L3_SOURCED_WRITES, 0x00a2);
CPUMF_EVENT_ATTR(cf_z14, L1I_ONCHIP_MEMORY_SOURCED_WRITES, 0x00a3);
CPUMF_EVENT_ATTR(cf_z14, L1I_ONCHIP_L3_SOURCED_WRITES_IV, 0x00a4);
CPUMF_EVENT_ATTR(cf_z14, L1I_ONCLUSTER_L3_SOURCED_WRITES, 0x00a5);
CPUMF_EVENT_ATTR(cf_z14, L1I_ONCLUSTER_MEMORY_SOURCED_WRITES, 0x00a6);
CPUMF_EVENT_ATTR(cf_z14, L1I_ONCLUSTER_L3_SOURCED_WRITES_IV, 0x00a7);
CPUMF_EVENT_ATTR(cf_z14, L1I_OFFCLUSTER_L3_SOURCED_WRITES, 0x00a8);
CPUMF_EVENT_ATTR(cf_z14, L1I_OFFCLUSTER_MEMORY_SOURCED_WRITES, 0x00a9);
CPUMF_EVENT_ATTR(cf_z14, L1I_OFFCLUSTER_L3_SOURCED_WRITES_IV, 0x00aa);
CPUMF_EVENT_ATTR(cf_z14, L1I_OFFDRAWER_L3_SOURCED_WRITES, 0x00ab);
CPUMF_EVENT_ATTR(cf_z14, L1I_OFFDRAWER_MEMORY_SOURCED_WRITES, 0x00ac);
CPUMF_EVENT_ATTR(cf_z14, L1I_OFFDRAWER_L3_SOURCED_WRITES_IV, 0x00ad);
CPUMF_EVENT_ATTR(cf_z14, L1I_ONDRAWER_L4_SOURCED_WRITES, 0x00ae);
CPUMF_EVENT_ATTR(cf_z14, L1I_OFFDRAWER_L4_SOURCED_WRITES, 0x00af);
CPUMF_EVENT_ATTR(cf_z14, BCD_DFP_EXECUTION_SLOTS, 0x00e0);
CPUMF_EVENT_ATTR(cf_z14, VX_BCD_EXECUTION_SLOTS, 0x00e1);
CPUMF_EVENT_ATTR(cf_z14, DECIMAL_INSTRUCTIONS, 0x00e2);
CPUMF_EVENT_ATTR(cf_z14, LAST_HOST_TRANSLATIONS, 0x00e8);
CPUMF_EVENT_ATTR(cf_z14, TX_NC_TABORT, 0x00f3);
CPUMF_EVENT_ATTR(cf_z14, TX_C_TABORT_NO_SPECIAL, 0x00f4);
CPUMF_EVENT_ATTR(cf_z14, TX_C_TABORT_SPECIAL, 0x00f5);
CPUMF_EVENT_ATTR(cf_z14, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0);
CPUMF_EVENT_ATTR(cf_z14, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1);
CPUMF_EVENT_ATTR(cf_z15, L1D_RO_EXCL_WRITES, 0x0080);
CPUMF_EVENT_ATTR(cf_z15, DTLB2_WRITES, 0x0081);
CPUMF_EVENT_ATTR(cf_z15, DTLB2_MISSES, 0x0082);
CPUMF_EVENT_ATTR(cf_z15, DTLB2_HPAGE_WRITES, 0x0083);
CPUMF_EVENT_ATTR(cf_z15, DTLB2_GPAGE_WRITES, 0x0084);
CPUMF_EVENT_ATTR(cf_z15, L1D_L2D_SOURCED_WRITES, 0x0085);
CPUMF_EVENT_ATTR(cf_z15, ITLB2_WRITES, 0x0086);
CPUMF_EVENT_ATTR(cf_z15, ITLB2_MISSES, 0x0087);
CPUMF_EVENT_ATTR(cf_z15, L1I_L2I_SOURCED_WRITES, 0x0088);
CPUMF_EVENT_ATTR(cf_z15, TLB2_PTE_WRITES, 0x0089);
CPUMF_EVENT_ATTR(cf_z15, TLB2_CRSTE_WRITES, 0x008a);
CPUMF_EVENT_ATTR(cf_z15, TLB2_ENGINES_BUSY, 0x008b);
CPUMF_EVENT_ATTR(cf_z15, TX_C_TEND, 0x008c);
CPUMF_EVENT_ATTR(cf_z15, TX_NC_TEND, 0x008d);
CPUMF_EVENT_ATTR(cf_z15, L1C_TLB2_MISSES, 0x008f);
CPUMF_EVENT_ATTR(cf_z15, L1D_ONCHIP_L3_SOURCED_WRITES, 0x0090);
CPUMF_EVENT_ATTR(cf_z15, L1D_ONCHIP_MEMORY_SOURCED_WRITES, 0x0091);
CPUMF_EVENT_ATTR(cf_z15, L1D_ONCHIP_L3_SOURCED_WRITES_IV, 0x0092);
CPUMF_EVENT_ATTR(cf_z15, L1D_ONCLUSTER_L3_SOURCED_WRITES, 0x0093);
CPUMF_EVENT_ATTR(cf_z15, L1D_ONCLUSTER_MEMORY_SOURCED_WRITES, 0x0094);
CPUMF_EVENT_ATTR(cf_z15, L1D_ONCLUSTER_L3_SOURCED_WRITES_IV, 0x0095);
CPUMF_EVENT_ATTR(cf_z15, L1D_OFFCLUSTER_L3_SOURCED_WRITES, 0x0096);
CPUMF_EVENT_ATTR(cf_z15, L1D_OFFCLUSTER_MEMORY_SOURCED_WRITES, 0x0097);
CPUMF_EVENT_ATTR(cf_z15, L1D_OFFCLUSTER_L3_SOURCED_WRITES_IV, 0x0098);
CPUMF_EVENT_ATTR(cf_z15, L1D_OFFDRAWER_L3_SOURCED_WRITES, 0x0099);
CPUMF_EVENT_ATTR(cf_z15, L1D_OFFDRAWER_MEMORY_SOURCED_WRITES, 0x009a);
CPUMF_EVENT_ATTR(cf_z15, L1D_OFFDRAWER_L3_SOURCED_WRITES_IV, 0x009b);
CPUMF_EVENT_ATTR(cf_z15, L1D_ONDRAWER_L4_SOURCED_WRITES, 0x009c);
CPUMF_EVENT_ATTR(cf_z15, L1D_OFFDRAWER_L4_SOURCED_WRITES, 0x009d);
CPUMF_EVENT_ATTR(cf_z15, L1D_ONCHIP_L3_SOURCED_WRITES_RO, 0x009e);
CPUMF_EVENT_ATTR(cf_z15, L1I_ONCHIP_L3_SOURCED_WRITES, 0x00a2);
CPUMF_EVENT_ATTR(cf_z15, L1I_ONCHIP_MEMORY_SOURCED_WRITES, 0x00a3);
CPUMF_EVENT_ATTR(cf_z15, L1I_ONCHIP_L3_SOURCED_WRITES_IV, 0x00a4);
CPUMF_EVENT_ATTR(cf_z15, L1I_ONCLUSTER_L3_SOURCED_WRITES, 0x00a5);
CPUMF_EVENT_ATTR(cf_z15, L1I_ONCLUSTER_MEMORY_SOURCED_WRITES, 0x00a6);
CPUMF_EVENT_ATTR(cf_z15, L1I_ONCLUSTER_L3_SOURCED_WRITES_IV, 0x00a7);
CPUMF_EVENT_ATTR(cf_z15, L1I_OFFCLUSTER_L3_SOURCED_WRITES, 0x00a8);
CPUMF_EVENT_ATTR(cf_z15, L1I_OFFCLUSTER_MEMORY_SOURCED_WRITES, 0x00a9);
CPUMF_EVENT_ATTR(cf_z15, L1I_OFFCLUSTER_L3_SOURCED_WRITES_IV, 0x00aa);
CPUMF_EVENT_ATTR(cf_z15, L1I_OFFDRAWER_L3_SOURCED_WRITES, 0x00ab);
CPUMF_EVENT_ATTR(cf_z15, L1I_OFFDRAWER_MEMORY_SOURCED_WRITES, 0x00ac);
CPUMF_EVENT_ATTR(cf_z15, L1I_OFFDRAWER_L3_SOURCED_WRITES_IV, 0x00ad);
CPUMF_EVENT_ATTR(cf_z15, L1I_ONDRAWER_L4_SOURCED_WRITES, 0x00ae);
CPUMF_EVENT_ATTR(cf_z15, L1I_OFFDRAWER_L4_SOURCED_WRITES, 0x00af);
CPUMF_EVENT_ATTR(cf_z15, BCD_DFP_EXECUTION_SLOTS, 0x00e0);
CPUMF_EVENT_ATTR(cf_z15, VX_BCD_EXECUTION_SLOTS, 0x00e1);
CPUMF_EVENT_ATTR(cf_z15, DECIMAL_INSTRUCTIONS, 0x00e2);
CPUMF_EVENT_ATTR(cf_z15, LAST_HOST_TRANSLATIONS, 0x00e8);
CPUMF_EVENT_ATTR(cf_z15, TX_NC_TABORT, 0x00f3);
CPUMF_EVENT_ATTR(cf_z15, TX_C_TABORT_NO_SPECIAL, 0x00f4);
CPUMF_EVENT_ATTR(cf_z15, TX_C_TABORT_SPECIAL, 0x00f5);
CPUMF_EVENT_ATTR(cf_z15, DFLT_ACCESS, 0x00f7);
CPUMF_EVENT_ATTR(cf_z15, DFLT_CYCLES, 0x00fc);
CPUMF_EVENT_ATTR(cf_z15, DFLT_CC, 0x00108);
CPUMF_EVENT_ATTR(cf_z15, DFLT_CCFINISH, 0x00109);
CPUMF_EVENT_ATTR(cf_z15, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0);
CPUMF_EVENT_ATTR(cf_z15, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1);
CPUMF_EVENT_ATTR(cf_z16, L1D_RO_EXCL_WRITES, 0x0080);
CPUMF_EVENT_ATTR(cf_z16, DTLB2_WRITES, 0x0081);
CPUMF_EVENT_ATTR(cf_z16, DTLB2_MISSES, 0x0082);
CPUMF_EVENT_ATTR(cf_z16, CRSTE_1MB_WRITES, 0x0083);
CPUMF_EVENT_ATTR(cf_z16, DTLB2_GPAGE_WRITES, 0x0084);
CPUMF_EVENT_ATTR(cf_z16, ITLB2_WRITES, 0x0086);
CPUMF_EVENT_ATTR(cf_z16, ITLB2_MISSES, 0x0087);
CPUMF_EVENT_ATTR(cf_z16, TLB2_PTE_WRITES, 0x0089);
CPUMF_EVENT_ATTR(cf_z16, TLB2_CRSTE_WRITES, 0x008a);
CPUMF_EVENT_ATTR(cf_z16, TLB2_ENGINES_BUSY, 0x008b);
CPUMF_EVENT_ATTR(cf_z16, TX_C_TEND, 0x008c);
CPUMF_EVENT_ATTR(cf_z16, TX_NC_TEND, 0x008d);
CPUMF_EVENT_ATTR(cf_z16, L1C_TLB2_MISSES, 0x008f);
CPUMF_EVENT_ATTR(cf_z16, DCW_REQ, 0x0091);
CPUMF_EVENT_ATTR(cf_z16, DCW_REQ_IV, 0x0092);
CPUMF_EVENT_ATTR(cf_z16, DCW_REQ_CHIP_HIT, 0x0093);
CPUMF_EVENT_ATTR(cf_z16, DCW_REQ_DRAWER_HIT, 0x0094);
CPUMF_EVENT_ATTR(cf_z16, DCW_ON_CHIP, 0x0095);
CPUMF_EVENT_ATTR(cf_z16, DCW_ON_CHIP_IV, 0x0096);
CPUMF_EVENT_ATTR(cf_z16, DCW_ON_CHIP_CHIP_HIT, 0x0097);
CPUMF_EVENT_ATTR(cf_z16, DCW_ON_CHIP_DRAWER_HIT, 0x0098);
CPUMF_EVENT_ATTR(cf_z16, DCW_ON_MODULE, 0x0099);
CPUMF_EVENT_ATTR(cf_z16, DCW_ON_DRAWER, 0x009a);
CPUMF_EVENT_ATTR(cf_z16, DCW_OFF_DRAWER, 0x009b);
CPUMF_EVENT_ATTR(cf_z16, DCW_ON_CHIP_MEMORY, 0x009c);
CPUMF_EVENT_ATTR(cf_z16, DCW_ON_MODULE_MEMORY, 0x009d);
CPUMF_EVENT_ATTR(cf_z16, DCW_ON_DRAWER_MEMORY, 0x009e);
CPUMF_EVENT_ATTR(cf_z16, DCW_OFF_DRAWER_MEMORY, 0x009f);
CPUMF_EVENT_ATTR(cf_z16, IDCW_ON_MODULE_IV, 0x00a0);
CPUMF_EVENT_ATTR(cf_z16, IDCW_ON_MODULE_CHIP_HIT, 0x00a1);
CPUMF_EVENT_ATTR(cf_z16, IDCW_ON_MODULE_DRAWER_HIT, 0x00a2);
CPUMF_EVENT_ATTR(cf_z16, IDCW_ON_DRAWER_IV, 0x00a3);
CPUMF_EVENT_ATTR(cf_z16, IDCW_ON_DRAWER_CHIP_HIT, 0x00a4);
CPUMF_EVENT_ATTR(cf_z16, IDCW_ON_DRAWER_DRAWER_HIT, 0x00a5);
CPUMF_EVENT_ATTR(cf_z16, IDCW_OFF_DRAWER_IV, 0x00a6);
CPUMF_EVENT_ATTR(cf_z16, IDCW_OFF_DRAWER_CHIP_HIT, 0x00a7);
CPUMF_EVENT_ATTR(cf_z16, IDCW_OFF_DRAWER_DRAWER_HIT, 0x00a8);
CPUMF_EVENT_ATTR(cf_z16, ICW_REQ, 0x00a9);
CPUMF_EVENT_ATTR(cf_z16, ICW_REQ_IV, 0x00aa);
CPUMF_EVENT_ATTR(cf_z16, ICW_REQ_CHIP_HIT, 0x00ab);
CPUMF_EVENT_ATTR(cf_z16, ICW_REQ_DRAWER_HIT, 0x00ac);
CPUMF_EVENT_ATTR(cf_z16, ICW_ON_CHIP, 0x00ad);
CPUMF_EVENT_ATTR(cf_z16, ICW_ON_CHIP_IV, 0x00ae);
CPUMF_EVENT_ATTR(cf_z16, ICW_ON_CHIP_CHIP_HIT, 0x00af);
CPUMF_EVENT_ATTR(cf_z16, ICW_ON_CHIP_DRAWER_HIT, 0x00b0);
CPUMF_EVENT_ATTR(cf_z16, ICW_ON_MODULE, 0x00b1);
CPUMF_EVENT_ATTR(cf_z16, ICW_ON_DRAWER, 0x00b2);
CPUMF_EVENT_ATTR(cf_z16, ICW_OFF_DRAWER, 0x00b3);
CPUMF_EVENT_ATTR(cf_z16, ICW_ON_CHIP_MEMORY, 0x00b4);
CPUMF_EVENT_ATTR(cf_z16, ICW_ON_MODULE_MEMORY, 0x00b5);
CPUMF_EVENT_ATTR(cf_z16, ICW_ON_DRAWER_MEMORY, 0x00b6);
CPUMF_EVENT_ATTR(cf_z16, ICW_OFF_DRAWER_MEMORY, 0x00b7);
CPUMF_EVENT_ATTR(cf_z16, BCD_DFP_EXECUTION_SLOTS, 0x00e0);
CPUMF_EVENT_ATTR(cf_z16, VX_BCD_EXECUTION_SLOTS, 0x00e1);
CPUMF_EVENT_ATTR(cf_z16, DECIMAL_INSTRUCTIONS, 0x00e2);
CPUMF_EVENT_ATTR(cf_z16, LAST_HOST_TRANSLATIONS, 0x00e8);
CPUMF_EVENT_ATTR(cf_z16, TX_NC_TABORT, 0x00f4);
CPUMF_EVENT_ATTR(cf_z16, TX_C_TABORT_NO_SPECIAL, 0x00f5);
CPUMF_EVENT_ATTR(cf_z16, TX_C_TABORT_SPECIAL, 0x00f6);
CPUMF_EVENT_ATTR(cf_z16, DFLT_ACCESS, 0x00f8);
CPUMF_EVENT_ATTR(cf_z16, DFLT_CYCLES, 0x00fd);
CPUMF_EVENT_ATTR(cf_z16, SORTL, 0x0100);
CPUMF_EVENT_ATTR(cf_z16, DFLT_CC, 0x0109);
CPUMF_EVENT_ATTR(cf_z16, DFLT_CCFINISH, 0x010a);
CPUMF_EVENT_ATTR(cf_z16, NNPA_INVOCATIONS, 0x010b);
CPUMF_EVENT_ATTR(cf_z16, NNPA_COMPLETIONS, 0x010c);
CPUMF_EVENT_ATTR(cf_z16, NNPA_WAIT_LOCK, 0x010d);
CPUMF_EVENT_ATTR(cf_z16, NNPA_HOLD_LOCK, 0x010e);
CPUMF_EVENT_ATTR(cf_z16, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0);
CPUMF_EVENT_ATTR(cf_z16, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1);
static struct attribute *cpumcf_fvn1_pmu_event_attr[] __initdata = {
CPUMF_EVENT_PTR(cf_fvn1, CPU_CYCLES),
CPUMF_EVENT_PTR(cf_fvn1, INSTRUCTIONS),
CPUMF_EVENT_PTR(cf_fvn1, L1I_DIR_WRITES),
CPUMF_EVENT_PTR(cf_fvn1, L1I_PENALTY_CYCLES),
CPUMF_EVENT_PTR(cf_fvn1, PROBLEM_STATE_CPU_CYCLES),
CPUMF_EVENT_PTR(cf_fvn1, PROBLEM_STATE_INSTRUCTIONS),
CPUMF_EVENT_PTR(cf_fvn1, PROBLEM_STATE_L1I_DIR_WRITES),
CPUMF_EVENT_PTR(cf_fvn1, PROBLEM_STATE_L1I_PENALTY_CYCLES),
CPUMF_EVENT_PTR(cf_fvn1, PROBLEM_STATE_L1D_DIR_WRITES),
CPUMF_EVENT_PTR(cf_fvn1, PROBLEM_STATE_L1D_PENALTY_CYCLES),
CPUMF_EVENT_PTR(cf_fvn1, L1D_DIR_WRITES),
CPUMF_EVENT_PTR(cf_fvn1, L1D_PENALTY_CYCLES),
NULL,
};
static struct attribute *cpumcf_fvn3_pmu_event_attr[] __initdata = {
CPUMF_EVENT_PTR(cf_fvn3, CPU_CYCLES),
CPUMF_EVENT_PTR(cf_fvn3, INSTRUCTIONS),
CPUMF_EVENT_PTR(cf_fvn3, L1I_DIR_WRITES),
CPUMF_EVENT_PTR(cf_fvn3, L1I_PENALTY_CYCLES),
CPUMF_EVENT_PTR(cf_fvn3, PROBLEM_STATE_CPU_CYCLES),
CPUMF_EVENT_PTR(cf_fvn3, PROBLEM_STATE_INSTRUCTIONS),
CPUMF_EVENT_PTR(cf_fvn3, L1D_DIR_WRITES),
CPUMF_EVENT_PTR(cf_fvn3, L1D_PENALTY_CYCLES),
NULL,
};
static struct attribute *cpumcf_svn_12345_pmu_event_attr[] __initdata = {
CPUMF_EVENT_PTR(cf_svn_12345, PRNG_FUNCTIONS),
CPUMF_EVENT_PTR(cf_svn_12345, PRNG_CYCLES),
CPUMF_EVENT_PTR(cf_svn_12345, PRNG_BLOCKED_FUNCTIONS),
CPUMF_EVENT_PTR(cf_svn_12345, PRNG_BLOCKED_CYCLES),
CPUMF_EVENT_PTR(cf_svn_12345, SHA_FUNCTIONS),
CPUMF_EVENT_PTR(cf_svn_12345, SHA_CYCLES),
CPUMF_EVENT_PTR(cf_svn_12345, SHA_BLOCKED_FUNCTIONS),
CPUMF_EVENT_PTR(cf_svn_12345, SHA_BLOCKED_CYCLES),
CPUMF_EVENT_PTR(cf_svn_12345, DEA_FUNCTIONS),
CPUMF_EVENT_PTR(cf_svn_12345, DEA_CYCLES),
CPUMF_EVENT_PTR(cf_svn_12345, DEA_BLOCKED_FUNCTIONS),
CPUMF_EVENT_PTR(cf_svn_12345, DEA_BLOCKED_CYCLES),
CPUMF_EVENT_PTR(cf_svn_12345, AES_FUNCTIONS),
CPUMF_EVENT_PTR(cf_svn_12345, AES_CYCLES),
CPUMF_EVENT_PTR(cf_svn_12345, AES_BLOCKED_FUNCTIONS),
CPUMF_EVENT_PTR(cf_svn_12345, AES_BLOCKED_CYCLES),
NULL,
};
static struct attribute *cpumcf_svn_67_pmu_event_attr[] __initdata = {
CPUMF_EVENT_PTR(cf_svn_12345, PRNG_FUNCTIONS),
CPUMF_EVENT_PTR(cf_svn_12345, PRNG_CYCLES),
CPUMF_EVENT_PTR(cf_svn_12345, PRNG_BLOCKED_FUNCTIONS),
CPUMF_EVENT_PTR(cf_svn_12345, PRNG_BLOCKED_CYCLES),
CPUMF_EVENT_PTR(cf_svn_12345, SHA_FUNCTIONS),
CPUMF_EVENT_PTR(cf_svn_12345, SHA_CYCLES),
CPUMF_EVENT_PTR(cf_svn_12345, SHA_BLOCKED_FUNCTIONS),
CPUMF_EVENT_PTR(cf_svn_12345, SHA_BLOCKED_CYCLES),
CPUMF_EVENT_PTR(cf_svn_12345, DEA_FUNCTIONS),
CPUMF_EVENT_PTR(cf_svn_12345, DEA_CYCLES),
CPUMF_EVENT_PTR(cf_svn_12345, DEA_BLOCKED_FUNCTIONS),
CPUMF_EVENT_PTR(cf_svn_12345, DEA_BLOCKED_CYCLES),
CPUMF_EVENT_PTR(cf_svn_12345, AES_FUNCTIONS),
CPUMF_EVENT_PTR(cf_svn_12345, AES_CYCLES),
CPUMF_EVENT_PTR(cf_svn_12345, AES_BLOCKED_FUNCTIONS),
CPUMF_EVENT_PTR(cf_svn_12345, AES_BLOCKED_CYCLES),
CPUMF_EVENT_PTR(cf_svn_6, ECC_FUNCTION_COUNT),
CPUMF_EVENT_PTR(cf_svn_6, ECC_CYCLES_COUNT),
CPUMF_EVENT_PTR(cf_svn_6, ECC_BLOCKED_FUNCTION_COUNT),
CPUMF_EVENT_PTR(cf_svn_6, ECC_BLOCKED_CYCLES_COUNT),
NULL,
};
static struct attribute *cpumcf_z10_pmu_event_attr[] __initdata = {
CPUMF_EVENT_PTR(cf_z10, L1I_L2_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z10, L1D_L2_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z10, L1I_L3_LOCAL_WRITES),
CPUMF_EVENT_PTR(cf_z10, L1D_L3_LOCAL_WRITES),
CPUMF_EVENT_PTR(cf_z10, L1I_L3_REMOTE_WRITES),
CPUMF_EVENT_PTR(cf_z10, L1D_L3_REMOTE_WRITES),
CPUMF_EVENT_PTR(cf_z10, L1D_LMEM_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z10, L1I_LMEM_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z10, L1D_RO_EXCL_WRITES),
CPUMF_EVENT_PTR(cf_z10, L1I_CACHELINE_INVALIDATES),
CPUMF_EVENT_PTR(cf_z10, ITLB1_WRITES),
CPUMF_EVENT_PTR(cf_z10, DTLB1_WRITES),
CPUMF_EVENT_PTR(cf_z10, TLB2_PTE_WRITES),
CPUMF_EVENT_PTR(cf_z10, TLB2_CRSTE_WRITES),
CPUMF_EVENT_PTR(cf_z10, TLB2_CRSTE_HPAGE_WRITES),
CPUMF_EVENT_PTR(cf_z10, ITLB1_MISSES),
CPUMF_EVENT_PTR(cf_z10, DTLB1_MISSES),
CPUMF_EVENT_PTR(cf_z10, L2C_STORES_SENT),
NULL,
};
static struct attribute *cpumcf_z196_pmu_event_attr[] __initdata = {
CPUMF_EVENT_PTR(cf_z196, L1D_L2_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z196, L1I_L2_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z196, DTLB1_MISSES),
CPUMF_EVENT_PTR(cf_z196, ITLB1_MISSES),
CPUMF_EVENT_PTR(cf_z196, L2C_STORES_SENT),
CPUMF_EVENT_PTR(cf_z196, L1D_OFFBOOK_L3_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z196, L1D_ONBOOK_L4_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z196, L1I_ONBOOK_L4_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z196, L1D_RO_EXCL_WRITES),
CPUMF_EVENT_PTR(cf_z196, L1D_OFFBOOK_L4_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z196, L1I_OFFBOOK_L4_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z196, DTLB1_HPAGE_WRITES),
CPUMF_EVENT_PTR(cf_z196, L1D_LMEM_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z196, L1I_LMEM_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z196, L1I_OFFBOOK_L3_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z196, DTLB1_WRITES),
CPUMF_EVENT_PTR(cf_z196, ITLB1_WRITES),
CPUMF_EVENT_PTR(cf_z196, TLB2_PTE_WRITES),
CPUMF_EVENT_PTR(cf_z196, TLB2_CRSTE_HPAGE_WRITES),
CPUMF_EVENT_PTR(cf_z196, TLB2_CRSTE_WRITES),
CPUMF_EVENT_PTR(cf_z196, L1D_ONCHIP_L3_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z196, L1D_OFFCHIP_L3_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z196, L1I_ONCHIP_L3_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z196, L1I_OFFCHIP_L3_SOURCED_WRITES),
NULL,
};
static struct attribute *cpumcf_zec12_pmu_event_attr[] __initdata = {
CPUMF_EVENT_PTR(cf_zec12, DTLB1_MISSES),
CPUMF_EVENT_PTR(cf_zec12, ITLB1_MISSES),
CPUMF_EVENT_PTR(cf_zec12, L1D_L2I_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_zec12, L1I_L2I_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_zec12, L1D_L2D_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_zec12, DTLB1_WRITES),
CPUMF_EVENT_PTR(cf_zec12, L1D_LMEM_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_zec12, L1I_LMEM_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_zec12, L1D_RO_EXCL_WRITES),
CPUMF_EVENT_PTR(cf_zec12, DTLB1_HPAGE_WRITES),
CPUMF_EVENT_PTR(cf_zec12, ITLB1_WRITES),
CPUMF_EVENT_PTR(cf_zec12, TLB2_PTE_WRITES),
CPUMF_EVENT_PTR(cf_zec12, TLB2_CRSTE_HPAGE_WRITES),
CPUMF_EVENT_PTR(cf_zec12, TLB2_CRSTE_WRITES),
CPUMF_EVENT_PTR(cf_zec12, L1D_ONCHIP_L3_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_zec12, L1D_OFFCHIP_L3_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_zec12, L1D_OFFBOOK_L3_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_zec12, L1D_ONBOOK_L4_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_zec12, L1D_OFFBOOK_L4_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_zec12, TX_NC_TEND),
CPUMF_EVENT_PTR(cf_zec12, L1D_ONCHIP_L3_SOURCED_WRITES_IV),
CPUMF_EVENT_PTR(cf_zec12, L1D_OFFCHIP_L3_SOURCED_WRITES_IV),
CPUMF_EVENT_PTR(cf_zec12, L1D_OFFBOOK_L3_SOURCED_WRITES_IV),
CPUMF_EVENT_PTR(cf_zec12, L1I_ONCHIP_L3_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_zec12, L1I_OFFCHIP_L3_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_zec12, L1I_OFFBOOK_L3_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_zec12, L1I_ONBOOK_L4_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_zec12, L1I_OFFBOOK_L4_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_zec12, TX_C_TEND),
CPUMF_EVENT_PTR(cf_zec12, L1I_ONCHIP_L3_SOURCED_WRITES_IV),
CPUMF_EVENT_PTR(cf_zec12, L1I_OFFCHIP_L3_SOURCED_WRITES_IV),
CPUMF_EVENT_PTR(cf_zec12, L1I_OFFBOOK_L3_SOURCED_WRITES_IV),
CPUMF_EVENT_PTR(cf_zec12, TX_NC_TABORT),
CPUMF_EVENT_PTR(cf_zec12, TX_C_TABORT_NO_SPECIAL),
CPUMF_EVENT_PTR(cf_zec12, TX_C_TABORT_SPECIAL),
NULL,
};
static struct attribute *cpumcf_z13_pmu_event_attr[] __initdata = {
CPUMF_EVENT_PTR(cf_z13, L1D_RO_EXCL_WRITES),
CPUMF_EVENT_PTR(cf_z13, DTLB1_WRITES),
CPUMF_EVENT_PTR(cf_z13, DTLB1_MISSES),
CPUMF_EVENT_PTR(cf_z13, DTLB1_HPAGE_WRITES),
CPUMF_EVENT_PTR(cf_z13, DTLB1_GPAGE_WRITES),
CPUMF_EVENT_PTR(cf_z13, L1D_L2D_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z13, ITLB1_WRITES),
CPUMF_EVENT_PTR(cf_z13, ITLB1_MISSES),
CPUMF_EVENT_PTR(cf_z13, L1I_L2I_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z13, TLB2_PTE_WRITES),
CPUMF_EVENT_PTR(cf_z13, TLB2_CRSTE_HPAGE_WRITES),
CPUMF_EVENT_PTR(cf_z13, TLB2_CRSTE_WRITES),
CPUMF_EVENT_PTR(cf_z13, TX_C_TEND),
CPUMF_EVENT_PTR(cf_z13, TX_NC_TEND),
CPUMF_EVENT_PTR(cf_z13, L1C_TLB1_MISSES),
CPUMF_EVENT_PTR(cf_z13, L1D_ONCHIP_L3_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z13, L1D_ONCHIP_L3_SOURCED_WRITES_IV),
CPUMF_EVENT_PTR(cf_z13, L1D_ONNODE_L4_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z13, L1D_ONNODE_L3_SOURCED_WRITES_IV),
CPUMF_EVENT_PTR(cf_z13, L1D_ONNODE_L3_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z13, L1D_ONDRAWER_L4_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z13, L1D_ONDRAWER_L3_SOURCED_WRITES_IV),
CPUMF_EVENT_PTR(cf_z13, L1D_ONDRAWER_L3_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z13, L1D_OFFDRAWER_SCOL_L4_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z13, L1D_OFFDRAWER_SCOL_L3_SOURCED_WRITES_IV),
CPUMF_EVENT_PTR(cf_z13, L1D_OFFDRAWER_SCOL_L3_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z13, L1D_OFFDRAWER_FCOL_L4_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z13, L1D_OFFDRAWER_FCOL_L3_SOURCED_WRITES_IV),
CPUMF_EVENT_PTR(cf_z13, L1D_OFFDRAWER_FCOL_L3_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z13, L1D_ONNODE_MEM_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z13, L1D_ONDRAWER_MEM_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z13, L1D_OFFDRAWER_MEM_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z13, L1D_ONCHIP_MEM_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z13, L1I_ONCHIP_L3_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z13, L1I_ONCHIP_L3_SOURCED_WRITES_IV),
CPUMF_EVENT_PTR(cf_z13, L1I_ONNODE_L4_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z13, L1I_ONNODE_L3_SOURCED_WRITES_IV),
CPUMF_EVENT_PTR(cf_z13, L1I_ONNODE_L3_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z13, L1I_ONDRAWER_L4_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z13, L1I_ONDRAWER_L3_SOURCED_WRITES_IV),
CPUMF_EVENT_PTR(cf_z13, L1I_ONDRAWER_L3_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z13, L1I_OFFDRAWER_SCOL_L4_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z13, L1I_OFFDRAWER_SCOL_L3_SOURCED_WRITES_IV),
CPUMF_EVENT_PTR(cf_z13, L1I_OFFDRAWER_SCOL_L3_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z13, L1I_OFFDRAWER_FCOL_L4_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z13, L1I_OFFDRAWER_FCOL_L3_SOURCED_WRITES_IV),
CPUMF_EVENT_PTR(cf_z13, L1I_OFFDRAWER_FCOL_L3_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z13, L1I_ONNODE_MEM_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z13, L1I_ONDRAWER_MEM_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z13, L1I_OFFDRAWER_MEM_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z13, L1I_ONCHIP_MEM_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z13, TX_NC_TABORT),
CPUMF_EVENT_PTR(cf_z13, TX_C_TABORT_NO_SPECIAL),
CPUMF_EVENT_PTR(cf_z13, TX_C_TABORT_SPECIAL),
CPUMF_EVENT_PTR(cf_z13, MT_DIAG_CYCLES_ONE_THR_ACTIVE),
CPUMF_EVENT_PTR(cf_z13, MT_DIAG_CYCLES_TWO_THR_ACTIVE),
NULL,
};
static struct attribute *cpumcf_z14_pmu_event_attr[] __initdata = {
CPUMF_EVENT_PTR(cf_z14, L1D_RO_EXCL_WRITES),
CPUMF_EVENT_PTR(cf_z14, DTLB2_WRITES),
CPUMF_EVENT_PTR(cf_z14, DTLB2_MISSES),
CPUMF_EVENT_PTR(cf_z14, DTLB2_HPAGE_WRITES),
CPUMF_EVENT_PTR(cf_z14, DTLB2_GPAGE_WRITES),
CPUMF_EVENT_PTR(cf_z14, L1D_L2D_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z14, ITLB2_WRITES),
CPUMF_EVENT_PTR(cf_z14, ITLB2_MISSES),
CPUMF_EVENT_PTR(cf_z14, L1I_L2I_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z14, TLB2_PTE_WRITES),
CPUMF_EVENT_PTR(cf_z14, TLB2_CRSTE_WRITES),
CPUMF_EVENT_PTR(cf_z14, TLB2_ENGINES_BUSY),
CPUMF_EVENT_PTR(cf_z14, TX_C_TEND),
CPUMF_EVENT_PTR(cf_z14, TX_NC_TEND),
CPUMF_EVENT_PTR(cf_z14, L1C_TLB2_MISSES),
CPUMF_EVENT_PTR(cf_z14, L1D_ONCHIP_L3_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z14, L1D_ONCHIP_MEMORY_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z14, L1D_ONCHIP_L3_SOURCED_WRITES_IV),
CPUMF_EVENT_PTR(cf_z14, L1D_ONCLUSTER_L3_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z14, L1D_ONCLUSTER_MEMORY_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z14, L1D_ONCLUSTER_L3_SOURCED_WRITES_IV),
CPUMF_EVENT_PTR(cf_z14, L1D_OFFCLUSTER_L3_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z14, L1D_OFFCLUSTER_MEMORY_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z14, L1D_OFFCLUSTER_L3_SOURCED_WRITES_IV),
CPUMF_EVENT_PTR(cf_z14, L1D_OFFDRAWER_L3_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z14, L1D_OFFDRAWER_MEMORY_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z14, L1D_OFFDRAWER_L3_SOURCED_WRITES_IV),
CPUMF_EVENT_PTR(cf_z14, L1D_ONDRAWER_L4_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z14, L1D_OFFDRAWER_L4_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z14, L1D_ONCHIP_L3_SOURCED_WRITES_RO),
CPUMF_EVENT_PTR(cf_z14, L1I_ONCHIP_L3_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z14, L1I_ONCHIP_MEMORY_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z14, L1I_ONCHIP_L3_SOURCED_WRITES_IV),
CPUMF_EVENT_PTR(cf_z14, L1I_ONCLUSTER_L3_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z14, L1I_ONCLUSTER_MEMORY_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z14, L1I_ONCLUSTER_L3_SOURCED_WRITES_IV),
CPUMF_EVENT_PTR(cf_z14, L1I_OFFCLUSTER_L3_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z14, L1I_OFFCLUSTER_MEMORY_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z14, L1I_OFFCLUSTER_L3_SOURCED_WRITES_IV),
CPUMF_EVENT_PTR(cf_z14, L1I_OFFDRAWER_L3_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z14, L1I_OFFDRAWER_MEMORY_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z14, L1I_OFFDRAWER_L3_SOURCED_WRITES_IV),
CPUMF_EVENT_PTR(cf_z14, L1I_ONDRAWER_L4_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z14, L1I_OFFDRAWER_L4_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z14, BCD_DFP_EXECUTION_SLOTS),
CPUMF_EVENT_PTR(cf_z14, VX_BCD_EXECUTION_SLOTS),
CPUMF_EVENT_PTR(cf_z14, DECIMAL_INSTRUCTIONS),
CPUMF_EVENT_PTR(cf_z14, LAST_HOST_TRANSLATIONS),
CPUMF_EVENT_PTR(cf_z14, TX_NC_TABORT),
CPUMF_EVENT_PTR(cf_z14, TX_C_TABORT_NO_SPECIAL),
CPUMF_EVENT_PTR(cf_z14, TX_C_TABORT_SPECIAL),
CPUMF_EVENT_PTR(cf_z14, MT_DIAG_CYCLES_ONE_THR_ACTIVE),
CPUMF_EVENT_PTR(cf_z14, MT_DIAG_CYCLES_TWO_THR_ACTIVE),
NULL,
};
static struct attribute *cpumcf_z15_pmu_event_attr[] __initdata = {
CPUMF_EVENT_PTR(cf_z15, L1D_RO_EXCL_WRITES),
CPUMF_EVENT_PTR(cf_z15, DTLB2_WRITES),
CPUMF_EVENT_PTR(cf_z15, DTLB2_MISSES),
CPUMF_EVENT_PTR(cf_z15, DTLB2_HPAGE_WRITES),
CPUMF_EVENT_PTR(cf_z15, DTLB2_GPAGE_WRITES),
CPUMF_EVENT_PTR(cf_z15, L1D_L2D_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z15, ITLB2_WRITES),
CPUMF_EVENT_PTR(cf_z15, ITLB2_MISSES),
CPUMF_EVENT_PTR(cf_z15, L1I_L2I_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z15, TLB2_PTE_WRITES),
CPUMF_EVENT_PTR(cf_z15, TLB2_CRSTE_WRITES),
CPUMF_EVENT_PTR(cf_z15, TLB2_ENGINES_BUSY),
CPUMF_EVENT_PTR(cf_z15, TX_C_TEND),
CPUMF_EVENT_PTR(cf_z15, TX_NC_TEND),
CPUMF_EVENT_PTR(cf_z15, L1C_TLB2_MISSES),
CPUMF_EVENT_PTR(cf_z15, L1D_ONCHIP_L3_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z15, L1D_ONCHIP_MEMORY_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z15, L1D_ONCHIP_L3_SOURCED_WRITES_IV),
CPUMF_EVENT_PTR(cf_z15, L1D_ONCLUSTER_L3_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z15, L1D_ONCLUSTER_MEMORY_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z15, L1D_ONCLUSTER_L3_SOURCED_WRITES_IV),
CPUMF_EVENT_PTR(cf_z15, L1D_OFFCLUSTER_L3_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z15, L1D_OFFCLUSTER_MEMORY_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z15, L1D_OFFCLUSTER_L3_SOURCED_WRITES_IV),
CPUMF_EVENT_PTR(cf_z15, L1D_OFFDRAWER_L3_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z15, L1D_OFFDRAWER_MEMORY_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z15, L1D_OFFDRAWER_L3_SOURCED_WRITES_IV),
CPUMF_EVENT_PTR(cf_z15, L1D_ONDRAWER_L4_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z15, L1D_OFFDRAWER_L4_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z15, L1D_ONCHIP_L3_SOURCED_WRITES_RO),
CPUMF_EVENT_PTR(cf_z15, L1I_ONCHIP_L3_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z15, L1I_ONCHIP_MEMORY_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z15, L1I_ONCHIP_L3_SOURCED_WRITES_IV),
CPUMF_EVENT_PTR(cf_z15, L1I_ONCLUSTER_L3_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z15, L1I_ONCLUSTER_MEMORY_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z15, L1I_ONCLUSTER_L3_SOURCED_WRITES_IV),
CPUMF_EVENT_PTR(cf_z15, L1I_OFFCLUSTER_L3_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z15, L1I_OFFCLUSTER_MEMORY_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z15, L1I_OFFCLUSTER_L3_SOURCED_WRITES_IV),
CPUMF_EVENT_PTR(cf_z15, L1I_OFFDRAWER_L3_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z15, L1I_OFFDRAWER_MEMORY_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z15, L1I_OFFDRAWER_L3_SOURCED_WRITES_IV),
CPUMF_EVENT_PTR(cf_z15, L1I_ONDRAWER_L4_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z15, L1I_OFFDRAWER_L4_SOURCED_WRITES),
CPUMF_EVENT_PTR(cf_z15, BCD_DFP_EXECUTION_SLOTS),
CPUMF_EVENT_PTR(cf_z15, VX_BCD_EXECUTION_SLOTS),
CPUMF_EVENT_PTR(cf_z15, DECIMAL_INSTRUCTIONS),
CPUMF_EVENT_PTR(cf_z15, LAST_HOST_TRANSLATIONS),
CPUMF_EVENT_PTR(cf_z15, TX_NC_TABORT),
CPUMF_EVENT_PTR(cf_z15, TX_C_TABORT_NO_SPECIAL),
CPUMF_EVENT_PTR(cf_z15, TX_C_TABORT_SPECIAL),
CPUMF_EVENT_PTR(cf_z15, DFLT_ACCESS),
CPUMF_EVENT_PTR(cf_z15, DFLT_CYCLES),
CPUMF_EVENT_PTR(cf_z15, DFLT_CC),
CPUMF_EVENT_PTR(cf_z15, DFLT_CCFINISH),
CPUMF_EVENT_PTR(cf_z15, MT_DIAG_CYCLES_ONE_THR_ACTIVE),
CPUMF_EVENT_PTR(cf_z15, MT_DIAG_CYCLES_TWO_THR_ACTIVE),
NULL,
};
static struct attribute *cpumcf_z16_pmu_event_attr[] __initdata = {
CPUMF_EVENT_PTR(cf_z16, L1D_RO_EXCL_WRITES),
CPUMF_EVENT_PTR(cf_z16, DTLB2_WRITES),
CPUMF_EVENT_PTR(cf_z16, DTLB2_MISSES),
CPUMF_EVENT_PTR(cf_z16, CRSTE_1MB_WRITES),
CPUMF_EVENT_PTR(cf_z16, DTLB2_GPAGE_WRITES),
CPUMF_EVENT_PTR(cf_z16, ITLB2_WRITES),
CPUMF_EVENT_PTR(cf_z16, ITLB2_MISSES),
CPUMF_EVENT_PTR(cf_z16, TLB2_PTE_WRITES),
CPUMF_EVENT_PTR(cf_z16, TLB2_CRSTE_WRITES),
CPUMF_EVENT_PTR(cf_z16, TLB2_ENGINES_BUSY),
CPUMF_EVENT_PTR(cf_z16, TX_C_TEND),
CPUMF_EVENT_PTR(cf_z16, TX_NC_TEND),
CPUMF_EVENT_PTR(cf_z16, L1C_TLB2_MISSES),
CPUMF_EVENT_PTR(cf_z16, DCW_REQ),
CPUMF_EVENT_PTR(cf_z16, DCW_REQ_IV),
CPUMF_EVENT_PTR(cf_z16, DCW_REQ_CHIP_HIT),
CPUMF_EVENT_PTR(cf_z16, DCW_REQ_DRAWER_HIT),
CPUMF_EVENT_PTR(cf_z16, DCW_ON_CHIP),
CPUMF_EVENT_PTR(cf_z16, DCW_ON_CHIP_IV),
CPUMF_EVENT_PTR(cf_z16, DCW_ON_CHIP_CHIP_HIT),
CPUMF_EVENT_PTR(cf_z16, DCW_ON_CHIP_DRAWER_HIT),
CPUMF_EVENT_PTR(cf_z16, DCW_ON_MODULE),
CPUMF_EVENT_PTR(cf_z16, DCW_ON_DRAWER),
CPUMF_EVENT_PTR(cf_z16, DCW_OFF_DRAWER),
CPUMF_EVENT_PTR(cf_z16, DCW_ON_CHIP_MEMORY),
CPUMF_EVENT_PTR(cf_z16, DCW_ON_MODULE_MEMORY),
CPUMF_EVENT_PTR(cf_z16, DCW_ON_DRAWER_MEMORY),
CPUMF_EVENT_PTR(cf_z16, DCW_OFF_DRAWER_MEMORY),
CPUMF_EVENT_PTR(cf_z16, IDCW_ON_MODULE_IV),
CPUMF_EVENT_PTR(cf_z16, IDCW_ON_MODULE_CHIP_HIT),
CPUMF_EVENT_PTR(cf_z16, IDCW_ON_MODULE_DRAWER_HIT),
CPUMF_EVENT_PTR(cf_z16, IDCW_ON_DRAWER_IV),
CPUMF_EVENT_PTR(cf_z16, IDCW_ON_DRAWER_CHIP_HIT),
CPUMF_EVENT_PTR(cf_z16, IDCW_ON_DRAWER_DRAWER_HIT),
CPUMF_EVENT_PTR(cf_z16, IDCW_OFF_DRAWER_IV),
CPUMF_EVENT_PTR(cf_z16, IDCW_OFF_DRAWER_CHIP_HIT),
CPUMF_EVENT_PTR(cf_z16, IDCW_OFF_DRAWER_DRAWER_HIT),
CPUMF_EVENT_PTR(cf_z16, ICW_REQ),
CPUMF_EVENT_PTR(cf_z16, ICW_REQ_IV),
CPUMF_EVENT_PTR(cf_z16, ICW_REQ_CHIP_HIT),
CPUMF_EVENT_PTR(cf_z16, ICW_REQ_DRAWER_HIT),
CPUMF_EVENT_PTR(cf_z16, ICW_ON_CHIP),
CPUMF_EVENT_PTR(cf_z16, ICW_ON_CHIP_IV),
CPUMF_EVENT_PTR(cf_z16, ICW_ON_CHIP_CHIP_HIT),
CPUMF_EVENT_PTR(cf_z16, ICW_ON_CHIP_DRAWER_HIT),
CPUMF_EVENT_PTR(cf_z16, ICW_ON_MODULE),
CPUMF_EVENT_PTR(cf_z16, ICW_ON_DRAWER),
CPUMF_EVENT_PTR(cf_z16, ICW_OFF_DRAWER),
CPUMF_EVENT_PTR(cf_z16, ICW_ON_CHIP_MEMORY),
CPUMF_EVENT_PTR(cf_z16, ICW_ON_MODULE_MEMORY),
CPUMF_EVENT_PTR(cf_z16, ICW_ON_DRAWER_MEMORY),
CPUMF_EVENT_PTR(cf_z16, ICW_OFF_DRAWER_MEMORY),
CPUMF_EVENT_PTR(cf_z16, BCD_DFP_EXECUTION_SLOTS),
CPUMF_EVENT_PTR(cf_z16, VX_BCD_EXECUTION_SLOTS),
CPUMF_EVENT_PTR(cf_z16, DECIMAL_INSTRUCTIONS),
CPUMF_EVENT_PTR(cf_z16, LAST_HOST_TRANSLATIONS),
CPUMF_EVENT_PTR(cf_z16, TX_NC_TABORT),
CPUMF_EVENT_PTR(cf_z16, TX_C_TABORT_NO_SPECIAL),
CPUMF_EVENT_PTR(cf_z16, TX_C_TABORT_SPECIAL),
CPUMF_EVENT_PTR(cf_z16, DFLT_ACCESS),
CPUMF_EVENT_PTR(cf_z16, DFLT_CYCLES),
CPUMF_EVENT_PTR(cf_z16, SORTL),
CPUMF_EVENT_PTR(cf_z16, DFLT_CC),
CPUMF_EVENT_PTR(cf_z16, DFLT_CCFINISH),
CPUMF_EVENT_PTR(cf_z16, NNPA_INVOCATIONS),
CPUMF_EVENT_PTR(cf_z16, NNPA_COMPLETIONS),
CPUMF_EVENT_PTR(cf_z16, NNPA_WAIT_LOCK),
CPUMF_EVENT_PTR(cf_z16, NNPA_HOLD_LOCK),
CPUMF_EVENT_PTR(cf_z16, MT_DIAG_CYCLES_ONE_THR_ACTIVE),
CPUMF_EVENT_PTR(cf_z16, MT_DIAG_CYCLES_TWO_THR_ACTIVE),
NULL,
};
/* END: CPUM_CF COUNTER DEFINITIONS ===================================== */
static struct attribute_group cpumcf_pmu_events_group = {
.name = "events",
};
PMU_FORMAT_ATTR(event, "config:0-63");
static struct attribute *cpumcf_pmu_format_attr[] = {
&format_attr_event.attr,
NULL,
};
static struct attribute_group cpumcf_pmu_format_group = {
.name = "format",
.attrs = cpumcf_pmu_format_attr,
};
static const struct attribute_group *cpumcf_pmu_attr_groups[] = {
&cpumcf_pmu_events_group,
&cpumcf_pmu_format_group,
NULL,
};
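/*
* Merge three NULL-terminated attribute pointer arrays into one newly
* allocated, NULL-terminated array. Returns NULL when the allocation
* fails; in that case the caller leaves the events group without
* attributes.
*/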
static __init struct attribute **merge_attr(struct attribute **a,
struct attribute **b,
struct attribute **c)
{
struct attribute **new;
int j, i;
for (j = 0; a[j]; j++)
;
for (i = 0; b[i]; i++)
j++;
for (i = 0; c[i]; i++)
j++;
j++;
new = kmalloc_array(j, sizeof(struct attribute *), GFP_KERNEL);
if (!new)
return NULL;
j = 0;
for (i = 0; a[i]; i++)
new[j++] = a[i];
for (i = 0; b[i]; i++)
new[j++] = b[i];
for (i = 0; c[i]; i++)
new[j++] = c[i];
new[j] = NULL;
return new;
}
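/*
* Build the sysfs "events" attribute group for the CPU-measurement
* counter facility: query the facility (qctri) to pick the counter first
* version (cfvn) and counter second/crypto version (csvn) event lists,
* select the model-specific list from the machine type, and merge all
* three into cpumcf_pmu_events_group. The resulting attributes show up
* in sysfs under the counter facility PMU (commonly named "cpum_cf";
* the PMU itself is registered elsewhere, not in this file).
*/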
__init const struct attribute_group **cpumf_cf_event_group(void)
{
struct attribute **combined, **model, **cfvn, **csvn;
struct attribute *none[] = { NULL };
struct cpumf_ctr_info ci;
struct cpuid cpu_id;
/* Determine generic counters set(s) */
qctri(&ci);
switch (ci.cfvn) {
case 1:
cfvn = cpumcf_fvn1_pmu_event_attr;
break;
case 3:
cfvn = cpumcf_fvn3_pmu_event_attr;
break;
default:
cfvn = none;
}
/* Determine version specific crypto set */
switch (ci.csvn) {
case 1 ... 5:
csvn = cpumcf_svn_12345_pmu_event_attr;
break;
case 6 ... 7:
csvn = cpumcf_svn_67_pmu_event_attr;
break;
default:
csvn = none;
}
/* Determine model-specific counter set(s) */
get_cpu_id(&cpu_id);
switch (cpu_id.machine) {
case 0x2097:
case 0x2098:
model = cpumcf_z10_pmu_event_attr;
break;
case 0x2817:
case 0x2818:
model = cpumcf_z196_pmu_event_attr;
break;
case 0x2827:
case 0x2828:
model = cpumcf_zec12_pmu_event_attr;
break;
case 0x2964:
case 0x2965:
model = cpumcf_z13_pmu_event_attr;
break;
case 0x3906:
case 0x3907:
model = cpumcf_z14_pmu_event_attr;
break;
case 0x8561:
case 0x8562:
model = cpumcf_z15_pmu_event_attr;
break;
case 0x3931:
case 0x3932:
model = cpumcf_z16_pmu_event_attr;
break;
default:
model = none;
break;
}
combined = merge_attr(cfvn, csvn, model);
if (combined)
cpumcf_pmu_events_group.attrs = combined;
return cpumcf_pmu_attr_groups;
}
/* ===== end of linux-master: arch/s390/kernel/perf_cpum_cf_events.c ===== */
// SPDX-License-Identifier: GPL-2.0
#undef __s390x__
#include <linux/audit_arch.h>
#include <asm/unistd.h>
#include "audit.h"
unsigned s390_dir_class[] = {
#include <asm-generic/audit_dir_write.h>
~0U
};
unsigned s390_chattr_class[] = {
#include <asm-generic/audit_change_attr.h>
~0U
};
unsigned s390_write_class[] = {
#include <asm-generic/audit_write.h>
~0U
};
unsigned s390_read_class[] = {
#include <asm-generic/audit_read.h>
~0U
};
unsigned s390_signal_class[] = {
#include <asm-generic/audit_signal.h>
~0U
};
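/* Map a 31-bit compat system call number to its audit syscall class. */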
int s390_classify_syscall(unsigned syscall)
{
switch(syscall) {
case __NR_open:
return AUDITSC_OPEN;
case __NR_openat:
return AUDITSC_OPENAT;
case __NR_socketcall:
return AUDITSC_SOCKETCALL;
case __NR_execve:
return AUDITSC_EXECVE;
case __NR_openat2:
return AUDITSC_OPENAT2;
default:
return AUDITSC_COMPAT;
}
}
/* ===== end of linux-master: arch/s390/kernel/compat_audit.c ===== */
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2004, 2011
* Author(s): Martin Schwidefsky <[email protected]>,
* Holger Smolinski <[email protected]>,
* Thomas Spatzier <[email protected]>,
*
* This file contains interrupt related functions.
*/
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/profile.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/ftrace.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/irq.h>
#include <linux/entry-common.h>
#include <asm/irq_regs.h>
#include <asm/cputime.h>
#include <asm/lowcore.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
#include <asm/stacktrace.h>
#include <asm/softirq_stack.h>
#include "entry.h"
DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_stat, irq_stat);
EXPORT_PER_CPU_SYMBOL_GPL(irq_stat);
struct irq_class {
int irq;
char *name;
char *desc;
};
/*
* The list of "main" irq classes on s390. This is the list of interrupts
* that appear both in /proc/stat ("intr" line) and /proc/interrupts.
* Historically only external and I/O interrupts have been part of /proc/stat.
* We can't add the split external and I/O sub classes since the first field
* in the "intr" line in /proc/stat is supposed to be the sum of all other
* fields.
* Since the external and I/O interrupt fields are already sums, we would end
* up with a sum that accounts for each interrupt twice.
*/
static const struct irq_class irqclass_main_desc[NR_IRQS_BASE] = {
{.irq = EXT_INTERRUPT, .name = "EXT"},
{.irq = IO_INTERRUPT, .name = "I/O"},
{.irq = THIN_INTERRUPT, .name = "AIO"},
};
/*
* The list of split external and I/O interrupts that appear only in
* /proc/interrupts.
* In addition this list contains non external / I/O events like NMIs.
*/
static const struct irq_class irqclass_sub_desc[] = {
{.irq = IRQEXT_CLK, .name = "CLK", .desc = "[EXT] Clock Comparator"},
{.irq = IRQEXT_EXC, .name = "EXC", .desc = "[EXT] External Call"},
{.irq = IRQEXT_EMS, .name = "EMS", .desc = "[EXT] Emergency Signal"},
{.irq = IRQEXT_TMR, .name = "TMR", .desc = "[EXT] CPU Timer"},
{.irq = IRQEXT_TLA, .name = "TAL", .desc = "[EXT] Timing Alert"},
{.irq = IRQEXT_PFL, .name = "PFL", .desc = "[EXT] Pseudo Page Fault"},
{.irq = IRQEXT_DSD, .name = "DSD", .desc = "[EXT] DASD Diag"},
{.irq = IRQEXT_VRT, .name = "VRT", .desc = "[EXT] Virtio"},
{.irq = IRQEXT_SCP, .name = "SCP", .desc = "[EXT] Service Call"},
{.irq = IRQEXT_IUC, .name = "IUC", .desc = "[EXT] IUCV"},
{.irq = IRQEXT_CMS, .name = "CMS", .desc = "[EXT] CPU-Measurement: Sampling"},
{.irq = IRQEXT_CMC, .name = "CMC", .desc = "[EXT] CPU-Measurement: Counter"},
{.irq = IRQEXT_FTP, .name = "FTP", .desc = "[EXT] HMC FTP Service"},
{.irq = IRQIO_CIO, .name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt"},
{.irq = IRQIO_DAS, .name = "DAS", .desc = "[I/O] DASD"},
{.irq = IRQIO_C15, .name = "C15", .desc = "[I/O] 3215"},
{.irq = IRQIO_C70, .name = "C70", .desc = "[I/O] 3270"},
{.irq = IRQIO_TAP, .name = "TAP", .desc = "[I/O] Tape"},
{.irq = IRQIO_VMR, .name = "VMR", .desc = "[I/O] Unit Record Devices"},
{.irq = IRQIO_LCS, .name = "LCS", .desc = "[I/O] LCS"},
{.irq = IRQIO_CTC, .name = "CTC", .desc = "[I/O] CTC"},
{.irq = IRQIO_ADM, .name = "ADM", .desc = "[I/O] EADM Subchannel"},
{.irq = IRQIO_CSC, .name = "CSC", .desc = "[I/O] CHSC Subchannel"},
{.irq = IRQIO_VIR, .name = "VIR", .desc = "[I/O] Virtual I/O Devices"},
{.irq = IRQIO_QAI, .name = "QAI", .desc = "[AIO] QDIO Adapter Interrupt"},
{.irq = IRQIO_APB, .name = "APB", .desc = "[AIO] AP Bus"},
{.irq = IRQIO_PCF, .name = "PCF", .desc = "[AIO] PCI Floating Interrupt"},
{.irq = IRQIO_PCD, .name = "PCD", .desc = "[AIO] PCI Directed Interrupt"},
{.irq = IRQIO_MSI, .name = "MSI", .desc = "[AIO] MSI Interrupt"},
{.irq = IRQIO_VAI, .name = "VAI", .desc = "[AIO] Virtual I/O Devices AI"},
{.irq = IRQIO_GAL, .name = "GAL", .desc = "[AIO] GIB Alert"},
{.irq = NMI_NMI, .name = "NMI", .desc = "[NMI] Machine Check"},
{.irq = CPU_RST, .name = "RST", .desc = "[CPU] CPU Restart"},
};
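/*
* Handle one interrupt source: serve an expired clock comparator first,
* then hand the interrupt number to the generic IRQ layer.
*/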
static void do_IRQ(struct pt_regs *regs, int irq)
{
if (tod_after_eq(S390_lowcore.int_clock,
S390_lowcore.clock_comparator))
/* Serve timer interrupts first. */
clock_comparator_work();
generic_handle_irq(irq);
}
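/* Return nonzero if the current stack frame lies on this CPU's async stack. */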
static int on_async_stack(void)
{
unsigned long frame = current_frame_address();
return ((S390_lowcore.async_stack ^ frame) & ~(THREAD_SIZE - 1)) == 0;
}
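/* Run do_IRQ(), switching to the async stack first if not already on it. */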
static void do_irq_async(struct pt_regs *regs, int irq)
{
if (on_async_stack()) {
do_IRQ(regs, irq);
} else {
call_on_stack(2, S390_lowcore.async_stack, void, do_IRQ,
struct pt_regs *, regs, int, irq);
}
}
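/*
* Use TEST PENDING INTERRUPTION (tpi) to check whether another I/O
* interrupt is pending and return the resulting condition code.
*/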
static int irq_pending(struct pt_regs *regs)
{
int cc;
asm volatile("tpi 0\n"
"ipm %0" : "=d" (cc) : : "cc");
return cc >> 28;
}
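/*
* Entry point for I/O interrupts: account for entry from user space or
* idle, dispatch adapter (thin) or classic I/O interrupts, and on LPAR
* keep going as long as further I/O interrupts are pending.
*/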
void noinstr do_io_irq(struct pt_regs *regs)
{
irqentry_state_t state = irqentry_enter(regs);
struct pt_regs *old_regs = set_irq_regs(regs);
bool from_idle;
irq_enter_rcu();
if (user_mode(regs)) {
update_timer_sys();
if (static_branch_likely(&cpu_has_bear))
current->thread.last_break = regs->last_break;
}
from_idle = test_and_clear_cpu_flag(CIF_ENABLED_WAIT);
if (from_idle)
account_idle_time_irq();
do {
regs->tpi_info = S390_lowcore.tpi_info;
if (S390_lowcore.tpi_info.adapter_IO)
do_irq_async(regs, THIN_INTERRUPT);
else
do_irq_async(regs, IO_INTERRUPT);
} while (MACHINE_IS_LPAR && irq_pending(regs));
irq_exit_rcu();
set_irq_regs(old_regs);
irqentry_exit(regs, state);
if (from_idle)
regs->psw.mask &= ~(PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_WAIT);
}
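/*
* Entry point for external interrupts: copy the interruption code and
* parameters into pt_regs and dispatch the EXT_INTERRUPT handler.
*/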
void noinstr do_ext_irq(struct pt_regs *regs)
{
irqentry_state_t state = irqentry_enter(regs);
struct pt_regs *old_regs = set_irq_regs(regs);
bool from_idle;
irq_enter_rcu();
if (user_mode(regs)) {
update_timer_sys();
if (static_branch_likely(&cpu_has_bear))
current->thread.last_break = regs->last_break;
}
regs->int_code = S390_lowcore.ext_int_code_addr;
regs->int_parm = S390_lowcore.ext_params;
regs->int_parm_long = S390_lowcore.ext_params2;
from_idle = test_and_clear_cpu_flag(CIF_ENABLED_WAIT);
if (from_idle)
account_idle_time_irq();
do_irq_async(regs, EXT_INTERRUPT);
irq_exit_rcu();
set_irq_regs(old_regs);
irqentry_exit(regs, state);
if (from_idle)
regs->psw.mask &= ~(PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_WAIT);
}
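/* Print one /proc/interrupts line for an MSI interrupt descriptor. */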
static void show_msi_interrupt(struct seq_file *p, int irq)
{
struct irq_desc *desc;
unsigned long flags;
int cpu;
rcu_read_lock();
desc = irq_to_desc(irq);
if (!desc)
goto out;
raw_spin_lock_irqsave(&desc->lock, flags);
seq_printf(p, "%3d: ", irq);
for_each_online_cpu(cpu)
seq_printf(p, "%10u ", irq_desc_kstat_cpu(desc, cpu));
if (desc->irq_data.chip)
seq_printf(p, " %8s", desc->irq_data.chip->name);
if (desc->action)
seq_printf(p, " %s", desc->action->name);
seq_putc(p, '\n');
raw_spin_unlock_irqrestore(&desc->lock, flags);
out:
rcu_read_unlock();
}
/*
* show_interrupts is needed by /proc/interrupts.
*/
int show_interrupts(struct seq_file *p, void *v)
{
int index = *(loff_t *) v;
int cpu, irq;
cpus_read_lock();
if (index == 0) {
seq_puts(p, " ");
for_each_online_cpu(cpu)
seq_printf(p, "CPU%-8d", cpu);
seq_putc(p, '\n');
}
if (index < NR_IRQS_BASE) {
seq_printf(p, "%s: ", irqclass_main_desc[index].name);
irq = irqclass_main_desc[index].irq;
for_each_online_cpu(cpu)
seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));
seq_putc(p, '\n');
goto out;
}
if (index < nr_irqs) {
show_msi_interrupt(p, index);
goto out;
}
for (index = 0; index < NR_ARCH_IRQS; index++) {
seq_printf(p, "%s: ", irqclass_sub_desc[index].name);
irq = irqclass_sub_desc[index].irq;
for_each_online_cpu(cpu)
seq_printf(p, "%10u ",
per_cpu(irq_stat, cpu).irqs[irq]);
if (irqclass_sub_desc[index].desc)
seq_printf(p, " %s", irqclass_sub_desc[index].desc);
seq_putc(p, '\n');
}
out:
cpus_read_unlock();
return 0;
}
unsigned int arch_dynirq_lower_bound(unsigned int from)
{
return from < NR_IRQS_BASE ? NR_IRQS_BASE : from;
}
/*
* ext_int_hash[index] is the list head for all external interrupts that hash
* to this index.
*/
static struct hlist_head ext_int_hash[32] ____cacheline_aligned;
struct ext_int_info {
ext_int_handler_t handler;
struct hlist_node entry;
struct rcu_head rcu;
u16 code;
};
/* ext_int_hash_lock protects the handler lists for external interrupts */
static DEFINE_SPINLOCK(ext_int_hash_lock);
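/* Hash an external interruption code into an index into ext_int_hash[]. */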
static inline int ext_hash(u16 code)
{
BUILD_BUG_ON(!is_power_of_2(ARRAY_SIZE(ext_int_hash)));
return (code + (code >> 9)) & (ARRAY_SIZE(ext_int_hash) - 1);
}
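/*
* Register a handler for the given external interruption code. Several
* handlers may be registered for the same code; they are kept on an
* RCU-protected hash list and all of them are called when the interrupt
* arrives.
*/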
int register_external_irq(u16 code, ext_int_handler_t handler)
{
struct ext_int_info *p;
unsigned long flags;
int index;
p = kmalloc(sizeof(*p), GFP_ATOMIC);
if (!p)
return -ENOMEM;
p->code = code;
p->handler = handler;
index = ext_hash(code);
spin_lock_irqsave(&ext_int_hash_lock, flags);
hlist_add_head_rcu(&p->entry, &ext_int_hash[index]);
spin_unlock_irqrestore(&ext_int_hash_lock, flags);
return 0;
}
EXPORT_SYMBOL(register_external_irq);
int unregister_external_irq(u16 code, ext_int_handler_t handler)
{
struct ext_int_info *p;
unsigned long flags;
int index = ext_hash(code);
spin_lock_irqsave(&ext_int_hash_lock, flags);
hlist_for_each_entry_rcu(p, &ext_int_hash[index], entry) {
if (p->code == code && p->handler == handler) {
hlist_del_rcu(&p->entry);
kfree_rcu(p, rcu);
}
}
spin_unlock_irqrestore(&ext_int_hash_lock, flags);
return 0;
}
EXPORT_SYMBOL(unregister_external_irq);
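/*
* Common action for EXT_INTERRUPT: look up the interruption code in the
* hash table and invoke every handler registered for it.
*/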
static irqreturn_t do_ext_interrupt(int irq, void *dummy)
{
struct pt_regs *regs = get_irq_regs();
struct ext_code ext_code;
struct ext_int_info *p;
int index;
ext_code.int_code = regs->int_code;
if (ext_code.code != EXT_IRQ_CLK_COMP)
set_cpu_flag(CIF_NOHZ_DELAY);
index = ext_hash(ext_code.code);
rcu_read_lock();
hlist_for_each_entry_rcu(p, &ext_int_hash[index], entry) {
if (unlikely(p->code != ext_code.code))
continue;
p->handler(ext_code, regs->int_parm, regs->int_parm_long);
}
rcu_read_unlock();
return IRQ_HANDLED;
}
static void __init init_ext_interrupts(void)
{
int idx;
for (idx = 0; idx < ARRAY_SIZE(ext_int_hash); idx++)
INIT_HLIST_HEAD(&ext_int_hash[idx]);
irq_set_chip_and_handler(EXT_INTERRUPT,
&dummy_irq_chip, handle_percpu_irq);
if (request_irq(EXT_INTERRUPT, do_ext_interrupt, 0, "EXT", NULL))
panic("Failed to register EXT interrupt\n");
}
void __init init_IRQ(void)
{
BUILD_BUG_ON(ARRAY_SIZE(irqclass_sub_desc) != NR_ARCH_IRQS);
init_cio_interrupts();
init_airq_interrupts();
init_ext_interrupts();
}
static DEFINE_SPINLOCK(irq_subclass_lock);
static unsigned char irq_subclass_refcount[64];
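/*
* Reference-counted enabling of an external interrupt subclass: the
* corresponding bit in control register 0 is set for the first user and
* cleared again when the last user unregisters.
*/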
void irq_subclass_register(enum irq_subclass subclass)
{
spin_lock(&irq_subclass_lock);
if (!irq_subclass_refcount[subclass])
ctl_set_bit(0, subclass);
irq_subclass_refcount[subclass]++;
spin_unlock(&irq_subclass_lock);
}
EXPORT_SYMBOL(irq_subclass_register);
void irq_subclass_unregister(enum irq_subclass subclass)
{
spin_lock(&irq_subclass_lock);
irq_subclass_refcount[subclass]--;
if (!irq_subclass_refcount[subclass])
ctl_clear_bit(0, subclass);
spin_unlock(&irq_subclass_lock);
}
EXPORT_SYMBOL(irq_subclass_unregister);
/* ===== end of linux-master: arch/s390/kernel/irq.c ===== */
// SPDX-License-Identifier: GPL-2.0
/*
* Performance event support - Processor Activity Instrumentation Extension
* Facility
*
* Copyright IBM Corp. 2022
* Author(s): Thomas Richter <[email protected]>
*/
#define KMSG_COMPONENT "pai_ext"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/perf_event.h>
#include <asm/ctl_reg.h>
#include <asm/pai.h>
#include <asm/debug.h>
#define PAIE1_CB_SZ 0x200 /* Size of PAIE1 control block */
#define PAIE1_CTRBLOCK_SZ 0x400 /* Size of PAIE1 counter blocks */
static debug_info_t *paiext_dbg;
static unsigned int paiext_cnt; /* Extracted with QPACI instruction */
struct pai_userdata {
u16 num;
u64 value;
} __packed;
/* Create the PAI extension 1 control block area.
* The PAI extension control block 1 is pointed to by lowcore
* address 0x1508 for each CPU. This control block is 512 bytes in size
* and requires a 512 byte boundary alignment.
*/
struct paiext_cb { /* PAI extension 1 control block */
u64 header; /* Not used */
u64 reserved1;
u64 acc; /* Addr to analytics counter control block */
u8 reserved2[488];
} __packed;
struct paiext_map {
unsigned long *area; /* Area for CPU to store counters */
struct pai_userdata *save; /* Area to store non-zero counters */
enum paievt_mode mode; /* Type of event */
unsigned int active_events; /* # of PAI Extension users */
refcount_t refcnt;
struct perf_event *event; /* Perf event for sampling */
struct paiext_cb *paiext_cb; /* PAI extension control block area */
};
struct paiext_mapptr {
struct paiext_map *mapptr;
};
static struct paiext_root { /* Anchor to per CPU data */
refcount_t refcnt; /* Overall active events */
struct paiext_mapptr __percpu *mapptr;
} paiext_root;
/* Free per CPU data when the last event is removed. */
static void paiext_root_free(void)
{
if (refcount_dec_and_test(&paiext_root.refcnt)) {
free_percpu(paiext_root.mapptr);
paiext_root.mapptr = NULL;
}
}
/* On initialization of the first event also allocate the per CPU data
* dynamically. Start with an array of pointers; the array size is the
* maximum number of possible CPUs, which might be larger than the number
* of CPUs currently online.
*/
static int paiext_root_alloc(void)
{
if (!refcount_inc_not_zero(&paiext_root.refcnt)) {
/* The memory is already zeroed. */
paiext_root.mapptr = alloc_percpu(struct paiext_mapptr);
if (!paiext_root.mapptr) {
/* Returning without refcnt adjustment is ok. The
* error code is handled by paiext_alloc() which
* decrements refcnt when an event can not be
* created.
*/
return -ENOMEM;
}
refcount_set(&paiext_root.refcnt, 1);
}
return 0;
}
/* Protects against concurrent increments of the sampler and counter
 * members at the same time and prohibits concurrent execution of
 * counting and sampling events.
 * Ensures that the analytics counter block is deallocated only when
 * the number of sampling and counting events on that CPU is zero.
* For details see paiext_alloc().
*/
static DEFINE_MUTEX(paiext_reserve_mutex);
/* Free all memory allocated for event counting/sampling setup */
static void paiext_free(struct paiext_mapptr *mp)
{
kfree(mp->mapptr->area);
kfree(mp->mapptr->paiext_cb);
kvfree(mp->mapptr->save);
kfree(mp->mapptr);
mp->mapptr = NULL;
}
/* Release the PMU if event is the last perf event */
static void paiext_event_destroy(struct perf_event *event)
{
struct paiext_mapptr *mp = per_cpu_ptr(paiext_root.mapptr, event->cpu);
struct paiext_map *cpump = mp->mapptr;
mutex_lock(&paiext_reserve_mutex);
cpump->event = NULL;
if (refcount_dec_and_test(&cpump->refcnt)) /* Last reference gone */
paiext_free(mp);
paiext_root_free();
mutex_unlock(&paiext_reserve_mutex);
debug_sprintf_event(paiext_dbg, 4, "%s cpu %d mapptr %p\n", __func__,
event->cpu, mp->mapptr);
}
/* Used to avoid races in checking concurrent access of counting and
* sampling for pai_extension events.
*
* Only one instance of event pai_ext/NNPA_ALL/ for sampling is
* allowed and when this event is running, no counting event is allowed.
* Several counting events are allowed in parallel, but no sampling event
* is allowed while one (or more) counting events are running.
*
* This function is called in process context and it is safe to block.
 * When the event initialization function fails, no other callback will
* be invoked.
*
* Allocate the memory for the event.
*/
static int paiext_alloc(struct perf_event_attr *a, struct perf_event *event)
{
struct paiext_mapptr *mp;
struct paiext_map *cpump;
int rc;
mutex_lock(&paiext_reserve_mutex);
rc = paiext_root_alloc();
if (rc)
goto unlock;
mp = per_cpu_ptr(paiext_root.mapptr, event->cpu);
cpump = mp->mapptr;
if (!cpump) { /* Paiext_map allocated? */
rc = -ENOMEM;
cpump = kzalloc(sizeof(*cpump), GFP_KERNEL);
if (!cpump)
goto undo;
/* Allocate memory for counter area and counter extraction.
* These are
		 * - a 512 byte block that requires 512 byte boundary alignment.
		 * - a 1 KB block that requires 1 KB boundary alignment.
* Only the first counting event has to allocate the area.
*
* Note: This works with commit 59bb47985c1d by default.
* Backporting this to kernels without this commit might
* need adjustment.
*/
mp->mapptr = cpump;
cpump->area = kzalloc(PAIE1_CTRBLOCK_SZ, GFP_KERNEL);
cpump->paiext_cb = kzalloc(PAIE1_CB_SZ, GFP_KERNEL);
cpump->save = kvmalloc_array(paiext_cnt + 1,
sizeof(struct pai_userdata),
GFP_KERNEL);
if (!cpump->save || !cpump->area || !cpump->paiext_cb) {
paiext_free(mp);
goto undo;
}
refcount_set(&cpump->refcnt, 1);
cpump->mode = a->sample_period ? PAI_MODE_SAMPLING
: PAI_MODE_COUNTING;
} else {
		/* Multiple invocations: check what is active.
		 * Supported are multiple counting events or only one sampling
		 * event concurrently at any one time.
*/
if (cpump->mode == PAI_MODE_SAMPLING ||
(cpump->mode == PAI_MODE_COUNTING && a->sample_period)) {
rc = -EBUSY;
goto undo;
}
refcount_inc(&cpump->refcnt);
}
rc = 0;
cpump->event = event;
undo:
if (rc) {
/* Error in allocation of event, decrement anchor. Since
		 * the event is not created, its destroy() function is never
* invoked. Adjust the reference counter for the anchor.
*/
paiext_root_free();
}
unlock:
mutex_unlock(&paiext_reserve_mutex);
/* If rc is non-zero, no increment of counter/sampler was done. */
return rc;
}
/* The PAI extension 1 control block supports up to 128 entries. Return
* the index within PAIE1_CB given the event number. Also validate event
* number.
*/
static int paiext_event_valid(struct perf_event *event)
{
u64 cfg = event->attr.config;
if (cfg >= PAI_NNPA_BASE && cfg <= PAI_NNPA_BASE + paiext_cnt) {
/* Offset NNPA in paiext_cb */
event->hw.config_base = offsetof(struct paiext_cb, acc);
return 0;
}
return -EINVAL;
}
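/*
 * Worked example for the range check above (assuming PAI_NNPA_BASE is
 * 0x1800 as described by the attribute comment later in this file and
 * QPACI reported 27 counters): config values 0x1800 (NNPA_ALL) through
 * 0x181b (NNPA_ACCESSEXCEPT) are accepted, everything else is rejected
 * with -EINVAL.
 */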
/* Might be called on a different CPU than the one the event is intended for. */
static int paiext_event_init(struct perf_event *event)
{
struct perf_event_attr *a = &event->attr;
int rc;
/* PMU pai_ext registered as PERF_TYPE_RAW, check event type */
if (a->type != PERF_TYPE_RAW && event->pmu->type != a->type)
return -ENOENT;
/* PAI extension event must be valid and in supported range */
rc = paiext_event_valid(event);
if (rc)
return rc;
/* Allow only CPU wide operation, no process context for now. */
if (event->hw.target || event->cpu == -1)
return -ENOENT;
/* Allow only event NNPA_ALL for sampling. */
if (a->sample_period && a->config != PAI_NNPA_BASE)
return -EINVAL;
/* Prohibit exclude_user event selection */
if (a->exclude_user)
return -EINVAL;
rc = paiext_alloc(a, event);
if (rc)
return rc;
event->hw.last_tag = 0;
event->destroy = paiext_event_destroy;
if (a->sample_period) {
a->sample_period = 1;
a->freq = 0;
		/* Register for paiext_sched_task() to be called */
event->attach_state |= PERF_ATTACH_SCHED_CB;
/* Add raw data which are the memory mapped counters */
a->sample_type |= PERF_SAMPLE_RAW;
/* Turn off inheritance */
a->inherit = 0;
}
return 0;
}
static u64 paiext_getctr(struct paiext_map *cpump, int nr)
{
return cpump->area[nr];
}
/* Read the counter values. Return value from location in buffer. For event
 * NNPA_ALL sum up all counters.
*/
static u64 paiext_getdata(struct perf_event *event)
{
struct paiext_mapptr *mp = this_cpu_ptr(paiext_root.mapptr);
struct paiext_map *cpump = mp->mapptr;
u64 sum = 0;
int i;
if (event->attr.config != PAI_NNPA_BASE)
return paiext_getctr(cpump, event->attr.config - PAI_NNPA_BASE);
for (i = 1; i <= paiext_cnt; i++)
sum += paiext_getctr(cpump, i);
return sum;
}
static u64 paiext_getall(struct perf_event *event)
{
return paiext_getdata(event);
}
static void paiext_read(struct perf_event *event)
{
u64 prev, new, delta;
prev = local64_read(&event->hw.prev_count);
new = paiext_getall(event);
local64_set(&event->hw.prev_count, new);
delta = new - prev;
local64_add(delta, &event->count);
}
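/*
 * Example of the delta logic above: if the previous read saved
 * prev_count == 100 and the counters now sum to 130, a delta of 30 is
 * added to event->count and prev_count is advanced to 130.
 */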
static void paiext_start(struct perf_event *event, int flags)
{
u64 sum;
if (event->hw.last_tag)
return;
event->hw.last_tag = 1;
sum = paiext_getall(event); /* Get current value */
local64_set(&event->hw.prev_count, sum);
local64_set(&event->count, 0);
}
static int paiext_add(struct perf_event *event, int flags)
{
struct paiext_mapptr *mp = this_cpu_ptr(paiext_root.mapptr);
struct paiext_map *cpump = mp->mapptr;
struct paiext_cb *pcb = cpump->paiext_cb;
if (++cpump->active_events == 1) {
S390_lowcore.aicd = virt_to_phys(cpump->paiext_cb);
pcb->acc = virt_to_phys(cpump->area) | 0x1;
/* Enable CPU instruction lookup for PAIE1 control block */
__ctl_set_bit(0, 49);
debug_sprintf_event(paiext_dbg, 4, "%s 1508 %llx acc %llx\n",
__func__, S390_lowcore.aicd, pcb->acc);
}
if (flags & PERF_EF_START && !event->attr.sample_period) {
/* Only counting needs initial counter value */
paiext_start(event, PERF_EF_RELOAD);
}
event->hw.state = 0;
if (event->attr.sample_period) {
cpump->event = event;
perf_sched_cb_inc(event->pmu);
}
return 0;
}
static void paiext_stop(struct perf_event *event, int flags)
{
paiext_read(event);
event->hw.state = PERF_HES_STOPPED;
}
static void paiext_del(struct perf_event *event, int flags)
{
struct paiext_mapptr *mp = this_cpu_ptr(paiext_root.mapptr);
struct paiext_map *cpump = mp->mapptr;
struct paiext_cb *pcb = cpump->paiext_cb;
if (event->attr.sample_period)
perf_sched_cb_dec(event->pmu);
if (!event->attr.sample_period) {
/* Only counting needs to read counter */
paiext_stop(event, PERF_EF_UPDATE);
}
if (--cpump->active_events == 0) {
/* Disable CPU instruction lookup for PAIE1 control block */
__ctl_clear_bit(0, 49);
pcb->acc = 0;
S390_lowcore.aicd = 0;
debug_sprintf_event(paiext_dbg, 4, "%s 1508 %llx acc %llx\n",
__func__, S390_lowcore.aicd, pcb->acc);
}
}
/* Create raw data and save it in buffer. Returns number of bytes copied.
 * Saves only nonzero counter entries of the form
* 2 bytes: Number of counter
* 8 bytes: Value of counter
*/
static size_t paiext_copy(struct paiext_map *cpump)
{
struct pai_userdata *userdata = cpump->save;
int i, outidx = 0;
for (i = 1; i <= paiext_cnt; i++) {
u64 val = paiext_getctr(cpump, i);
if (val) {
userdata[outidx].num = i;
userdata[outidx].value = val;
outidx++;
}
}
return outidx * sizeof(*userdata);
}
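/*
 * Consumer-side sketch (an assumption, not part of this file): a
 * user-space tool that receives the PERF_SAMPLE_RAW payload produced
 * above can walk it as an array of struct pai_userdata pairs:
 *
 *	struct pai_userdata *p = raw_data;
 *	size_t n = raw_size / sizeof(*p);
 *	size_t k;
 *
 *	for (k = 0; k < n; k++)
 *		printf("counter %u = %llu\n", p[k].num,
 *		       (unsigned long long)p[k].value);
 */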
/* Write sample when one or more counter values are nonzero.
*
 * Note: The functions paiext_sched_task() and paiext_push_sample() are not
* invoked after function paiext_del() has been called because of function
* perf_sched_cb_dec().
 * The functions paiext_sched_task() and paiext_push_sample() are only
 * called when sampling is active. Function perf_sched_cb_inc()
 * has been invoked to install function paiext_sched_task() as callback
* to run at context switch time (see paiext_add()).
*
* This causes function perf_event_context_sched_out() and
 * perf_event_context_sched_in() to check whether the PMU has installed a
* sched_task() callback. That callback is not active after paiext_del()
* returns and has deleted the event on that CPU.
*/
static int paiext_push_sample(void)
{
struct paiext_mapptr *mp = this_cpu_ptr(paiext_root.mapptr);
struct paiext_map *cpump = mp->mapptr;
struct perf_event *event = cpump->event;
struct perf_sample_data data;
struct perf_raw_record raw;
struct pt_regs regs;
size_t rawsize;
int overflow;
rawsize = paiext_copy(cpump);
if (!rawsize) /* No incremented counters */
return 0;
/* Setup perf sample */
	memset(&regs, 0, sizeof(regs));
memset(&raw, 0, sizeof(raw));
memset(&data, 0, sizeof(data));
perf_sample_data_init(&data, 0, event->hw.last_period);
if (event->attr.sample_type & PERF_SAMPLE_TID) {
data.tid_entry.pid = task_tgid_nr(current);
data.tid_entry.tid = task_pid_nr(current);
}
if (event->attr.sample_type & PERF_SAMPLE_TIME)
data.time = event->clock();
if (event->attr.sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
data.id = event->id;
if (event->attr.sample_type & PERF_SAMPLE_CPU)
data.cpu_entry.cpu = smp_processor_id();
if (event->attr.sample_type & PERF_SAMPLE_RAW) {
raw.frag.size = rawsize;
raw.frag.data = cpump->save;
perf_sample_save_raw_data(&data, &raw);
}
	overflow = perf_event_overflow(event, &data, &regs);
perf_event_update_userpage(event);
	/* Clear counter area after read */
memset(cpump->area, 0, PAIE1_CTRBLOCK_SZ);
return overflow;
}
/* Called on schedule-in and schedule-out. No access to event structure,
* but for sampling only event NNPA_ALL is allowed.
*/
static void paiext_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
{
/* We started with a clean page on event installation. So read out
* results on schedule_out and if page was dirty, clear values.
*/
if (!sched_in)
paiext_push_sample();
}
/* Attribute definitions for pai extension1 interface. As with other CPU
* Measurement Facilities, there is one attribute per mapped counter.
* The number of mapped counters may vary per machine generation. Use
* the QUERY PROCESSOR ACTIVITY COUNTER INFORMATION (QPACI) instruction
 * to determine the number of mapped counters. The instruction returns
 * a positive number, which is the highest number of supported counters.
 * All counters less than this number are also supported; there are no
* holes. A returned number of zero means no support for mapped counters.
*
* The identification of the counter is a unique number. The chosen range
* is 0x1800 + offset in mapped kernel page.
 * All CPU Measurement Facility counter identifiers must be unique and
 * the numbers from 0 to 496 are already used for the CPU Measurement
 * Counter facility. Numbers 0x1000 to 0x103e are used for PAI cryptography
* counters.
* Numbers 0xb0000, 0xbc000 and 0xbd000 are already
* used for the CPU Measurement Sampling facility.
*/
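/*
 * Illustrative usage (assuming the perf tool is available): with the
 * sysfs attributes created below, the counters can be selected by name
 * or by raw config value, e.g.
 *
 *	perf stat -e pai_ext/NNPA_ALL/ -a -- sleep 1
 *	perf stat -e pai_ext/config=0x1800/ -a -- sleep 1
 *
 * Both forms select the NNPA_ALL summary counter.
 */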
PMU_FORMAT_ATTR(event, "config:0-63");
static struct attribute *paiext_format_attr[] = {
&format_attr_event.attr,
NULL,
};
static struct attribute_group paiext_events_group = {
.name = "events",
.attrs = NULL, /* Filled in attr_event_init() */
};
static struct attribute_group paiext_format_group = {
.name = "format",
.attrs = paiext_format_attr,
};
static const struct attribute_group *paiext_attr_groups[] = {
&paiext_events_group,
&paiext_format_group,
NULL,
};
/* Performance monitoring unit for mapped counters */
static struct pmu paiext = {
.task_ctx_nr = perf_invalid_context,
.event_init = paiext_event_init,
.add = paiext_add,
.del = paiext_del,
.start = paiext_start,
.stop = paiext_stop,
.read = paiext_read,
.sched_task = paiext_sched_task,
.attr_groups = paiext_attr_groups,
};
/* List of symbolic PAI extension 1 NNPA counter names. */
static const char * const paiext_ctrnames[] = {
[0] = "NNPA_ALL",
[1] = "NNPA_ADD",
[2] = "NNPA_SUB",
[3] = "NNPA_MUL",
[4] = "NNPA_DIV",
[5] = "NNPA_MIN",
[6] = "NNPA_MAX",
[7] = "NNPA_LOG",
[8] = "NNPA_EXP",
[9] = "NNPA_IBM_RESERVED_9",
[10] = "NNPA_RELU",
[11] = "NNPA_TANH",
[12] = "NNPA_SIGMOID",
[13] = "NNPA_SOFTMAX",
[14] = "NNPA_BATCHNORM",
[15] = "NNPA_MAXPOOL2D",
[16] = "NNPA_AVGPOOL2D",
[17] = "NNPA_LSTMACT",
[18] = "NNPA_GRUACT",
[19] = "NNPA_CONVOLUTION",
[20] = "NNPA_MATMUL_OP",
[21] = "NNPA_MATMUL_OP_BCAST23",
[22] = "NNPA_SMALLBATCH",
[23] = "NNPA_LARGEDIM",
[24] = "NNPA_SMALLTENSOR",
[25] = "NNPA_1MFRAME",
[26] = "NNPA_2GFRAME",
[27] = "NNPA_ACCESSEXCEPT",
};
static void __init attr_event_free(struct attribute **attrs, int num)
{
struct perf_pmu_events_attr *pa;
struct device_attribute *dap;
int i;
for (i = 0; i < num; i++) {
dap = container_of(attrs[i], struct device_attribute, attr);
pa = container_of(dap, struct perf_pmu_events_attr, attr);
kfree(pa);
}
kfree(attrs);
}
static int __init attr_event_init_one(struct attribute **attrs, int num)
{
struct perf_pmu_events_attr *pa;
pa = kzalloc(sizeof(*pa), GFP_KERNEL);
if (!pa)
return -ENOMEM;
sysfs_attr_init(&pa->attr.attr);
pa->id = PAI_NNPA_BASE + num;
pa->attr.attr.name = paiext_ctrnames[num];
pa->attr.attr.mode = 0444;
pa->attr.show = cpumf_events_sysfs_show;
pa->attr.store = NULL;
attrs[num] = &pa->attr.attr;
return 0;
}
/* Create PMU sysfs event attributes on the fly. */
static int __init attr_event_init(void)
{
struct attribute **attrs;
int ret, i;
attrs = kmalloc_array(ARRAY_SIZE(paiext_ctrnames) + 1, sizeof(*attrs),
GFP_KERNEL);
if (!attrs)
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(paiext_ctrnames); i++) {
ret = attr_event_init_one(attrs, i);
if (ret) {
attr_event_free(attrs, i - 1);
return ret;
}
}
attrs[i] = NULL;
paiext_events_group.attrs = attrs;
return 0;
}
static int __init paiext_init(void)
{
struct qpaci_info_block ib;
int rc = -ENOMEM;
if (!test_facility(197))
return 0;
qpaci(&ib);
paiext_cnt = ib.num_nnpa;
if (paiext_cnt >= PAI_NNPA_MAXCTR)
paiext_cnt = PAI_NNPA_MAXCTR;
if (!paiext_cnt)
return 0;
rc = attr_event_init();
if (rc) {
pr_err("Creation of PMU " KMSG_COMPONENT " /sysfs failed\n");
return rc;
}
/* Setup s390dbf facility */
paiext_dbg = debug_register(KMSG_COMPONENT, 2, 256, 128);
if (!paiext_dbg) {
pr_err("Registration of s390dbf " KMSG_COMPONENT " failed\n");
rc = -ENOMEM;
goto out_init;
}
debug_register_view(paiext_dbg, &debug_sprintf_view);
rc = perf_pmu_register(&paiext, KMSG_COMPONENT, -1);
if (rc) {
pr_err("Registration of " KMSG_COMPONENT " PMU failed with "
"rc=%i\n", rc);
goto out_pmu;
}
return 0;
out_pmu:
debug_unregister_view(paiext_dbg, &debug_sprintf_view);
debug_unregister(paiext_dbg);
out_init:
attr_event_free(paiext_events_group.attrs,
ARRAY_SIZE(paiext_ctrnames) + 1);
return rc;
}
device_initcall(paiext_init);
| linux-master | arch/s390/kernel/perf_pai_ext.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/device.h>
#include <linux/cpu.h>
#include <asm/facility.h>
#include <asm/nospec-branch.h>
ssize_t cpu_show_spectre_v1(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "Mitigation: __user pointer sanitization\n");
}
ssize_t cpu_show_spectre_v2(struct device *dev,
struct device_attribute *attr, char *buf)
{
if (test_facility(156))
return sprintf(buf, "Mitigation: etokens\n");
if (nospec_uses_trampoline())
return sprintf(buf, "Mitigation: execute trampolines\n");
if (__test_facility(82, alt_stfle_fac_list))
return sprintf(buf, "Mitigation: limited branch prediction\n");
return sprintf(buf, "Vulnerable\n");
}
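/*
 * Illustrative usage: these callbacks back the generic sysfs files under
 * /sys/devices/system/cpu/vulnerabilities/, e.g.
 *
 *	$ cat /sys/devices/system/cpu/vulnerabilities/spectre_v2
 *	Mitigation: etokens
 *
 * The exact string depends on the facility bits and nospec state checked
 * above.
 */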
| linux-master | arch/s390/kernel/nospec-sysfs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Ptrace user space interface.
*
* Copyright IBM Corp. 1999, 2010
* Author(s): Denis Joseph Barrow
* Martin Schwidefsky ([email protected])
*/
#include "asm/ptrace.h"
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/elf.h>
#include <linux/regset.h>
#include <linux/seccomp.h>
#include <linux/compat.h>
#include <trace/syscall.h>
#include <asm/page.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/switch_to.h>
#include <asm/runtime_instr.h>
#include <asm/facility.h>
#include "entry.h"
#ifdef CONFIG_COMPAT
#include "compat_ptrace.h"
#endif
void update_cr_regs(struct task_struct *task)
{
struct pt_regs *regs = task_pt_regs(task);
struct thread_struct *thread = &task->thread;
struct per_regs old, new;
union ctlreg0 cr0_old, cr0_new;
union ctlreg2 cr2_old, cr2_new;
int cr0_changed, cr2_changed;
__ctl_store(cr0_old.val, 0, 0);
__ctl_store(cr2_old.val, 2, 2);
cr0_new = cr0_old;
cr2_new = cr2_old;
/* Take care of the enable/disable of transactional execution. */
if (MACHINE_HAS_TE) {
/* Set or clear transaction execution TXC bit 8. */
cr0_new.tcx = 1;
if (task->thread.per_flags & PER_FLAG_NO_TE)
cr0_new.tcx = 0;
/* Set or clear transaction execution TDC bits 62 and 63. */
cr2_new.tdc = 0;
if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) {
if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND)
cr2_new.tdc = 1;
else
cr2_new.tdc = 2;
}
}
/* Take care of enable/disable of guarded storage. */
if (MACHINE_HAS_GS) {
cr2_new.gse = 0;
if (task->thread.gs_cb)
cr2_new.gse = 1;
}
/* Load control register 0/2 iff changed */
cr0_changed = cr0_new.val != cr0_old.val;
cr2_changed = cr2_new.val != cr2_old.val;
if (cr0_changed)
__ctl_load(cr0_new.val, 0, 0);
if (cr2_changed)
__ctl_load(cr2_new.val, 2, 2);
/* Copy user specified PER registers */
new.control = thread->per_user.control;
new.start = thread->per_user.start;
new.end = thread->per_user.end;
/* merge TIF_SINGLE_STEP into user specified PER registers. */
if (test_tsk_thread_flag(task, TIF_SINGLE_STEP) ||
test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP)) {
if (test_tsk_thread_flag(task, TIF_BLOCK_STEP))
new.control |= PER_EVENT_BRANCH;
else
new.control |= PER_EVENT_IFETCH;
new.control |= PER_CONTROL_SUSPENSION;
new.control |= PER_EVENT_TRANSACTION_END;
if (test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP))
new.control |= PER_EVENT_IFETCH;
new.start = 0;
new.end = -1UL;
}
/* Take care of the PER enablement bit in the PSW. */
if (!(new.control & PER_EVENT_MASK)) {
regs->psw.mask &= ~PSW_MASK_PER;
return;
}
regs->psw.mask |= PSW_MASK_PER;
__ctl_store(old, 9, 11);
if (memcmp(&new, &old, sizeof(struct per_regs)) != 0)
__ctl_load(new, 9, 11);
}
void user_enable_single_step(struct task_struct *task)
{
clear_tsk_thread_flag(task, TIF_BLOCK_STEP);
set_tsk_thread_flag(task, TIF_SINGLE_STEP);
}
void user_disable_single_step(struct task_struct *task)
{
clear_tsk_thread_flag(task, TIF_BLOCK_STEP);
clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
}
void user_enable_block_step(struct task_struct *task)
{
set_tsk_thread_flag(task, TIF_SINGLE_STEP);
set_tsk_thread_flag(task, TIF_BLOCK_STEP);
}
/*
* Called by kernel/ptrace.c when detaching..
*
* Clear all debugging related fields.
*/
void ptrace_disable(struct task_struct *task)
{
memset(&task->thread.per_user, 0, sizeof(task->thread.per_user));
memset(&task->thread.per_event, 0, sizeof(task->thread.per_event));
clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
clear_tsk_thread_flag(task, TIF_PER_TRAP);
task->thread.per_flags = 0;
}
#define __ADDR_MASK 7
static inline unsigned long __peek_user_per(struct task_struct *child,
addr_t addr)
{
if (addr == offsetof(struct per_struct_kernel, cr9))
/* Control bits of the active per set. */
return test_thread_flag(TIF_SINGLE_STEP) ?
PER_EVENT_IFETCH : child->thread.per_user.control;
else if (addr == offsetof(struct per_struct_kernel, cr10))
/* Start address of the active per set. */
return test_thread_flag(TIF_SINGLE_STEP) ?
0 : child->thread.per_user.start;
else if (addr == offsetof(struct per_struct_kernel, cr11))
/* End address of the active per set. */
return test_thread_flag(TIF_SINGLE_STEP) ?
-1UL : child->thread.per_user.end;
else if (addr == offsetof(struct per_struct_kernel, bits))
/* Single-step bit. */
return test_thread_flag(TIF_SINGLE_STEP) ?
(1UL << (BITS_PER_LONG - 1)) : 0;
else if (addr == offsetof(struct per_struct_kernel, starting_addr))
/* Start address of the user specified per set. */
return child->thread.per_user.start;
else if (addr == offsetof(struct per_struct_kernel, ending_addr))
/* End address of the user specified per set. */
return child->thread.per_user.end;
else if (addr == offsetof(struct per_struct_kernel, perc_atmid))
/* PER code, ATMID and AI of the last PER trap */
return (unsigned long)
child->thread.per_event.cause << (BITS_PER_LONG - 16);
else if (addr == offsetof(struct per_struct_kernel, address))
/* Address of the last PER trap */
return child->thread.per_event.address;
else if (addr == offsetof(struct per_struct_kernel, access_id))
/* Access id of the last PER trap */
return (unsigned long)
child->thread.per_event.paid << (BITS_PER_LONG - 8);
return 0;
}
/*
* Read the word at offset addr from the user area of a process. The
* trouble here is that the information is littered over different
* locations. The process registers are found on the kernel stack,
* the floating point stuff and the trace settings are stored in
* the task structure. In addition the different structures in
* struct user contain pad bytes that should be read as zeroes.
* Lovely...
*/
static unsigned long __peek_user(struct task_struct *child, addr_t addr)
{
addr_t offset, tmp;
if (addr < offsetof(struct user, regs.acrs)) {
/*
* psw and gprs are stored on the stack
*/
tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr);
if (addr == offsetof(struct user, regs.psw.mask)) {
/* Return a clean psw mask. */
tmp &= PSW_MASK_USER | PSW_MASK_RI;
tmp |= PSW_USER_BITS;
}
} else if (addr < offsetof(struct user, regs.orig_gpr2)) {
/*
* access registers are stored in the thread structure
*/
offset = addr - offsetof(struct user, regs.acrs);
/*
* Very special case: old & broken 64 bit gdb reading
* from acrs[15]. Result is a 64 bit value. Read the
* 32 bit acrs[15] value and shift it by 32. Sick...
*/
if (addr == offsetof(struct user, regs.acrs[15]))
tmp = ((unsigned long) child->thread.acrs[15]) << 32;
else
tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);
} else if (addr == offsetof(struct user, regs.orig_gpr2)) {
/*
* orig_gpr2 is stored on the kernel stack
*/
tmp = (addr_t) task_pt_regs(child)->orig_gpr2;
} else if (addr < offsetof(struct user, regs.fp_regs)) {
/*
* prevent reads of padding hole between
* orig_gpr2 and fp_regs on s390.
*/
tmp = 0;
} else if (addr == offsetof(struct user, regs.fp_regs.fpc)) {
/*
* floating point control reg. is in the thread structure
*/
tmp = child->thread.fpu.fpc;
tmp <<= BITS_PER_LONG - 32;
} else if (addr < offsetof(struct user, regs.fp_regs) + sizeof(s390_fp_regs)) {
/*
* floating point regs. are either in child->thread.fpu
* or the child->thread.fpu.vxrs array
*/
offset = addr - offsetof(struct user, regs.fp_regs.fprs);
if (MACHINE_HAS_VX)
tmp = *(addr_t *)
((addr_t) child->thread.fpu.vxrs + 2*offset);
else
tmp = *(addr_t *)
((addr_t) child->thread.fpu.fprs + offset);
} else if (addr < offsetof(struct user, regs.per_info) + sizeof(per_struct)) {
/*
* Handle access to the per_info structure.
*/
addr -= offsetof(struct user, regs.per_info);
tmp = __peek_user_per(child, addr);
} else
tmp = 0;
return tmp;
}
static int
peek_user(struct task_struct *child, addr_t addr, addr_t data)
{
addr_t tmp, mask;
/*
* Stupid gdb peeks/pokes the access registers in 64 bit with
* an alignment of 4. Programmers from hell...
*/
mask = __ADDR_MASK;
if (addr >= offsetof(struct user, regs.acrs) &&
addr < offsetof(struct user, regs.orig_gpr2))
mask = 3;
if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
return -EIO;
tmp = __peek_user(child, addr);
return put_user(tmp, (addr_t __user *) data);
}
static inline void __poke_user_per(struct task_struct *child,
addr_t addr, addr_t data)
{
/*
* There are only three fields in the per_info struct that the
* debugger user can write to.
* 1) cr9: the debugger wants to set a new PER event mask
* 2) starting_addr: the debugger wants to set a new starting
* address to use with the PER event mask.
* 3) ending_addr: the debugger wants to set a new ending
* address to use with the PER event mask.
* The user specified PER event mask and the start and end
* addresses are used only if single stepping is not in effect.
* Writes to any other field in per_info are ignored.
*/
if (addr == offsetof(struct per_struct_kernel, cr9))
/* PER event mask of the user specified per set. */
child->thread.per_user.control =
data & (PER_EVENT_MASK | PER_CONTROL_MASK);
else if (addr == offsetof(struct per_struct_kernel, starting_addr))
/* Starting address of the user specified per set. */
child->thread.per_user.start = data;
else if (addr == offsetof(struct per_struct_kernel, ending_addr))
/* Ending address of the user specified per set. */
child->thread.per_user.end = data;
}
/*
* Write a word to the user area of a process at location addr. This
* operation does have an additional problem compared to peek_user.
 * Stores to the program status word and to the floating point
 * control register need to be checked for validity.
*/
static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
addr_t offset;
if (addr < offsetof(struct user, regs.acrs)) {
struct pt_regs *regs = task_pt_regs(child);
/*
* psw and gprs are stored on the stack
*/
if (addr == offsetof(struct user, regs.psw.mask)) {
unsigned long mask = PSW_MASK_USER;
mask |= is_ri_task(child) ? PSW_MASK_RI : 0;
if ((data ^ PSW_USER_BITS) & ~mask)
/* Invalid psw mask. */
return -EINVAL;
if ((data & PSW_MASK_ASC) == PSW_ASC_HOME)
/* Invalid address-space-control bits */
return -EINVAL;
if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))
/* Invalid addressing mode bits */
return -EINVAL;
}
if (test_pt_regs_flag(regs, PIF_SYSCALL) &&
addr == offsetof(struct user, regs.gprs[2])) {
struct pt_regs *regs = task_pt_regs(child);
regs->int_code = 0x20000 | (data & 0xffff);
}
		*(addr_t *)((addr_t) &regs->psw + addr) = data;
} else if (addr < offsetof(struct user, regs.orig_gpr2)) {
/*
* access registers are stored in the thread structure
*/
offset = addr - offsetof(struct user, regs.acrs);
/*
* Very special case: old & broken 64 bit gdb writing
* to acrs[15] with a 64 bit value. Ignore the lower
* half of the value and write the upper 32 bit to
* acrs[15]. Sick...
*/
if (addr == offsetof(struct user, regs.acrs[15]))
child->thread.acrs[15] = (unsigned int) (data >> 32);
else
*(addr_t *)((addr_t) &child->thread.acrs + offset) = data;
} else if (addr == offsetof(struct user, regs.orig_gpr2)) {
/*
* orig_gpr2 is stored on the kernel stack
*/
task_pt_regs(child)->orig_gpr2 = data;
} else if (addr < offsetof(struct user, regs.fp_regs)) {
/*
* prevent writes of padding hole between
* orig_gpr2 and fp_regs on s390.
*/
return 0;
} else if (addr == offsetof(struct user, regs.fp_regs.fpc)) {
/*
* floating point control reg. is in the thread structure
*/
if ((unsigned int) data != 0 ||
test_fp_ctl(data >> (BITS_PER_LONG - 32)))
return -EINVAL;
child->thread.fpu.fpc = data >> (BITS_PER_LONG - 32);
} else if (addr < offsetof(struct user, regs.fp_regs) + sizeof(s390_fp_regs)) {
/*
* floating point regs. are either in child->thread.fpu
* or the child->thread.fpu.vxrs array
*/
offset = addr - offsetof(struct user, regs.fp_regs.fprs);
if (MACHINE_HAS_VX)
*(addr_t *)((addr_t)
child->thread.fpu.vxrs + 2*offset) = data;
else
*(addr_t *)((addr_t)
child->thread.fpu.fprs + offset) = data;
} else if (addr < offsetof(struct user, regs.per_info) + sizeof(per_struct)) {
/*
* Handle access to the per_info structure.
*/
addr -= offsetof(struct user, regs.per_info);
__poke_user_per(child, addr, data);
}
return 0;
}
static int poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
addr_t mask;
/*
* Stupid gdb peeks/pokes the access registers in 64 bit with
* an alignment of 4. Programmers from hell indeed...
*/
mask = __ADDR_MASK;
if (addr >= offsetof(struct user, regs.acrs) &&
addr < offsetof(struct user, regs.orig_gpr2))
mask = 3;
if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
return -EIO;
return __poke_user(child, addr, data);
}
long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
ptrace_area parea;
int copied, ret;
switch (request) {
case PTRACE_PEEKUSR:
/* read the word at location addr in the USER area. */
return peek_user(child, addr, data);
case PTRACE_POKEUSR:
/* write the word at location addr in the USER area */
return poke_user(child, addr, data);
case PTRACE_PEEKUSR_AREA:
case PTRACE_POKEUSR_AREA:
if (copy_from_user(&parea, (void __force __user *) addr,
sizeof(parea)))
return -EFAULT;
addr = parea.kernel_addr;
data = parea.process_addr;
copied = 0;
while (copied < parea.len) {
if (request == PTRACE_PEEKUSR_AREA)
ret = peek_user(child, addr, data);
else {
addr_t utmp;
if (get_user(utmp,
(addr_t __force __user *) data))
return -EFAULT;
ret = poke_user(child, addr, utmp);
}
if (ret)
return ret;
addr += sizeof(unsigned long);
data += sizeof(unsigned long);
copied += sizeof(unsigned long);
}
return 0;
case PTRACE_GET_LAST_BREAK:
return put_user(child->thread.last_break, (unsigned long __user *)data);
case PTRACE_ENABLE_TE:
if (!MACHINE_HAS_TE)
return -EIO;
child->thread.per_flags &= ~PER_FLAG_NO_TE;
return 0;
case PTRACE_DISABLE_TE:
if (!MACHINE_HAS_TE)
return -EIO;
child->thread.per_flags |= PER_FLAG_NO_TE;
child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
return 0;
case PTRACE_TE_ABORT_RAND:
if (!MACHINE_HAS_TE || (child->thread.per_flags & PER_FLAG_NO_TE))
return -EIO;
switch (data) {
case 0UL:
child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
break;
case 1UL:
child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND_TEND;
break;
case 2UL:
child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND_TEND;
break;
default:
return -EINVAL;
}
return 0;
default:
return ptrace_request(child, request, addr, data);
}
}
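/*
 * Tracer-side sketch (an assumption about typical usage, not part of
 * this file): PTRACE_PEEKUSR_AREA transfers a block of the USER area in
 * one call instead of word-wise PTRACE_PEEKUSR. A 64-bit tracer could
 * read all general purpose registers like this:
 *
 *	unsigned long gprs_buf[16];
 *	ptrace_area parea = {
 *		.len          = sizeof(gprs_buf),
 *		.kernel_addr  = offsetof(struct user, regs.gprs),
 *		.process_addr = (unsigned long)gprs_buf,
 *	};
 *	ptrace(PTRACE_PEEKUSR_AREA, pid, &parea, NULL);
 *
 * The loop above then invokes peek_user() once per word of the area.
 */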
#ifdef CONFIG_COMPAT
/*
* Now the fun part starts... a 31 bit program running in the
* 31 bit emulation tracing another program. PTRACE_PEEKTEXT,
* PTRACE_PEEKDATA, PTRACE_POKETEXT and PTRACE_POKEDATA are easy
* to handle, the difference to the 64 bit versions of the requests
 * is that the access is done in multiples of 4 bytes instead of
 * 8 bytes (sizeof(unsigned long) on 31/64 bit).
 * The ugly parts are PTRACE_PEEKUSR, PTRACE_PEEKUSR_AREA,
* PTRACE_POKEUSR and PTRACE_POKEUSR_AREA. If the traced program
* is a 31 bit program too, the content of struct user can be
* emulated. A 31 bit program peeking into the struct user of
* a 64 bit program is a no-no.
*/
/*
* Same as peek_user_per but for a 31 bit program.
*/
static inline __u32 __peek_user_per_compat(struct task_struct *child,
addr_t addr)
{
if (addr == offsetof(struct compat_per_struct_kernel, cr9))
/* Control bits of the active per set. */
return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
PER_EVENT_IFETCH : child->thread.per_user.control;
else if (addr == offsetof(struct compat_per_struct_kernel, cr10))
/* Start address of the active per set. */
return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
0 : child->thread.per_user.start;
else if (addr == offsetof(struct compat_per_struct_kernel, cr11))
/* End address of the active per set. */
return test_thread_flag(TIF_SINGLE_STEP) ?
PSW32_ADDR_INSN : child->thread.per_user.end;
else if (addr == offsetof(struct compat_per_struct_kernel, bits))
/* Single-step bit. */
return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
0x80000000 : 0;
else if (addr == offsetof(struct compat_per_struct_kernel, starting_addr))
/* Start address of the user specified per set. */
return (__u32) child->thread.per_user.start;
else if (addr == offsetof(struct compat_per_struct_kernel, ending_addr))
/* End address of the user specified per set. */
return (__u32) child->thread.per_user.end;
else if (addr == offsetof(struct compat_per_struct_kernel, perc_atmid))
/* PER code, ATMID and AI of the last PER trap */
return (__u32) child->thread.per_event.cause << 16;
else if (addr == offsetof(struct compat_per_struct_kernel, address))
/* Address of the last PER trap */
return (__u32) child->thread.per_event.address;
else if (addr == offsetof(struct compat_per_struct_kernel, access_id))
/* Access id of the last PER trap */
return (__u32) child->thread.per_event.paid << 24;
return 0;
}
/*
* Same as peek_user but for a 31 bit program.
*/
static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
{
addr_t offset;
__u32 tmp;
if (addr < offsetof(struct compat_user, regs.acrs)) {
struct pt_regs *regs = task_pt_regs(child);
/*
* psw and gprs are stored on the stack
*/
if (addr == offsetof(struct compat_user, regs.psw.mask)) {
/* Fake a 31 bit psw mask. */
tmp = (__u32)(regs->psw.mask >> 32);
tmp &= PSW32_MASK_USER | PSW32_MASK_RI;
tmp |= PSW32_USER_BITS;
} else if (addr == offsetof(struct compat_user, regs.psw.addr)) {
/* Fake a 31 bit psw address. */
tmp = (__u32) regs->psw.addr |
(__u32)(regs->psw.mask & PSW_MASK_BA);
} else {
/* gpr 0-15 */
			tmp = *(__u32 *)((addr_t) &regs->psw + addr*2 + 4);
}
} else if (addr < offsetof(struct compat_user, regs.orig_gpr2)) {
/*
* access registers are stored in the thread structure
*/
offset = addr - offsetof(struct compat_user, regs.acrs);
tmp = *(__u32*)((addr_t) &child->thread.acrs + offset);
} else if (addr == offsetof(struct compat_user, regs.orig_gpr2)) {
/*
* orig_gpr2 is stored on the kernel stack
*/
tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4);
} else if (addr < offsetof(struct compat_user, regs.fp_regs)) {
/*
* prevent reads of padding hole between
* orig_gpr2 and fp_regs on s390.
*/
tmp = 0;
} else if (addr == offsetof(struct compat_user, regs.fp_regs.fpc)) {
/*
* floating point control reg. is in the thread structure
*/
tmp = child->thread.fpu.fpc;
} else if (addr < offsetof(struct compat_user, regs.fp_regs) + sizeof(s390_fp_regs)) {
/*
* floating point regs. are either in child->thread.fpu
* or the child->thread.fpu.vxrs array
*/
offset = addr - offsetof(struct compat_user, regs.fp_regs.fprs);
if (MACHINE_HAS_VX)
tmp = *(__u32 *)
((addr_t) child->thread.fpu.vxrs + 2*offset);
else
tmp = *(__u32 *)
((addr_t) child->thread.fpu.fprs + offset);
} else if (addr < offsetof(struct compat_user, regs.per_info) + sizeof(struct compat_per_struct_kernel)) {
/*
* Handle access to the per_info structure.
*/
addr -= offsetof(struct compat_user, regs.per_info);
tmp = __peek_user_per_compat(child, addr);
} else
tmp = 0;
return tmp;
}
static int peek_user_compat(struct task_struct *child,
addr_t addr, addr_t data)
{
__u32 tmp;
if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user) - 3)
return -EIO;
tmp = __peek_user_compat(child, addr);
return put_user(tmp, (__u32 __user *) data);
}
/*
* Same as poke_user_per but for a 31 bit program.
*/
static inline void __poke_user_per_compat(struct task_struct *child,
addr_t addr, __u32 data)
{
if (addr == offsetof(struct compat_per_struct_kernel, cr9))
/* PER event mask of the user specified per set. */
child->thread.per_user.control =
data & (PER_EVENT_MASK | PER_CONTROL_MASK);
else if (addr == offsetof(struct compat_per_struct_kernel, starting_addr))
/* Starting address of the user specified per set. */
child->thread.per_user.start = data;
else if (addr == offsetof(struct compat_per_struct_kernel, ending_addr))
/* Ending address of the user specified per set. */
child->thread.per_user.end = data;
}
/*
* Same as poke_user but for a 31 bit program.
*/
static int __poke_user_compat(struct task_struct *child,
addr_t addr, addr_t data)
{
__u32 tmp = (__u32) data;
addr_t offset;
if (addr < offsetof(struct compat_user, regs.acrs)) {
struct pt_regs *regs = task_pt_regs(child);
/*
* psw, gprs, acrs and orig_gpr2 are stored on the stack
*/
if (addr == offsetof(struct compat_user, regs.psw.mask)) {
__u32 mask = PSW32_MASK_USER;
mask |= is_ri_task(child) ? PSW32_MASK_RI : 0;
/* Build a 64 bit psw mask from 31 bit mask. */
if ((tmp ^ PSW32_USER_BITS) & ~mask)
/* Invalid psw mask. */
return -EINVAL;
if ((data & PSW32_MASK_ASC) == PSW32_ASC_HOME)
/* Invalid address-space-control bits */
return -EINVAL;
regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
(regs->psw.mask & PSW_MASK_BA) |
(__u64)(tmp & mask) << 32;
} else if (addr == offsetof(struct compat_user, regs.psw.addr)) {
/* Build a 64 bit psw address from 31 bit address. */
regs->psw.addr = (__u64) tmp & PSW32_ADDR_INSN;
/* Transfer 31 bit amode bit to psw mask. */
regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) |
(__u64)(tmp & PSW32_ADDR_AMODE);
} else {
if (test_pt_regs_flag(regs, PIF_SYSCALL) &&
addr == offsetof(struct compat_user, regs.gprs[2])) {
struct pt_regs *regs = task_pt_regs(child);
regs->int_code = 0x20000 | (data & 0xffff);
}
/* gpr 0-15 */
			*(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp;
}
} else if (addr < offsetof(struct compat_user, regs.orig_gpr2)) {
/*
* access registers are stored in the thread structure
*/
offset = addr - offsetof(struct compat_user, regs.acrs);
*(__u32*)((addr_t) &child->thread.acrs + offset) = tmp;
} else if (addr == offsetof(struct compat_user, regs.orig_gpr2)) {
/*
* orig_gpr2 is stored on the kernel stack
*/
*(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp;
} else if (addr < offsetof(struct compat_user, regs.fp_regs)) {
/*
		 * prevent writes of padding hole between
* orig_gpr2 and fp_regs on s390.
*/
return 0;
} else if (addr == offsetof(struct compat_user, regs.fp_regs.fpc)) {
/*
* floating point control reg. is in the thread structure
*/
if (test_fp_ctl(tmp))
return -EINVAL;
child->thread.fpu.fpc = data;
} else if (addr < offsetof(struct compat_user, regs.fp_regs) + sizeof(s390_fp_regs)) {
/*
* floating point regs. are either in child->thread.fpu
* or the child->thread.fpu.vxrs array
*/
offset = addr - offsetof(struct compat_user, regs.fp_regs.fprs);
if (MACHINE_HAS_VX)
*(__u32 *)((addr_t)
child->thread.fpu.vxrs + 2*offset) = tmp;
else
*(__u32 *)((addr_t)
child->thread.fpu.fprs + offset) = tmp;
} else if (addr < offsetof(struct compat_user, regs.per_info) + sizeof(struct compat_per_struct_kernel)) {
/*
* Handle access to the per_info structure.
*/
addr -= offsetof(struct compat_user, regs.per_info);
__poke_user_per_compat(child, addr, data);
}
return 0;
}
static int poke_user_compat(struct task_struct *child,
addr_t addr, addr_t data)
{
if (!is_compat_task() || (addr & 3) ||
addr > sizeof(struct compat_user) - 3)
return -EIO;
return __poke_user_compat(child, addr, data);
}
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
compat_ulong_t caddr, compat_ulong_t cdata)
{
unsigned long addr = caddr;
unsigned long data = cdata;
compat_ptrace_area parea;
int copied, ret;
switch (request) {
case PTRACE_PEEKUSR:
/* read the word at location addr in the USER area. */
return peek_user_compat(child, addr, data);
case PTRACE_POKEUSR:
/* write the word at location addr in the USER area */
return poke_user_compat(child, addr, data);
case PTRACE_PEEKUSR_AREA:
case PTRACE_POKEUSR_AREA:
if (copy_from_user(&parea, (void __force __user *) addr,
sizeof(parea)))
return -EFAULT;
addr = parea.kernel_addr;
data = parea.process_addr;
copied = 0;
while (copied < parea.len) {
if (request == PTRACE_PEEKUSR_AREA)
ret = peek_user_compat(child, addr, data);
else {
__u32 utmp;
if (get_user(utmp,
(__u32 __force __user *) data))
return -EFAULT;
ret = poke_user_compat(child, addr, utmp);
}
if (ret)
return ret;
addr += sizeof(unsigned int);
data += sizeof(unsigned int);
copied += sizeof(unsigned int);
}
return 0;
case PTRACE_GET_LAST_BREAK:
return put_user(child->thread.last_break, (unsigned int __user *)data);
}
return compat_ptrace_request(child, request, addr, data);
}
#endif
/*
* user_regset definitions.
*/
static int s390_regs_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
unsigned pos;
if (target == current)
save_access_regs(target->thread.acrs);
for (pos = 0; pos < sizeof(s390_regs); pos += sizeof(long))
membuf_store(&to, __peek_user(target, pos));
return 0;
}
static int s390_regs_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int rc = 0;
if (target == current)
save_access_regs(target->thread.acrs);
if (kbuf) {
const unsigned long *k = kbuf;
while (count > 0 && !rc) {
rc = __poke_user(target, pos, *k++);
count -= sizeof(*k);
pos += sizeof(*k);
}
} else {
const unsigned long __user *u = ubuf;
while (count > 0 && !rc) {
unsigned long word;
rc = __get_user(word, u++);
if (rc)
break;
rc = __poke_user(target, pos, word);
count -= sizeof(*u);
pos += sizeof(*u);
}
}
if (rc == 0 && target == current)
restore_access_regs(target->thread.acrs);
return rc;
}
static int s390_fpregs_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
_s390_fp_regs fp_regs;
if (target == current)
save_fpu_regs();
fp_regs.fpc = target->thread.fpu.fpc;
fpregs_store(&fp_regs, &target->thread.fpu);
return membuf_write(&to, &fp_regs, sizeof(fp_regs));
}
static int s390_fpregs_set(struct task_struct *target,
const struct user_regset *regset, unsigned int pos,
unsigned int count, const void *kbuf,
const void __user *ubuf)
{
int rc = 0;
freg_t fprs[__NUM_FPRS];
if (target == current)
save_fpu_regs();
if (MACHINE_HAS_VX)
convert_vx_to_fp(fprs, target->thread.fpu.vxrs);
else
memcpy(&fprs, target->thread.fpu.fprs, sizeof(fprs));
/* If setting FPC, must validate it first. */
if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
u32 ufpc[2] = { target->thread.fpu.fpc, 0 };
rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ufpc,
0, offsetof(s390_fp_regs, fprs));
if (rc)
return rc;
if (ufpc[1] != 0 || test_fp_ctl(ufpc[0]))
return -EINVAL;
target->thread.fpu.fpc = ufpc[0];
}
if (rc == 0 && count > 0)
rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
fprs, offsetof(s390_fp_regs, fprs), -1);
if (rc)
return rc;
if (MACHINE_HAS_VX)
convert_fp_to_vx(target->thread.fpu.vxrs, fprs);
else
memcpy(target->thread.fpu.fprs, &fprs, sizeof(fprs));
return rc;
}
static int s390_last_break_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
return membuf_store(&to, target->thread.last_break);
}
static int s390_last_break_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
return 0;
}
static int s390_tdb_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
struct pt_regs *regs = task_pt_regs(target);
size_t size;
if (!(regs->int_code & 0x200))
return -ENODATA;
size = sizeof(target->thread.trap_tdb.data);
return membuf_write(&to, target->thread.trap_tdb.data, size);
}
static int s390_tdb_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
return 0;
}
static int s390_vxrs_low_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
__u64 vxrs[__NUM_VXRS_LOW];
int i;
if (!MACHINE_HAS_VX)
return -ENODEV;
if (target == current)
save_fpu_regs();
for (i = 0; i < __NUM_VXRS_LOW; i++)
vxrs[i] = target->thread.fpu.vxrs[i].low;
return membuf_write(&to, vxrs, sizeof(vxrs));
}
static int s390_vxrs_low_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
__u64 vxrs[__NUM_VXRS_LOW];
int i, rc;
if (!MACHINE_HAS_VX)
return -ENODEV;
if (target == current)
save_fpu_regs();
for (i = 0; i < __NUM_VXRS_LOW; i++)
vxrs[i] = target->thread.fpu.vxrs[i].low;
rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
if (rc == 0)
for (i = 0; i < __NUM_VXRS_LOW; i++)
target->thread.fpu.vxrs[i].low = vxrs[i];
return rc;
}
static int s390_vxrs_high_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
if (!MACHINE_HAS_VX)
return -ENODEV;
if (target == current)
save_fpu_regs();
return membuf_write(&to, target->thread.fpu.vxrs + __NUM_VXRS_LOW,
__NUM_VXRS_HIGH * sizeof(__vector128));
}
static int s390_vxrs_high_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int rc;
if (!MACHINE_HAS_VX)
return -ENODEV;
if (target == current)
save_fpu_regs();
rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
target->thread.fpu.vxrs + __NUM_VXRS_LOW, 0, -1);
return rc;
}
static int s390_system_call_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
return membuf_store(&to, target->thread.system_call);
}
static int s390_system_call_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
unsigned int *data = &target->thread.system_call;
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
data, 0, sizeof(unsigned int));
}
static int s390_gs_cb_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
struct gs_cb *data = target->thread.gs_cb;
if (!MACHINE_HAS_GS)
return -ENODEV;
if (!data)
return -ENODATA;
if (target == current)
save_gs_cb(data);
return membuf_write(&to, data, sizeof(struct gs_cb));
}
static int s390_gs_cb_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
struct gs_cb gs_cb = { }, *data = NULL;
int rc;
if (!MACHINE_HAS_GS)
return -ENODEV;
if (!target->thread.gs_cb) {
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
}
if (!target->thread.gs_cb)
gs_cb.gsd = 25;
else if (target == current)
save_gs_cb(&gs_cb);
else
gs_cb = *target->thread.gs_cb;
rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&gs_cb, 0, sizeof(gs_cb));
if (rc) {
kfree(data);
return -EFAULT;
}
preempt_disable();
if (!target->thread.gs_cb)
target->thread.gs_cb = data;
*target->thread.gs_cb = gs_cb;
if (target == current) {
__ctl_set_bit(2, 4);
restore_gs_cb(target->thread.gs_cb);
}
preempt_enable();
return rc;
}
static int s390_gs_bc_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
struct gs_cb *data = target->thread.gs_bc_cb;
if (!MACHINE_HAS_GS)
return -ENODEV;
if (!data)
return -ENODATA;
return membuf_write(&to, data, sizeof(struct gs_cb));
}
static int s390_gs_bc_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
struct gs_cb *data = target->thread.gs_bc_cb;
if (!MACHINE_HAS_GS)
return -ENODEV;
if (!data) {
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
target->thread.gs_bc_cb = data;
}
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
data, 0, sizeof(struct gs_cb));
}
static bool is_ri_cb_valid(struct runtime_instr_cb *cb)
{
return (cb->rca & 0x1f) == 0 &&
(cb->roa & 0xfff) == 0 &&
(cb->rla & 0xfff) == 0xfff &&
cb->s == 1 &&
cb->k == 1 &&
cb->h == 0 &&
cb->reserved1 == 0 &&
cb->ps == 1 &&
cb->qs == 0 &&
cb->pc == 1 &&
cb->qc == 0 &&
cb->reserved2 == 0 &&
cb->reserved3 == 0 &&
cb->reserved4 == 0 &&
cb->reserved5 == 0 &&
cb->reserved6 == 0 &&
cb->reserved7 == 0 &&
cb->reserved8 == 0 &&
cb->rla >= cb->roa &&
cb->rca >= cb->roa &&
cb->rca <= cb->rla+1 &&
cb->m < 3;
}
static int s390_runtime_instr_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
struct runtime_instr_cb *data = target->thread.ri_cb;
if (!test_facility(64))
return -ENODEV;
if (!data)
return -ENODATA;
return membuf_write(&to, data, sizeof(struct runtime_instr_cb));
}
static int s390_runtime_instr_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
struct runtime_instr_cb ri_cb = { }, *data = NULL;
int rc;
if (!test_facility(64))
return -ENODEV;
if (!target->thread.ri_cb) {
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
}
if (target->thread.ri_cb) {
if (target == current)
store_runtime_instr_cb(&ri_cb);
else
ri_cb = *target->thread.ri_cb;
}
rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&ri_cb, 0, sizeof(struct runtime_instr_cb));
if (rc) {
kfree(data);
return -EFAULT;
}
if (!is_ri_cb_valid(&ri_cb)) {
kfree(data);
return -EINVAL;
}
/*
* Override access key in any case, since user space should
* not be able to set it, nor should it care about it.
*/
ri_cb.key = PAGE_DEFAULT_KEY >> 4;
preempt_disable();
if (!target->thread.ri_cb)
target->thread.ri_cb = data;
*target->thread.ri_cb = ri_cb;
if (target == current)
load_runtime_instr_cb(target->thread.ri_cb);
preempt_enable();
return 0;
}
static const struct user_regset s390_regsets[] = {
{
.core_note_type = NT_PRSTATUS,
.n = sizeof(s390_regs) / sizeof(long),
.size = sizeof(long),
.align = sizeof(long),
.regset_get = s390_regs_get,
.set = s390_regs_set,
},
{
.core_note_type = NT_PRFPREG,
.n = sizeof(s390_fp_regs) / sizeof(long),
.size = sizeof(long),
.align = sizeof(long),
.regset_get = s390_fpregs_get,
.set = s390_fpregs_set,
},
{
.core_note_type = NT_S390_SYSTEM_CALL,
.n = 1,
.size = sizeof(unsigned int),
.align = sizeof(unsigned int),
.regset_get = s390_system_call_get,
.set = s390_system_call_set,
},
{
.core_note_type = NT_S390_LAST_BREAK,
.n = 1,
.size = sizeof(long),
.align = sizeof(long),
.regset_get = s390_last_break_get,
.set = s390_last_break_set,
},
{
.core_note_type = NT_S390_TDB,
.n = 1,
.size = 256,
.align = 1,
.regset_get = s390_tdb_get,
.set = s390_tdb_set,
},
{
.core_note_type = NT_S390_VXRS_LOW,
.n = __NUM_VXRS_LOW,
.size = sizeof(__u64),
.align = sizeof(__u64),
.regset_get = s390_vxrs_low_get,
.set = s390_vxrs_low_set,
},
{
.core_note_type = NT_S390_VXRS_HIGH,
.n = __NUM_VXRS_HIGH,
.size = sizeof(__vector128),
.align = sizeof(__vector128),
.regset_get = s390_vxrs_high_get,
.set = s390_vxrs_high_set,
},
{
.core_note_type = NT_S390_GS_CB,
.n = sizeof(struct gs_cb) / sizeof(__u64),
.size = sizeof(__u64),
.align = sizeof(__u64),
.regset_get = s390_gs_cb_get,
.set = s390_gs_cb_set,
},
{
.core_note_type = NT_S390_GS_BC,
.n = sizeof(struct gs_cb) / sizeof(__u64),
.size = sizeof(__u64),
.align = sizeof(__u64),
.regset_get = s390_gs_bc_get,
.set = s390_gs_bc_set,
},
{
.core_note_type = NT_S390_RI_CB,
.n = sizeof(struct runtime_instr_cb) / sizeof(__u64),
.size = sizeof(__u64),
.align = sizeof(__u64),
.regset_get = s390_runtime_instr_get,
.set = s390_runtime_instr_set,
},
};
static const struct user_regset_view user_s390_view = {
.name = "s390x",
.e_machine = EM_S390,
.regsets = s390_regsets,
.n = ARRAY_SIZE(s390_regsets)
};
#ifdef CONFIG_COMPAT
static int s390_compat_regs_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
unsigned n;
if (target == current)
save_access_regs(target->thread.acrs);
for (n = 0; n < sizeof(s390_compat_regs); n += sizeof(compat_ulong_t))
membuf_store(&to, __peek_user_compat(target, n));
return 0;
}
static int s390_compat_regs_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int rc = 0;
if (target == current)
save_access_regs(target->thread.acrs);
if (kbuf) {
const compat_ulong_t *k = kbuf;
while (count > 0 && !rc) {
rc = __poke_user_compat(target, pos, *k++);
count -= sizeof(*k);
pos += sizeof(*k);
}
} else {
const compat_ulong_t __user *u = ubuf;
while (count > 0 && !rc) {
compat_ulong_t word;
rc = __get_user(word, u++);
if (rc)
break;
rc = __poke_user_compat(target, pos, word);
count -= sizeof(*u);
pos += sizeof(*u);
}
}
if (rc == 0 && target == current)
restore_access_regs(target->thread.acrs);
return rc;
}
static int s390_compat_regs_high_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
compat_ulong_t *gprs_high;
int i;
gprs_high = (compat_ulong_t *)task_pt_regs(target)->gprs;
for (i = 0; i < NUM_GPRS; i++, gprs_high += 2)
membuf_store(&to, *gprs_high);
return 0;
}
static int s390_compat_regs_high_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
compat_ulong_t *gprs_high;
int rc = 0;
gprs_high = (compat_ulong_t *)
&task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
if (kbuf) {
const compat_ulong_t *k = kbuf;
while (count > 0) {
*gprs_high = *k++;
			gprs_high += 2;
count -= sizeof(*k);
}
} else {
const compat_ulong_t __user *u = ubuf;
while (count > 0 && !rc) {
unsigned long word;
rc = __get_user(word, u++);
if (rc)
break;
*gprs_high = word;
			gprs_high += 2;
count -= sizeof(*u);
}
}
return rc;
}
static int s390_compat_last_break_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
compat_ulong_t last_break = target->thread.last_break;
return membuf_store(&to, (unsigned long)last_break);
}
static int s390_compat_last_break_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
return 0;
}
static const struct user_regset s390_compat_regsets[] = {
{
.core_note_type = NT_PRSTATUS,
.n = sizeof(s390_compat_regs) / sizeof(compat_long_t),
.size = sizeof(compat_long_t),
.align = sizeof(compat_long_t),
.regset_get = s390_compat_regs_get,
.set = s390_compat_regs_set,
},
{
.core_note_type = NT_PRFPREG,
.n = sizeof(s390_fp_regs) / sizeof(compat_long_t),
.size = sizeof(compat_long_t),
.align = sizeof(compat_long_t),
.regset_get = s390_fpregs_get,
.set = s390_fpregs_set,
},
{
.core_note_type = NT_S390_SYSTEM_CALL,
.n = 1,
.size = sizeof(compat_uint_t),
.align = sizeof(compat_uint_t),
.regset_get = s390_system_call_get,
.set = s390_system_call_set,
},
{
.core_note_type = NT_S390_LAST_BREAK,
.n = 1,
.size = sizeof(long),
.align = sizeof(long),
.regset_get = s390_compat_last_break_get,
.set = s390_compat_last_break_set,
},
{
.core_note_type = NT_S390_TDB,
.n = 1,
.size = 256,
.align = 1,
.regset_get = s390_tdb_get,
.set = s390_tdb_set,
},
{
.core_note_type = NT_S390_VXRS_LOW,
.n = __NUM_VXRS_LOW,
.size = sizeof(__u64),
.align = sizeof(__u64),
.regset_get = s390_vxrs_low_get,
.set = s390_vxrs_low_set,
},
{
.core_note_type = NT_S390_VXRS_HIGH,
.n = __NUM_VXRS_HIGH,
.size = sizeof(__vector128),
.align = sizeof(__vector128),
.regset_get = s390_vxrs_high_get,
.set = s390_vxrs_high_set,
},
{
.core_note_type = NT_S390_HIGH_GPRS,
.n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t),
.size = sizeof(compat_long_t),
.align = sizeof(compat_long_t),
.regset_get = s390_compat_regs_high_get,
.set = s390_compat_regs_high_set,
},
{
.core_note_type = NT_S390_GS_CB,
.n = sizeof(struct gs_cb) / sizeof(__u64),
.size = sizeof(__u64),
.align = sizeof(__u64),
.regset_get = s390_gs_cb_get,
.set = s390_gs_cb_set,
},
{
.core_note_type = NT_S390_GS_BC,
.n = sizeof(struct gs_cb) / sizeof(__u64),
.size = sizeof(__u64),
.align = sizeof(__u64),
.regset_get = s390_gs_bc_get,
.set = s390_gs_bc_set,
},
{
.core_note_type = NT_S390_RI_CB,
.n = sizeof(struct runtime_instr_cb) / sizeof(__u64),
.size = sizeof(__u64),
.align = sizeof(__u64),
.regset_get = s390_runtime_instr_get,
.set = s390_runtime_instr_set,
},
};
static const struct user_regset_view user_s390_compat_view = {
.name = "s390",
.e_machine = EM_S390,
.regsets = s390_compat_regsets,
.n = ARRAY_SIZE(s390_compat_regsets)
};
#endif
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
if (test_tsk_thread_flag(task, TIF_31BIT))
return &user_s390_compat_view;
#endif
return &user_s390_view;
}
static const char *gpr_names[NUM_GPRS] = {
"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
};
unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset)
{
if (offset >= NUM_GPRS)
return 0;
return regs->gprs[offset];
}
int regs_query_register_offset(const char *name)
{
unsigned long offset;
if (!name || *name != 'r')
return -EINVAL;
if (kstrtoul(name + 1, 10, &offset))
return -EINVAL;
if (offset >= NUM_GPRS)
return -EINVAL;
return offset;
}
const char *regs_query_register_name(unsigned int offset)
{
if (offset >= NUM_GPRS)
return NULL;
return gpr_names[offset];
}
static int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
unsigned long ksp = kernel_stack_pointer(regs);
return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1));
}
/**
* regs_get_kernel_stack_nth() - get Nth entry of the stack
* @regs: pt_regs which contains the kernel stack pointer.
* @n: stack entry number.
*
* regs_get_kernel_stack_nth() returns the @n th entry of the kernel stack
* specified by @regs. If the @n th entry is NOT in the kernel stack,
* this returns 0.
*/
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
unsigned long addr;
addr = kernel_stack_pointer(regs) + n * sizeof(long);
if (!regs_within_kernel_stack(regs, addr))
return 0;
return *(unsigned long *)addr;
}
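/*
 * Illustrative sketch (not part of the original file): how the helpers above
 * might be combined by a tracing or kprobes-style consumer. The function name
 * is hypothetical; only regs_query_register_offset(), regs_get_register() and
 * regs_get_kernel_stack_nth() are taken from the code above.
 */
#if 0
static void example_dump_regs(struct pt_regs *regs)
{
	int off = regs_query_register_offset("r2");
	unsigned long val = (off >= 0) ? regs_get_register(regs, off) : 0;

	pr_info("r2=%lx stack[0]=%lx\n", val,
		regs_get_kernel_stack_nth(regs, 0));
}
#endif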
| linux-master | arch/s390/kernel/ptrace.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Implementation of s390 diagnose codes
*
* Copyright IBM Corp. 2007
* Author(s): Michael Holzheu <[email protected]>
*/
#include <linux/export.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/vmalloc.h>
#include <asm/asm-extable.h>
#include <asm/diag.h>
#include <asm/trace/diag.h>
#include <asm/sections.h>
#include "entry.h"
struct diag_stat {
unsigned int counter[NR_DIAG_STAT];
};
static DEFINE_PER_CPU(struct diag_stat, diag_stat);
struct diag_desc {
int code;
char *name;
};
static const struct diag_desc diag_map[NR_DIAG_STAT] = {
[DIAG_STAT_X008] = { .code = 0x008, .name = "Console Function" },
[DIAG_STAT_X00C] = { .code = 0x00c, .name = "Pseudo Timer" },
[DIAG_STAT_X010] = { .code = 0x010, .name = "Release Pages" },
[DIAG_STAT_X014] = { .code = 0x014, .name = "Spool File Services" },
[DIAG_STAT_X044] = { .code = 0x044, .name = "Voluntary Timeslice End" },
[DIAG_STAT_X064] = { .code = 0x064, .name = "NSS Manipulation" },
[DIAG_STAT_X08C] = { .code = 0x08c, .name = "Access 3270 Display Device Information" },
[DIAG_STAT_X09C] = { .code = 0x09c, .name = "Relinquish Timeslice" },
[DIAG_STAT_X0DC] = { .code = 0x0dc, .name = "Appldata Control" },
[DIAG_STAT_X204] = { .code = 0x204, .name = "Logical-CPU Utilization" },
[DIAG_STAT_X210] = { .code = 0x210, .name = "Device Information" },
[DIAG_STAT_X224] = { .code = 0x224, .name = "EBCDIC-Name Table" },
[DIAG_STAT_X250] = { .code = 0x250, .name = "Block I/O" },
[DIAG_STAT_X258] = { .code = 0x258, .name = "Page-Reference Services" },
[DIAG_STAT_X26C] = { .code = 0x26c, .name = "Certain System Information" },
[DIAG_STAT_X288] = { .code = 0x288, .name = "Time Bomb" },
[DIAG_STAT_X2C4] = { .code = 0x2c4, .name = "FTP Services" },
[DIAG_STAT_X2FC] = { .code = 0x2fc, .name = "Guest Performance Data" },
[DIAG_STAT_X304] = { .code = 0x304, .name = "Partition-Resource Service" },
[DIAG_STAT_X308] = { .code = 0x308, .name = "List-Directed IPL" },
[DIAG_STAT_X318] = { .code = 0x318, .name = "CP Name and Version Codes" },
[DIAG_STAT_X320] = { .code = 0x320, .name = "Certificate Store" },
[DIAG_STAT_X500] = { .code = 0x500, .name = "Virtio Service" },
};
struct diag_ops __amode31_ref diag_amode31_ops = {
.diag210 = _diag210_amode31,
.diag26c = _diag26c_amode31,
.diag14 = _diag14_amode31,
.diag0c = _diag0c_amode31,
.diag8c = _diag8c_amode31,
.diag308_reset = _diag308_reset_amode31
};
static struct diag210 _diag210_tmp_amode31 __section(".amode31.data");
struct diag210 __amode31_ref *__diag210_tmp_amode31 = &_diag210_tmp_amode31;
static struct diag8c _diag8c_tmp_amode31 __section(".amode31.data");
static struct diag8c __amode31_ref *__diag8c_tmp_amode31 = &_diag8c_tmp_amode31;
static int show_diag_stat(struct seq_file *m, void *v)
{
struct diag_stat *stat;
unsigned long n = (unsigned long) v - 1;
int cpu, prec, tmp;
cpus_read_lock();
if (n == 0) {
seq_puts(m, " ");
for_each_online_cpu(cpu) {
prec = 10;
for (tmp = 10; cpu >= tmp; tmp *= 10)
prec--;
seq_printf(m, "%*s%d", prec, "CPU", cpu);
}
seq_putc(m, '\n');
} else if (n <= NR_DIAG_STAT) {
seq_printf(m, "diag %03x:", diag_map[n-1].code);
for_each_online_cpu(cpu) {
stat = &per_cpu(diag_stat, cpu);
seq_printf(m, " %10u", stat->counter[n-1]);
}
seq_printf(m, " %s\n", diag_map[n-1].name);
}
cpus_read_unlock();
return 0;
}
static void *show_diag_stat_start(struct seq_file *m, loff_t *pos)
{
return *pos <= NR_DIAG_STAT ? (void *)((unsigned long) *pos + 1) : NULL;
}
static void *show_diag_stat_next(struct seq_file *m, void *v, loff_t *pos)
{
++*pos;
return show_diag_stat_start(m, pos);
}
static void show_diag_stat_stop(struct seq_file *m, void *v)
{
}
static const struct seq_operations show_diag_stat_sops = {
.start = show_diag_stat_start,
.next = show_diag_stat_next,
.stop = show_diag_stat_stop,
.show = show_diag_stat,
};
DEFINE_SEQ_ATTRIBUTE(show_diag_stat);
static int __init show_diag_stat_init(void)
{
debugfs_create_file("diag_stat", 0400, NULL, NULL,
&show_diag_stat_fops);
return 0;
}
device_initcall(show_diag_stat_init);
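/*
 * Illustrative note (not part of the original file): the file created above
 * shows up as /sys/kernel/debug/diag_stat and, via the sequence handlers
 * above, prints a per-CPU header row followed by one row per diagnose code,
 * roughly like
 *
 *	            CPU0       CPU1
 *	diag 008:      2          0	Console Function
 *	diag 204:     17         23	Logical-CPU Utilization
 *
 * The counter values and exact column spacing are made up for illustration.
 */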
void diag_stat_inc(enum diag_stat_enum nr)
{
this_cpu_inc(diag_stat.counter[nr]);
trace_s390_diagnose(diag_map[nr].code);
}
EXPORT_SYMBOL(diag_stat_inc);
void notrace diag_stat_inc_norecursion(enum diag_stat_enum nr)
{
this_cpu_inc(diag_stat.counter[nr]);
trace_s390_diagnose_norecursion(diag_map[nr].code);
}
EXPORT_SYMBOL(diag_stat_inc_norecursion);
/*
* Diagnose 14: Input spool file manipulation
*/
int diag14(unsigned long rx, unsigned long ry1, unsigned long subcode)
{
diag_stat_inc(DIAG_STAT_X014);
return diag_amode31_ops.diag14(rx, ry1, subcode);
}
EXPORT_SYMBOL(diag14);
static inline int __diag204(unsigned long *subcode, unsigned long size, void *addr)
{
union register_pair rp = { .even = *subcode, .odd = size };
asm volatile(
" diag %[addr],%[rp],0x204\n"
"0: nopr %%r7\n"
EX_TABLE(0b,0b)
: [rp] "+&d" (rp.pair) : [addr] "d" (addr) : "memory");
*subcode = rp.even;
return rp.odd;
}
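/*
 * Illustrative note (not part of the original file): in the register pair
 * used above, the even half carries the subcode on entry and comes back
 * non-zero on failure, while the odd half carries the size in pages and is
 * handed back to the caller; the diag204() wrapper below relies on exactly
 * that convention.
 */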
/**
* diag204() - Issue diagnose 204 call.
* @subcode: Subcode of diagnose 204 to be executed.
* @size: Size of the area in pages which @addr points to, if given.
* @addr: Vmalloc'ed memory area where the result is written to.
*
* Execute diagnose 204 with the given subcode and write the result to the
* memory area specified with @addr. For subcodes which do not write a
* result to memory both @size and @addr must be zero. If @addr is
* specified it must be page aligned and must have been allocated with
* vmalloc(). Conversion to real / physical addresses will be handled by
* this function if required.
*/
int diag204(unsigned long subcode, unsigned long size, void *addr)
{
if (addr) {
if (WARN_ON_ONCE(!is_vmalloc_addr(addr)))
return -1;
if (WARN_ON_ONCE(!IS_ALIGNED((unsigned long)addr, PAGE_SIZE)))
return -1;
}
if ((subcode & DIAG204_SUBCODE_MASK) == DIAG204_SUBC_STIB4)
addr = (void *)pfn_to_phys(vmalloc_to_pfn(addr));
diag_stat_inc(DIAG_STAT_X204);
size = __diag204(&subcode, size, addr);
if (subcode)
return -1;
return size;
}
EXPORT_SYMBOL(diag204);
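/*
 * Illustrative sketch (not part of the original file): a caller following the
 * rules documented above -- a page-aligned vmalloc()ed buffer and a size given
 * in pages. The subcode/format constants come from <asm/diag.h>; the single
 * page and the error mapping are arbitrary choices for the example.
 */
#if 0
static int example_diag204_query(void)
{
	void *buf = vmalloc(PAGE_SIZE);
	int rc;

	if (!buf)
		return -ENOMEM;
	rc = diag204(DIAG204_SUBC_STIB4 | DIAG204_INFO_SIMPLE, 1, buf);
	vfree(buf);
	return rc < 0 ? -EOPNOTSUPP : 0;
}
#endif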
/*
* Diagnose 210: Get information about a virtual device
*/
int diag210(struct diag210 *addr)
{
static DEFINE_SPINLOCK(diag210_lock);
unsigned long flags;
int ccode;
spin_lock_irqsave(&diag210_lock, flags);
*__diag210_tmp_amode31 = *addr;
diag_stat_inc(DIAG_STAT_X210);
ccode = diag_amode31_ops.diag210(__diag210_tmp_amode31);
*addr = *__diag210_tmp_amode31;
spin_unlock_irqrestore(&diag210_lock, flags);
return ccode;
}
EXPORT_SYMBOL(diag210);
/*
* Diagnose 8C: Access 3270 Display Device Information
*/
int diag8c(struct diag8c *addr, struct ccw_dev_id *devno)
{
static DEFINE_SPINLOCK(diag8c_lock);
unsigned long flags;
int ccode;
spin_lock_irqsave(&diag8c_lock, flags);
diag_stat_inc(DIAG_STAT_X08C);
ccode = diag_amode31_ops.diag8c(__diag8c_tmp_amode31, devno, sizeof(*addr));
*addr = *__diag8c_tmp_amode31;
spin_unlock_irqrestore(&diag8c_lock, flags);
return ccode;
}
EXPORT_SYMBOL(diag8c);
int diag224(void *ptr)
{
int rc = -EOPNOTSUPP;
diag_stat_inc(DIAG_STAT_X224);
asm volatile(
" diag %1,%2,0x224\n"
"0: lhi %0,0x0\n"
"1:\n"
EX_TABLE(0b,1b)
: "+d" (rc) :"d" (0), "d" (ptr) : "memory");
return rc;
}
EXPORT_SYMBOL(diag224);
/*
* Diagnose 26C: Access Certain System Information
*/
int diag26c(void *req, void *resp, enum diag26c_sc subcode)
{
diag_stat_inc(DIAG_STAT_X26C);
return diag_amode31_ops.diag26c(req, resp, subcode);
}
EXPORT_SYMBOL(diag26c);
| linux-master | arch/s390/kernel/diag.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Image loader for kexec_file_load system call.
*
* Copyright IBM Corp. 2018
*
* Author(s): Philipp Rudo <[email protected]>
*/
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <asm/ipl.h>
#include <asm/setup.h>
static int kexec_file_add_kernel_image(struct kimage *image,
struct s390_load_data *data)
{
struct kexec_buf buf;
buf.image = image;
buf.buffer = image->kernel_buf;
buf.bufsz = image->kernel_buf_len;
buf.mem = 0;
if (image->type == KEXEC_TYPE_CRASH)
buf.mem += crashk_res.start;
buf.memsz = buf.bufsz;
data->kernel_buf = image->kernel_buf;
data->kernel_mem = buf.mem;
data->parm = image->kernel_buf + PARMAREA;
data->memsz += buf.memsz;
ipl_report_add_component(data->report, &buf,
IPL_RB_COMPONENT_FLAG_SIGNED |
IPL_RB_COMPONENT_FLAG_VERIFIED,
IPL_RB_CERT_UNKNOWN);
return kexec_add_buffer(&buf);
}
static void *s390_image_load(struct kimage *image,
char *kernel, unsigned long kernel_len,
char *initrd, unsigned long initrd_len,
char *cmdline, unsigned long cmdline_len)
{
return kexec_file_add_components(image, kexec_file_add_kernel_image);
}
static int s390_image_probe(const char *buf, unsigned long len)
{
/* Can't reliably tell if an image is valid. Therefore give the
* user whatever he wants.
*/
return 0;
}
const struct kexec_file_ops s390_kexec_image_ops = {
.probe = s390_image_probe,
.load = s390_image_load,
#ifdef CONFIG_KEXEC_SIG
.verify_sig = s390_verify_sig,
#endif /* CONFIG_KEXEC_SIG */
};
| linux-master | arch/s390/kernel/kexec_image.c |
// SPDX-License-Identifier: GPL-2.0
/*
* S390 version
* Copyright IBM Corp. 1999, 2000
* Author(s): Martin Schwidefsky ([email protected]),
* Denis Joseph Barrow ([email protected],[email protected]),
*
* Derived from "arch/i386/kernel/traps.c"
* Copyright (C) 1991, 1992 Linus Torvalds
*/
/*
* 'Traps.c' handles hardware traps and faults after we have saved some
* state in 'asm.s'.
*/
#include "asm/irqflags.h"
#include "asm/ptrace.h"
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/randomize_kstack.h>
#include <linux/extable.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/cpu.h>
#include <linux/entry-common.h>
#include <asm/asm-extable.h>
#include <asm/fpu/api.h>
#include <asm/vtime.h>
#include "entry.h"
static inline void __user *get_trap_ip(struct pt_regs *regs)
{
unsigned long address;
if (regs->int_code & 0x200)
address = current->thread.trap_tdb.data[3];
else
address = regs->psw.addr;
return (void __user *) (address - (regs->int_code >> 16));
}
int is_valid_bugaddr(unsigned long addr)
{
return 1;
}
void do_report_trap(struct pt_regs *regs, int si_signo, int si_code, char *str)
{
if (user_mode(regs)) {
force_sig_fault(si_signo, si_code, get_trap_ip(regs));
report_user_fault(regs, si_signo, 0);
} else {
if (!fixup_exception(regs))
die(regs, str);
}
}
static void do_trap(struct pt_regs *regs, int si_signo, int si_code, char *str)
{
if (notify_die(DIE_TRAP, str, regs, 0,
regs->int_code, si_signo) == NOTIFY_STOP)
return;
do_report_trap(regs, si_signo, si_code, str);
}
NOKPROBE_SYMBOL(do_trap);
void do_per_trap(struct pt_regs *regs)
{
if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, SIGTRAP) == NOTIFY_STOP)
return;
if (!current->ptrace)
return;
force_sig_fault(SIGTRAP, TRAP_HWBKPT,
(void __force __user *) current->thread.per_event.address);
}
NOKPROBE_SYMBOL(do_per_trap);
static void default_trap_handler(struct pt_regs *regs)
{
if (user_mode(regs)) {
report_user_fault(regs, SIGSEGV, 0);
force_exit_sig(SIGSEGV);
} else
die(regs, "Unknown program exception");
}
#define DO_ERROR_INFO(name, signr, sicode, str) \
static void name(struct pt_regs *regs) \
{ \
do_trap(regs, signr, sicode, str); \
}
DO_ERROR_INFO(addressing_exception, SIGILL, ILL_ILLADR,
"addressing exception")
DO_ERROR_INFO(execute_exception, SIGILL, ILL_ILLOPN,
"execute exception")
DO_ERROR_INFO(divide_exception, SIGFPE, FPE_INTDIV,
"fixpoint divide exception")
DO_ERROR_INFO(overflow_exception, SIGFPE, FPE_INTOVF,
"fixpoint overflow exception")
DO_ERROR_INFO(hfp_overflow_exception, SIGFPE, FPE_FLTOVF,
"HFP overflow exception")
DO_ERROR_INFO(hfp_underflow_exception, SIGFPE, FPE_FLTUND,
"HFP underflow exception")
DO_ERROR_INFO(hfp_significance_exception, SIGFPE, FPE_FLTRES,
"HFP significance exception")
DO_ERROR_INFO(hfp_divide_exception, SIGFPE, FPE_FLTDIV,
"HFP divide exception")
DO_ERROR_INFO(hfp_sqrt_exception, SIGFPE, FPE_FLTINV,
"HFP square root exception")
DO_ERROR_INFO(operand_exception, SIGILL, ILL_ILLOPN,
"operand exception")
DO_ERROR_INFO(privileged_op, SIGILL, ILL_PRVOPC,
"privileged operation")
DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN,
"special operation exception")
DO_ERROR_INFO(transaction_exception, SIGILL, ILL_ILLOPN,
"transaction constraint exception")
static inline void do_fp_trap(struct pt_regs *regs, __u32 fpc)
{
int si_code = 0;
/* FPC[2] is Data Exception Code */
if ((fpc & 0x00000300) == 0) {
/* bits 6 and 7 of DXC are 0 iff IEEE exception */
if (fpc & 0x8000) /* invalid fp operation */
si_code = FPE_FLTINV;
else if (fpc & 0x4000) /* div by 0 */
si_code = FPE_FLTDIV;
else if (fpc & 0x2000) /* overflow */
si_code = FPE_FLTOVF;
else if (fpc & 0x1000) /* underflow */
si_code = FPE_FLTUND;
else if (fpc & 0x0800) /* inexact */
si_code = FPE_FLTRES;
}
do_trap(regs, SIGFPE, si_code, "floating point exception");
}
static void translation_specification_exception(struct pt_regs *regs)
{
/* May never happen. */
panic("Translation-Specification Exception");
}
static void illegal_op(struct pt_regs *regs)
{
__u8 opcode[6];
__u16 __user *location;
int is_uprobe_insn = 0;
int signal = 0;
location = get_trap_ip(regs);
if (user_mode(regs)) {
if (get_user(*((__u16 *) opcode), (__u16 __user *) location))
return;
if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) {
if (current->ptrace)
force_sig_fault(SIGTRAP, TRAP_BRKPT, location);
else
signal = SIGILL;
#ifdef CONFIG_UPROBES
} else if (*((__u16 *) opcode) == UPROBE_SWBP_INSN) {
is_uprobe_insn = 1;
#endif
} else
signal = SIGILL;
}
/*
* We got either an illegal op in kernel mode, or user space trapped
* on a uprobes illegal instruction. See if kprobes or uprobes picks
* it up. If not, SIGILL.
*/
if (is_uprobe_insn || !user_mode(regs)) {
if (notify_die(DIE_BPT, "bpt", regs, 0,
3, SIGTRAP) != NOTIFY_STOP)
signal = SIGILL;
}
if (signal)
do_trap(regs, signal, ILL_ILLOPC, "illegal operation");
}
NOKPROBE_SYMBOL(illegal_op);
DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN,
"specification exception");
static void vector_exception(struct pt_regs *regs)
{
int si_code, vic;
if (!MACHINE_HAS_VX) {
do_trap(regs, SIGILL, ILL_ILLOPN, "illegal operation");
return;
}
/* get vector interrupt code from fpc */
save_fpu_regs();
vic = (current->thread.fpu.fpc & 0xf00) >> 8;
switch (vic) {
case 1: /* invalid vector operation */
si_code = FPE_FLTINV;
break;
case 2: /* division by zero */
si_code = FPE_FLTDIV;
break;
case 3: /* overflow */
si_code = FPE_FLTOVF;
break;
case 4: /* underflow */
si_code = FPE_FLTUND;
break;
case 5: /* inexact */
si_code = FPE_FLTRES;
break;
default: /* unknown cause */
si_code = 0;
}
do_trap(regs, SIGFPE, si_code, "vector exception");
}
static void data_exception(struct pt_regs *regs)
{
save_fpu_regs();
if (current->thread.fpu.fpc & FPC_DXC_MASK)
do_fp_trap(regs, current->thread.fpu.fpc);
else
do_trap(regs, SIGILL, ILL_ILLOPN, "data exception");
}
static void space_switch_exception(struct pt_regs *regs)
{
/* Set user psw back to home space mode. */
if (user_mode(regs))
regs->psw.mask |= PSW_ASC_HOME;
/* Send SIGILL. */
do_trap(regs, SIGILL, ILL_PRVOPC, "space switch event");
}
static void monitor_event_exception(struct pt_regs *regs)
{
if (user_mode(regs))
return;
switch (report_bug(regs->psw.addr - (regs->int_code >> 16), regs)) {
case BUG_TRAP_TYPE_NONE:
fixup_exception(regs);
break;
case BUG_TRAP_TYPE_WARN:
break;
case BUG_TRAP_TYPE_BUG:
die(regs, "monitor event");
break;
}
}
void kernel_stack_overflow(struct pt_regs *regs)
{
bust_spinlocks(1);
printk("Kernel stack overflow.\n");
show_regs(regs);
bust_spinlocks(0);
panic("Corrupt kernel stack, can't continue.");
}
NOKPROBE_SYMBOL(kernel_stack_overflow);
static void __init test_monitor_call(void)
{
int val = 1;
if (!IS_ENABLED(CONFIG_BUG))
return;
asm volatile(
" mc 0,0\n"
"0: xgr %0,%0\n"
"1:\n"
EX_TABLE(0b,1b)
: "+d" (val));
if (!val)
panic("Monitor call doesn't work!\n");
}
void __init trap_init(void)
{
local_mcck_enable();
test_monitor_call();
}
static void (*pgm_check_table[128])(struct pt_regs *regs);
void noinstr __do_pgm_check(struct pt_regs *regs)
{
unsigned int trapnr;
irqentry_state_t state;
regs->int_code = S390_lowcore.pgm_int_code;
regs->int_parm_long = S390_lowcore.trans_exc_code;
state = irqentry_enter(regs);
if (user_mode(regs)) {
update_timer_sys();
if (!static_branch_likely(&cpu_has_bear)) {
if (regs->last_break < 4096)
regs->last_break = 1;
}
current->thread.last_break = regs->last_break;
}
if (S390_lowcore.pgm_code & 0x0200) {
/* transaction abort */
current->thread.trap_tdb = S390_lowcore.pgm_tdb;
}
if (S390_lowcore.pgm_code & PGM_INT_CODE_PER) {
if (user_mode(regs)) {
struct per_event *ev = &current->thread.per_event;
set_thread_flag(TIF_PER_TRAP);
ev->address = S390_lowcore.per_address;
ev->cause = S390_lowcore.per_code_combined;
ev->paid = S390_lowcore.per_access_id;
} else {
/* PER event in kernel is kprobes */
__arch_local_irq_ssm(regs->psw.mask & ~PSW_MASK_PER);
do_per_trap(regs);
goto out;
}
}
if (!irqs_disabled_flags(regs->psw.mask))
trace_hardirqs_on();
__arch_local_irq_ssm(regs->psw.mask & ~PSW_MASK_PER);
trapnr = regs->int_code & PGM_INT_CODE_MASK;
if (trapnr)
pgm_check_table[trapnr](regs);
out:
local_irq_disable();
irqentry_exit(regs, state);
}
/*
* The program check table contains exactly 128 (0x00-0x7f) entries. Each
* line defines the function to be called corresponding to the program check
* interruption code.
*/
static void (*pgm_check_table[128])(struct pt_regs *regs) = {
[0x00] = default_trap_handler,
[0x01] = illegal_op,
[0x02] = privileged_op,
[0x03] = execute_exception,
[0x04] = do_protection_exception,
[0x05] = addressing_exception,
[0x06] = specification_exception,
[0x07] = data_exception,
[0x08] = overflow_exception,
[0x09] = divide_exception,
[0x0a] = overflow_exception,
[0x0b] = divide_exception,
[0x0c] = hfp_overflow_exception,
[0x0d] = hfp_underflow_exception,
[0x0e] = hfp_significance_exception,
[0x0f] = hfp_divide_exception,
[0x10] = do_dat_exception,
[0x11] = do_dat_exception,
[0x12] = translation_specification_exception,
[0x13] = special_op_exception,
[0x14] = default_trap_handler,
[0x15] = operand_exception,
[0x16] = default_trap_handler,
[0x17] = default_trap_handler,
[0x18] = transaction_exception,
[0x19] = default_trap_handler,
[0x1a] = default_trap_handler,
[0x1b] = vector_exception,
[0x1c] = space_switch_exception,
[0x1d] = hfp_sqrt_exception,
[0x1e ... 0x37] = default_trap_handler,
[0x38] = do_dat_exception,
[0x39] = do_dat_exception,
[0x3a] = do_dat_exception,
[0x3b] = do_dat_exception,
[0x3c] = default_trap_handler,
[0x3d] = do_secure_storage_access,
[0x3e] = do_non_secure_storage_access,
[0x3f] = do_secure_storage_violation,
[0x40] = monitor_event_exception,
[0x41 ... 0x7f] = default_trap_handler,
};
#define COND_TRAP(x) asm( \
".weak " __stringify(x) "\n\t" \
".set " __stringify(x) "," \
__stringify(default_trap_handler))
COND_TRAP(do_secure_storage_access);
COND_TRAP(do_non_secure_storage_access);
COND_TRAP(do_secure_storage_violation);
| linux-master | arch/s390/kernel/traps.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/ima.h>
#include <asm/boot_data.h>
bool arch_ima_get_secureboot(void)
{
return ipl_secure_flag;
}
const char * const *arch_get_ima_policy(void)
{
return NULL;
}
| linux-master | arch/s390/kernel/ima_arch.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Generate definitions needed by assembly language modules.
* This code generates raw asm output which is post-processed to extract
* and format the required data.
*/
#define ASM_OFFSETS_C
#include <linux/kbuild.h>
#include <linux/kvm_host.h>
#include <linux/sched.h>
#include <linux/purgatory.h>
#include <linux/pgtable.h>
#include <linux/ftrace.h>
#include <asm/idle.h>
#include <asm/gmap.h>
#include <asm/stacktrace.h>
int main(void)
{
/* task struct offsets */
OFFSET(__TASK_stack, task_struct, stack);
OFFSET(__TASK_thread, task_struct, thread);
OFFSET(__TASK_pid, task_struct, pid);
BLANK();
/* thread struct offsets */
OFFSET(__THREAD_ksp, thread_struct, ksp);
BLANK();
/* thread info offsets */
OFFSET(__TI_flags, task_struct, thread_info.flags);
BLANK();
/* pt_regs offsets */
OFFSET(__PT_PSW, pt_regs, psw);
OFFSET(__PT_GPRS, pt_regs, gprs);
OFFSET(__PT_R0, pt_regs, gprs[0]);
OFFSET(__PT_R1, pt_regs, gprs[1]);
OFFSET(__PT_R2, pt_regs, gprs[2]);
OFFSET(__PT_R3, pt_regs, gprs[3]);
OFFSET(__PT_R4, pt_regs, gprs[4]);
OFFSET(__PT_R5, pt_regs, gprs[5]);
OFFSET(__PT_R6, pt_regs, gprs[6]);
OFFSET(__PT_R7, pt_regs, gprs[7]);
OFFSET(__PT_R8, pt_regs, gprs[8]);
OFFSET(__PT_R9, pt_regs, gprs[9]);
OFFSET(__PT_R10, pt_regs, gprs[10]);
OFFSET(__PT_R11, pt_regs, gprs[11]);
OFFSET(__PT_R12, pt_regs, gprs[12]);
OFFSET(__PT_R13, pt_regs, gprs[13]);
OFFSET(__PT_R14, pt_regs, gprs[14]);
OFFSET(__PT_R15, pt_regs, gprs[15]);
OFFSET(__PT_ORIG_GPR2, pt_regs, orig_gpr2);
OFFSET(__PT_FLAGS, pt_regs, flags);
OFFSET(__PT_CR1, pt_regs, cr1);
OFFSET(__PT_LAST_BREAK, pt_regs, last_break);
DEFINE(__PT_SIZE, sizeof(struct pt_regs));
BLANK();
/* stack_frame offsets */
OFFSET(__SF_BACKCHAIN, stack_frame, back_chain);
OFFSET(__SF_GPRS, stack_frame, gprs);
OFFSET(__SF_EMPTY, stack_frame, empty[0]);
OFFSET(__SF_SIE_CONTROL, stack_frame, sie_control_block);
OFFSET(__SF_SIE_SAVEAREA, stack_frame, sie_savearea);
OFFSET(__SF_SIE_REASON, stack_frame, sie_reason);
OFFSET(__SF_SIE_FLAGS, stack_frame, sie_flags);
OFFSET(__SF_SIE_CONTROL_PHYS, stack_frame, sie_control_block_phys);
DEFINE(STACK_FRAME_OVERHEAD, sizeof(struct stack_frame));
BLANK();
/* idle data offsets */
OFFSET(__CLOCK_IDLE_ENTER, s390_idle_data, clock_idle_enter);
OFFSET(__TIMER_IDLE_ENTER, s390_idle_data, timer_idle_enter);
OFFSET(__MT_CYCLES_ENTER, s390_idle_data, mt_cycles_enter);
BLANK();
/* hardware defined lowcore locations 0x000 - 0x1ff */
OFFSET(__LC_EXT_PARAMS, lowcore, ext_params);
OFFSET(__LC_EXT_CPU_ADDR, lowcore, ext_cpu_addr);
OFFSET(__LC_EXT_INT_CODE, lowcore, ext_int_code);
OFFSET(__LC_PGM_ILC, lowcore, pgm_ilc);
OFFSET(__LC_PGM_INT_CODE, lowcore, pgm_code);
OFFSET(__LC_DATA_EXC_CODE, lowcore, data_exc_code);
OFFSET(__LC_MON_CLASS_NR, lowcore, mon_class_num);
OFFSET(__LC_PER_CODE, lowcore, per_code);
OFFSET(__LC_PER_ATMID, lowcore, per_atmid);
OFFSET(__LC_PER_ADDRESS, lowcore, per_address);
OFFSET(__LC_EXC_ACCESS_ID, lowcore, exc_access_id);
OFFSET(__LC_PER_ACCESS_ID, lowcore, per_access_id);
OFFSET(__LC_OP_ACCESS_ID, lowcore, op_access_id);
OFFSET(__LC_AR_MODE_ID, lowcore, ar_mode_id);
OFFSET(__LC_TRANS_EXC_CODE, lowcore, trans_exc_code);
OFFSET(__LC_MON_CODE, lowcore, monitor_code);
OFFSET(__LC_SUBCHANNEL_ID, lowcore, subchannel_id);
OFFSET(__LC_SUBCHANNEL_NR, lowcore, subchannel_nr);
OFFSET(__LC_IO_INT_PARM, lowcore, io_int_parm);
OFFSET(__LC_IO_INT_WORD, lowcore, io_int_word);
OFFSET(__LC_MCCK_CODE, lowcore, mcck_interruption_code);
OFFSET(__LC_EXT_DAMAGE_CODE, lowcore, external_damage_code);
OFFSET(__LC_MCCK_FAIL_STOR_ADDR, lowcore, failing_storage_address);
OFFSET(__LC_PGM_LAST_BREAK, lowcore, pgm_last_break);
OFFSET(__LC_RETURN_LPSWE, lowcore, return_lpswe);
OFFSET(__LC_RETURN_MCCK_LPSWE, lowcore, return_mcck_lpswe);
OFFSET(__LC_RST_OLD_PSW, lowcore, restart_old_psw);
OFFSET(__LC_EXT_OLD_PSW, lowcore, external_old_psw);
OFFSET(__LC_SVC_OLD_PSW, lowcore, svc_old_psw);
OFFSET(__LC_PGM_OLD_PSW, lowcore, program_old_psw);
OFFSET(__LC_MCK_OLD_PSW, lowcore, mcck_old_psw);
OFFSET(__LC_IO_OLD_PSW, lowcore, io_old_psw);
OFFSET(__LC_RST_NEW_PSW, lowcore, restart_psw);
OFFSET(__LC_EXT_NEW_PSW, lowcore, external_new_psw);
OFFSET(__LC_SVC_NEW_PSW, lowcore, svc_new_psw);
OFFSET(__LC_PGM_NEW_PSW, lowcore, program_new_psw);
OFFSET(__LC_MCK_NEW_PSW, lowcore, mcck_new_psw);
OFFSET(__LC_IO_NEW_PSW, lowcore, io_new_psw);
/* software defined lowcore locations 0x200 - 0xdff */
OFFSET(__LC_SAVE_AREA_SYNC, lowcore, save_area_sync);
OFFSET(__LC_SAVE_AREA_ASYNC, lowcore, save_area_async);
OFFSET(__LC_SAVE_AREA_RESTART, lowcore, save_area_restart);
OFFSET(__LC_CPU_FLAGS, lowcore, cpu_flags);
OFFSET(__LC_RETURN_PSW, lowcore, return_psw);
OFFSET(__LC_RETURN_MCCK_PSW, lowcore, return_mcck_psw);
OFFSET(__LC_SYS_ENTER_TIMER, lowcore, sys_enter_timer);
OFFSET(__LC_MCCK_ENTER_TIMER, lowcore, mcck_enter_timer);
OFFSET(__LC_EXIT_TIMER, lowcore, exit_timer);
OFFSET(__LC_LAST_UPDATE_TIMER, lowcore, last_update_timer);
OFFSET(__LC_LAST_UPDATE_CLOCK, lowcore, last_update_clock);
OFFSET(__LC_INT_CLOCK, lowcore, int_clock);
OFFSET(__LC_BOOT_CLOCK, lowcore, boot_clock);
OFFSET(__LC_CURRENT, lowcore, current_task);
OFFSET(__LC_KERNEL_STACK, lowcore, kernel_stack);
OFFSET(__LC_ASYNC_STACK, lowcore, async_stack);
OFFSET(__LC_NODAT_STACK, lowcore, nodat_stack);
OFFSET(__LC_RESTART_STACK, lowcore, restart_stack);
OFFSET(__LC_MCCK_STACK, lowcore, mcck_stack);
OFFSET(__LC_RESTART_FN, lowcore, restart_fn);
OFFSET(__LC_RESTART_DATA, lowcore, restart_data);
OFFSET(__LC_RESTART_SOURCE, lowcore, restart_source);
OFFSET(__LC_RESTART_FLAGS, lowcore, restart_flags);
OFFSET(__LC_KERNEL_ASCE, lowcore, kernel_asce);
OFFSET(__LC_USER_ASCE, lowcore, user_asce);
OFFSET(__LC_LPP, lowcore, lpp);
OFFSET(__LC_CURRENT_PID, lowcore, current_pid);
OFFSET(__LC_GMAP, lowcore, gmap);
OFFSET(__LC_LAST_BREAK, lowcore, last_break);
/* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */
OFFSET(__LC_DUMP_REIPL, lowcore, ipib);
OFFSET(__LC_VMCORE_INFO, lowcore, vmcore_info);
OFFSET(__LC_OS_INFO, lowcore, os_info);
/* hardware defined lowcore locations 0x1000 - 0x18ff */
OFFSET(__LC_MCESAD, lowcore, mcesad);
OFFSET(__LC_EXT_PARAMS2, lowcore, ext_params2);
OFFSET(__LC_FPREGS_SAVE_AREA, lowcore, floating_pt_save_area);
OFFSET(__LC_GPREGS_SAVE_AREA, lowcore, gpregs_save_area);
OFFSET(__LC_PSW_SAVE_AREA, lowcore, psw_save_area);
OFFSET(__LC_PREFIX_SAVE_AREA, lowcore, prefixreg_save_area);
OFFSET(__LC_FP_CREG_SAVE_AREA, lowcore, fpt_creg_save_area);
OFFSET(__LC_TOD_PROGREG_SAVE_AREA, lowcore, tod_progreg_save_area);
OFFSET(__LC_CPU_TIMER_SAVE_AREA, lowcore, cpu_timer_save_area);
OFFSET(__LC_CLOCK_COMP_SAVE_AREA, lowcore, clock_comp_save_area);
OFFSET(__LC_LAST_BREAK_SAVE_AREA, lowcore, last_break_save_area);
OFFSET(__LC_AREGS_SAVE_AREA, lowcore, access_regs_save_area);
OFFSET(__LC_CREGS_SAVE_AREA, lowcore, cregs_save_area);
OFFSET(__LC_PGM_TDB, lowcore, pgm_tdb);
BLANK();
/* gmap/sie offsets */
OFFSET(__GMAP_ASCE, gmap, asce);
OFFSET(__SIE_PROG0C, kvm_s390_sie_block, prog0c);
OFFSET(__SIE_PROG20, kvm_s390_sie_block, prog20);
/* kexec_sha_region */
OFFSET(__KEXEC_SHA_REGION_START, kexec_sha_region, start);
OFFSET(__KEXEC_SHA_REGION_LEN, kexec_sha_region, len);
DEFINE(__KEXEC_SHA_REGION_SIZE, sizeof(struct kexec_sha_region));
/* sizeof kernel parameter area */
DEFINE(__PARMAREA_SIZE, sizeof(struct parmarea));
/* kernel parameter area offsets */
DEFINE(IPL_DEVICE, PARMAREA + offsetof(struct parmarea, ipl_device));
DEFINE(INITRD_START, PARMAREA + offsetof(struct parmarea, initrd_start));
DEFINE(INITRD_SIZE, PARMAREA + offsetof(struct parmarea, initrd_size));
DEFINE(OLDMEM_BASE, PARMAREA + offsetof(struct parmarea, oldmem_base));
DEFINE(OLDMEM_SIZE, PARMAREA + offsetof(struct parmarea, oldmem_size));
DEFINE(COMMAND_LINE, PARMAREA + offsetof(struct parmarea, command_line));
DEFINE(MAX_COMMAND_LINE_SIZE, PARMAREA + offsetof(struct parmarea, max_command_line_size));
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* function graph return value tracing */
OFFSET(__FGRAPH_RET_GPR2, fgraph_ret_regs, gpr2);
OFFSET(__FGRAPH_RET_FP, fgraph_ret_regs, fp);
DEFINE(__FGRAPH_RET_SIZE, sizeof(struct fgraph_ret_regs));
#endif
OFFSET(__FTRACE_REGS_PT_REGS, ftrace_regs, regs);
DEFINE(__FTRACE_REGS_SIZE, sizeof(struct ftrace_regs));
return 0;
}
| linux-master | arch/s390/kernel/asm-offsets.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2016
* Author(s): Martin Schwidefsky <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <asm/guarded_storage.h>
#include "entry.h"
void guarded_storage_release(struct task_struct *tsk)
{
kfree(tsk->thread.gs_cb);
kfree(tsk->thread.gs_bc_cb);
}
static int gs_enable(void)
{
struct gs_cb *gs_cb;
if (!current->thread.gs_cb) {
gs_cb = kzalloc(sizeof(*gs_cb), GFP_KERNEL);
if (!gs_cb)
return -ENOMEM;
gs_cb->gsd = 25;
preempt_disable();
__ctl_set_bit(2, 4);
load_gs_cb(gs_cb);
current->thread.gs_cb = gs_cb;
preempt_enable();
}
return 0;
}
static int gs_disable(void)
{
if (current->thread.gs_cb) {
preempt_disable();
kfree(current->thread.gs_cb);
current->thread.gs_cb = NULL;
__ctl_clear_bit(2, 4);
preempt_enable();
}
return 0;
}
static int gs_set_bc_cb(struct gs_cb __user *u_gs_cb)
{
struct gs_cb *gs_cb;
gs_cb = current->thread.gs_bc_cb;
if (!gs_cb) {
gs_cb = kzalloc(sizeof(*gs_cb), GFP_KERNEL);
if (!gs_cb)
return -ENOMEM;
current->thread.gs_bc_cb = gs_cb;
}
if (copy_from_user(gs_cb, u_gs_cb, sizeof(*gs_cb)))
return -EFAULT;
return 0;
}
static int gs_clear_bc_cb(void)
{
struct gs_cb *gs_cb;
gs_cb = current->thread.gs_bc_cb;
current->thread.gs_bc_cb = NULL;
kfree(gs_cb);
return 0;
}
void gs_load_bc_cb(struct pt_regs *regs)
{
struct gs_cb *gs_cb;
preempt_disable();
clear_thread_flag(TIF_GUARDED_STORAGE);
gs_cb = current->thread.gs_bc_cb;
if (gs_cb) {
kfree(current->thread.gs_cb);
current->thread.gs_bc_cb = NULL;
__ctl_set_bit(2, 4);
load_gs_cb(gs_cb);
current->thread.gs_cb = gs_cb;
}
preempt_enable();
}
static int gs_broadcast(void)
{
struct task_struct *sibling;
read_lock(&tasklist_lock);
for_each_thread(current, sibling) {
if (!sibling->thread.gs_bc_cb)
continue;
if (test_and_set_tsk_thread_flag(sibling, TIF_GUARDED_STORAGE))
kick_process(sibling);
}
read_unlock(&tasklist_lock);
return 0;
}
SYSCALL_DEFINE2(s390_guarded_storage, int, command,
struct gs_cb __user *, gs_cb)
{
if (!MACHINE_HAS_GS)
return -EOPNOTSUPP;
switch (command) {
case GS_ENABLE:
return gs_enable();
case GS_DISABLE:
return gs_disable();
case GS_SET_BC_CB:
return gs_set_bc_cb(gs_cb);
case GS_CLEAR_BC_CB:
return gs_clear_bc_cb();
case GS_BROADCAST:
return gs_broadcast();
default:
return -EINVAL;
}
}
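/*
 * Illustrative note (not part of the original file): user space reaches the
 * handler above through the s390_guarded_storage system call, roughly
 *
 *	syscall(__NR_s390_guarded_storage, GS_ENABLE, NULL);
 *	...use the guarded-storage facility...
 *	syscall(__NR_s390_guarded_storage, GS_DISABLE, NULL);
 *
 * GS_SET_BC_CB additionally passes a struct gs_cb pointer as the second
 * argument; the call sequence shown here is only a sketch.
 */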
| linux-master | arch/s390/kernel/guarded_storage.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2007, 2011
*/
#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/workqueue.h>
#include <linux/memblock.h>
#include <linux/uaccess.h>
#include <linux/sysctl.h>
#include <linux/cpuset.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/nodemask.h>
#include <linux/node.h>
#include <asm/sysinfo.h>
#define PTF_HORIZONTAL (0UL)
#define PTF_VERTICAL (1UL)
#define PTF_CHECK (2UL)
enum {
TOPOLOGY_MODE_HW,
TOPOLOGY_MODE_SINGLE,
TOPOLOGY_MODE_PACKAGE,
TOPOLOGY_MODE_UNINITIALIZED
};
struct mask_info {
struct mask_info *next;
unsigned char id;
cpumask_t mask;
};
static int topology_mode = TOPOLOGY_MODE_UNINITIALIZED;
static void set_topology_timer(void);
static void topology_work_fn(struct work_struct *work);
static struct sysinfo_15_1_x *tl_info;
static DECLARE_WORK(topology_work, topology_work_fn);
/*
* Socket/Book linked lists and cpu_topology updates are
* protected by "sched_domains_mutex".
*/
static struct mask_info socket_info;
static struct mask_info book_info;
static struct mask_info drawer_info;
struct cpu_topology_s390 cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);
static void cpu_group_map(cpumask_t *dst, struct mask_info *info, unsigned int cpu)
{
static cpumask_t mask;
cpumask_clear(&mask);
if (!cpumask_test_cpu(cpu, &cpu_setup_mask))
goto out;
cpumask_set_cpu(cpu, &mask);
switch (topology_mode) {
case TOPOLOGY_MODE_HW:
while (info) {
if (cpumask_test_cpu(cpu, &info->mask)) {
cpumask_copy(&mask, &info->mask);
break;
}
info = info->next;
}
break;
case TOPOLOGY_MODE_PACKAGE:
cpumask_copy(&mask, cpu_present_mask);
break;
default:
fallthrough;
case TOPOLOGY_MODE_SINGLE:
break;
}
cpumask_and(&mask, &mask, &cpu_setup_mask);
out:
cpumask_copy(dst, &mask);
}
static void cpu_thread_map(cpumask_t *dst, unsigned int cpu)
{
static cpumask_t mask;
unsigned int max_cpu;
cpumask_clear(&mask);
if (!cpumask_test_cpu(cpu, &cpu_setup_mask))
goto out;
cpumask_set_cpu(cpu, &mask);
if (topology_mode != TOPOLOGY_MODE_HW)
goto out;
cpu -= cpu % (smp_cpu_mtid + 1);
max_cpu = min(cpu + smp_cpu_mtid, nr_cpu_ids - 1);
for (; cpu <= max_cpu; cpu++) {
if (cpumask_test_cpu(cpu, &cpu_setup_mask))
cpumask_set_cpu(cpu, &mask);
}
out:
cpumask_copy(dst, &mask);
}
#define TOPOLOGY_CORE_BITS 64
static void add_cpus_to_mask(struct topology_core *tl_core,
struct mask_info *drawer,
struct mask_info *book,
struct mask_info *socket)
{
struct cpu_topology_s390 *topo;
unsigned int core;
for_each_set_bit(core, &tl_core->mask, TOPOLOGY_CORE_BITS) {
unsigned int max_cpu, rcore;
int cpu;
rcore = TOPOLOGY_CORE_BITS - 1 - core + tl_core->origin;
cpu = smp_find_processor_id(rcore << smp_cpu_mt_shift);
if (cpu < 0)
continue;
max_cpu = min(cpu + smp_cpu_mtid, nr_cpu_ids - 1);
for (; cpu <= max_cpu; cpu++) {
topo = &cpu_topology[cpu];
topo->drawer_id = drawer->id;
topo->book_id = book->id;
topo->socket_id = socket->id;
topo->core_id = rcore;
topo->thread_id = cpu;
topo->dedicated = tl_core->d;
cpumask_set_cpu(cpu, &drawer->mask);
cpumask_set_cpu(cpu, &book->mask);
cpumask_set_cpu(cpu, &socket->mask);
smp_cpu_set_polarization(cpu, tl_core->pp);
}
}
}
static void clear_masks(void)
{
struct mask_info *info;
info = &socket_info;
while (info) {
cpumask_clear(&info->mask);
info = info->next;
}
info = &book_info;
while (info) {
cpumask_clear(&info->mask);
info = info->next;
}
info = &drawer_info;
while (info) {
cpumask_clear(&info->mask);
info = info->next;
}
}
static union topology_entry *next_tle(union topology_entry *tle)
{
if (!tle->nl)
return (union topology_entry *)((struct topology_core *)tle + 1);
return (union topology_entry *)((struct topology_container *)tle + 1);
}
static void tl_to_masks(struct sysinfo_15_1_x *info)
{
struct mask_info *socket = &socket_info;
struct mask_info *book = &book_info;
struct mask_info *drawer = &drawer_info;
union topology_entry *tle, *end;
clear_masks();
tle = info->tle;
end = (union topology_entry *)((unsigned long)info + info->length);
while (tle < end) {
switch (tle->nl) {
case 3:
drawer = drawer->next;
drawer->id = tle->container.id;
break;
case 2:
book = book->next;
book->id = tle->container.id;
break;
case 1:
socket = socket->next;
socket->id = tle->container.id;
break;
case 0:
add_cpus_to_mask(&tle->cpu, drawer, book, socket);
break;
default:
clear_masks();
return;
}
tle = next_tle(tle);
}
}
static void topology_update_polarization_simple(void)
{
int cpu;
for_each_possible_cpu(cpu)
smp_cpu_set_polarization(cpu, POLARIZATION_HRZ);
}
static int ptf(unsigned long fc)
{
int rc;
asm volatile(
" .insn rre,0xb9a20000,%1,%1\n"
" ipm %0\n"
" srl %0,28\n"
: "=d" (rc)
: "d" (fc) : "cc");
return rc;
}
int topology_set_cpu_management(int fc)
{
int cpu, rc;
if (!MACHINE_HAS_TOPOLOGY)
return -EOPNOTSUPP;
if (fc)
rc = ptf(PTF_VERTICAL);
else
rc = ptf(PTF_HORIZONTAL);
if (rc)
return -EBUSY;
for_each_possible_cpu(cpu)
smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
return rc;
}
void update_cpu_masks(void)
{
struct cpu_topology_s390 *topo, *topo_package, *topo_sibling;
int cpu, sibling, pkg_first, smt_first, id;
for_each_possible_cpu(cpu) {
topo = &cpu_topology[cpu];
cpu_thread_map(&topo->thread_mask, cpu);
cpu_group_map(&topo->core_mask, &socket_info, cpu);
cpu_group_map(&topo->book_mask, &book_info, cpu);
cpu_group_map(&topo->drawer_mask, &drawer_info, cpu);
topo->booted_cores = 0;
if (topology_mode != TOPOLOGY_MODE_HW) {
id = topology_mode == TOPOLOGY_MODE_PACKAGE ? 0 : cpu;
topo->thread_id = cpu;
topo->core_id = cpu;
topo->socket_id = id;
topo->book_id = id;
topo->drawer_id = id;
}
}
for_each_online_cpu(cpu) {
topo = &cpu_topology[cpu];
pkg_first = cpumask_first(&topo->core_mask);
topo_package = &cpu_topology[pkg_first];
if (cpu == pkg_first) {
for_each_cpu(sibling, &topo->core_mask) {
topo_sibling = &cpu_topology[sibling];
smt_first = cpumask_first(&topo_sibling->thread_mask);
if (sibling == smt_first)
topo_package->booted_cores++;
}
} else {
topo->booted_cores = topo_package->booted_cores;
}
}
}
void store_topology(struct sysinfo_15_1_x *info)
{
stsi(info, 15, 1, topology_mnest_limit());
}
static void __arch_update_dedicated_flag(void *arg)
{
if (topology_cpu_dedicated(smp_processor_id()))
set_cpu_flag(CIF_DEDICATED_CPU);
else
clear_cpu_flag(CIF_DEDICATED_CPU);
}
static int __arch_update_cpu_topology(void)
{
struct sysinfo_15_1_x *info = tl_info;
int rc = 0;
mutex_lock(&smp_cpu_state_mutex);
if (MACHINE_HAS_TOPOLOGY) {
rc = 1;
store_topology(info);
tl_to_masks(info);
}
update_cpu_masks();
if (!MACHINE_HAS_TOPOLOGY)
topology_update_polarization_simple();
mutex_unlock(&smp_cpu_state_mutex);
return rc;
}
int arch_update_cpu_topology(void)
{
struct device *dev;
int cpu, rc;
rc = __arch_update_cpu_topology();
on_each_cpu(__arch_update_dedicated_flag, NULL, 0);
for_each_online_cpu(cpu) {
dev = get_cpu_device(cpu);
if (dev)
kobject_uevent(&dev->kobj, KOBJ_CHANGE);
}
return rc;
}
static void topology_work_fn(struct work_struct *work)
{
rebuild_sched_domains();
}
void topology_schedule_update(void)
{
schedule_work(&topology_work);
}
static void topology_flush_work(void)
{
flush_work(&topology_work);
}
static void topology_timer_fn(struct timer_list *unused)
{
if (ptf(PTF_CHECK))
topology_schedule_update();
set_topology_timer();
}
static struct timer_list topology_timer;
static atomic_t topology_poll = ATOMIC_INIT(0);
static void set_topology_timer(void)
{
if (atomic_add_unless(&topology_poll, -1, 0))
mod_timer(&topology_timer, jiffies + msecs_to_jiffies(100));
else
mod_timer(&topology_timer, jiffies + msecs_to_jiffies(60 * MSEC_PER_SEC));
}
void topology_expect_change(void)
{
if (!MACHINE_HAS_TOPOLOGY)
return;
/* This is racy, but it doesn't matter since it is just a heuristic.
* Worst case is that we poll at a higher frequency for a bit longer.
*/
if (atomic_read(&topology_poll) > 60)
return;
atomic_add(60, &topology_poll);
set_topology_timer();
}
static int cpu_management;
static ssize_t dispatching_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
ssize_t count;
mutex_lock(&smp_cpu_state_mutex);
count = sprintf(buf, "%d\n", cpu_management);
mutex_unlock(&smp_cpu_state_mutex);
return count;
}
static ssize_t dispatching_store(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
int val, rc;
char delim;
if (sscanf(buf, "%d %c", &val, &delim) != 1)
return -EINVAL;
if (val != 0 && val != 1)
return -EINVAL;
rc = 0;
cpus_read_lock();
mutex_lock(&smp_cpu_state_mutex);
if (cpu_management == val)
goto out;
rc = topology_set_cpu_management(val);
if (rc)
goto out;
cpu_management = val;
topology_expect_change();
out:
mutex_unlock(&smp_cpu_state_mutex);
cpus_read_unlock();
return rc ? rc : count;
}
static DEVICE_ATTR_RW(dispatching);
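/*
 * Illustrative note (not part of the original file): the attribute above is
 * typically exposed as /sys/devices/system/cpu/dispatching, so requesting
 * vertical CPU polarization from user space looks roughly like
 *
 *	echo 1 > /sys/devices/system/cpu/dispatching
 *
 * and "echo 0" switches back to horizontal polarization.
 */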
static ssize_t cpu_polarization_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int cpu = dev->id;
ssize_t count;
mutex_lock(&smp_cpu_state_mutex);
switch (smp_cpu_get_polarization(cpu)) {
case POLARIZATION_HRZ:
count = sprintf(buf, "horizontal\n");
break;
case POLARIZATION_VL:
count = sprintf(buf, "vertical:low\n");
break;
case POLARIZATION_VM:
count = sprintf(buf, "vertical:medium\n");
break;
case POLARIZATION_VH:
count = sprintf(buf, "vertical:high\n");
break;
default:
count = sprintf(buf, "unknown\n");
break;
}
mutex_unlock(&smp_cpu_state_mutex);
return count;
}
static DEVICE_ATTR(polarization, 0444, cpu_polarization_show, NULL);
static struct attribute *topology_cpu_attrs[] = {
&dev_attr_polarization.attr,
NULL,
};
static struct attribute_group topology_cpu_attr_group = {
.attrs = topology_cpu_attrs,
};
static ssize_t cpu_dedicated_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int cpu = dev->id;
ssize_t count;
mutex_lock(&smp_cpu_state_mutex);
count = sprintf(buf, "%d\n", topology_cpu_dedicated(cpu));
mutex_unlock(&smp_cpu_state_mutex);
return count;
}
static DEVICE_ATTR(dedicated, 0444, cpu_dedicated_show, NULL);
static struct attribute *topology_extra_cpu_attrs[] = {
&dev_attr_dedicated.attr,
NULL,
};
static struct attribute_group topology_extra_cpu_attr_group = {
.attrs = topology_extra_cpu_attrs,
};
int topology_cpu_init(struct cpu *cpu)
{
int rc;
rc = sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
if (rc || !MACHINE_HAS_TOPOLOGY)
return rc;
rc = sysfs_create_group(&cpu->dev.kobj, &topology_extra_cpu_attr_group);
if (rc)
sysfs_remove_group(&cpu->dev.kobj, &topology_cpu_attr_group);
return rc;
}
static const struct cpumask *cpu_thread_mask(int cpu)
{
return &cpu_topology[cpu].thread_mask;
}
const struct cpumask *cpu_coregroup_mask(int cpu)
{
return &cpu_topology[cpu].core_mask;
}
static const struct cpumask *cpu_book_mask(int cpu)
{
return &cpu_topology[cpu].book_mask;
}
static const struct cpumask *cpu_drawer_mask(int cpu)
{
return &cpu_topology[cpu].drawer_mask;
}
static struct sched_domain_topology_level s390_topology[] = {
{ cpu_thread_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
{ cpu_book_mask, SD_INIT_NAME(BOOK) },
{ cpu_drawer_mask, SD_INIT_NAME(DRAWER) },
{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
{ NULL, },
};
static void __init alloc_masks(struct sysinfo_15_1_x *info,
struct mask_info *mask, int offset)
{
int i, nr_masks;
nr_masks = info->mag[TOPOLOGY_NR_MAG - offset];
for (i = 0; i < info->mnest - offset; i++)
nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
nr_masks = max(nr_masks, 1);
for (i = 0; i < nr_masks; i++) {
mask->next = memblock_alloc(sizeof(*mask->next), 8);
if (!mask->next)
panic("%s: Failed to allocate %zu bytes align=0x%x\n",
__func__, sizeof(*mask->next), 8);
mask = mask->next;
}
}
void __init topology_init_early(void)
{
struct sysinfo_15_1_x *info;
set_sched_topology(s390_topology);
if (topology_mode == TOPOLOGY_MODE_UNINITIALIZED) {
if (MACHINE_HAS_TOPOLOGY)
topology_mode = TOPOLOGY_MODE_HW;
else
topology_mode = TOPOLOGY_MODE_SINGLE;
}
if (!MACHINE_HAS_TOPOLOGY)
goto out;
tl_info = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
if (!tl_info)
panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
__func__, PAGE_SIZE, PAGE_SIZE);
info = tl_info;
store_topology(info);
pr_info("The CPU configuration topology of the machine is: %d %d %d %d %d %d / %d\n",
info->mag[0], info->mag[1], info->mag[2], info->mag[3],
info->mag[4], info->mag[5], info->mnest);
alloc_masks(info, &socket_info, 1);
alloc_masks(info, &book_info, 2);
alloc_masks(info, &drawer_info, 3);
out:
cpumask_set_cpu(0, &cpu_setup_mask);
__arch_update_cpu_topology();
__arch_update_dedicated_flag(NULL);
}
static inline int topology_get_mode(int enabled)
{
if (!enabled)
return TOPOLOGY_MODE_SINGLE;
return MACHINE_HAS_TOPOLOGY ? TOPOLOGY_MODE_HW : TOPOLOGY_MODE_PACKAGE;
}
static inline int topology_is_enabled(void)
{
return topology_mode != TOPOLOGY_MODE_SINGLE;
}
static int __init topology_setup(char *str)
{
bool enabled;
int rc;
rc = kstrtobool(str, &enabled);
if (rc)
return rc;
topology_mode = topology_get_mode(enabled);
return 0;
}
early_param("topology", topology_setup);
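/*
 * Illustrative note (not part of the original file): because the value is
 * parsed with kstrtobool(), booting with "topology=off" (or "topology=0") on
 * the kernel command line selects TOPOLOGY_MODE_SINGLE, while "topology=on"
 * keeps the hardware-provided topology when the machine offers one.
 */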
static int topology_ctl_handler(struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int enabled = topology_is_enabled();
int new_mode;
int rc;
struct ctl_table ctl_entry = {
.procname = ctl->procname,
.data = &enabled,
.maxlen = sizeof(int),
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
};
rc = proc_douintvec_minmax(&ctl_entry, write, buffer, lenp, ppos);
if (rc < 0 || !write)
return rc;
mutex_lock(&smp_cpu_state_mutex);
new_mode = topology_get_mode(enabled);
if (topology_mode != new_mode) {
topology_mode = new_mode;
topology_schedule_update();
}
mutex_unlock(&smp_cpu_state_mutex);
topology_flush_work();
return rc;
}
static struct ctl_table topology_ctl_table[] = {
{
.procname = "topology",
.mode = 0644,
.proc_handler = topology_ctl_handler,
},
{ },
};
static int __init topology_init(void)
{
struct device *dev_root;
int rc = 0;
timer_setup(&topology_timer, topology_timer_fn, TIMER_DEFERRABLE);
if (MACHINE_HAS_TOPOLOGY)
set_topology_timer();
else
topology_update_polarization_simple();
register_sysctl("s390", topology_ctl_table);
dev_root = bus_get_dev_root(&cpu_subsys);
if (dev_root) {
rc = device_create_file(dev_root, &dev_attr_dispatching);
put_device(dev_root);
}
return rc;
}
device_initcall(topology_init);
| linux-master | arch/s390/kernel/topology.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Kernel module help for s390.
*
* S390 version
* Copyright IBM Corp. 2002, 2003
* Author(s): Arnd Bergmann ([email protected])
* Martin Schwidefsky ([email protected])
*
* based on i386 version
* Copyright (C) 2001 Rusty Russell.
*/
#include <linux/module.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/ftrace.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/kasan.h>
#include <linux/moduleloader.h>
#include <linux/bug.h>
#include <linux/memory.h>
#include <asm/alternative.h>
#include <asm/nospec-branch.h>
#include <asm/facility.h>
#include <asm/ftrace.lds.h>
#include <asm/set_memory.h>
#include <asm/setup.h>
#if 0
#define DEBUGP printk
#else
#define DEBUGP(fmt , ...)
#endif
#define PLT_ENTRY_SIZE 22
static unsigned long get_module_load_offset(void)
{
static DEFINE_MUTEX(module_kaslr_mutex);
static unsigned long module_load_offset;
if (!kaslr_enabled())
return 0;
/*
* Calculate the module_load_offset the first time this code
* is called. Once calculated it stays the same until reboot.
*/
mutex_lock(&module_kaslr_mutex);
if (!module_load_offset)
module_load_offset = get_random_u32_inclusive(1, 1024) * PAGE_SIZE;
mutex_unlock(&module_kaslr_mutex);
return module_load_offset;
}
void *module_alloc(unsigned long size)
{
gfp_t gfp_mask = GFP_KERNEL;
void *p;
if (PAGE_ALIGN(size) > MODULES_LEN)
return NULL;
p = __vmalloc_node_range(size, MODULE_ALIGN,
MODULES_VADDR + get_module_load_offset(),
MODULES_END, gfp_mask, PAGE_KERNEL,
VM_FLUSH_RESET_PERMS | VM_DEFER_KMEMLEAK,
NUMA_NO_NODE, __builtin_return_address(0));
if (p && (kasan_alloc_module_shadow(p, size, gfp_mask) < 0)) {
vfree(p);
return NULL;
}
return p;
}
#ifdef CONFIG_FUNCTION_TRACER
void module_arch_cleanup(struct module *mod)
{
module_memfree(mod->arch.trampolines_start);
}
#endif
void module_arch_freeing_init(struct module *mod)
{
if (is_livepatch_module(mod) &&
mod->state == MODULE_STATE_LIVE)
return;
vfree(mod->arch.syminfo);
mod->arch.syminfo = NULL;
}
static void check_rela(Elf_Rela *rela, struct module *me)
{
struct mod_arch_syminfo *info;
info = me->arch.syminfo + ELF_R_SYM (rela->r_info);
switch (ELF_R_TYPE (rela->r_info)) {
case R_390_GOT12: /* 12 bit GOT offset. */
case R_390_GOT16: /* 16 bit GOT offset. */
case R_390_GOT20: /* 20 bit GOT offset. */
case R_390_GOT32: /* 32 bit GOT offset. */
case R_390_GOT64: /* 64 bit GOT offset. */
case R_390_GOTENT: /* 32 bit PC rel. to GOT entry shifted by 1. */
case R_390_GOTPLT12: /* 12 bit offset to jump slot. */
case R_390_GOTPLT16: /* 16 bit offset to jump slot. */
case R_390_GOTPLT20: /* 20 bit offset to jump slot. */
case R_390_GOTPLT32: /* 32 bit offset to jump slot. */
case R_390_GOTPLT64: /* 64 bit offset to jump slot. */
case R_390_GOTPLTENT: /* 32 bit rel. offset to jump slot >> 1. */
if (info->got_offset == -1UL) {
info->got_offset = me->arch.got_size;
me->arch.got_size += sizeof(void*);
}
break;
case R_390_PLT16DBL: /* 16 bit PC rel. PLT shifted by 1. */
case R_390_PLT32DBL: /* 32 bit PC rel. PLT shifted by 1. */
case R_390_PLT32: /* 32 bit PC relative PLT address. */
case R_390_PLT64: /* 64 bit PC relative PLT address. */
case R_390_PLTOFF16: /* 16 bit offset from GOT to PLT. */
case R_390_PLTOFF32: /* 32 bit offset from GOT to PLT. */
case R_390_PLTOFF64: /* 64 bit offset from GOT to PLT. */
if (info->plt_offset == -1UL) {
info->plt_offset = me->arch.plt_size;
me->arch.plt_size += PLT_ENTRY_SIZE;
}
break;
case R_390_COPY:
case R_390_GLOB_DAT:
case R_390_JMP_SLOT:
case R_390_RELATIVE:
/* Only needed if we want to support loading of
modules linked with -shared. */
break;
}
}
/*
* Account for GOT and PLT relocations. We can't add sections for
* got and plt but we can increase the core module size.
*/
int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
char *secstrings, struct module *me)
{
Elf_Shdr *symtab;
Elf_Sym *symbols;
Elf_Rela *rela;
char *strings;
int nrela, i, j;
struct module_memory *mod_mem;
/* Find symbol table and string table. */
symtab = NULL;
for (i = 0; i < hdr->e_shnum; i++)
switch (sechdrs[i].sh_type) {
case SHT_SYMTAB:
symtab = sechdrs + i;
break;
}
if (!symtab) {
printk(KERN_ERR "module %s: no symbol table\n", me->name);
return -ENOEXEC;
}
/* Allocate one syminfo structure per symbol. */
me->arch.nsyms = symtab->sh_size / sizeof(Elf_Sym);
me->arch.syminfo = vmalloc(array_size(sizeof(struct mod_arch_syminfo),
me->arch.nsyms));
if (!me->arch.syminfo)
return -ENOMEM;
symbols = (void *) hdr + symtab->sh_offset;
strings = (void *) hdr + sechdrs[symtab->sh_link].sh_offset;
for (i = 0; i < me->arch.nsyms; i++) {
if (symbols[i].st_shndx == SHN_UNDEF &&
strcmp(strings + symbols[i].st_name,
"_GLOBAL_OFFSET_TABLE_") == 0)
/* "Define" it as absolute. */
symbols[i].st_shndx = SHN_ABS;
me->arch.syminfo[i].got_offset = -1UL;
me->arch.syminfo[i].plt_offset = -1UL;
me->arch.syminfo[i].got_initialized = 0;
me->arch.syminfo[i].plt_initialized = 0;
}
/* Search for got/plt relocations. */
me->arch.got_size = me->arch.plt_size = 0;
for (i = 0; i < hdr->e_shnum; i++) {
if (sechdrs[i].sh_type != SHT_RELA)
continue;
nrela = sechdrs[i].sh_size / sizeof(Elf_Rela);
rela = (void *) hdr + sechdrs[i].sh_offset;
for (j = 0; j < nrela; j++)
check_rela(rela + j, me);
}
/* Increase core size by size of got & plt and set start
offsets for got and plt. */
mod_mem = &me->mem[MOD_TEXT];
mod_mem->size = ALIGN(mod_mem->size, 4);
me->arch.got_offset = mod_mem->size;
mod_mem->size += me->arch.got_size;
me->arch.plt_offset = mod_mem->size;
if (me->arch.plt_size) {
if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable)
me->arch.plt_size += PLT_ENTRY_SIZE;
mod_mem->size += me->arch.plt_size;
}
return 0;
}
static int apply_rela_bits(Elf_Addr loc, Elf_Addr val,
int sign, int bits, int shift,
void *(*write)(void *dest, const void *src, size_t len))
{
unsigned long umax;
long min, max;
void *dest = (void *)loc;
if (val & ((1UL << shift) - 1))
return -ENOEXEC;
if (sign) {
val = (Elf_Addr)(((long) val) >> shift);
min = -(1L << (bits - 1));
max = (1L << (bits - 1)) - 1;
if ((long) val < min || (long) val > max)
return -ENOEXEC;
} else {
val >>= shift;
umax = ((1UL << (bits - 1)) << 1) - 1;
if ((unsigned long) val > umax)
return -ENOEXEC;
}
if (bits == 8) {
unsigned char tmp = val;
write(dest, &tmp, 1);
} else if (bits == 12) {
unsigned short tmp = (val & 0xfff) |
(*(unsigned short *) loc & 0xf000);
write(dest, &tmp, 2);
} else if (bits == 16) {
unsigned short tmp = val;
write(dest, &tmp, 2);
} else if (bits == 20) {
unsigned int tmp = (val & 0xfff) << 16 |
(val & 0xff000) >> 4 | (*(unsigned int *) loc & 0xf00000ff);
write(dest, &tmp, 4);
} else if (bits == 32) {
unsigned int tmp = val;
write(dest, &tmp, 4);
} else if (bits == 64) {
unsigned long tmp = val;
write(dest, &tmp, 8);
}
return 0;
}
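/*
 * Illustrative note (not part of the original file): the 20-bit case above
 * targets the long-displacement instruction formats, where the displacement
 * is split into a 12-bit DL field and an 8-bit DH field; that is why the
 * value is scattered as (val & 0xfff) << 16 | (val & 0xff000) >> 4 before
 * being merged with the untouched instruction bits.
 */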
static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
const char *strtab, struct module *me,
void *(*write)(void *dest, const void *src, size_t len))
{
struct mod_arch_syminfo *info;
Elf_Addr loc, val;
int r_type, r_sym;
int rc = -ENOEXEC;
/* This is where to make the change */
loc = base + rela->r_offset;
/* This is the symbol it is referring to. Note that all
undefined symbols have been resolved. */
r_sym = ELF_R_SYM(rela->r_info);
r_type = ELF_R_TYPE(rela->r_info);
info = me->arch.syminfo + r_sym;
val = symtab[r_sym].st_value;
switch (r_type) {
case R_390_NONE: /* No relocation. */
rc = 0;
break;
case R_390_8: /* Direct 8 bit. */
case R_390_12: /* Direct 12 bit. */
case R_390_16: /* Direct 16 bit. */
case R_390_20: /* Direct 20 bit. */
case R_390_32: /* Direct 32 bit. */
case R_390_64: /* Direct 64 bit. */
val += rela->r_addend;
if (r_type == R_390_8)
rc = apply_rela_bits(loc, val, 0, 8, 0, write);
else if (r_type == R_390_12)
rc = apply_rela_bits(loc, val, 0, 12, 0, write);
else if (r_type == R_390_16)
rc = apply_rela_bits(loc, val, 0, 16, 0, write);
else if (r_type == R_390_20)
rc = apply_rela_bits(loc, val, 1, 20, 0, write);
else if (r_type == R_390_32)
rc = apply_rela_bits(loc, val, 0, 32, 0, write);
else if (r_type == R_390_64)
rc = apply_rela_bits(loc, val, 0, 64, 0, write);
break;
case R_390_PC16: /* PC relative 16 bit. */
case R_390_PC16DBL: /* PC relative 16 bit shifted by 1. */
case R_390_PC32DBL: /* PC relative 32 bit shifted by 1. */
case R_390_PC32: /* PC relative 32 bit. */
case R_390_PC64: /* PC relative 64 bit. */
val += rela->r_addend - loc;
if (r_type == R_390_PC16)
rc = apply_rela_bits(loc, val, 1, 16, 0, write);
else if (r_type == R_390_PC16DBL)
rc = apply_rela_bits(loc, val, 1, 16, 1, write);
else if (r_type == R_390_PC32DBL)
rc = apply_rela_bits(loc, val, 1, 32, 1, write);
else if (r_type == R_390_PC32)
rc = apply_rela_bits(loc, val, 1, 32, 0, write);
else if (r_type == R_390_PC64)
rc = apply_rela_bits(loc, val, 1, 64, 0, write);
break;
case R_390_GOT12: /* 12 bit GOT offset. */
case R_390_GOT16: /* 16 bit GOT offset. */
case R_390_GOT20: /* 20 bit GOT offset. */
case R_390_GOT32: /* 32 bit GOT offset. */
case R_390_GOT64: /* 64 bit GOT offset. */
case R_390_GOTENT: /* 32 bit PC rel. to GOT entry shifted by 1. */
case R_390_GOTPLT12: /* 12 bit offset to jump slot. */
case R_390_GOTPLT20: /* 20 bit offset to jump slot. */
case R_390_GOTPLT16: /* 16 bit offset to jump slot. */
case R_390_GOTPLT32: /* 32 bit offset to jump slot. */
case R_390_GOTPLT64: /* 64 bit offset to jump slot. */
case R_390_GOTPLTENT: /* 32 bit rel. offset to jump slot >> 1. */
if (info->got_initialized == 0) {
Elf_Addr *gotent = me->mem[MOD_TEXT].base +
me->arch.got_offset +
info->got_offset;
write(gotent, &val, sizeof(*gotent));
info->got_initialized = 1;
}
val = info->got_offset + rela->r_addend;
if (r_type == R_390_GOT12 ||
r_type == R_390_GOTPLT12)
rc = apply_rela_bits(loc, val, 0, 12, 0, write);
else if (r_type == R_390_GOT16 ||
r_type == R_390_GOTPLT16)
rc = apply_rela_bits(loc, val, 0, 16, 0, write);
else if (r_type == R_390_GOT20 ||
r_type == R_390_GOTPLT20)
rc = apply_rela_bits(loc, val, 1, 20, 0, write);
else if (r_type == R_390_GOT32 ||
r_type == R_390_GOTPLT32)
rc = apply_rela_bits(loc, val, 0, 32, 0, write);
else if (r_type == R_390_GOT64 ||
r_type == R_390_GOTPLT64)
rc = apply_rela_bits(loc, val, 0, 64, 0, write);
else if (r_type == R_390_GOTENT ||
r_type == R_390_GOTPLTENT) {
val += (Elf_Addr)me->mem[MOD_TEXT].base +
me->arch.got_offset - loc;
rc = apply_rela_bits(loc, val, 1, 32, 1, write);
}
break;
case R_390_PLT16DBL: /* 16 bit PC rel. PLT shifted by 1. */
case R_390_PLT32DBL: /* 32 bit PC rel. PLT shifted by 1. */
case R_390_PLT32: /* 32 bit PC relative PLT address. */
case R_390_PLT64: /* 64 bit PC relative PLT address. */
case R_390_PLTOFF16: /* 16 bit offset from GOT to PLT. */
case R_390_PLTOFF32: /* 32 bit offset from GOT to PLT. */
case R_390_PLTOFF64: /* 64 bit offset from GOT to PLT. */
if (info->plt_initialized == 0) {
unsigned char insn[PLT_ENTRY_SIZE];
char *plt_base;
char *ip;
plt_base = me->mem[MOD_TEXT].base + me->arch.plt_offset;
ip = plt_base + info->plt_offset;
*(int *)insn = 0x0d10e310; /* basr 1,0 */
*(int *)&insn[4] = 0x100c0004; /* lg 1,12(1) */
if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable) {
char *jump_r1;
jump_r1 = plt_base + me->arch.plt_size -
PLT_ENTRY_SIZE;
/* brcl 0xf,__jump_r1 */
*(short *)&insn[8] = 0xc0f4;
*(int *)&insn[10] = (jump_r1 - (ip + 8)) / 2;
} else {
*(int *)&insn[8] = 0x07f10000; /* br %r1 */
}
*(long *)&insn[14] = val;
write(ip, insn, sizeof(insn));
info->plt_initialized = 1;
}
if (r_type == R_390_PLTOFF16 ||
r_type == R_390_PLTOFF32 ||
r_type == R_390_PLTOFF64)
val = me->arch.plt_offset - me->arch.got_offset +
info->plt_offset + rela->r_addend;
else {
if (!((r_type == R_390_PLT16DBL &&
val - loc + 0xffffUL < 0x1ffffeUL) ||
(r_type == R_390_PLT32DBL &&
val - loc + 0xffffffffULL < 0x1fffffffeULL)))
val = (Elf_Addr) me->mem[MOD_TEXT].base +
me->arch.plt_offset +
info->plt_offset;
val += rela->r_addend - loc;
}
if (r_type == R_390_PLT16DBL)
rc = apply_rela_bits(loc, val, 1, 16, 1, write);
else if (r_type == R_390_PLTOFF16)
rc = apply_rela_bits(loc, val, 0, 16, 0, write);
else if (r_type == R_390_PLT32DBL)
rc = apply_rela_bits(loc, val, 1, 32, 1, write);
else if (r_type == R_390_PLT32 ||
r_type == R_390_PLTOFF32)
rc = apply_rela_bits(loc, val, 0, 32, 0, write);
else if (r_type == R_390_PLT64 ||
r_type == R_390_PLTOFF64)
rc = apply_rela_bits(loc, val, 0, 64, 0, write);
break;
case R_390_GOTOFF16: /* 16 bit offset to GOT. */
case R_390_GOTOFF32: /* 32 bit offset to GOT. */
case R_390_GOTOFF64: /* 64 bit offset to GOT. */
val = val + rela->r_addend -
((Elf_Addr) me->mem[MOD_TEXT].base + me->arch.got_offset);
if (r_type == R_390_GOTOFF16)
rc = apply_rela_bits(loc, val, 0, 16, 0, write);
else if (r_type == R_390_GOTOFF32)
rc = apply_rela_bits(loc, val, 0, 32, 0, write);
else if (r_type == R_390_GOTOFF64)
rc = apply_rela_bits(loc, val, 0, 64, 0, write);
break;
case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
val = (Elf_Addr) me->mem[MOD_TEXT].base + me->arch.got_offset +
rela->r_addend - loc;
if (r_type == R_390_GOTPC)
rc = apply_rela_bits(loc, val, 1, 32, 0, write);
else if (r_type == R_390_GOTPCDBL)
rc = apply_rela_bits(loc, val, 1, 32, 1, write);
break;
case R_390_COPY:
case R_390_GLOB_DAT: /* Create GOT entry. */
case R_390_JMP_SLOT: /* Create PLT entry. */
case R_390_RELATIVE: /* Adjust by program base. */
/* Only needed if we want to support loading of
modules linked with -shared. */
return -ENOEXEC;
default:
printk(KERN_ERR "module %s: unknown relocation: %u\n",
me->name, r_type);
return -ENOEXEC;
}
if (rc) {
printk(KERN_ERR "module %s: relocation error for symbol %s "
"(r_type %i, value 0x%lx)\n",
me->name, strtab + symtab[r_sym].st_name,
r_type, (unsigned long) val);
return rc;
}
return 0;
}
static int __apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
unsigned int symindex, unsigned int relsec,
struct module *me,
void *(*write)(void *dest, const void *src, size_t len))
{
Elf_Addr base;
Elf_Sym *symtab;
Elf_Rela *rela;
unsigned long i, n;
int rc;
DEBUGP("Applying relocate section %u to %u\n",
relsec, sechdrs[relsec].sh_info);
base = sechdrs[sechdrs[relsec].sh_info].sh_addr;
symtab = (Elf_Sym *) sechdrs[symindex].sh_addr;
rela = (Elf_Rela *) sechdrs[relsec].sh_addr;
n = sechdrs[relsec].sh_size / sizeof(Elf_Rela);
for (i = 0; i < n; i++, rela++) {
rc = apply_rela(rela, base, symtab, strtab, me, write);
if (rc)
return rc;
}
return 0;
}
int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
unsigned int symindex, unsigned int relsec,
struct module *me)
{
bool early = me->state == MODULE_STATE_UNFORMED;
void *(*write)(void *, const void *, size_t) = memcpy;
if (!early)
write = s390_kernel_write;
return __apply_relocate_add(sechdrs, strtab, symindex, relsec, me,
write);
}
#ifdef CONFIG_FUNCTION_TRACER
static int module_alloc_ftrace_hotpatch_trampolines(struct module *me,
const Elf_Shdr *s)
{
char *start, *end;
int numpages;
size_t size;
size = FTRACE_HOTPATCH_TRAMPOLINES_SIZE(s->sh_size);
numpages = DIV_ROUND_UP(size, PAGE_SIZE);
start = module_alloc(numpages * PAGE_SIZE);
if (!start)
return -ENOMEM;
set_memory_rox((unsigned long)start, numpages);
end = start + size;
me->arch.trampolines_start = (struct ftrace_hotpatch_trampoline *)start;
me->arch.trampolines_end = (struct ftrace_hotpatch_trampoline *)end;
me->arch.next_trampoline = me->arch.trampolines_start;
return 0;
}
#endif /* CONFIG_FUNCTION_TRACER */
int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *me)
{
const Elf_Shdr *s;
char *secstrings, *secname;
void *aseg;
#ifdef CONFIG_FUNCTION_TRACER
int ret;
#endif
if (IS_ENABLED(CONFIG_EXPOLINE) &&
!nospec_disable && me->arch.plt_size) {
unsigned int *ij;
ij = me->mem[MOD_TEXT].base + me->arch.plt_offset +
me->arch.plt_size - PLT_ENTRY_SIZE;
ij[0] = 0xc6000000; /* exrl %r0,.+10 */
ij[1] = 0x0005a7f4; /* j . */
ij[2] = 0x000007f1; /* br %r1 */
}
secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
aseg = (void *) s->sh_addr;
secname = secstrings + s->sh_name;
if (!strcmp(".altinstructions", secname))
/* patch .altinstructions */
apply_alternatives(aseg, aseg + s->sh_size);
if (IS_ENABLED(CONFIG_EXPOLINE) &&
(str_has_prefix(secname, ".s390_indirect")))
nospec_revert(aseg, aseg + s->sh_size);
if (IS_ENABLED(CONFIG_EXPOLINE) &&
(str_has_prefix(secname, ".s390_return")))
nospec_revert(aseg, aseg + s->sh_size);
#ifdef CONFIG_FUNCTION_TRACER
if (!strcmp(FTRACE_CALLSITE_SECTION, secname)) {
ret = module_alloc_ftrace_hotpatch_trampolines(me, s);
if (ret < 0)
return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */
}
return 0;
}
| linux-master | arch/s390/kernel/module.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ELF loader for kexec_file_load system call.
*
* Copyright IBM Corp. 2018
*
* Author(s): Philipp Rudo <[email protected]>
*/
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <asm/ipl.h>
#include <asm/setup.h>
static int kexec_file_add_kernel_elf(struct kimage *image,
struct s390_load_data *data)
{
struct kexec_buf buf;
const Elf_Ehdr *ehdr;
const Elf_Phdr *phdr;
Elf_Addr entry;
void *kernel;
int i, ret;
kernel = image->kernel_buf;
ehdr = (Elf_Ehdr *)kernel;
buf.image = image;
if (image->type == KEXEC_TYPE_CRASH)
entry = STARTUP_KDUMP_OFFSET;
else
entry = ehdr->e_entry;
phdr = (void *)ehdr + ehdr->e_phoff;
for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
if (phdr->p_type != PT_LOAD)
continue;
buf.buffer = kernel + phdr->p_offset;
buf.bufsz = phdr->p_filesz;
buf.mem = ALIGN(phdr->p_paddr, phdr->p_align);
if (image->type == KEXEC_TYPE_CRASH)
buf.mem += crashk_res.start;
buf.memsz = phdr->p_memsz;
data->memsz = ALIGN(data->memsz, phdr->p_align) + buf.memsz;
if (entry - phdr->p_paddr < phdr->p_memsz) {
data->kernel_buf = buf.buffer;
data->kernel_mem = buf.mem;
data->parm = buf.buffer + PARMAREA;
}
ipl_report_add_component(data->report, &buf,
IPL_RB_COMPONENT_FLAG_SIGNED |
IPL_RB_COMPONENT_FLAG_VERIFIED,
IPL_RB_CERT_UNKNOWN);
ret = kexec_add_buffer(&buf);
if (ret)
return ret;
}
return data->memsz ? 0 : -EINVAL;
}
static void *s390_elf_load(struct kimage *image,
char *kernel, unsigned long kernel_len,
char *initrd, unsigned long initrd_len,
char *cmdline, unsigned long cmdline_len)
{
const Elf_Ehdr *ehdr;
const Elf_Phdr *phdr;
size_t size;
int i;
/* image->fops->probe already checked for valid ELF magic number. */
ehdr = (Elf_Ehdr *)kernel;
if (ehdr->e_type != ET_EXEC ||
ehdr->e_ident[EI_CLASS] != ELFCLASS64 ||
!elf_check_arch(ehdr))
return ERR_PTR(-EINVAL);
if (!ehdr->e_phnum || ehdr->e_phentsize != sizeof(Elf_Phdr))
return ERR_PTR(-EINVAL);
size = ehdr->e_ehsize + ehdr->e_phoff;
size += ehdr->e_phentsize * ehdr->e_phnum;
if (size > kernel_len)
return ERR_PTR(-EINVAL);
phdr = (void *)ehdr + ehdr->e_phoff;
size = ALIGN(size, phdr->p_align);
for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
if (phdr->p_type == PT_INTERP)
return ERR_PTR(-EINVAL);
if (phdr->p_offset > kernel_len)
return ERR_PTR(-EINVAL);
size += ALIGN(phdr->p_filesz, phdr->p_align);
}
if (size > kernel_len)
return ERR_PTR(-EINVAL);
return kexec_file_add_components(image, kexec_file_add_kernel_elf);
}
static int s390_elf_probe(const char *buf, unsigned long len)
{
const Elf_Ehdr *ehdr;
if (len < sizeof(Elf_Ehdr))
return -ENOEXEC;
ehdr = (Elf_Ehdr *)buf;
/* Only check the ELF magic number here and do the proper validity checks
 * in the loader. Any additional check that fails here would send the
 * erroneous ELF file to the image loader, which does not care what it
 * gets and would (most likely) cause behavior not intended by the user.
 */
if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0)
return -ENOEXEC;
return 0;
}
const struct kexec_file_ops s390_kexec_elf_ops = {
.probe = s390_elf_probe,
.load = s390_elf_load,
#ifdef CONFIG_KEXEC_SIG
.verify_sig = s390_verify_sig,
#endif /* CONFIG_KEXEC_SIG */
};
| linux-master | arch/s390/kernel/kexec_elf.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2000, 2006
* Author(s): Denis Joseph Barrow ([email protected],[email protected])
* Gerhard Tonn ([email protected])
*
* Copyright (C) 1991, 1992 Linus Torvalds
*
* 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
*/
#include <linux/compat.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/tty.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <asm/ucontext.h>
#include <linux/uaccess.h>
#include <asm/lowcore.h>
#include <asm/switch_to.h>
#include <asm/vdso.h>
#include "compat_linux.h"
#include "compat_ptrace.h"
#include "entry.h"
typedef struct
{
__u8 callee_used_stack[__SIGNAL_FRAMESIZE32];
struct sigcontext32 sc;
_sigregs32 sregs;
int signo;
_sigregs_ext32 sregs_ext;
__u16 svc_insn; /* Offset of svc_insn is NOT fixed! */
} sigframe32;
typedef struct
{
__u8 callee_used_stack[__SIGNAL_FRAMESIZE32];
__u16 svc_insn;
compat_siginfo_t info;
struct ucontext32 uc;
} rt_sigframe32;
/* Store registers needed to create the signal frame */
static void store_sigregs(void)
{
save_access_regs(current->thread.acrs);
save_fpu_regs();
}
/* Load registers after signal return */
static void load_sigregs(void)
{
restore_access_regs(current->thread.acrs);
}
static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs)
{
_sigregs32 user_sregs;
int i;
user_sregs.regs.psw.mask = (__u32)(regs->psw.mask >> 32);
user_sregs.regs.psw.mask &= PSW32_MASK_USER | PSW32_MASK_RI;
user_sregs.regs.psw.mask |= PSW32_USER_BITS;
user_sregs.regs.psw.addr = (__u32) regs->psw.addr |
(__u32)(regs->psw.mask & PSW_MASK_BA);
for (i = 0; i < NUM_GPRS; i++)
user_sregs.regs.gprs[i] = (__u32) regs->gprs[i];
memcpy(&user_sregs.regs.acrs, current->thread.acrs,
sizeof(user_sregs.regs.acrs));
fpregs_store((_s390_fp_regs *) &user_sregs.fpregs, ¤t->thread.fpu);
if (__copy_to_user(sregs, &user_sregs, sizeof(_sigregs32)))
return -EFAULT;
return 0;
}
static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs)
{
_sigregs32 user_sregs;
int i;
/* Always make any pending restarted system call return -EINTR */
current->restart_block.fn = do_no_restart_syscall;
if (__copy_from_user(&user_sregs, &sregs->regs, sizeof(user_sregs)))
return -EFAULT;
if (!is_ri_task(current) && (user_sregs.regs.psw.mask & PSW32_MASK_RI))
return -EINVAL;
/* Test the floating-point-control word. */
if (test_fp_ctl(user_sregs.fpregs.fpc))
return -EINVAL;
/* Use regs->psw.mask instead of PSW_USER_BITS to preserve PER bit. */
regs->psw.mask = (regs->psw.mask & ~(PSW_MASK_USER | PSW_MASK_RI)) |
(__u64)(user_sregs.regs.psw.mask & PSW32_MASK_USER) << 32 |
(__u64)(user_sregs.regs.psw.mask & PSW32_MASK_RI) << 32 |
(__u64)(user_sregs.regs.psw.addr & PSW32_ADDR_AMODE);
/* Check for invalid user address space control. */
if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_HOME)
regs->psw.mask = PSW_ASC_PRIMARY |
(regs->psw.mask & ~PSW_MASK_ASC);
regs->psw.addr = (__u64)(user_sregs.regs.psw.addr & PSW32_ADDR_INSN);
for (i = 0; i < NUM_GPRS; i++)
regs->gprs[i] = (__u64) user_sregs.regs.gprs[i];
memcpy(¤t->thread.acrs, &user_sregs.regs.acrs,
sizeof(current->thread.acrs));
fpregs_load((_s390_fp_regs *) &user_sregs.fpregs, ¤t->thread.fpu);
clear_pt_regs_flag(regs, PIF_SYSCALL); /* No longer in a system call */
return 0;
}
static int save_sigregs_ext32(struct pt_regs *regs,
_sigregs_ext32 __user *sregs_ext)
{
__u32 gprs_high[NUM_GPRS];
__u64 vxrs[__NUM_VXRS_LOW];
int i;
/* Save high gprs to signal stack */
for (i = 0; i < NUM_GPRS; i++)
gprs_high[i] = regs->gprs[i] >> 32;
if (__copy_to_user(&sregs_ext->gprs_high, &gprs_high,
sizeof(sregs_ext->gprs_high)))
return -EFAULT;
/* Save vector registers to signal stack */
if (MACHINE_HAS_VX) {
for (i = 0; i < __NUM_VXRS_LOW; i++)
vxrs[i] = current->thread.fpu.vxrs[i].low;
if (__copy_to_user(&sregs_ext->vxrs_low, vxrs,
sizeof(sregs_ext->vxrs_low)) ||
__copy_to_user(&sregs_ext->vxrs_high,
current->thread.fpu.vxrs + __NUM_VXRS_LOW,
sizeof(sregs_ext->vxrs_high)))
return -EFAULT;
}
return 0;
}
static int restore_sigregs_ext32(struct pt_regs *regs,
_sigregs_ext32 __user *sregs_ext)
{
__u32 gprs_high[NUM_GPRS];
__u64 vxrs[__NUM_VXRS_LOW];
int i;
/* Restore high gprs from signal stack */
if (__copy_from_user(&gprs_high, &sregs_ext->gprs_high,
sizeof(sregs_ext->gprs_high)))
return -EFAULT;
for (i = 0; i < NUM_GPRS; i++)
*(__u32 *)®s->gprs[i] = gprs_high[i];
/* Restore vector registers from signal stack */
if (MACHINE_HAS_VX) {
if (__copy_from_user(vxrs, &sregs_ext->vxrs_low,
sizeof(sregs_ext->vxrs_low)) ||
__copy_from_user(current->thread.fpu.vxrs + __NUM_VXRS_LOW,
&sregs_ext->vxrs_high,
sizeof(sregs_ext->vxrs_high)))
return -EFAULT;
for (i = 0; i < __NUM_VXRS_LOW; i++)
current->thread.fpu.vxrs[i].low = vxrs[i];
}
return 0;
}
COMPAT_SYSCALL_DEFINE0(sigreturn)
{
struct pt_regs *regs = task_pt_regs(current);
sigframe32 __user *frame = (sigframe32 __user *)regs->gprs[15];
sigset_t set;
if (get_compat_sigset(&set, (compat_sigset_t __user *)frame->sc.oldmask))
goto badframe;
set_current_blocked(&set);
save_fpu_regs();
if (restore_sigregs32(regs, &frame->sregs))
goto badframe;
if (restore_sigregs_ext32(regs, &frame->sregs_ext))
goto badframe;
load_sigregs();
return regs->gprs[2];
badframe:
force_sig(SIGSEGV);
return 0;
}
COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
{
struct pt_regs *regs = task_pt_regs(current);
rt_sigframe32 __user *frame = (rt_sigframe32 __user *)regs->gprs[15];
sigset_t set;
if (get_compat_sigset(&set, &frame->uc.uc_sigmask))
goto badframe;
set_current_blocked(&set);
if (compat_restore_altstack(&frame->uc.uc_stack))
goto badframe;
save_fpu_regs();
if (restore_sigregs32(regs, &frame->uc.uc_mcontext))
goto badframe;
if (restore_sigregs_ext32(regs, &frame->uc.uc_mcontext_ext))
goto badframe;
load_sigregs();
return regs->gprs[2];
badframe:
force_sig(SIGSEGV);
return 0;
}
/*
* Set up a signal frame.
*/
/*
* Determine which stack to use..
*/
static inline void __user *
get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
{
unsigned long sp;
/* Default to using normal stack */
sp = (unsigned long) A(regs->gprs[15]);
/* Overflow on alternate signal stack gives SIGSEGV. */
if (on_sig_stack(sp) && !on_sig_stack((sp - frame_size) & -8UL))
return (void __user *) -1UL;
/* This is the X/Open sanctioned signal stack switching. */
if (ka->sa.sa_flags & SA_ONSTACK) {
if (! sas_ss_flags(sp))
sp = current->sas_ss_sp + current->sas_ss_size;
}
return (void __user *)((sp - frame_size) & -8ul);
}
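/*
 * Worked example (hypothetical numbers, for illustration only): with a user
 * stack pointer of 0x7ffffe20 and a frame_size of 0x2ac, the frame would be
 * placed at (0x7ffffe20 - 0x2ac) & -8UL = 0x7ffffb70, i.e. below the current
 * stack pointer and rounded down to an 8-byte boundary.
 */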
static int setup_frame32(struct ksignal *ksig, sigset_t *set,
struct pt_regs *regs)
{
int sig = ksig->sig;
sigframe32 __user *frame;
unsigned long restorer;
size_t frame_size;
/*
* gprs_high are always present for 31-bit compat tasks.
* The space for vector registers is only allocated if
* the machine supports it
*/
frame_size = sizeof(*frame) - sizeof(frame->sregs_ext.__reserved);
if (!MACHINE_HAS_VX)
frame_size -= sizeof(frame->sregs_ext.vxrs_low) +
sizeof(frame->sregs_ext.vxrs_high);
frame = get_sigframe(&ksig->ka, regs, frame_size);
if (frame == (void __user *) -1UL)
return -EFAULT;
/* Set up backchain. */
if (__put_user(regs->gprs[15], (unsigned int __user *) frame))
return -EFAULT;
/* Create struct sigcontext32 on the signal stack */
if (put_compat_sigset((compat_sigset_t __user *)frame->sc.oldmask,
set, sizeof(compat_sigset_t)))
return -EFAULT;
if (__put_user(ptr_to_compat(&frame->sregs), &frame->sc.sregs))
return -EFAULT;
/* Store registers needed to create the signal frame */
store_sigregs();
/* Create _sigregs32 on the signal stack */
if (save_sigregs32(regs, &frame->sregs))
return -EFAULT;
/* Place signal number on stack to allow backtrace from handler. */
if (__put_user(regs->gprs[2], (int __force __user *) &frame->signo))
return -EFAULT;
/* Create _sigregs_ext32 on the signal stack */
if (save_sigregs_ext32(regs, &frame->sregs_ext))
return -EFAULT;
/* Set up to return from userspace. If provided, use a stub
already in userspace. */
if (ksig->ka.sa.sa_flags & SA_RESTORER) {
restorer = (unsigned long __force)
ksig->ka.sa.sa_restorer | PSW32_ADDR_AMODE;
} else {
restorer = VDSO32_SYMBOL(current, sigreturn);
}
/* Set up registers for signal handler */
regs->gprs[14] = restorer;
regs->gprs[15] = (__force __u64) frame;
/* Force 31 bit amode and default user address space control. */
regs->psw.mask = PSW_MASK_BA |
(PSW_USER_BITS & PSW_MASK_ASC) |
(regs->psw.mask & ~PSW_MASK_ASC);
regs->psw.addr = (__force __u64) ksig->ka.sa.sa_handler;
regs->gprs[2] = sig;
regs->gprs[3] = (__force __u64) &frame->sc;
/* We forgot to include these in the sigcontext.
To avoid breaking binary compatibility, they are passed as args. */
if (sig == SIGSEGV || sig == SIGBUS || sig == SIGILL ||
sig == SIGTRAP || sig == SIGFPE) {
/* set extra registers only for synchronous signals */
regs->gprs[4] = regs->int_code & 127;
regs->gprs[5] = regs->int_parm_long;
regs->gprs[6] = current->thread.last_break;
}
return 0;
}
static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set,
struct pt_regs *regs)
{
rt_sigframe32 __user *frame;
unsigned long restorer;
size_t frame_size;
u32 uc_flags;
frame_size = sizeof(*frame) -
sizeof(frame->uc.uc_mcontext_ext.__reserved);
/*
* gprs_high are always present for 31-bit compat tasks.
* The space for vector registers is only allocated if
* the machine supports it
*/
uc_flags = UC_GPRS_HIGH;
if (MACHINE_HAS_VX) {
uc_flags |= UC_VXRS;
} else
frame_size -= sizeof(frame->uc.uc_mcontext_ext.vxrs_low) +
sizeof(frame->uc.uc_mcontext_ext.vxrs_high);
frame = get_sigframe(&ksig->ka, regs, frame_size);
if (frame == (void __user *) -1UL)
return -EFAULT;
/* Set up backchain. */
if (__put_user(regs->gprs[15], (unsigned int __force __user *) frame))
return -EFAULT;
/* Set up to return from userspace. If provided, use a stub
already in userspace. */
if (ksig->ka.sa.sa_flags & SA_RESTORER) {
restorer = (unsigned long __force)
ksig->ka.sa.sa_restorer | PSW32_ADDR_AMODE;
} else {
restorer = VDSO32_SYMBOL(current, rt_sigreturn);
}
/* Create siginfo on the signal stack */
if (copy_siginfo_to_user32(&frame->info, &ksig->info))
return -EFAULT;
/* Store registers needed to create the signal frame */
store_sigregs();
/* Create ucontext on the signal stack. */
if (__put_user(uc_flags, &frame->uc.uc_flags) ||
__put_user(0, &frame->uc.uc_link) ||
__compat_save_altstack(&frame->uc.uc_stack, regs->gprs[15]) ||
save_sigregs32(regs, &frame->uc.uc_mcontext) ||
put_compat_sigset(&frame->uc.uc_sigmask, set, sizeof(compat_sigset_t)) ||
save_sigregs_ext32(regs, &frame->uc.uc_mcontext_ext))
return -EFAULT;
/* Set up registers for signal handler */
regs->gprs[14] = restorer;
regs->gprs[15] = (__force __u64) frame;
/* Force 31 bit amode and default user address space control. */
regs->psw.mask = PSW_MASK_BA |
(PSW_USER_BITS & PSW_MASK_ASC) |
(regs->psw.mask & ~PSW_MASK_ASC);
regs->psw.addr = (__u64 __force) ksig->ka.sa.sa_handler;
regs->gprs[2] = ksig->sig;
regs->gprs[3] = (__force __u64) &frame->info;
regs->gprs[4] = (__force __u64) &frame->uc;
regs->gprs[5] = current->thread.last_break;
return 0;
}
/*
* OK, we're invoking a handler
*/
void handle_signal32(struct ksignal *ksig, sigset_t *oldset,
struct pt_regs *regs)
{
int ret;
/* Set up the stack frame */
if (ksig->ka.sa.sa_flags & SA_SIGINFO)
ret = setup_rt_frame32(ksig, oldset, regs);
else
ret = setup_frame32(ksig, oldset, regs);
signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLE_STEP));
}
| linux-master | arch/s390/kernel/compat_signal.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Performance event support for the System z CPU-measurement Sampling Facility
*
* Copyright IBM Corp. 2013, 2018
* Author(s): Hendrik Brueckner <[email protected]>
*/
#define KMSG_COMPONENT "cpum_sf"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/pid.h>
#include <linux/notifier.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/moduleparam.h>
#include <asm/cpu_mf.h>
#include <asm/irq.h>
#include <asm/debug.h>
#include <asm/timex.h>
#include <linux/io.h>
/* Minimum number of sample-data-block-tables:
* At least one table is required for the sampling buffer structure.
* A single table contains up to 511 pointers to sample-data-blocks.
*/
#define CPUM_SF_MIN_SDBT 1
/* Number of sample-data-blocks per sample-data-block-table (SDBT):
* A table contains SDB pointers (8 bytes) and one table-link entry
* that points to the origin of the next SDBT.
*/
#define CPUM_SF_SDB_PER_TABLE ((PAGE_SIZE - 8) / 8)
/* Maximum page offset for an SDBT table-link entry:
* If this page offset is reached, a table-link entry to the next SDBT
* must be added.
*/
#define CPUM_SF_SDBT_TL_OFFSET (CPUM_SF_SDB_PER_TABLE * 8)
static inline int require_table_link(const void *sdbt)
{
return ((unsigned long)sdbt & ~PAGE_MASK) == CPUM_SF_SDBT_TL_OFFSET;
}
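/*
 * For illustration (assuming the usual 4KB PAGE_SIZE): CPUM_SF_SDB_PER_TABLE
 * is (4096 - 8) / 8 = 511 SDB pointers per SDBT, and CPUM_SF_SDBT_TL_OFFSET
 * is 511 * 8 = 4088, so require_table_link() is true exactly when a slot is
 * the last 8-byte entry of an SDBT page, which is reserved for the
 * table-link entry.
 */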
/* Minimum and maximum sampling buffer sizes:
*
* This number represents the maximum size of the sampling buffer taking
* the number of sample-data-block-tables into account. Note that these
* numbers apply to the basic-sampling function only.
* The maximum number of SDBs is increased by CPUM_SF_SDB_DIAG_FACTOR if
* the diagnostic-sampling function is active.
*
* Sampling buffer size Buffer characteristics
* ---------------------------------------------------
* 64KB == 16 pages (4KB per page)
* 1 page for SDB-tables
* 15 pages for SDBs
*
* 32MB == 8192 pages (4KB per page)
* 16 pages for SDB-tables
* 8176 pages for SDBs
*/
static unsigned long __read_mostly CPUM_SF_MIN_SDB = 15;
static unsigned long __read_mostly CPUM_SF_MAX_SDB = 8176;
static unsigned long __read_mostly CPUM_SF_SDB_DIAG_FACTOR = 1;
struct sf_buffer {
unsigned long *sdbt; /* Sample-data-block-table origin */
/* buffer characteristics (required for buffer increments) */
unsigned long num_sdb; /* Number of sample-data-blocks */
unsigned long num_sdbt; /* Number of sample-data-block-tables */
unsigned long *tail; /* last sample-data-block-table */
};
struct aux_buffer {
struct sf_buffer sfb;
unsigned long head; /* index of SDB of buffer head */
unsigned long alert_mark; /* index of SDB of alert request position */
unsigned long empty_mark; /* mark of SDB not marked full */
unsigned long *sdb_index; /* SDB address for fast lookup */
unsigned long *sdbt_index; /* SDBT address for fast lookup */
};
struct cpu_hw_sf {
/* CPU-measurement sampling information block */
struct hws_qsi_info_block qsi;
/* CPU-measurement sampling control block */
struct hws_lsctl_request_block lsctl;
struct sf_buffer sfb; /* Sampling buffer */
unsigned int flags; /* Status flags */
struct perf_event *event; /* Scheduled perf event */
struct perf_output_handle handle; /* AUX buffer output handle */
};
static DEFINE_PER_CPU(struct cpu_hw_sf, cpu_hw_sf);
/* Debug feature */
static debug_info_t *sfdbg;
/* Sampling control helper functions */
static inline unsigned long freq_to_sample_rate(struct hws_qsi_info_block *qsi,
unsigned long freq)
{
return (USEC_PER_SEC / freq) * qsi->cpu_speed;
}
static inline unsigned long sample_rate_to_freq(struct hws_qsi_info_block *qsi,
unsigned long rate)
{
return USEC_PER_SEC * qsi->cpu_speed / rate;
}
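/*
 * Worked example (hypothetical values, for illustration only): with
 * qsi->cpu_speed == 1000 and a requested frequency of 4000 Hz,
 * freq_to_sample_rate() yields (1000000 / 4000) * 1000 = 250000, and
 * sample_rate_to_freq() maps that interval back to
 * 1000000 * 1000 / 250000 = 4000 Hz.
 */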
/* Return TOD timestamp contained in a trailer entry */
static inline unsigned long long trailer_timestamp(struct hws_trailer_entry *te)
{
/* TOD in STCKE format */
if (te->header.t)
return *((unsigned long long *)&te->timestamp[1]);
/* TOD in STCK format */
return *((unsigned long long *)&te->timestamp[0]);
}
/* Return pointer to trailer entry of a sample data block */
static inline struct hws_trailer_entry *trailer_entry_ptr(unsigned long v)
{
void *ret;
ret = (void *)v;
ret += PAGE_SIZE;
ret -= sizeof(struct hws_trailer_entry);
return ret;
}
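/*
 * For illustration: each SDB occupies one page, so for a hypothetical SDB at
 * address 0x1000 the trailer entry starts at
 * 0x1000 + PAGE_SIZE - sizeof(struct hws_trailer_entry), i.e. in the last
 * bytes of that page.
 */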
/*
* Return true if the entry in the sample data block table (sdbt)
* is a link to the next sdbt
*/
static inline int is_link_entry(unsigned long *s)
{
return *s & 0x1UL ? 1 : 0;
}
/* Return pointer to the linked sdbt */
static inline unsigned long *get_next_sdbt(unsigned long *s)
{
return phys_to_virt(*s & ~0x1UL);
}
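/*
 * For illustration: an SDBT slot holds either the physical address of an SDB
 * (bit 0 clear) or a table-link entry (bit 0 set). A hypothetical slot value
 * of 0x12345001 would therefore be a link, and get_next_sdbt() would return
 * phys_to_virt(0x12345000).
 */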
/*
* sf_disable() - Switch off sampling facility
*/
static int sf_disable(void)
{
struct hws_lsctl_request_block sreq;
memset(&sreq, 0, sizeof(sreq));
return lsctl(&sreq);
}
/*
* sf_buffer_available() - Check for an allocated sampling buffer
*/
static int sf_buffer_available(struct cpu_hw_sf *cpuhw)
{
return !!cpuhw->sfb.sdbt;
}
/*
* deallocate sampling facility buffer
*/
static void free_sampling_buffer(struct sf_buffer *sfb)
{
unsigned long *sdbt, *curr;
if (!sfb->sdbt)
return;
sdbt = sfb->sdbt;
curr = sdbt;
/* Free the SDBT after all SDBs are processed... */
while (1) {
if (!*curr || !sdbt)
break;
/* Process table-link entries */
if (is_link_entry(curr)) {
curr = get_next_sdbt(curr);
if (sdbt)
free_page((unsigned long)sdbt);
/* If the origin is reached, sampling buffer is freed */
if (curr == sfb->sdbt)
break;
else
sdbt = curr;
} else {
/* Process SDB pointer */
if (*curr) {
free_page((unsigned long)phys_to_virt(*curr));
curr++;
}
}
}
debug_sprintf_event(sfdbg, 5, "%s: freed sdbt %#lx\n", __func__,
(unsigned long)sfb->sdbt);
memset(sfb, 0, sizeof(*sfb));
}
static int alloc_sample_data_block(unsigned long *sdbt, gfp_t gfp_flags)
{
struct hws_trailer_entry *te;
unsigned long sdb;
/* Allocate and initialize sample-data-block */
sdb = get_zeroed_page(gfp_flags);
if (!sdb)
return -ENOMEM;
te = trailer_entry_ptr(sdb);
te->header.a = 1;
/* Link SDB into the sample-data-block-table */
*sdbt = virt_to_phys((void *)sdb);
return 0;
}
/*
* realloc_sampling_buffer() - extend sampler memory
*
* Allocates new sample-data-blocks and adds them to the specified sampling
* buffer memory.
*
* Important: This modifies the sampling buffer and must be called when the
* sampling facility is disabled.
*
* Returns zero on success, non-zero otherwise.
*/
static int realloc_sampling_buffer(struct sf_buffer *sfb,
unsigned long num_sdb, gfp_t gfp_flags)
{
int i, rc;
unsigned long *new, *tail, *tail_prev = NULL;
if (!sfb->sdbt || !sfb->tail)
return -EINVAL;
if (!is_link_entry(sfb->tail))
return -EINVAL;
/* Append to the existing sampling buffer, overwriting the table-link
 * entry.
 * The tail variable always points to the "tail" (last and table-link)
 * entry in an SDB-table.
*/
tail = sfb->tail;
/* Do a sanity check whether the table-link entry points to
* the sampling buffer origin.
*/
if (sfb->sdbt != get_next_sdbt(tail)) {
debug_sprintf_event(sfdbg, 3, "%s: "
"sampling buffer is not linked: origin %#lx"
" tail %#lx\n", __func__,
(unsigned long)sfb->sdbt,
(unsigned long)tail);
return -EINVAL;
}
/* Allocate remaining SDBs */
rc = 0;
for (i = 0; i < num_sdb; i++) {
/* Allocate a new SDB-table if it is full. */
if (require_table_link(tail)) {
new = (unsigned long *)get_zeroed_page(gfp_flags);
if (!new) {
rc = -ENOMEM;
break;
}
sfb->num_sdbt++;
/* Link current page to tail of chain */
*tail = virt_to_phys((void *)new) + 1;
tail_prev = tail;
tail = new;
}
/* Allocate a new sample-data-block.
* If there is not enough memory, stop the realloc process
* and simply use what was allocated. If this is a temporary
* issue, a new realloc call (if required) might succeed.
*/
rc = alloc_sample_data_block(tail, gfp_flags);
if (rc) {
/* Undo last SDBT. An SDBT with no SDB at its first
* entry but with an SDBT entry instead can not be
* handled by the interrupt handler code.
* Avoid this situation.
*/
if (tail_prev) {
sfb->num_sdbt--;
free_page((unsigned long)new);
tail = tail_prev;
}
break;
}
sfb->num_sdb++;
tail++;
tail_prev = new = NULL; /* Allocated at least one SDB */
}
/* Link sampling buffer to its origin */
*tail = virt_to_phys(sfb->sdbt) + 1;
sfb->tail = tail;
debug_sprintf_event(sfdbg, 4, "%s: new buffer"
" settings: sdbt %lu sdb %lu\n", __func__,
sfb->num_sdbt, sfb->num_sdb);
return rc;
}
/*
* allocate_sampling_buffer() - allocate sampler memory
*
* Allocates and initializes a sampling buffer structure using the
* specified number of sample-data-blocks (SDB). For each allocation,
* a 4K page is used. The number of sample-data-block-tables (SDBT)
* is calculated from the number of SDBs.
* Also sets the ALERT_REQ mask in each SDB's trailer.
*
* Returns zero on success, non-zero otherwise.
*/
static int alloc_sampling_buffer(struct sf_buffer *sfb, unsigned long num_sdb)
{
int rc;
if (sfb->sdbt)
return -EINVAL;
/* Allocate the sample-data-block-table origin */
sfb->sdbt = (unsigned long *)get_zeroed_page(GFP_KERNEL);
if (!sfb->sdbt)
return -ENOMEM;
sfb->num_sdb = 0;
sfb->num_sdbt = 1;
/* Link the table origin to point to itself to prepare for
* realloc_sampling_buffer() invocation.
*/
sfb->tail = sfb->sdbt;
*sfb->tail = virt_to_phys((void *)sfb->sdbt) + 1;
/* Allocate requested number of sample-data-blocks */
rc = realloc_sampling_buffer(sfb, num_sdb, GFP_KERNEL);
if (rc) {
free_sampling_buffer(sfb);
debug_sprintf_event(sfdbg, 4, "%s: "
"realloc_sampling_buffer failed with rc %i\n",
__func__, rc);
} else
debug_sprintf_event(sfdbg, 4,
"%s: tear %#lx dear %#lx\n", __func__,
(unsigned long)sfb->sdbt, (unsigned long)*sfb->sdbt);
return rc;
}
static void sfb_set_limits(unsigned long min, unsigned long max)
{
struct hws_qsi_info_block si;
CPUM_SF_MIN_SDB = min;
CPUM_SF_MAX_SDB = max;
memset(&si, 0, sizeof(si));
if (!qsi(&si))
CPUM_SF_SDB_DIAG_FACTOR = DIV_ROUND_UP(si.dsdes, si.bsdes);
}
static unsigned long sfb_max_limit(struct hw_perf_event *hwc)
{
return SAMPL_DIAG_MODE(hwc) ? CPUM_SF_MAX_SDB * CPUM_SF_SDB_DIAG_FACTOR
: CPUM_SF_MAX_SDB;
}
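/*
 * For illustration: with the default CPUM_SF_MAX_SDB of 8176 and a
 * hypothetical diagnostic factor of 4 (dsdes/bsdes), the limit for a
 * diagnostic-sampling event would be 8176 * 4 = 32704 SDBs, while a
 * basic-sampling event stays at 8176.
 */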
static unsigned long sfb_pending_allocs(struct sf_buffer *sfb,
struct hw_perf_event *hwc)
{
if (!sfb->sdbt)
return SFB_ALLOC_REG(hwc);
if (SFB_ALLOC_REG(hwc) > sfb->num_sdb)
return SFB_ALLOC_REG(hwc) - sfb->num_sdb;
return 0;
}
static int sfb_has_pending_allocs(struct sf_buffer *sfb,
struct hw_perf_event *hwc)
{
return sfb_pending_allocs(sfb, hwc) > 0;
}
static void sfb_account_allocs(unsigned long num, struct hw_perf_event *hwc)
{
/* Limit the number of SDBs to not exceed the maximum */
num = min_t(unsigned long, num, sfb_max_limit(hwc) - SFB_ALLOC_REG(hwc));
if (num)
SFB_ALLOC_REG(hwc) += num;
}
static void sfb_init_allocs(unsigned long num, struct hw_perf_event *hwc)
{
SFB_ALLOC_REG(hwc) = 0;
sfb_account_allocs(num, hwc);
}
static void deallocate_buffers(struct cpu_hw_sf *cpuhw)
{
if (cpuhw->sfb.sdbt)
free_sampling_buffer(&cpuhw->sfb);
}
static int allocate_buffers(struct cpu_hw_sf *cpuhw, struct hw_perf_event *hwc)
{
unsigned long n_sdb, freq;
size_t sample_size;
/* Calculate sampling buffers using 4K pages
*
* 1. The sampling size is 32 bytes for basic sampling. This size
* is the same for all machine types. Diagnostic
* sampling uses the auxiliary data buffer setup which provides the
* memory for SDBs using the common Linux auxiliary trace setup.
*
* 2. Function alloc_sampling_buffer() sets the Alert Request
* Control indicator to trigger a measurement-alert to harvest
* sample-data-blocks (SDB). This is done per SDB. This
* measurement alert interrupt fires quickly enough to handle
* one SDB; on very high frequencies and workloads there might
* be 2 to 3 SDBs available for sample processing.
* Currently there is no need to set up an alert request on every
* n-th page. This would be counterproductive, as one IRQ would then
* trigger a very high number of samples to be processed at once.
*
* 3. Use the sampling frequency as input.
* Compute the number of SDBs and ensure a minimum
* of CPUM_SF_MIN_SDB. Depending on frequency add some more
* SDBs to handle a higher sampling rate.
* Use a minimum of CPUM_SF_MIN_SDB and allow for 100 samples
* (one SDB) for every 10000 HZ frequency increment.
*
* 4. Compute the number of sample-data-block-tables (SDBT) and
* ensure a minimum of CPUM_SF_MIN_SDBT (one table can manage up
* to 511 SDBs).
*/
sample_size = sizeof(struct hws_basic_entry);
freq = sample_rate_to_freq(&cpuhw->qsi, SAMPL_RATE(hwc));
n_sdb = CPUM_SF_MIN_SDB + DIV_ROUND_UP(freq, 10000);
/* If there is already a sampling buffer allocated, it is very likely
* that the sampling facility is enabled too. If the event to be
* initialized requires a greater sampling buffer, the allocation must
* be postponed. Changing the sampling buffer requires the sampling
* facility to be in the disabled state. So, account the number of
* required SDBs and let cpumsf_pmu_enable() resize the buffer just
* before the event is started.
*/
sfb_init_allocs(n_sdb, hwc);
if (sf_buffer_available(cpuhw))
return 0;
debug_sprintf_event(sfdbg, 3,
"%s: rate %lu f %lu sdb %lu/%lu"
" sample_size %lu cpuhw %p\n", __func__,
SAMPL_RATE(hwc), freq, n_sdb, sfb_max_limit(hwc),
sample_size, cpuhw);
return alloc_sampling_buffer(&cpuhw->sfb,
sfb_pending_allocs(&cpuhw->sfb, hwc));
}
static unsigned long min_percent(unsigned int percent, unsigned long base,
unsigned long min)
{
return min_t(unsigned long, min, DIV_ROUND_UP(percent * base, 100));
}
static unsigned long compute_sfb_extent(unsigned long ratio, unsigned long base)
{
/* Use a percentage-based approach to extend the sampling facility
* buffer. Accept up to 5% sample data loss.
* Vary the extents between 1% to 5% of the current number of
* sample-data-blocks.
*/
if (ratio <= 5)
return 0;
if (ratio <= 25)
return min_percent(1, base, 1);
if (ratio <= 50)
return min_percent(1, base, 1);
if (ratio <= 75)
return min_percent(2, base, 2);
if (ratio <= 100)
return min_percent(3, base, 3);
if (ratio <= 250)
return min_percent(4, base, 4);
return min_percent(5, base, 8);
}
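/*
 * Worked example (hypothetical values): with a loss ratio of 60% and a
 * current buffer of base = 100 SDBs, compute_sfb_extent() falls into the
 * "ratio <= 75" branch and returns min(2, DIV_ROUND_UP(2 * 100, 100)) = 2,
 * i.e. the buffer grows by two SDBs.
 */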
static void sfb_account_overflows(struct cpu_hw_sf *cpuhw,
struct hw_perf_event *hwc)
{
unsigned long ratio, num;
if (!OVERFLOW_REG(hwc))
return;
/* The sample_overflow contains the average number of sample data
* entries that have been lost because sample-data-blocks were full.
*
* Calculate the total number of sample data entries that have been
* discarded. Then calculate the ratio of lost samples to total samples
* per second in percent.
*/
ratio = DIV_ROUND_UP(100 * OVERFLOW_REG(hwc) * cpuhw->sfb.num_sdb,
sample_rate_to_freq(&cpuhw->qsi, SAMPL_RATE(hwc)));
/* Compute number of sample-data-blocks */
num = compute_sfb_extent(ratio, cpuhw->sfb.num_sdb);
if (num)
sfb_account_allocs(num, hwc);
debug_sprintf_event(sfdbg, 5, "%s: overflow %llu ratio %lu num %lu\n",
__func__, OVERFLOW_REG(hwc), ratio, num);
OVERFLOW_REG(hwc) = 0;
}
/* extend_sampling_buffer() - Extend sampling buffer
* @sfb: Sampling buffer structure (for local CPU)
* @hwc: Perf event hardware structure
*
* Use this function to extend the sampling buffer based on the overflow counter
* and postponed allocation extents stored in the specified Perf event hardware.
*
* Important: This function disables the sampling facility in order to safely
* change the sampling buffer structure. Do not call this function
* when the PMU is active.
*/
static void extend_sampling_buffer(struct sf_buffer *sfb,
struct hw_perf_event *hwc)
{
unsigned long num, num_old;
int rc;
num = sfb_pending_allocs(sfb, hwc);
if (!num)
return;
num_old = sfb->num_sdb;
/* Disable the sampling facility to reset any states and also
* clear pending measurement alerts.
*/
sf_disable();
/* Extend the sampling buffer.
* This memory allocation typically happens in an atomic context when
* called by perf. Because this is a reallocation, it is fine if the
* new SDB-request cannot be satisfied immediately.
*/
rc = realloc_sampling_buffer(sfb, num, GFP_ATOMIC);
if (rc)
debug_sprintf_event(sfdbg, 5, "%s: realloc failed with rc %i\n",
__func__, rc);
if (sfb_has_pending_allocs(sfb, hwc))
debug_sprintf_event(sfdbg, 5, "%s: "
"req %lu alloc %lu remaining %lu\n",
__func__, num, sfb->num_sdb - num_old,
sfb_pending_allocs(sfb, hwc));
}
/* Number of perf events counting hardware events */
static atomic_t num_events;
/* Used to avoid races in calling reserve/release_cpumf_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);
#define PMC_INIT 0
#define PMC_RELEASE 1
#define PMC_FAILURE 2
static void setup_pmc_cpu(void *flags)
{
struct cpu_hw_sf *cpusf = this_cpu_ptr(&cpu_hw_sf);
int err = 0;
switch (*((int *)flags)) {
case PMC_INIT:
memset(cpusf, 0, sizeof(*cpusf));
err = qsi(&cpusf->qsi);
if (err)
break;
cpusf->flags |= PMU_F_RESERVED;
err = sf_disable();
break;
case PMC_RELEASE:
cpusf->flags &= ~PMU_F_RESERVED;
err = sf_disable();
if (!err)
deallocate_buffers(cpusf);
break;
}
if (err) {
*((int *)flags) |= PMC_FAILURE;
pr_err("Switching off the sampling facility failed with rc %i\n", err);
}
}
static void release_pmc_hardware(void)
{
int flags = PMC_RELEASE;
irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
on_each_cpu(setup_pmc_cpu, &flags, 1);
}
static int reserve_pmc_hardware(void)
{
int flags = PMC_INIT;
on_each_cpu(setup_pmc_cpu, &flags, 1);
if (flags & PMC_FAILURE) {
release_pmc_hardware();
return -ENODEV;
}
irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT);
return 0;
}
static void hw_perf_event_destroy(struct perf_event *event)
{
/* Release PMC if this is the last perf event */
if (!atomic_add_unless(&num_events, -1, 1)) {
mutex_lock(&pmc_reserve_mutex);
if (atomic_dec_return(&num_events) == 0)
release_pmc_hardware();
mutex_unlock(&pmc_reserve_mutex);
}
}
static void hw_init_period(struct hw_perf_event *hwc, u64 period)
{
hwc->sample_period = period;
hwc->last_period = hwc->sample_period;
local64_set(&hwc->period_left, hwc->sample_period);
}
static unsigned long hw_limit_rate(const struct hws_qsi_info_block *si,
unsigned long rate)
{
return clamp_t(unsigned long, rate,
si->min_sampl_rate, si->max_sampl_rate);
}
static u32 cpumsf_pid_type(struct perf_event *event,
u32 pid, enum pid_type type)
{
struct task_struct *tsk;
/* Idle process */
if (!pid)
goto out;
tsk = find_task_by_pid_ns(pid, &init_pid_ns);
pid = -1;
if (tsk) {
/*
* Only top level events contain the pid namespace in which
* they are created.
*/
if (event->parent)
event = event->parent;
pid = __task_pid_nr_ns(tsk, type, event->ns);
/*
* See also 1d953111b648
* "perf/core: Don't report zero PIDs for exiting tasks".
*/
if (!pid && !pid_alive(tsk))
pid = -1;
}
out:
return pid;
}
static void cpumsf_output_event_pid(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs)
{
u32 pid;
struct perf_event_header header;
struct perf_output_handle handle;
/*
* Obtain the PID from the basic-sampling data entry and
* correct the data->tid_entry.pid value.
*/
pid = data->tid_entry.pid;
/* Protect callchain buffers, tasks */
rcu_read_lock();
perf_prepare_sample(data, event, regs);
perf_prepare_header(&header, data, event, regs);
if (perf_output_begin(&handle, data, event, header.size))
goto out;
/* Update the process ID (see also kernel/events/core.c) */
data->tid_entry.pid = cpumsf_pid_type(event, pid, PIDTYPE_TGID);
data->tid_entry.tid = cpumsf_pid_type(event, pid, PIDTYPE_PID);
perf_output_sample(&handle, &header, data, event);
perf_output_end(&handle);
out:
rcu_read_unlock();
}
static unsigned long getrate(bool freq, unsigned long sample,
struct hws_qsi_info_block *si)
{
unsigned long rate;
if (freq) {
rate = freq_to_sample_rate(si, sample);
rate = hw_limit_rate(si, rate);
} else {
/* The min/max sampling rates specify the valid range
* of sample periods. If the specified sample period is
* out of range, limit the period to the range boundary.
*/
rate = hw_limit_rate(si, sample);
/* The perf core maintains a maximum sample rate that is
* configurable through the sysctl interface. Ensure the
* sampling rate does not exceed this value. This also helps
* to avoid throttling when pushing samples with
* perf_event_overflow().
*/
if (sample_rate_to_freq(si, rate) >
sysctl_perf_event_sample_rate) {
debug_sprintf_event(sfdbg, 1, "%s: "
"Sampling rate exceeds maximum "
"perf sample rate\n", __func__);
rate = 0;
}
}
return rate;
}
/* The sampling information (si) contains information about the
* min/max sampling intervals and the CPU speed. So calculate the
* correct sampling interval and avoid the whole period adjust
* feedback loop.
*
* Since the CPU Measurement sampling facility cannot handle frequency
* directly, calculate the sampling interval when a frequency is specified using
* this formula:
* interval := cpu_speed * 1000000 / sample_freq
*
* Returns errno on bad input and zero on success with parameter interval
* set to the correct sampling rate.
*
* Note: This function turns off freq bit to avoid calling function
* perf_adjust_period(). This causes frequency adjustment in the common
* code part which causes tremendous variations in the counter values.
*/
static int __hw_perf_event_init_rate(struct perf_event *event,
struct hws_qsi_info_block *si)
{
struct perf_event_attr *attr = &event->attr;
struct hw_perf_event *hwc = &event->hw;
unsigned long rate;
if (attr->freq) {
if (!attr->sample_freq)
return -EINVAL;
rate = getrate(attr->freq, attr->sample_freq, si);
attr->freq = 0; /* Don't call perf_adjust_period() */
SAMPL_FLAGS(hwc) |= PERF_CPUM_SF_FREQ_MODE;
} else {
rate = getrate(attr->freq, attr->sample_period, si);
if (!rate)
return -EINVAL;
}
attr->sample_period = rate;
SAMPL_RATE(hwc) = rate;
hw_init_period(hwc, SAMPL_RATE(hwc));
debug_sprintf_event(sfdbg, 4, "%s: cpu %d period %#llx freq %d,%#lx\n",
__func__, event->cpu, event->attr.sample_period,
event->attr.freq, SAMPLE_FREQ_MODE(hwc));
return 0;
}
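/*
 * Worked example (hypothetical values, for illustration only): for an event
 * opened with attr->freq = 1 and attr->sample_freq = 4000 on a machine whose
 * QSI block reports cpu_speed = 1000, the interval becomes
 * 1000 * 1000000 / 4000 = 250000, which hw_limit_rate() then clamps to the
 * [min_sampl_rate, max_sampl_rate] range.
 */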
static int __hw_perf_event_init(struct perf_event *event)
{
struct cpu_hw_sf *cpuhw;
struct hws_qsi_info_block si;
struct perf_event_attr *attr = &event->attr;
struct hw_perf_event *hwc = &event->hw;
int cpu, err;
/* Reserve CPU-measurement sampling facility */
err = 0;
if (!atomic_inc_not_zero(&num_events)) {
mutex_lock(&pmc_reserve_mutex);
if (atomic_read(&num_events) == 0 && reserve_pmc_hardware())
err = -EBUSY;
else
atomic_inc(&num_events);
mutex_unlock(&pmc_reserve_mutex);
}
event->destroy = hw_perf_event_destroy;
if (err)
goto out;
/* Access per-CPU sampling information (query sampling info) */
/*
* The event->cpu value can be -1 to count on every CPU, for example,
* when attaching to a task. If this is specified, use the query
* sampling info from the current CPU, otherwise use event->cpu to
* retrieve the per-CPU information.
* Later, cpuhw indicates whether to allocate sampling buffers for a
* particular CPU (cpuhw != NULL) or each online CPU (cpuhw == NULL).
*/
memset(&si, 0, sizeof(si));
cpuhw = NULL;
if (event->cpu == -1)
qsi(&si);
else {
/* Event is pinned to a particular CPU, retrieve the per-CPU
* sampling structure for accessing the CPU-specific QSI.
*/
cpuhw = &per_cpu(cpu_hw_sf, event->cpu);
si = cpuhw->qsi;
}
/* Check sampling facility authorization and, if not authorized,
* fall back to other PMUs. It is safe to check any CPU because
* the authorization is identical for all configured CPUs.
*/
if (!si.as) {
err = -ENOENT;
goto out;
}
if (si.ribm & CPU_MF_SF_RIBM_NOTAV) {
pr_warn("CPU Measurement Facility sampling is temporarily not available\n");
err = -EBUSY;
goto out;
}
/* Always enable basic sampling */
SAMPL_FLAGS(hwc) = PERF_CPUM_SF_BASIC_MODE;
/* Check if diagnostic sampling is requested. Deny if the required
* sampling authorization is missing.
*/
if (attr->config == PERF_EVENT_CPUM_SF_DIAG) {
if (!si.ad) {
err = -EPERM;
goto out;
}
SAMPL_FLAGS(hwc) |= PERF_CPUM_SF_DIAG_MODE;
}
err = __hw_perf_event_init_rate(event, &si);
if (err)
goto out;
/* Initialize sample data overflow accounting */
hwc->extra_reg.reg = REG_OVERFLOW;
OVERFLOW_REG(hwc) = 0;
/* Use AUX buffer. No need to allocate it by ourself */
if (attr->config == PERF_EVENT_CPUM_SF_DIAG)
return 0;
/* Allocate the per-CPU sampling buffer using the CPU information
* from the event. If the event is not pinned to a particular
* CPU (event->cpu == -1; or cpuhw == NULL), allocate sampling
* buffers for each online CPU.
*/
if (cpuhw)
/* Event is pinned to a particular CPU */
err = allocate_buffers(cpuhw, hwc);
else {
/* Event is not pinned, allocate sampling buffer on
* each online CPU
*/
for_each_online_cpu(cpu) {
cpuhw = &per_cpu(cpu_hw_sf, cpu);
err = allocate_buffers(cpuhw, hwc);
if (err)
break;
}
}
/* If PID/TID sampling is active, replace the default overflow
* handler to extract and resolve the PIDs from the basic-sampling
* data entries.
*/
if (event->attr.sample_type & PERF_SAMPLE_TID)
if (is_default_overflow_handler(event))
event->overflow_handler = cpumsf_output_event_pid;
out:
return err;
}
static bool is_callchain_event(struct perf_event *event)
{
u64 sample_type = event->attr.sample_type;
return sample_type & (PERF_SAMPLE_CALLCHAIN | PERF_SAMPLE_REGS_USER |
PERF_SAMPLE_STACK_USER);
}
static int cpumsf_pmu_event_init(struct perf_event *event)
{
int err;
/* No support for taken branch sampling */
/* No support for callchain, stacks and registers */
if (has_branch_stack(event) || is_callchain_event(event))
return -EOPNOTSUPP;
switch (event->attr.type) {
case PERF_TYPE_RAW:
if ((event->attr.config != PERF_EVENT_CPUM_SF) &&
(event->attr.config != PERF_EVENT_CPUM_SF_DIAG))
return -ENOENT;
break;
case PERF_TYPE_HARDWARE:
/* Support sampling of CPU cycles in addition to the
* counter facility. However, the counter facility
* is more precise and, hence, this PMU is restricted to
* sampling events only.
*/
if (event->attr.config != PERF_COUNT_HW_CPU_CYCLES)
return -ENOENT;
if (!is_sampling_event(event))
return -ENOENT;
break;
default:
return -ENOENT;
}
/* Force reset of idle/hv excludes regardless of what the
* user requested.
*/
if (event->attr.exclude_hv)
event->attr.exclude_hv = 0;
if (event->attr.exclude_idle)
event->attr.exclude_idle = 0;
err = __hw_perf_event_init(event);
if (unlikely(err))
if (event->destroy)
event->destroy(event);
return err;
}
static void cpumsf_pmu_enable(struct pmu *pmu)
{
struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
struct hw_perf_event *hwc;
int err;
if (cpuhw->flags & PMU_F_ENABLED)
return;
if (cpuhw->flags & PMU_F_ERR_MASK)
return;
/* Check whether to extend the sampling buffer.
*
* Two conditions trigger an increase of the sampling buffer for a
* perf event:
* 1. Postponed buffer allocations from the event initialization.
* 2. Sampling overflows that contribute to pending allocations.
*
* Note that the extend_sampling_buffer() function disables the sampling
* facility, but it can be fully re-enabled using sampling controls that
* have been saved in cpumsf_pmu_disable().
*/
if (cpuhw->event) {
hwc = &cpuhw->event->hw;
if (!(SAMPL_DIAG_MODE(hwc))) {
/*
* Account number of overflow-designated
* buffer extents
*/
sfb_account_overflows(cpuhw, hwc);
extend_sampling_buffer(&cpuhw->sfb, hwc);
}
/* Rate may be adjusted with ioctl() */
cpuhw->lsctl.interval = SAMPL_RATE(&cpuhw->event->hw);
}
/* (Re)enable the PMU and sampling facility */
cpuhw->flags |= PMU_F_ENABLED;
barrier();
err = lsctl(&cpuhw->lsctl);
if (err) {
cpuhw->flags &= ~PMU_F_ENABLED;
pr_err("Loading sampling controls failed: op 1 err %i\n", err);
return;
}
/* Load current program parameter */
lpp(&S390_lowcore.lpp);
debug_sprintf_event(sfdbg, 6, "%s: es %i cs %i ed %i cd %i "
"interval %#lx tear %#lx dear %#lx\n", __func__,
cpuhw->lsctl.es, cpuhw->lsctl.cs, cpuhw->lsctl.ed,
cpuhw->lsctl.cd, cpuhw->lsctl.interval,
cpuhw->lsctl.tear, cpuhw->lsctl.dear);
}
static void cpumsf_pmu_disable(struct pmu *pmu)
{
struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
struct hws_lsctl_request_block inactive;
struct hws_qsi_info_block si;
int err;
if (!(cpuhw->flags & PMU_F_ENABLED))
return;
if (cpuhw->flags & PMU_F_ERR_MASK)
return;
/* Switch off sampling activation control */
inactive = cpuhw->lsctl;
inactive.cs = 0;
inactive.cd = 0;
err = lsctl(&inactive);
if (err) {
pr_err("Loading sampling controls failed: op 2 err %i\n", err);
return;
}
/* Save state of TEAR and DEAR register contents */
err = qsi(&si);
if (!err) {
/* TEAR/DEAR values are valid only if the sampling facility is
* enabled. Note that cpumsf_pmu_disable() might be called even
* for a disabled sampling facility because cpumsf_pmu_enable()
* controls the enable/disable state.
*/
if (si.es) {
cpuhw->lsctl.tear = si.tear;
cpuhw->lsctl.dear = si.dear;
}
} else
debug_sprintf_event(sfdbg, 3, "%s: qsi() failed with err %i\n",
__func__, err);
cpuhw->flags &= ~PMU_F_ENABLED;
}
/* perf_exclude_event() - Filter event
* @event: The perf event
* @regs: pt_regs structure
* @sde_regs: Sample-data-entry (sde) regs structure
*
* Filter perf events according to their exclude specification.
*
* Return non-zero if the event shall be excluded.
*/
static int perf_exclude_event(struct perf_event *event, struct pt_regs *regs,
struct perf_sf_sde_regs *sde_regs)
{
if (event->attr.exclude_user && user_mode(regs))
return 1;
if (event->attr.exclude_kernel && !user_mode(regs))
return 1;
if (event->attr.exclude_guest && sde_regs->in_guest)
return 1;
if (event->attr.exclude_host && !sde_regs->in_guest)
return 1;
return 0;
}
/* perf_push_sample() - Push samples to perf
* @event: The perf event
* @sample: Hardware sample data
*
* Use the hardware sample data to create a perf event sample. The sample
* is then pushed to the perf event subsystem and the function checks for
* possible event overflows. If an event overflow occurs, the PMU is
* stopped.
*
* Return non-zero if an event overflow occurred.
*/
static int perf_push_sample(struct perf_event *event,
struct hws_basic_entry *basic)
{
int overflow;
struct pt_regs regs;
struct perf_sf_sde_regs *sde_regs;
struct perf_sample_data data;
/* Setup perf sample */
perf_sample_data_init(&data, 0, event->hw.last_period);
/* Set up pt_regs to look like a CPU-measurement external interrupt
* using the Program Request Alert code. The regs.int_parm_long
* field which is unused contains additional sample-data-entry related
* indicators.
*/
memset(®s, 0, sizeof(regs));
regs.int_code = 0x1407;
regs.int_parm = CPU_MF_INT_SF_PRA;
sde_regs = (struct perf_sf_sde_regs *) ®s.int_parm_long;
psw_bits(regs.psw).ia = basic->ia;
psw_bits(regs.psw).dat = basic->T;
psw_bits(regs.psw).wait = basic->W;
psw_bits(regs.psw).pstate = basic->P;
psw_bits(regs.psw).as = basic->AS;
/*
* Use the hardware provided configuration level to decide if the
* sample belongs to a guest or host. If that is not available,
* fall back to the following heuristics:
* A non-zero guest program parameter always indicates a guest
* sample. Some early samples or samples from guests without
* lpp usage would be misaccounted to the host. We use the asn
* value as an addon heuristic to detect most of these guest samples.
* If the value differs from 0xffff (the host value), we assume to
* be a KVM guest.
*/
switch (basic->CL) {
case 1: /* logical partition */
sde_regs->in_guest = 0;
break;
case 2: /* virtual machine */
sde_regs->in_guest = 1;
break;
default: /* old machine, use heuristics */
if (basic->gpp || basic->prim_asn != 0xffff)
sde_regs->in_guest = 1;
break;
}
/*
* Store the PID value from the sample-data-entry to be
* processed and resolved by cpumsf_output_event_pid().
*/
data.tid_entry.pid = basic->hpp & LPP_PID_MASK;
overflow = 0;
if (perf_exclude_event(event, ®s, sde_regs))
goto out;
if (perf_event_overflow(event, &data, ®s)) {
overflow = 1;
event->pmu->stop(event, 0);
}
perf_event_update_userpage(event);
out:
return overflow;
}
static void perf_event_count_update(struct perf_event *event, u64 count)
{
local64_add(count, &event->count);
}
/* hw_collect_samples() - Walk through a sample-data-block and collect samples
* @event: The perf event
* @sdbt: Sample-data-block table
* @overflow: Event overflow counter
*
* Walks through a sample-data-block and collects sampling data entries that are
* then pushed to the perf event subsystem. Depending on the sampling function,
* there can be either basic-sampling or combined-sampling data entries. A
* combined-sampling data entry consists of a basic- and a diagnostic-sampling
* data entry. The sampling function is determined by the flags in the perf
* event hardware structure. The function always works with a combined-sampling
* data entry but ignores the diagnostic portion if it is not available.
*
* Note that the implementation focuses on basic-sampling data entries and, if
* such an entry is not valid, the entire combined-sampling data entry is
* ignored.
*
* The overflow variable counts the number of samples that have been discarded
* due to a perf event overflow.
*/
static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt,
unsigned long long *overflow)
{
struct hws_trailer_entry *te;
struct hws_basic_entry *sample;
te = trailer_entry_ptr((unsigned long)sdbt);
sample = (struct hws_basic_entry *)sdbt;
while ((unsigned long *)sample < (unsigned long *)te) {
/* Check for an empty sample */
if (!sample->def || sample->LS)
break;
/* Update perf event period */
perf_event_count_update(event, SAMPL_RATE(&event->hw));
/* Check whether sample is valid */
if (sample->def == 0x0001) {
/* If an event overflow occurred, the PMU is stopped to
* throttle event delivery. Remaining sample data is
* discarded.
*/
if (!*overflow) {
/* Check whether sample is consistent */
if (sample->I == 0 && sample->W == 0) {
/* Deliver sample data to perf */
*overflow = perf_push_sample(event,
sample);
}
} else
/* Count discarded samples */
*overflow += 1;
} else {
debug_sprintf_event(sfdbg, 4,
"%s: Found unknown"
" sampling data entry: te->f %i"
" basic.def %#4x (%p)\n", __func__,
te->header.f, sample->def, sample);
/* Sample slot is not yet written or holds another record type.
*
* This condition can occur if the buffer was reused
* from a combined basic- and diagnostic-sampling.
* If only basic-sampling is then active, entries are
* written into the larger diagnostic entries.
* This is typically the case for sample-data-blocks
* that are not full. Stop processing if the first
* invalid format was detected.
*/
if (!te->header.f)
break;
}
/* Reset sample slot and advance to next sample */
sample->def = 0;
sample++;
}
}
/* hw_perf_event_update() - Process sampling buffer
* @event: The perf event
* @flush_all: Flag to also flush partially filled sample-data-blocks
*
* Processes the sampling buffer and creates perf event samples.
* The sampling buffer position is retrieved and saved in the TEAR_REG
* register of the specified perf event.
*
* Only full sample-data-blocks are processed. Specify the flush_all flag
* to also walk through partially filled sample-data-blocks.
*/
static void hw_perf_event_update(struct perf_event *event, int flush_all)
{
unsigned long long event_overflow, sampl_overflow, num_sdb;
union hws_trailer_header old, prev, new;
struct hw_perf_event *hwc = &event->hw;
struct hws_trailer_entry *te;
unsigned long *sdbt, sdb;
int done;
/*
* AUX buffer is used when in diagnostic sampling mode.
* No perf events/samples are created.
*/
if (SAMPL_DIAG_MODE(&event->hw))
return;
sdbt = (unsigned long *)TEAR_REG(hwc);
done = event_overflow = sampl_overflow = num_sdb = 0;
while (!done) {
/* Get the trailer entry of the sample-data-block */
sdb = (unsigned long)phys_to_virt(*sdbt);
te = trailer_entry_ptr(sdb);
/* Leave loop if no more work to do (block full indicator) */
if (!te->header.f) {
done = 1;
if (!flush_all)
break;
}
/* Check the sample overflow count */
if (te->header.overflow)
/* Account sample overflows and, if a particular limit
* is reached, extend the sampling buffer.
* For details, see sfb_account_overflows().
*/
sampl_overflow += te->header.overflow;
/* Timestamps are valid for full sample-data-blocks only */
debug_sprintf_event(sfdbg, 6, "%s: sdbt %#lx/%#lx "
"overflow %llu timestamp %#llx\n",
__func__, sdb, (unsigned long)sdbt,
te->header.overflow,
(te->header.f) ? trailer_timestamp(te) : 0ULL);
/* Collect all samples from a single sample-data-block and
* flag if an (perf) event overflow happened. If so, the PMU
* is stopped and remaining samples will be discarded.
*/
hw_collect_samples(event, (unsigned long *)sdb, &event_overflow);
num_sdb++;
/* Reset trailer (using compare-double-and-swap) */
prev.val = READ_ONCE_ALIGNED_128(te->header.val);
do {
old.val = prev.val;
new.val = prev.val;
new.f = 0;
new.a = 1;
new.overflow = 0;
prev.val = cmpxchg128(&te->header.val, old.val, new.val);
} while (prev.val != old.val);
/* Advance to next sample-data-block */
sdbt++;
if (is_link_entry(sdbt))
sdbt = get_next_sdbt(sdbt);
/* Update event hardware registers */
TEAR_REG(hwc) = (unsigned long) sdbt;
/* Stop processing sample-data if all samples of the current
* sample-data-block were flushed even if it was not full.
*/
if (flush_all && done)
break;
}
/* Account sample overflows in the event hardware structure */
if (sampl_overflow)
OVERFLOW_REG(hwc) = DIV_ROUND_UP(OVERFLOW_REG(hwc) +
sampl_overflow, 1 + num_sdb);
	/* perf_event_overflow() and perf_event_account_interrupt() cap the
	 * interrupt rate at an upper limit of roughly 1000 samples per
	 * task tick. Hitting this limit results in a large number of
	 * throttled REF_REPORT_THROTTLE entries and the samples are
	 * dropped.
* Slightly increase the interval to avoid hitting this limit.
*/
if (event_overflow) {
SAMPL_RATE(hwc) += DIV_ROUND_UP(SAMPL_RATE(hwc), 10);
debug_sprintf_event(sfdbg, 1, "%s: rate adjustment %ld\n",
__func__,
DIV_ROUND_UP(SAMPL_RATE(hwc), 10));
}
if (sampl_overflow || event_overflow)
debug_sprintf_event(sfdbg, 4, "%s: "
"overflows: sample %llu event %llu"
" total %llu num_sdb %llu\n",
__func__, sampl_overflow, event_overflow,
OVERFLOW_REG(hwc), num_sdb);
}
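/*
 * Map a monotonically increasing SDB number onto the ring of
 * allocated SDBs.
 */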
static inline unsigned long aux_sdb_index(struct aux_buffer *aux,
unsigned long i)
{
return i % aux->sfb.num_sdb;
}
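/*
 * Number of SDBs in the inclusive range [start, end], or zero if the
 * range is empty.
 */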
static inline unsigned long aux_sdb_num(unsigned long start, unsigned long end)
{
return end >= start ? end - start + 1 : 0;
}
static inline unsigned long aux_sdb_num_alert(struct aux_buffer *aux)
{
return aux_sdb_num(aux->head, aux->alert_mark);
}
static inline unsigned long aux_sdb_num_empty(struct aux_buffer *aux)
{
return aux_sdb_num(aux->head, aux->empty_mark);
}
/*
* Get trailer entry by index of SDB.
*/
static struct hws_trailer_entry *aux_sdb_trailer(struct aux_buffer *aux,
unsigned long index)
{
unsigned long sdb;
index = aux_sdb_index(aux, index);
sdb = aux->sdb_index[index];
return trailer_entry_ptr(sdb);
}
/*
 * Finish sampling on the CPU. Called by cpumsf_pmu_del() with pmu
 * disabled. Collect the full SDBs in the AUX buffer which have not yet
 * reached the alert indicator; SDBs which are not full are ignored.
*
* 1. Scan SDBs to see how much data is there and consume them.
* 2. Remove alert indicator in the buffer.
*/
static void aux_output_end(struct perf_output_handle *handle)
{
unsigned long i, range_scan, idx;
struct aux_buffer *aux;
struct hws_trailer_entry *te;
aux = perf_get_aux(handle);
if (!aux)
return;
range_scan = aux_sdb_num_alert(aux);
for (i = 0, idx = aux->head; i < range_scan; i++, idx++) {
te = aux_sdb_trailer(aux, idx);
if (!te->header.f)
break;
}
/* i is num of SDBs which are full */
perf_aux_output_end(handle, i << PAGE_SHIFT);
/* Remove alert indicators in the buffer */
te = aux_sdb_trailer(aux, aux->alert_mark);
te->header.a = 0;
debug_sprintf_event(sfdbg, 6, "%s: SDBs %ld range %ld head %ld\n",
__func__, i, range_scan, aux->head);
}
/*
* Start sampling on the CPU. Called by cpumsf_pmu_add() when an event
* is first added to the CPU or rescheduled again to the CPU. It is called
* with pmu disabled.
*
* 1. Reset the trailer of SDBs to get ready for new data.
 * 2. Tell the hardware where to put the data by resetting the SDB buffer
 *    head (tear/dear).
*/
static int aux_output_begin(struct perf_output_handle *handle,
struct aux_buffer *aux,
struct cpu_hw_sf *cpuhw)
{
unsigned long range, i, range_scan, idx, head, base, offset;
struct hws_trailer_entry *te;
if (WARN_ON_ONCE(handle->head & ~PAGE_MASK))
return -EINVAL;
aux->head = handle->head >> PAGE_SHIFT;
range = (handle->size + 1) >> PAGE_SHIFT;
if (range <= 1)
return -ENOMEM;
/*
	 * SDBs between aux->head and aux->empty_mark are already prepared
	 * for new data. range_scan is the number of SDBs outside this range.
*/
debug_sprintf_event(sfdbg, 6,
"%s: range %ld head %ld alert %ld empty %ld\n",
__func__, range, aux->head, aux->alert_mark,
aux->empty_mark);
if (range > aux_sdb_num_empty(aux)) {
range_scan = range - aux_sdb_num_empty(aux);
idx = aux->empty_mark + 1;
for (i = 0; i < range_scan; i++, idx++) {
te = aux_sdb_trailer(aux, idx);
te->header.f = 0;
te->header.a = 0;
te->header.overflow = 0;
}
/* Save the position of empty SDBs */
aux->empty_mark = aux->head + range - 1;
}
/* Set alert indicator */
aux->alert_mark = aux->head + range/2 - 1;
te = aux_sdb_trailer(aux, aux->alert_mark);
te->header.a = 1;
/* Reset hardware buffer head */
head = aux_sdb_index(aux, aux->head);
base = aux->sdbt_index[head / CPUM_SF_SDB_PER_TABLE];
offset = head % CPUM_SF_SDB_PER_TABLE;
cpuhw->lsctl.tear = virt_to_phys((void *)base) + offset * sizeof(unsigned long);
cpuhw->lsctl.dear = virt_to_phys((void *)aux->sdb_index[head]);
debug_sprintf_event(sfdbg, 6, "%s: head %ld alert %ld empty %ld "
"index %ld tear %#lx dear %#lx\n", __func__,
aux->head, aux->alert_mark, aux->empty_mark,
head / CPUM_SF_SDB_PER_TABLE,
cpuhw->lsctl.tear, cpuhw->lsctl.dear);
return 0;
}
/*
* Set alert indicator on SDB at index @alert_index while sampler is running.
*
 * Return true on success.
 * Return false if the full indicator was already set by the hardware sampler.
*/
static bool aux_set_alert(struct aux_buffer *aux, unsigned long alert_index,
unsigned long long *overflow)
{
union hws_trailer_header old, prev, new;
struct hws_trailer_entry *te;
te = aux_sdb_trailer(aux, alert_index);
prev.val = READ_ONCE_ALIGNED_128(te->header.val);
do {
old.val = prev.val;
new.val = prev.val;
*overflow = old.overflow;
if (old.f) {
/*
* SDB is already set by hardware.
* Abort and try to set somewhere
* behind.
*/
return false;
}
new.a = 1;
new.overflow = 0;
prev.val = cmpxchg128(&te->header.val, old.val, new.val);
} while (prev.val != old.val);
return true;
}
/*
* aux_reset_buffer() - Scan and setup SDBs for new samples
* @aux: The AUX buffer to set
 * @range: The range of SDBs to scan, starting from aux->head
* @overflow: Set to overflow count
*
* Set alert indicator on the SDB at index of aux->alert_mark. If this SDB is
* marked as empty, check if it is already set full by the hardware sampler.
* If yes, that means new data is already there before we can set an alert
* indicator. Caller should try to set alert indicator to some position behind.
*
 * Scan the SDBs in the AUX buffer from behind aux->empty_mark. They were used
 * previously and have already been consumed by user space. Reset these SDBs
 * (clear full indicator and alert indicator) for new data.
 * If aux->alert_mark falls in this area, just set it. The overflow count is
 * recorded while scanning.
 *
 * SDBs between aux->head and aux->empty_mark were already reset last time
 * and are ready for new samples, so scanning this area can be skipped.
 *
 * Return true if the alert indicator was set successfully and false if not.
*/
static bool aux_reset_buffer(struct aux_buffer *aux, unsigned long range,
unsigned long long *overflow)
{
unsigned long i, range_scan, idx, idx_old;
union hws_trailer_header old, prev, new;
unsigned long long orig_overflow;
struct hws_trailer_entry *te;
debug_sprintf_event(sfdbg, 6, "%s: range %ld head %ld alert %ld "
"empty %ld\n", __func__, range, aux->head,
aux->alert_mark, aux->empty_mark);
if (range <= aux_sdb_num_empty(aux))
/*
* No need to scan. All SDBs in range are marked as empty.
* Just set alert indicator. Should check race with hardware
* sampler.
*/
return aux_set_alert(aux, aux->alert_mark, overflow);
if (aux->alert_mark <= aux->empty_mark)
/*
* Set alert indicator on empty SDB. Should check race
* with hardware sampler.
*/
if (!aux_set_alert(aux, aux->alert_mark, overflow))
return false;
/*
	 * Scan the SDBs to clear the full and alert indicators used previously.
	 * Start scanning from one SDB behind empty_mark. If the new alert
	 * indicator falls into this range, set it.
*/
range_scan = range - aux_sdb_num_empty(aux);
idx_old = idx = aux->empty_mark + 1;
for (i = 0; i < range_scan; i++, idx++) {
te = aux_sdb_trailer(aux, idx);
prev.val = READ_ONCE_ALIGNED_128(te->header.val);
do {
old.val = prev.val;
new.val = prev.val;
orig_overflow = old.overflow;
new.f = 0;
new.overflow = 0;
if (idx == aux->alert_mark)
new.a = 1;
else
new.a = 0;
prev.val = cmpxchg128(&te->header.val, old.val, new.val);
} while (prev.val != old.val);
*overflow += orig_overflow;
}
/* Update empty_mark to new position */
aux->empty_mark = aux->head + range - 1;
debug_sprintf_event(sfdbg, 6, "%s: range_scan %ld idx %ld..%ld "
"empty %ld\n", __func__, range_scan, idx_old,
idx - 1, aux->empty_mark);
return true;
}
/*
* Measurement alert handler for diagnostic mode sampling.
*/
static void hw_collect_aux(struct cpu_hw_sf *cpuhw)
{
struct aux_buffer *aux;
int done = 0;
unsigned long range = 0, size;
unsigned long long overflow = 0;
struct perf_output_handle *handle = &cpuhw->handle;
unsigned long num_sdb;
aux = perf_get_aux(handle);
if (WARN_ON_ONCE(!aux))
return;
/* Inform user space new data arrived */
size = aux_sdb_num_alert(aux) << PAGE_SHIFT;
debug_sprintf_event(sfdbg, 6, "%s: #alert %ld\n", __func__,
size >> PAGE_SHIFT);
perf_aux_output_end(handle, size);
num_sdb = aux->sfb.num_sdb;
while (!done) {
/* Get an output handle */
aux = perf_aux_output_begin(handle, cpuhw->event);
if (handle->size == 0) {
pr_err("The AUX buffer with %lu pages for the "
"diagnostic-sampling mode is full\n",
num_sdb);
break;
}
if (WARN_ON_ONCE(!aux))
return;
/* Update head and alert_mark to new position */
aux->head = handle->head >> PAGE_SHIFT;
range = (handle->size + 1) >> PAGE_SHIFT;
if (range == 1)
aux->alert_mark = aux->head;
else
aux->alert_mark = aux->head + range/2 - 1;
if (aux_reset_buffer(aux, range, &overflow)) {
if (!overflow) {
done = 1;
break;
}
size = range << PAGE_SHIFT;
perf_aux_output_end(&cpuhw->handle, size);
pr_err("Sample data caused the AUX buffer with %lu "
"pages to overflow\n", aux->sfb.num_sdb);
debug_sprintf_event(sfdbg, 1, "%s: head %ld range %ld "
"overflow %lld\n", __func__,
aux->head, range, overflow);
} else {
size = aux_sdb_num_alert(aux) << PAGE_SHIFT;
perf_aux_output_end(&cpuhw->handle, size);
debug_sprintf_event(sfdbg, 6, "%s: head %ld alert %ld "
"already full, try another\n",
__func__,
aux->head, aux->alert_mark);
}
}
if (done)
debug_sprintf_event(sfdbg, 6, "%s: head %ld alert %ld "
"empty %ld\n", __func__, aux->head,
aux->alert_mark, aux->empty_mark);
}
/*
* Callback when freeing AUX buffers.
*/
static void aux_buffer_free(void *data)
{
struct aux_buffer *aux = data;
unsigned long i, num_sdbt;
if (!aux)
return;
/* Free SDBT. SDB is freed by the caller */
num_sdbt = aux->sfb.num_sdbt;
for (i = 0; i < num_sdbt; i++)
free_page(aux->sdbt_index[i]);
kfree(aux->sdbt_index);
kfree(aux->sdb_index);
kfree(aux);
debug_sprintf_event(sfdbg, 4, "%s: SDBTs %lu\n", __func__, num_sdbt);
}
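/*
 * Initialize the trailer entry of an SDB: record that the TOD clock
 * base is valid and store it for later timestamp conversion.
 */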
static void aux_sdb_init(unsigned long sdb)
{
struct hws_trailer_entry *te;
te = trailer_entry_ptr(sdb);
/* Save clock base */
te->clock_base = 1;
te->progusage2 = tod_clock_base.tod;
}
/*
* aux_buffer_setup() - Setup AUX buffer for diagnostic mode sampling
* @event: Event the buffer is setup for, event->cpu == -1 means current
* @pages: Array of pointers to buffer pages passed from perf core
* @nr_pages: Total pages
* @snapshot: Flag for snapshot mode
*
 * This is the callback when setting up an event that uses an AUX buffer.
 * The perf tool can trigger it by an additional mmap() call on the event.
 * Unlike the buffer for basic samples, the AUX buffer belongs to the event.
 * For a per-thread event it is scheduled with the task among the online CPUs.
 *
 * Return the private AUX buffer structure on success or NULL on failure.
*/
static void *aux_buffer_setup(struct perf_event *event, void **pages,
int nr_pages, bool snapshot)
{
struct sf_buffer *sfb;
struct aux_buffer *aux;
unsigned long *new, *tail;
int i, n_sdbt;
if (!nr_pages || !pages)
return NULL;
if (nr_pages > CPUM_SF_MAX_SDB * CPUM_SF_SDB_DIAG_FACTOR) {
pr_err("AUX buffer size (%i pages) is larger than the "
"maximum sampling buffer limit\n",
nr_pages);
return NULL;
} else if (nr_pages < CPUM_SF_MIN_SDB * CPUM_SF_SDB_DIAG_FACTOR) {
pr_err("AUX buffer size (%i pages) is less than the "
"minimum sampling buffer limit\n",
nr_pages);
return NULL;
}
/* Allocate aux_buffer struct for the event */
aux = kzalloc(sizeof(struct aux_buffer), GFP_KERNEL);
if (!aux)
goto no_aux;
sfb = &aux->sfb;
/* Allocate sdbt_index for fast reference */
n_sdbt = DIV_ROUND_UP(nr_pages, CPUM_SF_SDB_PER_TABLE);
aux->sdbt_index = kmalloc_array(n_sdbt, sizeof(void *), GFP_KERNEL);
if (!aux->sdbt_index)
goto no_sdbt_index;
/* Allocate sdb_index for fast reference */
aux->sdb_index = kmalloc_array(nr_pages, sizeof(void *), GFP_KERNEL);
if (!aux->sdb_index)
goto no_sdb_index;
/* Allocate the first SDBT */
sfb->num_sdbt = 0;
sfb->sdbt = (unsigned long *)get_zeroed_page(GFP_KERNEL);
if (!sfb->sdbt)
goto no_sdbt;
aux->sdbt_index[sfb->num_sdbt++] = (unsigned long)sfb->sdbt;
tail = sfb->tail = sfb->sdbt;
/*
* Link the provided pages of AUX buffer to SDBT.
* Allocate SDBT if needed.
*/
for (i = 0; i < nr_pages; i++, tail++) {
if (require_table_link(tail)) {
new = (unsigned long *)get_zeroed_page(GFP_KERNEL);
if (!new)
goto no_sdbt;
aux->sdbt_index[sfb->num_sdbt++] = (unsigned long)new;
/* Link current page to tail of chain */
*tail = virt_to_phys(new) + 1;
tail = new;
}
/* Tail is the entry in a SDBT */
*tail = virt_to_phys(pages[i]);
aux->sdb_index[i] = (unsigned long)pages[i];
aux_sdb_init((unsigned long)pages[i]);
}
sfb->num_sdb = nr_pages;
/* Link the last entry in the SDBT to the first SDBT */
*tail = virt_to_phys(sfb->sdbt) + 1;
sfb->tail = tail;
/*
	 * Initially all SDBs are zeroed. Mark them as empty so there is
	 * no need to clear the full indicator when this event is first
	 * added.
*/
aux->empty_mark = sfb->num_sdb - 1;
debug_sprintf_event(sfdbg, 4, "%s: SDBTs %lu SDBs %lu\n", __func__,
sfb->num_sdbt, sfb->num_sdb);
return aux;
no_sdbt:
/* SDBs (AUX buffer pages) are freed by caller */
for (i = 0; i < sfb->num_sdbt; i++)
free_page(aux->sdbt_index[i]);
kfree(aux->sdb_index);
no_sdb_index:
kfree(aux->sdbt_index);
no_sdbt_index:
kfree(aux);
no_aux:
return NULL;
}
static void cpumsf_pmu_read(struct perf_event *event)
{
/* Nothing to do ... updates are interrupt-driven */
}
/* Check if the new sampling period/frequency is appropriate.
*
* Return non-zero on error and zero on passed checks.
*/
static int cpumsf_pmu_check_period(struct perf_event *event, u64 value)
{
struct hws_qsi_info_block si;
unsigned long rate;
bool do_freq;
memset(&si, 0, sizeof(si));
if (event->cpu == -1) {
if (qsi(&si))
return -ENODEV;
} else {
/* Event is pinned to a particular CPU, retrieve the per-CPU
* sampling structure for accessing the CPU-specific QSI.
*/
struct cpu_hw_sf *cpuhw = &per_cpu(cpu_hw_sf, event->cpu);
si = cpuhw->qsi;
}
do_freq = !!SAMPLE_FREQ_MODE(&event->hw);
rate = getrate(do_freq, value, &si);
if (!rate)
return -EINVAL;
event->attr.sample_period = rate;
SAMPL_RATE(&event->hw) = rate;
hw_init_period(&event->hw, SAMPL_RATE(&event->hw));
debug_sprintf_event(sfdbg, 4, "%s:"
" cpu %d value %#llx period %#llx freq %d\n",
__func__, event->cpu, value,
event->attr.sample_period, do_freq);
return 0;
}
/* Activate sampling control.
* Next call of pmu_enable() starts sampling.
*/
static void cpumsf_pmu_start(struct perf_event *event, int flags)
{
struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
return;
if (flags & PERF_EF_RELOAD)
WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
perf_pmu_disable(event->pmu);
event->hw.state = 0;
cpuhw->lsctl.cs = 1;
if (SAMPL_DIAG_MODE(&event->hw))
cpuhw->lsctl.cd = 1;
perf_pmu_enable(event->pmu);
}
/* Deactivate sampling control.
* Next call of pmu_enable() stops sampling.
*/
static void cpumsf_pmu_stop(struct perf_event *event, int flags)
{
struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
if (event->hw.state & PERF_HES_STOPPED)
return;
perf_pmu_disable(event->pmu);
cpuhw->lsctl.cs = 0;
cpuhw->lsctl.cd = 0;
event->hw.state |= PERF_HES_STOPPED;
if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) {
hw_perf_event_update(event, 1);
event->hw.state |= PERF_HES_UPTODATE;
}
perf_pmu_enable(event->pmu);
}
static int cpumsf_pmu_add(struct perf_event *event, int flags)
{
struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
struct aux_buffer *aux;
int err;
if (cpuhw->flags & PMU_F_IN_USE)
return -EAGAIN;
if (!SAMPL_DIAG_MODE(&event->hw) && !cpuhw->sfb.sdbt)
return -EINVAL;
err = 0;
perf_pmu_disable(event->pmu);
event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
/* Set up sampling controls. Always program the sampling register
* using the SDB-table start. Reset TEAR_REG event hardware register
* that is used by hw_perf_event_update() to store the sampling buffer
* position after samples have been flushed.
*/
cpuhw->lsctl.s = 0;
cpuhw->lsctl.h = 1;
cpuhw->lsctl.interval = SAMPL_RATE(&event->hw);
if (!SAMPL_DIAG_MODE(&event->hw)) {
cpuhw->lsctl.tear = virt_to_phys(cpuhw->sfb.sdbt);
cpuhw->lsctl.dear = *(unsigned long *)cpuhw->sfb.sdbt;
TEAR_REG(&event->hw) = (unsigned long)cpuhw->sfb.sdbt;
}
/* Ensure sampling functions are in the disabled state. If disabled,
* switch on sampling enable control. */
if (WARN_ON_ONCE(cpuhw->lsctl.es == 1 || cpuhw->lsctl.ed == 1)) {
err = -EAGAIN;
goto out;
}
if (SAMPL_DIAG_MODE(&event->hw)) {
aux = perf_aux_output_begin(&cpuhw->handle, event);
if (!aux) {
err = -EINVAL;
goto out;
}
err = aux_output_begin(&cpuhw->handle, aux, cpuhw);
if (err)
goto out;
cpuhw->lsctl.ed = 1;
}
cpuhw->lsctl.es = 1;
/* Set in_use flag and store event */
cpuhw->event = event;
cpuhw->flags |= PMU_F_IN_USE;
if (flags & PERF_EF_START)
cpumsf_pmu_start(event, PERF_EF_RELOAD);
out:
perf_event_update_userpage(event);
perf_pmu_enable(event->pmu);
return err;
}
static void cpumsf_pmu_del(struct perf_event *event, int flags)
{
struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
perf_pmu_disable(event->pmu);
cpumsf_pmu_stop(event, PERF_EF_UPDATE);
cpuhw->lsctl.es = 0;
cpuhw->lsctl.ed = 0;
cpuhw->flags &= ~PMU_F_IN_USE;
cpuhw->event = NULL;
if (SAMPL_DIAG_MODE(&event->hw))
aux_output_end(&cpuhw->handle);
perf_event_update_userpage(event);
perf_pmu_enable(event->pmu);
}
CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC, PERF_EVENT_CPUM_SF);
CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC_DIAG, PERF_EVENT_CPUM_SF_DIAG);
/* Attribute list for CPU_SF.
*
 * The availability depends on the CPU_MF sampling facility authorization
 * for basic + diagnostic samples. This is determined at initialization
 * time by the sampling facility device driver.
 * If the authorization for basic samples is turned off, it should
 * also be turned off for diagnostic sampling.
 *
 * During initialization of the device driver, the authorization
 * level for diagnostic sampling is checked and the attribute
 * file for diagnostic sampling is installed if necessary.
*
* For now install a placeholder to reference all possible attributes:
* SF_CYCLES_BASIC and SF_CYCLES_BASIC_DIAG.
* Add another entry for the final NULL pointer.
*/
enum {
SF_CYCLES_BASIC_ATTR_IDX = 0,
SF_CYCLES_BASIC_DIAG_ATTR_IDX,
SF_CYCLES_ATTR_MAX
};
static struct attribute *cpumsf_pmu_events_attr[SF_CYCLES_ATTR_MAX + 1] = {
[SF_CYCLES_BASIC_ATTR_IDX] = CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC)
};
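/* The raw event number is encoded in the full 64-bit config field */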
PMU_FORMAT_ATTR(event, "config:0-63");
static struct attribute *cpumsf_pmu_format_attr[] = {
&format_attr_event.attr,
NULL,
};
static struct attribute_group cpumsf_pmu_events_group = {
.name = "events",
.attrs = cpumsf_pmu_events_attr,
};
static struct attribute_group cpumsf_pmu_format_group = {
.name = "format",
.attrs = cpumsf_pmu_format_attr,
};
static const struct attribute_group *cpumsf_pmu_attr_groups[] = {
&cpumsf_pmu_events_group,
&cpumsf_pmu_format_group,
NULL,
};
static struct pmu cpumf_sampling = {
.pmu_enable = cpumsf_pmu_enable,
.pmu_disable = cpumsf_pmu_disable,
.event_init = cpumsf_pmu_event_init,
.add = cpumsf_pmu_add,
.del = cpumsf_pmu_del,
.start = cpumsf_pmu_start,
.stop = cpumsf_pmu_stop,
.read = cpumsf_pmu_read,
.attr_groups = cpumsf_pmu_attr_groups,
.setup_aux = aux_buffer_setup,
.free_aux = aux_buffer_free,
.check_period = cpumsf_pmu_check_period,
};
static void cpumf_measurement_alert(struct ext_code ext_code,
unsigned int alert, unsigned long unused)
{
struct cpu_hw_sf *cpuhw;
if (!(alert & CPU_MF_INT_SF_MASK))
return;
inc_irq_stat(IRQEXT_CMS);
cpuhw = this_cpu_ptr(&cpu_hw_sf);
/* Measurement alerts are shared and might happen when the PMU
* is not reserved. Ignore these alerts in this case. */
if (!(cpuhw->flags & PMU_F_RESERVED))
return;
/* The processing below must take care of multiple alert events that
* might be indicated concurrently. */
/* Program alert request */
if (alert & CPU_MF_INT_SF_PRA) {
if (cpuhw->flags & PMU_F_IN_USE)
if (SAMPL_DIAG_MODE(&cpuhw->event->hw))
hw_collect_aux(cpuhw);
else
hw_perf_event_update(cpuhw->event, 0);
else
WARN_ON_ONCE(!(cpuhw->flags & PMU_F_IN_USE));
}
/* Report measurement alerts only for non-PRA codes */
if (alert != CPU_MF_INT_SF_PRA)
debug_sprintf_event(sfdbg, 6, "%s: alert %#x\n", __func__,
alert);
/* Sampling authorization change request */
if (alert & CPU_MF_INT_SF_SACA)
qsi(&cpuhw->qsi);
/* Loss of sample data due to high-priority machine activities */
if (alert & CPU_MF_INT_SF_LSDA) {
pr_err("Sample data was lost\n");
cpuhw->flags |= PMU_F_ERR_LSDA;
sf_disable();
}
/* Invalid sampling buffer entry */
if (alert & (CPU_MF_INT_SF_IAE|CPU_MF_INT_SF_ISE)) {
pr_err("A sampling buffer entry is incorrect (alert=0x%x)\n",
alert);
cpuhw->flags |= PMU_F_ERR_IBE;
sf_disable();
}
}
static int cpusf_pmu_setup(unsigned int cpu, int flags)
{
/* Ignore the notification if no events are scheduled on the PMU.
* This might be racy...
*/
if (!atomic_read(&num_events))
return 0;
local_irq_disable();
setup_pmc_cpu(&flags);
local_irq_enable();
return 0;
}
static int s390_pmu_sf_online_cpu(unsigned int cpu)
{
return cpusf_pmu_setup(cpu, PMC_INIT);
}
static int s390_pmu_sf_offline_cpu(unsigned int cpu)
{
return cpusf_pmu_setup(cpu, PMC_RELEASE);
}
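/*
 * Show and set the sampling buffer size limits (number of SDBs) for the
 * cpum_sfb_size kernel parameter. Accepted input is "min,max" or "max".
 */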
static int param_get_sfb_size(char *buffer, const struct kernel_param *kp)
{
if (!cpum_sf_avail())
return -ENODEV;
return sprintf(buffer, "%lu,%lu", CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB);
}
static int param_set_sfb_size(const char *val, const struct kernel_param *kp)
{
int rc;
unsigned long min, max;
if (!cpum_sf_avail())
return -ENODEV;
if (!val || !strlen(val))
return -EINVAL;
/* Valid parameter values: "min,max" or "max" */
min = CPUM_SF_MIN_SDB;
max = CPUM_SF_MAX_SDB;
if (strchr(val, ','))
rc = (sscanf(val, "%lu,%lu", &min, &max) == 2) ? 0 : -EINVAL;
else
rc = kstrtoul(val, 10, &max);
if (min < 2 || min >= max || max > get_num_physpages())
rc = -EINVAL;
if (rc)
return rc;
sfb_set_limits(min, max);
pr_info("The sampling buffer limits have changed to: "
"min %lu max %lu (diag %lu)\n",
CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB, CPUM_SF_SDB_DIAG_FACTOR);
return 0;
}
#define param_check_sfb_size(name, p) __param_check(name, p, void)
static const struct kernel_param_ops param_ops_sfb_size = {
.set = param_set_sfb_size,
.get = param_get_sfb_size,
};
#define RS_INIT_FAILURE_QSI 0x0001
#define RS_INIT_FAILURE_BSDES 0x0002
#define RS_INIT_FAILURE_ALRT 0x0003
#define RS_INIT_FAILURE_PERF 0x0004
static void __init pr_cpumsf_err(unsigned int reason)
{
pr_err("Sampling facility support for perf is not available: "
"reason %#x\n", reason);
}
static int __init init_cpum_sampling_pmu(void)
{
struct hws_qsi_info_block si;
int err;
if (!cpum_sf_avail())
return -ENODEV;
memset(&si, 0, sizeof(si));
if (qsi(&si)) {
pr_cpumsf_err(RS_INIT_FAILURE_QSI);
return -ENODEV;
}
if (!si.as && !si.ad)
return -ENODEV;
if (si.bsdes != sizeof(struct hws_basic_entry)) {
pr_cpumsf_err(RS_INIT_FAILURE_BSDES);
return -EINVAL;
}
if (si.ad) {
sfb_set_limits(CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB);
/* Sampling of diagnostic data authorized,
* install event into attribute list of PMU device.
*/
cpumsf_pmu_events_attr[SF_CYCLES_BASIC_DIAG_ATTR_IDX] =
CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG);
}
sfdbg = debug_register(KMSG_COMPONENT, 2, 1, 80);
if (!sfdbg) {
pr_err("Registering for s390dbf failed\n");
return -ENOMEM;
}
debug_register_view(sfdbg, &debug_sprintf_view);
err = register_external_irq(EXT_IRQ_MEASURE_ALERT,
cpumf_measurement_alert);
if (err) {
pr_cpumsf_err(RS_INIT_FAILURE_ALRT);
debug_unregister(sfdbg);
goto out;
}
err = perf_pmu_register(&cpumf_sampling, "cpum_sf", PERF_TYPE_RAW);
if (err) {
pr_cpumsf_err(RS_INIT_FAILURE_PERF);
unregister_external_irq(EXT_IRQ_MEASURE_ALERT,
cpumf_measurement_alert);
debug_unregister(sfdbg);
goto out;
}
cpuhp_setup_state(CPUHP_AP_PERF_S390_SF_ONLINE, "perf/s390/sf:online",
s390_pmu_sf_online_cpu, s390_pmu_sf_offline_cpu);
out:
return err;
}
arch_initcall(init_cpum_sampling_pmu);
core_param(cpum_sfb_size, CPUM_SF_MAX_SDB, sfb_size, 0644);
| linux-master | arch/s390/kernel/perf_cpum_sf.c |
// SPDX-License-Identifier: GPL-2.0
/*
* vdso setup for s390
*
* Copyright IBM Corp. 2008
* Author(s): Martin Schwidefsky ([email protected])
*/
#include <linux/binfmts.h>
#include <linux/compat.h>
#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/time_namespace.h>
#include <linux/random.h>
#include <vdso/datapage.h>
#include <asm/vdso.h>
extern char vdso64_start[], vdso64_end[];
extern char vdso32_start[], vdso32_end[];
static struct vm_special_mapping vvar_mapping;
static union {
struct vdso_data data[CS_BASES];
u8 page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = vdso_data_store.data;
enum vvar_pages {
VVAR_DATA_PAGE_OFFSET,
VVAR_TIMENS_PAGE_OFFSET,
VVAR_NR_PAGES,
};
#ifdef CONFIG_TIME_NS
struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
return (struct vdso_data *)(vvar_page);
}
/*
* The VVAR page layout depends on whether a task belongs to the root or
* non-root time namespace. Whenever a task changes its namespace, the VVAR
* page tables are cleared and then they will be re-faulted with a
* corresponding layout.
* See also the comment near timens_setup_vdso_data() for details.
*/
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
struct mm_struct *mm = task->mm;
VMA_ITERATOR(vmi, mm, 0);
struct vm_area_struct *vma;
mmap_read_lock(mm);
for_each_vma(vmi, vma) {
if (!vma_is_special_mapping(vma, &vvar_mapping))
continue;
zap_vma_pages(vma);
break;
}
mmap_read_unlock(mm);
return 0;
}
#endif
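/*
 * Fault handler for the [vvar] special mapping: map the vdso data page
 * and, for tasks in a non-root time namespace, the time namespace page.
 */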
static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct page *timens_page = find_timens_vvar_page(vma);
unsigned long addr, pfn;
vm_fault_t err;
switch (vmf->pgoff) {
case VVAR_DATA_PAGE_OFFSET:
pfn = virt_to_pfn(vdso_data);
if (timens_page) {
/*
* Fault in VVAR page too, since it will be accessed
* to get clock data anyway.
*/
addr = vmf->address + VVAR_TIMENS_PAGE_OFFSET * PAGE_SIZE;
err = vmf_insert_pfn(vma, addr, pfn);
if (unlikely(err & VM_FAULT_ERROR))
return err;
pfn = page_to_pfn(timens_page);
}
break;
#ifdef CONFIG_TIME_NS
case VVAR_TIMENS_PAGE_OFFSET:
/*
* If a task belongs to a time namespace then a namespace
* specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and
* the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET
* offset.
* See also the comment near timens_setup_vdso_data().
*/
if (!timens_page)
return VM_FAULT_SIGBUS;
pfn = virt_to_pfn(vdso_data);
break;
#endif /* CONFIG_TIME_NS */
default:
return VM_FAULT_SIGBUS;
}
return vmf_insert_pfn(vma, vmf->address, pfn);
}
static int vdso_mremap(const struct vm_special_mapping *sm,
struct vm_area_struct *vma)
{
current->mm->context.vdso_base = vma->vm_start;
return 0;
}
static struct vm_special_mapping vvar_mapping = {
.name = "[vvar]",
.fault = vvar_fault,
};
static struct vm_special_mapping vdso64_mapping = {
.name = "[vdso]",
.mremap = vdso_mremap,
};
static struct vm_special_mapping vdso32_mapping = {
.name = "[vdso]",
.mremap = vdso_mremap,
};
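/*
 * Store the CPU number in the TOD programmable field so that it can be
 * retrieved from user space, e.g. by the vdso getcpu() implementation.
 */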
int vdso_getcpu_init(void)
{
set_tod_programmable_field(smp_processor_id());
return 0;
}
early_initcall(vdso_getcpu_init); /* Must be called before SMP init */
static int map_vdso(unsigned long addr, unsigned long vdso_mapping_len)
{
unsigned long vvar_start, vdso_text_start, vdso_text_len;
struct vm_special_mapping *vdso_mapping;
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
int rc;
BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);
if (mmap_write_lock_killable(mm))
return -EINTR;
if (is_compat_task()) {
vdso_text_len = vdso32_end - vdso32_start;
vdso_mapping = &vdso32_mapping;
} else {
vdso_text_len = vdso64_end - vdso64_start;
vdso_mapping = &vdso64_mapping;
}
vvar_start = get_unmapped_area(NULL, addr, vdso_mapping_len, 0, 0);
rc = vvar_start;
if (IS_ERR_VALUE(vvar_start))
goto out;
vma = _install_special_mapping(mm, vvar_start, VVAR_NR_PAGES*PAGE_SIZE,
VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
VM_PFNMAP,
&vvar_mapping);
rc = PTR_ERR(vma);
if (IS_ERR(vma))
goto out;
vdso_text_start = vvar_start + VVAR_NR_PAGES * PAGE_SIZE;
/* VM_MAYWRITE for COW so gdb can set breakpoints */
vma = _install_special_mapping(mm, vdso_text_start, vdso_text_len,
VM_READ|VM_EXEC|
VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
vdso_mapping);
if (IS_ERR(vma)) {
do_munmap(mm, vvar_start, PAGE_SIZE, NULL);
rc = PTR_ERR(vma);
} else {
current->mm->context.vdso_base = vdso_text_start;
rc = 0;
}
out:
mmap_write_unlock(mm);
return rc;
}
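/*
 * Select a randomized, page-aligned address for the vdso mapping
 * below VDSO_BASE.
 */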
static unsigned long vdso_addr(unsigned long start, unsigned long len)
{
unsigned long addr, end, offset;
/*
* Round up the start address. It can start out unaligned as a result
* of stack start randomization.
*/
start = PAGE_ALIGN(start);
/* Round the lowest possible end address up to a PMD boundary. */
end = (start + len + PMD_SIZE - 1) & PMD_MASK;
if (end >= VDSO_BASE)
end = VDSO_BASE;
end -= len;
if (end > start) {
offset = get_random_u32_below(((end - start) >> PAGE_SHIFT) + 1);
addr = start + (offset << PAGE_SHIFT);
} else {
addr = start;
}
return addr;
}
unsigned long vdso_size(void)
{
unsigned long size = VVAR_NR_PAGES * PAGE_SIZE;
if (is_compat_task())
size += vdso32_end - vdso32_start;
else
size += vdso64_end - vdso64_start;
return PAGE_ALIGN(size);
}
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
unsigned long addr = VDSO_BASE;
unsigned long size = vdso_size();
if (current->flags & PF_RANDOMIZE)
addr = vdso_addr(current->mm->start_stack + PAGE_SIZE, size);
return map_vdso(addr, size);
}
static struct page ** __init vdso_setup_pages(void *start, void *end)
{
int pages = (end - start) >> PAGE_SHIFT;
struct page **pagelist;
int i;
pagelist = kcalloc(pages + 1, sizeof(struct page *), GFP_KERNEL);
if (!pagelist)
panic("%s: Cannot allocate page list for VDSO", __func__);
for (i = 0; i < pages; i++)
pagelist[i] = virt_to_page(start + i * PAGE_SIZE);
return pagelist;
}
static int __init vdso_init(void)
{
vdso64_mapping.pages = vdso_setup_pages(vdso64_start, vdso64_end);
if (IS_ENABLED(CONFIG_COMPAT))
vdso32_mapping.pages = vdso_setup_pages(vdso32_start, vdso32_end);
return 0;
}
arch_initcall(vdso_init);
| linux-master | arch/s390/kernel/vdso.c |
// SPDX-License-Identifier: GPL-2.0
/*
* S390 version
* Copyright IBM Corp. 2000
* Author(s): Martin Schwidefsky ([email protected]),
* Gerhard Tonn ([email protected])
* Thomas Spatzier ([email protected])
*
* Conversion between 31bit and 64bit native syscalls.
*
* Heavily inspired by the 32-bit Sparc compat code which is
* Copyright (C) 1997,1998 Jakub Jelinek ([email protected])
* Copyright (C) 1997 David S. Miller ([email protected])
*
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/signal.h>
#include <linux/resource.h>
#include <linux/times.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/uio.h>
#include <linux/quota.h>
#include <linux/poll.h>
#include <linux/personality.h>
#include <linux/stat.h>
#include <linux/filter.h>
#include <linux/highmem.h>
#include <linux/mman.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/icmpv6.h>
#include <linux/syscalls.h>
#include <linux/sysctl.h>
#include <linux/binfmts.h>
#include <linux/capability.h>
#include <linux/compat.h>
#include <linux/vfs.h>
#include <linux/ptrace.h>
#include <linux/fadvise.h>
#include <linux/ipc.h>
#include <linux/slab.h>
#include <asm/types.h>
#include <linux/uaccess.h>
#include <net/scm.h>
#include <net/sock.h>
#include "compat_linux.h"
#ifdef CONFIG_SYSVIPC
COMPAT_SYSCALL_DEFINE5(s390_ipc, uint, call, int, first, compat_ulong_t, second,
compat_ulong_t, third, compat_uptr_t, ptr)
{
if (call >> 16) /* hack for backward compatibility */
return -EINVAL;
return compat_ksys_ipc(call, first, second, third, ptr, third);
}
#endif
COMPAT_SYSCALL_DEFINE3(s390_truncate64, const char __user *, path, u32, high, u32, low)
{
return ksys_truncate(path, (unsigned long)high << 32 | low);
}
COMPAT_SYSCALL_DEFINE3(s390_ftruncate64, unsigned int, fd, u32, high, u32, low)
{
return ksys_ftruncate(fd, (unsigned long)high << 32 | low);
}
COMPAT_SYSCALL_DEFINE5(s390_pread64, unsigned int, fd, char __user *, ubuf,
compat_size_t, count, u32, high, u32, low)
{
if ((compat_ssize_t) count < 0)
return -EINVAL;
return ksys_pread64(fd, ubuf, count, (unsigned long)high << 32 | low);
}
COMPAT_SYSCALL_DEFINE5(s390_pwrite64, unsigned int, fd, const char __user *, ubuf,
compat_size_t, count, u32, high, u32, low)
{
if ((compat_ssize_t) count < 0)
return -EINVAL;
return ksys_pwrite64(fd, ubuf, count, (unsigned long)high << 32 | low);
}
COMPAT_SYSCALL_DEFINE4(s390_readahead, int, fd, u32, high, u32, low, s32, count)
{
return ksys_readahead(fd, (unsigned long)high << 32 | low, count);
}
struct stat64_emu31 {
unsigned long long st_dev;
unsigned int __pad1;
#define STAT64_HAS_BROKEN_ST_INO 1
u32 __st_ino;
unsigned int st_mode;
unsigned int st_nlink;
u32 st_uid;
u32 st_gid;
unsigned long long st_rdev;
unsigned int __pad3;
long st_size;
u32 st_blksize;
unsigned char __pad4[4];
u32 __pad5; /* future possible st_blocks high bits */
u32 st_blocks; /* Number 512-byte blocks allocated. */
u32 st_atime;
u32 __pad6;
u32 st_mtime;
u32 __pad7;
u32 st_ctime;
u32 __pad8; /* will be high 32 bits of ctime someday */
unsigned long st_ino;
};
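/*
 * Convert a kstat structure to the 31-bit stat64 layout and copy it
 * to user space.
 */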
static int cp_stat64(struct stat64_emu31 __user *ubuf, struct kstat *stat)
{
struct stat64_emu31 tmp;
memset(&tmp, 0, sizeof(tmp));
tmp.st_dev = huge_encode_dev(stat->dev);
tmp.st_ino = stat->ino;
tmp.__st_ino = (u32)stat->ino;
tmp.st_mode = stat->mode;
tmp.st_nlink = (unsigned int)stat->nlink;
tmp.st_uid = from_kuid_munged(current_user_ns(), stat->uid);
tmp.st_gid = from_kgid_munged(current_user_ns(), stat->gid);
tmp.st_rdev = huge_encode_dev(stat->rdev);
tmp.st_size = stat->size;
tmp.st_blksize = (u32)stat->blksize;
tmp.st_blocks = (u32)stat->blocks;
tmp.st_atime = (u32)stat->atime.tv_sec;
tmp.st_mtime = (u32)stat->mtime.tv_sec;
tmp.st_ctime = (u32)stat->ctime.tv_sec;
return copy_to_user(ubuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}
COMPAT_SYSCALL_DEFINE2(s390_stat64, const char __user *, filename, struct stat64_emu31 __user *, statbuf)
{
struct kstat stat;
int ret = vfs_stat(filename, &stat);
if (!ret)
ret = cp_stat64(statbuf, &stat);
return ret;
}
COMPAT_SYSCALL_DEFINE2(s390_lstat64, const char __user *, filename, struct stat64_emu31 __user *, statbuf)
{
struct kstat stat;
int ret = vfs_lstat(filename, &stat);
if (!ret)
ret = cp_stat64(statbuf, &stat);
return ret;
}
COMPAT_SYSCALL_DEFINE2(s390_fstat64, unsigned int, fd, struct stat64_emu31 __user *, statbuf)
{
struct kstat stat;
int ret = vfs_fstat(fd, &stat);
if (!ret)
ret = cp_stat64(statbuf, &stat);
return ret;
}
COMPAT_SYSCALL_DEFINE4(s390_fstatat64, unsigned int, dfd, const char __user *, filename,
struct stat64_emu31 __user *, statbuf, int, flag)
{
struct kstat stat;
int error;
error = vfs_fstatat(dfd, filename, &stat, flag);
if (error)
return error;
return cp_stat64(statbuf, &stat);
}
/*
 * Linux/i386 was historically unable to handle more than
 * 4 system call parameters, so these system calls use a memory
 * block for parameter passing.
*/
struct mmap_arg_struct_emu31 {
compat_ulong_t addr;
compat_ulong_t len;
compat_ulong_t prot;
compat_ulong_t flags;
compat_ulong_t fd;
compat_ulong_t offset;
};
COMPAT_SYSCALL_DEFINE1(s390_old_mmap, struct mmap_arg_struct_emu31 __user *, arg)
{
struct mmap_arg_struct_emu31 a;
if (copy_from_user(&a, arg, sizeof(a)))
return -EFAULT;
if (a.offset & ~PAGE_MASK)
return -EINVAL;
return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
a.offset >> PAGE_SHIFT);
}
COMPAT_SYSCALL_DEFINE1(s390_mmap2, struct mmap_arg_struct_emu31 __user *, arg)
{
struct mmap_arg_struct_emu31 a;
if (copy_from_user(&a, arg, sizeof(a)))
return -EFAULT;
return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset);
}
COMPAT_SYSCALL_DEFINE3(s390_read, unsigned int, fd, char __user *, buf, compat_size_t, count)
{
if ((compat_ssize_t) count < 0)
return -EINVAL;
return ksys_read(fd, buf, count);
}
COMPAT_SYSCALL_DEFINE3(s390_write, unsigned int, fd, const char __user *, buf, compat_size_t, count)
{
if ((compat_ssize_t) count < 0)
return -EINVAL;
return ksys_write(fd, buf, count);
}
/*
* 31 bit emulation wrapper functions for sys_fadvise64/fadvise64_64.
* These need to rewrite the advise values for POSIX_FADV_{DONTNEED,NOREUSE}
* because the 31 bit values differ from the 64 bit values.
*/
COMPAT_SYSCALL_DEFINE5(s390_fadvise64, int, fd, u32, high, u32, low, compat_size_t, len, int, advise)
{
if (advise == 4)
advise = POSIX_FADV_DONTNEED;
else if (advise == 5)
advise = POSIX_FADV_NOREUSE;
return ksys_fadvise64_64(fd, (unsigned long)high << 32 | low, len,
advise);
}
struct fadvise64_64_args {
int fd;
long long offset;
long long len;
int advice;
};
COMPAT_SYSCALL_DEFINE1(s390_fadvise64_64, struct fadvise64_64_args __user *, args)
{
struct fadvise64_64_args a;
if ( copy_from_user(&a, args, sizeof(a)) )
return -EFAULT;
if (a.advice == 4)
a.advice = POSIX_FADV_DONTNEED;
else if (a.advice == 5)
a.advice = POSIX_FADV_NOREUSE;
return ksys_fadvise64_64(a.fd, a.offset, a.len, a.advice);
}
COMPAT_SYSCALL_DEFINE6(s390_sync_file_range, int, fd, u32, offhigh, u32, offlow,
u32, nhigh, u32, nlow, unsigned int, flags)
{
return ksys_sync_file_range(fd, ((loff_t)offhigh << 32) + offlow,
((u64)nhigh << 32) + nlow, flags);
}
COMPAT_SYSCALL_DEFINE6(s390_fallocate, int, fd, int, mode, u32, offhigh, u32, offlow,
u32, lenhigh, u32, lenlow)
{
return ksys_fallocate(fd, mode, ((loff_t)offhigh << 32) + offlow,
((u64)lenhigh << 32) + lenlow);
}
| linux-master | arch/s390/kernel/compat_linux.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <asm/nospec-branch.h>
static int __init nobp_setup_early(char *str)
{
bool enabled;
int rc;
rc = kstrtobool(str, &enabled);
if (rc)
return rc;
if (enabled && test_facility(82)) {
/*
* The user explicitly requested nobp=1, enable it and
* disable the expoline support.
*/
__set_facility(82, alt_stfle_fac_list);
if (IS_ENABLED(CONFIG_EXPOLINE))
nospec_disable = 1;
} else {
__clear_facility(82, alt_stfle_fac_list);
}
return 0;
}
early_param("nobp", nobp_setup_early);
static int __init nospec_setup_early(char *str)
{
__clear_facility(82, alt_stfle_fac_list);
return 0;
}
early_param("nospec", nospec_setup_early);
static int __init nospec_report(void)
{
if (test_facility(156))
pr_info("Spectre V2 mitigation: etokens\n");
if (nospec_uses_trampoline())
pr_info("Spectre V2 mitigation: execute trampolines\n");
if (__test_facility(82, alt_stfle_fac_list))
pr_info("Spectre V2 mitigation: limited branch prediction\n");
return 0;
}
arch_initcall(nospec_report);
#ifdef CONFIG_EXPOLINE
int nospec_disable = IS_ENABLED(CONFIG_EXPOLINE_OFF);
static int __init nospectre_v2_setup_early(char *str)
{
nospec_disable = 1;
return 0;
}
early_param("nospectre_v2", nospectre_v2_setup_early);
void __init nospec_auto_detect(void)
{
if (test_facility(156) || cpu_mitigations_off()) {
/*
* The machine supports etokens.
* Disable expolines and disable nobp.
*/
if (__is_defined(CC_USING_EXPOLINE))
nospec_disable = 1;
__clear_facility(82, alt_stfle_fac_list);
} else if (__is_defined(CC_USING_EXPOLINE)) {
/*
* The kernel has been compiled with expolines.
* Keep expolines enabled and disable nobp.
*/
nospec_disable = 0;
__clear_facility(82, alt_stfle_fac_list);
}
/*
	 * If the kernel has not been compiled with expolines, the
	 * nobp setting decides what is done; this depends on the
	 * CONFIG_KERNEL_NOBP option and the nobp/nospec parameters.
*/
}
static int __init spectre_v2_setup_early(char *str)
{
if (str && !strncmp(str, "on", 2)) {
nospec_disable = 0;
__clear_facility(82, alt_stfle_fac_list);
}
if (str && !strncmp(str, "off", 3))
nospec_disable = 1;
if (str && !strncmp(str, "auto", 4))
nospec_auto_detect();
return 0;
}
early_param("spectre_v2", spectre_v2_setup_early);
static void __init_or_module __nospec_revert(s32 *start, s32 *end)
{
enum { BRCL_EXPOLINE, BRASL_EXPOLINE } type;
static const u8 branch[] = { 0x47, 0x00, 0x07, 0x00 };
u8 *instr, *thunk, *br;
u8 insnbuf[6];
s32 *epo;
/* Second part of the instruction replace is always a nop */
memcpy(insnbuf + 2, branch, sizeof(branch));
for (epo = start; epo < end; epo++) {
instr = (u8 *) epo + *epo;
if (instr[0] == 0xc0 && (instr[1] & 0x0f) == 0x04)
type = BRCL_EXPOLINE; /* brcl instruction */
else if (instr[0] == 0xc0 && (instr[1] & 0x0f) == 0x05)
type = BRASL_EXPOLINE; /* brasl instruction */
else
continue;
thunk = instr + (*(int *)(instr + 2)) * 2;
if (thunk[0] == 0xc6 && thunk[1] == 0x00)
/* exrl %r0,<target-br> */
br = thunk + (*(int *)(thunk + 2)) * 2;
else
continue;
if (br[0] != 0x07 || (br[1] & 0xf0) != 0xf0)
continue;
switch (type) {
case BRCL_EXPOLINE:
/* brcl to thunk, replace with br + nop */
insnbuf[0] = br[0];
insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f);
break;
case BRASL_EXPOLINE:
/* brasl to thunk, replace with basr + nop */
insnbuf[0] = 0x0d;
insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f);
break;
}
s390_kernel_write(instr, insnbuf, 6);
}
}
void __init_or_module nospec_revert(s32 *start, s32 *end)
{
if (nospec_disable)
__nospec_revert(start, end);
}
extern s32 __nospec_call_start[], __nospec_call_end[];
extern s32 __nospec_return_start[], __nospec_return_end[];
void __init nospec_init_branches(void)
{
nospec_revert(__nospec_call_start, __nospec_call_end);
nospec_revert(__nospec_return_start, __nospec_return_end);
}
#endif /* CONFIG_EXPOLINE */
| linux-master | arch/s390/kernel/nospec-branch.c |
// SPDX-License-Identifier: GPL-2.0
/*
* S390 kdump implementation
*
* Copyright IBM Corp. 2011
* Author(s): Michael Holzheu <[email protected]>
*/
#include <linux/crash_dump.h>
#include <asm/lowcore.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <linux/elf.h>
#include <linux/uio.h>
#include <asm/asm-offsets.h>
#include <asm/os_info.h>
#include <asm/elf.h>
#include <asm/ipl.h>
#include <asm/sclp.h>
#include <asm/maccess.h>
#define PTR_ADD(x, y) (((char *) (x)) + ((unsigned long) (y)))
#define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y)))
#define PTR_DIFF(x, y) ((unsigned long)(((char *) (x)) - ((unsigned long) (y))))
static struct memblock_region oldmem_region;
static struct memblock_type oldmem_type = {
.cnt = 1,
.max = 1,
.total_size = 0,
.regions = &oldmem_region,
.name = "oldmem",
};
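/*
 * CPU register save area, collected per CPU and exported via the
 * kdump ELF notes.
 */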
struct save_area {
struct list_head list;
u64 psw[2];
u64 ctrs[16];
u64 gprs[16];
u32 acrs[16];
u64 fprs[16];
u32 fpc;
u32 prefix;
u32 todpreg;
u64 timer;
u64 todcmp;
u64 vxrs_low[16];
__vector128 vxrs_high[16];
};
static LIST_HEAD(dump_save_areas);
/*
* Allocate a save area
*/
struct save_area * __init save_area_alloc(bool is_boot_cpu)
{
struct save_area *sa;
sa = memblock_alloc(sizeof(*sa), 8);
if (!sa)
return NULL;
if (is_boot_cpu)
list_add(&sa->list, &dump_save_areas);
else
list_add_tail(&sa->list, &dump_save_areas);
return sa;
}
/*
* Return the address of the save area for the boot CPU
*/
struct save_area * __init save_area_boot_cpu(void)
{
return list_first_entry_or_null(&dump_save_areas, struct save_area, list);
}
/*
* Copy CPU registers into the save area
*/
void __init save_area_add_regs(struct save_area *sa, void *regs)
{
struct lowcore *lc;
lc = (struct lowcore *)(regs - __LC_FPREGS_SAVE_AREA);
memcpy(&sa->psw, &lc->psw_save_area, sizeof(sa->psw));
memcpy(&sa->ctrs, &lc->cregs_save_area, sizeof(sa->ctrs));
memcpy(&sa->gprs, &lc->gpregs_save_area, sizeof(sa->gprs));
memcpy(&sa->acrs, &lc->access_regs_save_area, sizeof(sa->acrs));
memcpy(&sa->fprs, &lc->floating_pt_save_area, sizeof(sa->fprs));
memcpy(&sa->fpc, &lc->fpt_creg_save_area, sizeof(sa->fpc));
memcpy(&sa->prefix, &lc->prefixreg_save_area, sizeof(sa->prefix));
memcpy(&sa->todpreg, &lc->tod_progreg_save_area, sizeof(sa->todpreg));
memcpy(&sa->timer, &lc->cpu_timer_save_area, sizeof(sa->timer));
memcpy(&sa->todcmp, &lc->clock_comp_save_area, sizeof(sa->todcmp));
}
/*
* Copy vector registers into the save area
*/
void __init save_area_add_vxrs(struct save_area *sa, __vector128 *vxrs)
{
int i;
/* Copy lower halves of vector registers 0-15 */
for (i = 0; i < 16; i++)
sa->vxrs_low[i] = vxrs[i].low;
/* Copy vector registers 16-31 */
memcpy(sa->vxrs_high, vxrs + 16, 16 * sizeof(__vector128));
}
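/*
 * Copy memory from the old (crashed) kernel: read from the HSA for
 * zfcp/nvme dump below the HSA size, and handle the swapped kdump
 * oldmem areas otherwise.
 */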
static size_t copy_oldmem_iter(struct iov_iter *iter, unsigned long src, size_t count)
{
size_t len, copied, res = 0;
while (count) {
if (!oldmem_data.start && src < sclp.hsa_size) {
/* Copy from zfcp/nvme dump HSA area */
len = min(count, sclp.hsa_size - src);
copied = memcpy_hsa_iter(iter, src, len);
} else {
/* Check for swapped kdump oldmem areas */
if (oldmem_data.start && src - oldmem_data.start < oldmem_data.size) {
src -= oldmem_data.start;
len = min(count, oldmem_data.size - src);
} else if (oldmem_data.start && src < oldmem_data.size) {
len = min(count, oldmem_data.size - src);
src += oldmem_data.start;
} else {
len = count;
}
copied = memcpy_real_iter(iter, src, len);
}
count -= copied;
src += copied;
res += copied;
if (copied < len)
break;
}
return res;
}
int copy_oldmem_kernel(void *dst, unsigned long src, size_t count)
{
struct iov_iter iter;
struct kvec kvec;
kvec.iov_base = dst;
kvec.iov_len = count;
iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, count);
if (copy_oldmem_iter(&iter, src, count) < count)
return -EFAULT;
return 0;
}
/*
* Copy one page from "oldmem"
*/
ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn, size_t csize,
unsigned long offset)
{
unsigned long src;
src = pfn_to_phys(pfn) + offset;
return copy_oldmem_iter(iter, src, csize);
}
/*
* Remap "oldmem" for kdump
*
 * For the kdump reserved memory this function performs a swap operation:
* [0 - OLDMEM_SIZE] is mapped to [OLDMEM_BASE - OLDMEM_BASE + OLDMEM_SIZE]
*/
static int remap_oldmem_pfn_range_kdump(struct vm_area_struct *vma,
unsigned long from, unsigned long pfn,
unsigned long size, pgprot_t prot)
{
unsigned long size_old;
int rc;
if (pfn < oldmem_data.size >> PAGE_SHIFT) {
size_old = min(size, oldmem_data.size - (pfn << PAGE_SHIFT));
rc = remap_pfn_range(vma, from,
pfn + (oldmem_data.start >> PAGE_SHIFT),
size_old, prot);
if (rc || size == size_old)
return rc;
size -= size_old;
from += size_old;
pfn += size_old >> PAGE_SHIFT;
}
return remap_pfn_range(vma, from, pfn, size, prot);
}
/*
* Remap "oldmem" for zfcp/nvme dump
*
* We only map available memory above HSA size. Memory below HSA size
* is read on demand using the copy_oldmem_page() function.
*/
static int remap_oldmem_pfn_range_zfcpdump(struct vm_area_struct *vma,
unsigned long from,
unsigned long pfn,
unsigned long size, pgprot_t prot)
{
unsigned long hsa_end = sclp.hsa_size;
unsigned long size_hsa;
if (pfn < hsa_end >> PAGE_SHIFT) {
size_hsa = min(size, hsa_end - (pfn << PAGE_SHIFT));
if (size == size_hsa)
return 0;
size -= size_hsa;
from += size_hsa;
pfn += size_hsa >> PAGE_SHIFT;
}
return remap_pfn_range(vma, from, pfn, size, prot);
}
/*
* Remap "oldmem" for kdump or zfcp/nvme dump
*/
int remap_oldmem_pfn_range(struct vm_area_struct *vma, unsigned long from,
unsigned long pfn, unsigned long size, pgprot_t prot)
{
if (oldmem_data.start)
return remap_oldmem_pfn_range_kdump(vma, from, pfn, size, prot);
else
return remap_oldmem_pfn_range_zfcpdump(vma, from, pfn, size,
prot);
}
static const char *nt_name(Elf64_Word type)
{
const char *name = "LINUX";
if (type == NT_PRPSINFO || type == NT_PRSTATUS || type == NT_PRFPREG)
name = KEXEC_CORE_NOTE_NAME;
return name;
}
/*
* Initialize ELF note
*/
static void *nt_init_name(void *buf, Elf64_Word type, void *desc, int d_len,
const char *name)
{
Elf64_Nhdr *note;
u64 len;
note = (Elf64_Nhdr *)buf;
note->n_namesz = strlen(name) + 1;
note->n_descsz = d_len;
note->n_type = type;
len = sizeof(Elf64_Nhdr);
memcpy(buf + len, name, note->n_namesz);
len = roundup(len + note->n_namesz, 4);
memcpy(buf + len, desc, note->n_descsz);
len = roundup(len + note->n_descsz, 4);
return PTR_ADD(buf, len);
}
static inline void *nt_init(void *buf, Elf64_Word type, void *desc, int d_len)
{
return nt_init_name(buf, type, desc, d_len, nt_name(type));
}
/*
* Calculate the size of ELF note
*/
static size_t nt_size_name(int d_len, const char *name)
{
size_t size;
size = sizeof(Elf64_Nhdr);
size += roundup(strlen(name) + 1, 4);
size += roundup(d_len, 4);
return size;
}
static inline size_t nt_size(Elf64_Word type, int d_len)
{
return nt_size_name(d_len, nt_name(type));
}
/*
* Fill ELF notes for one CPU with save area registers
*/
static void *fill_cpu_elf_notes(void *ptr, int cpu, struct save_area *sa)
{
struct elf_prstatus nt_prstatus;
elf_fpregset_t nt_fpregset;
/* Prepare prstatus note */
memset(&nt_prstatus, 0, sizeof(nt_prstatus));
memcpy(&nt_prstatus.pr_reg.gprs, sa->gprs, sizeof(sa->gprs));
memcpy(&nt_prstatus.pr_reg.psw, sa->psw, sizeof(sa->psw));
memcpy(&nt_prstatus.pr_reg.acrs, sa->acrs, sizeof(sa->acrs));
nt_prstatus.common.pr_pid = cpu;
/* Prepare fpregset (floating point) note */
memset(&nt_fpregset, 0, sizeof(nt_fpregset));
memcpy(&nt_fpregset.fpc, &sa->fpc, sizeof(sa->fpc));
memcpy(&nt_fpregset.fprs, &sa->fprs, sizeof(sa->fprs));
/* Create ELF notes for the CPU */
ptr = nt_init(ptr, NT_PRSTATUS, &nt_prstatus, sizeof(nt_prstatus));
ptr = nt_init(ptr, NT_PRFPREG, &nt_fpregset, sizeof(nt_fpregset));
ptr = nt_init(ptr, NT_S390_TIMER, &sa->timer, sizeof(sa->timer));
ptr = nt_init(ptr, NT_S390_TODCMP, &sa->todcmp, sizeof(sa->todcmp));
ptr = nt_init(ptr, NT_S390_TODPREG, &sa->todpreg, sizeof(sa->todpreg));
ptr = nt_init(ptr, NT_S390_CTRS, &sa->ctrs, sizeof(sa->ctrs));
ptr = nt_init(ptr, NT_S390_PREFIX, &sa->prefix, sizeof(sa->prefix));
if (MACHINE_HAS_VX) {
ptr = nt_init(ptr, NT_S390_VXRS_HIGH,
&sa->vxrs_high, sizeof(sa->vxrs_high));
ptr = nt_init(ptr, NT_S390_VXRS_LOW,
&sa->vxrs_low, sizeof(sa->vxrs_low));
}
return ptr;
}
/*
* Calculate size of ELF notes per cpu
*/
static size_t get_cpu_elf_notes_size(void)
{
struct save_area *sa = NULL;
size_t size;
size = nt_size(NT_PRSTATUS, sizeof(struct elf_prstatus));
size += nt_size(NT_PRFPREG, sizeof(elf_fpregset_t));
size += nt_size(NT_S390_TIMER, sizeof(sa->timer));
size += nt_size(NT_S390_TODCMP, sizeof(sa->todcmp));
size += nt_size(NT_S390_TODPREG, sizeof(sa->todpreg));
size += nt_size(NT_S390_CTRS, sizeof(sa->ctrs));
size += nt_size(NT_S390_PREFIX, sizeof(sa->prefix));
if (MACHINE_HAS_VX) {
size += nt_size(NT_S390_VXRS_HIGH, sizeof(sa->vxrs_high));
size += nt_size(NT_S390_VXRS_LOW, sizeof(sa->vxrs_low));
}
return size;
}
/*
* Initialize prpsinfo note (new kernel)
*/
static void *nt_prpsinfo(void *ptr)
{
struct elf_prpsinfo prpsinfo;
memset(&prpsinfo, 0, sizeof(prpsinfo));
prpsinfo.pr_sname = 'R';
strcpy(prpsinfo.pr_fname, "vmlinux");
return nt_init(ptr, NT_PRPSINFO, &prpsinfo, sizeof(prpsinfo));
}
/*
* Get vmcoreinfo using lowcore->vmcore_info (new kernel)
*/
static void *get_vmcoreinfo_old(unsigned long *size)
{
char nt_name[11], *vmcoreinfo;
unsigned long addr;
Elf64_Nhdr note;
if (copy_oldmem_kernel(&addr, __LC_VMCORE_INFO, sizeof(addr)))
return NULL;
memset(nt_name, 0, sizeof(nt_name));
if (copy_oldmem_kernel(¬e, addr, sizeof(note)))
return NULL;
if (copy_oldmem_kernel(nt_name, addr + sizeof(note),
sizeof(nt_name) - 1))
return NULL;
if (strcmp(nt_name, VMCOREINFO_NOTE_NAME) != 0)
return NULL;
vmcoreinfo = kzalloc(note.n_descsz, GFP_KERNEL);
if (!vmcoreinfo)
return NULL;
if (copy_oldmem_kernel(vmcoreinfo, addr + 24, note.n_descsz)) {
kfree(vmcoreinfo);
return NULL;
}
*size = note.n_descsz;
return vmcoreinfo;
}
/*
* Initialize vmcoreinfo note (new kernel)
*/
static void *nt_vmcoreinfo(void *ptr)
{
const char *name = VMCOREINFO_NOTE_NAME;
unsigned long size;
void *vmcoreinfo;
vmcoreinfo = os_info_old_entry(OS_INFO_VMCOREINFO, &size);
if (vmcoreinfo)
return nt_init_name(ptr, 0, vmcoreinfo, size, name);
vmcoreinfo = get_vmcoreinfo_old(&size);
if (!vmcoreinfo)
return ptr;
ptr = nt_init_name(ptr, 0, vmcoreinfo, size, name);
kfree(vmcoreinfo);
return ptr;
}
static size_t nt_vmcoreinfo_size(void)
{
const char *name = VMCOREINFO_NOTE_NAME;
unsigned long size;
void *vmcoreinfo;
vmcoreinfo = os_info_old_entry(OS_INFO_VMCOREINFO, &size);
if (vmcoreinfo)
return nt_size_name(size, name);
vmcoreinfo = get_vmcoreinfo_old(&size);
if (!vmcoreinfo)
return 0;
kfree(vmcoreinfo);
return nt_size_name(size, name);
}
/*
* Initialize final note (needed for /proc/vmcore code)
*/
static void *nt_final(void *ptr)
{
Elf64_Nhdr *note;
note = (Elf64_Nhdr *) ptr;
note->n_namesz = 0;
note->n_descsz = 0;
note->n_type = 0;
return PTR_ADD(ptr, sizeof(Elf64_Nhdr));
}
/*
* Initialize ELF header (new kernel)
*/
static void *ehdr_init(Elf64_Ehdr *ehdr, int mem_chunk_cnt)
{
memset(ehdr, 0, sizeof(*ehdr));
memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
ehdr->e_ident[EI_CLASS] = ELFCLASS64;
ehdr->e_ident[EI_DATA] = ELFDATA2MSB;
ehdr->e_ident[EI_VERSION] = EV_CURRENT;
memset(ehdr->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD);
ehdr->e_type = ET_CORE;
ehdr->e_machine = EM_S390;
ehdr->e_version = EV_CURRENT;
ehdr->e_phoff = sizeof(Elf64_Ehdr);
ehdr->e_ehsize = sizeof(Elf64_Ehdr);
ehdr->e_phentsize = sizeof(Elf64_Phdr);
ehdr->e_phnum = mem_chunk_cnt + 1;
return ehdr + 1;
}
/*
* Return CPU count for ELF header (new kernel)
*/
static int get_cpu_cnt(void)
{
struct save_area *sa;
int cpus = 0;
list_for_each_entry(sa, &dump_save_areas, list)
if (sa->prefix != 0)
cpus++;
return cpus;
}
/*
* Return memory chunk count for ELF header (new kernel)
*/
static int get_mem_chunk_cnt(void)
{
int cnt = 0;
u64 idx;
for_each_physmem_range(idx, &oldmem_type, NULL, NULL)
cnt++;
return cnt;
}
/*
* Initialize ELF loads (new kernel)
*/
static void loads_init(Elf64_Phdr *phdr, u64 loads_offset)
{
phys_addr_t start, end;
u64 idx;
for_each_physmem_range(idx, &oldmem_type, &start, &end) {
phdr->p_filesz = end - start;
phdr->p_type = PT_LOAD;
phdr->p_offset = start;
phdr->p_vaddr = start;
phdr->p_paddr = start;
phdr->p_memsz = end - start;
phdr->p_flags = PF_R | PF_W | PF_X;
phdr->p_align = PAGE_SIZE;
phdr++;
}
}
/*
* Initialize notes (new kernel)
*/
static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset)
{
struct save_area *sa;
void *ptr_start = ptr;
int cpu;
ptr = nt_prpsinfo(ptr);
cpu = 1;
list_for_each_entry(sa, &dump_save_areas, list)
if (sa->prefix != 0)
ptr = fill_cpu_elf_notes(ptr, cpu++, sa);
ptr = nt_vmcoreinfo(ptr);
ptr = nt_final(ptr);
memset(phdr, 0, sizeof(*phdr));
phdr->p_type = PT_NOTE;
phdr->p_offset = notes_offset;
phdr->p_filesz = (unsigned long) PTR_SUB(ptr, ptr_start);
phdr->p_memsz = phdr->p_filesz;
return ptr;
}
static size_t get_elfcorehdr_size(int mem_chunk_cnt)
{
size_t size;
size = sizeof(Elf64_Ehdr);
/* PT_NOTES */
size += sizeof(Elf64_Phdr);
/* nt_prpsinfo */
size += nt_size(NT_PRPSINFO, sizeof(struct elf_prpsinfo));
/* regsets */
size += get_cpu_cnt() * get_cpu_elf_notes_size();
/* nt_vmcoreinfo */
size += nt_vmcoreinfo_size();
/* nt_final */
size += sizeof(Elf64_Nhdr);
/* PT_LOADS */
size += mem_chunk_cnt * sizeof(Elf64_Phdr);
return size;
}
/*
* Create ELF core header (new kernel)
*/
int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
{
Elf64_Phdr *phdr_notes, *phdr_loads;
size_t alloc_size;
int mem_chunk_cnt;
void *ptr, *hdr;
u64 hdr_off;
/* If we are not in kdump or zfcp/nvme dump mode return */
if (!oldmem_data.start && !is_ipl_type_dump())
return 0;
/* If we cannot get HSA size for zfcp/nvme dump return error */
if (is_ipl_type_dump() && !sclp.hsa_size)
return -ENODEV;
/* For kdump, exclude previous crashkernel memory */
if (oldmem_data.start) {
oldmem_region.base = oldmem_data.start;
oldmem_region.size = oldmem_data.size;
oldmem_type.total_size = oldmem_data.size;
}
mem_chunk_cnt = get_mem_chunk_cnt();
alloc_size = get_elfcorehdr_size(mem_chunk_cnt);
hdr = kzalloc(alloc_size, GFP_KERNEL);
/* Without elfcorehdr /proc/vmcore cannot be created. Thus creating
* a dump with this crash kernel will fail. Panic now to allow other
* dump mechanisms to take over.
*/
if (!hdr)
panic("s390 kdump allocating elfcorehdr failed");
/* Init elf header */
ptr = ehdr_init(hdr, mem_chunk_cnt);
/* Init program headers */
phdr_notes = ptr;
ptr = PTR_ADD(ptr, sizeof(Elf64_Phdr));
phdr_loads = ptr;
ptr = PTR_ADD(ptr, sizeof(Elf64_Phdr) * mem_chunk_cnt);
/* Init notes */
hdr_off = PTR_DIFF(ptr, hdr);
ptr = notes_init(phdr_notes, ptr, ((unsigned long) hdr) + hdr_off);
/* Init loads */
hdr_off = PTR_DIFF(ptr, hdr);
loads_init(phdr_loads, hdr_off);
*addr = (unsigned long long) hdr;
*size = (unsigned long long) hdr_off;
BUG_ON(elfcorehdr_size > alloc_size);
return 0;
}
/*
* Free ELF core header (new kernel)
*/
void elfcorehdr_free(unsigned long long addr)
{
kfree((void *)(unsigned long)addr);
}
/*
* Read from ELF header
*/
ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
{
void *src = (void *)(unsigned long)*ppos;
memcpy(buf, src, count);
*ppos += count;
return count;
}
/*
* Read from ELF notes data
*/
ssize_t elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
{
void *src = (void *)(unsigned long)*ppos;
memcpy(buf, src, count);
*ppos += count;
return count;
}
| linux-master | arch/s390/kernel/crash_dump.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <asm/text-patching.h>
#include <asm/alternative.h>
#include <asm/facility.h>
#include <asm/nospec-branch.h>
static int __initdata_or_module alt_instr_disabled;
static int __init disable_alternative_instructions(char *str)
{
alt_instr_disabled = 1;
return 0;
}
early_param("noaltinstr", disable_alternative_instructions);
static void __init_or_module __apply_alternatives(struct alt_instr *start,
struct alt_instr *end)
{
struct alt_instr *a;
u8 *instr, *replacement;
/*
* The scan order should be from start to end: alternatives scanned
* later can overwrite code patched by alternatives scanned earlier.
*/
for (a = start; a < end; a++) {
instr = (u8 *)&a->instr_offset + a->instr_offset;
replacement = (u8 *)&a->repl_offset + a->repl_offset;
if (!__test_facility(a->facility, alt_stfle_fac_list))
continue;
if (unlikely(a->instrlen % 2)) {
WARN_ONCE(1, "cpu alternatives instructions length is "
"odd, skipping patching\n");
continue;
}
s390_kernel_write(instr, replacement, a->instrlen);
}
}
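/*
* Worked example (sketch) of the self-relative offsets used above: if the
* instr_offset member of an entry is located at address A and contains the
* value V, the patch site is at A + V; repl_offset works the same way for
* the replacement code. Storing offsets instead of absolute addresses keeps
* the alternative tables position independent.
*/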
void __init_or_module apply_alternatives(struct alt_instr *start,
struct alt_instr *end)
{
if (!alt_instr_disabled)
__apply_alternatives(start, end);
}
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
void __init apply_alternative_instructions(void)
{
apply_alternatives(__alt_instructions, __alt_instructions_end);
}
static void do_sync_core(void *info)
{
sync_core();
}
void text_poke_sync(void)
{
on_each_cpu(do_sync_core, NULL, 1);
}
void text_poke_sync_lock(void)
{
cpus_read_lock();
text_poke_sync();
cpus_read_unlock();
}
| linux-master | arch/s390/kernel/alternative.c |
// SPDX-License-Identifier: GPL-2.0
/*
* EBCDIC -> ASCII, ASCII -> EBCDIC,
* upper to lower case (EBCDIC) conversion tables.
*
* S390 version
* Copyright IBM Corp. 1999
* Author(s): Martin Schwidefsky <[email protected]>
* Martin Peschke <[email protected]>
*/
#include <linux/types.h>
#include <linux/export.h>
#include <asm/ebcdic.h>
/*
* ASCII (IBM PC 437) -> EBCDIC 037
*/
__u8 _ascebc[256] =
{
/*00 NUL SOH STX ETX EOT ENQ ACK BEL */
0x00, 0x01, 0x02, 0x03, 0x37, 0x2D, 0x2E, 0x2F,
/*08 BS HT LF VT FF CR SO SI */
/* ->NL */
0x16, 0x05, 0x15, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
/*10 DLE DC1 DC2 DC3 DC4 NAK SYN ETB */
0x10, 0x11, 0x12, 0x13, 0x3C, 0x3D, 0x32, 0x26,
/*18 CAN EM SUB ESC FS GS RS US */
/* ->IGS ->IRS ->IUS */
0x18, 0x19, 0x3F, 0x27, 0x22, 0x1D, 0x1E, 0x1F,
/*20 SP ! " # $ % & ' */
0x40, 0x5A, 0x7F, 0x7B, 0x5B, 0x6C, 0x50, 0x7D,
/*28 ( ) * + , - . / */
0x4D, 0x5D, 0x5C, 0x4E, 0x6B, 0x60, 0x4B, 0x61,
/*30 0 1 2 3 4 5 6 7 */
0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
/*38 8 9 : ; < = > ? */
0xF8, 0xF9, 0x7A, 0x5E, 0x4C, 0x7E, 0x6E, 0x6F,
/*40 @ A B C D E F G */
0x7C, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
/*48 H I J K L M N O */
0xC8, 0xC9, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6,
/*50 P Q R S T U V W */
0xD7, 0xD8, 0xD9, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6,
/*58 X Y Z [ \ ] ^ _ */
0xE7, 0xE8, 0xE9, 0xBA, 0xE0, 0xBB, 0xB0, 0x6D,
/*60 ` a b c d e f g */
0x79, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
/*68 h i j k l m n o */
0x88, 0x89, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96,
/*70 p q r s t u v w */
0x97, 0x98, 0x99, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6,
/*78 x y z { | } ~ DL */
0xA7, 0xA8, 0xA9, 0xC0, 0x4F, 0xD0, 0xA1, 0x07,
/*80*/
0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
/*88*/
0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
/*90*/
0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
/*98*/
0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
/*A0*/
0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
/*A8*/
0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
/*B0*/
0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
/*B8*/
0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
/*C0*/
0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
/*C8*/
0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
/*D0*/
0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
/*D8*/
0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
/*E0 sz */
0x3F, 0x59, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
/*E8*/
0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
/*F0*/
0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
/*F8*/
0x90, 0x3F, 0x3F, 0x3F, 0x3F, 0xEA, 0x3F, 0xFF
};
/*
* EBCDIC 037 -> ASCII (IBM PC 437)
*/
__u8 _ebcasc[256] =
{
/* 0x00 NUL SOH STX ETX *SEL HT *RNL DEL */
0x00, 0x01, 0x02, 0x03, 0x07, 0x09, 0x07, 0x7F,
/* 0x08 -GE -SPS -RPT VT FF CR SO SI */
0x07, 0x07, 0x07, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
/* 0x10 DLE DC1 DC2 DC3 -RES -NL BS -POC
-ENP ->LF */
0x10, 0x11, 0x12, 0x13, 0x07, 0x0A, 0x08, 0x07,
/* 0x18 CAN EM -UBS -CU1 -IFS -IGS -IRS -ITB
-IUS */
0x18, 0x19, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
/* 0x20 -DS -SOS FS -WUS -BYP LF ETB ESC
-INP */
0x07, 0x07, 0x1C, 0x07, 0x07, 0x0A, 0x17, 0x1B,
/* 0x28 -SA -SFE -SM -CSP -MFA ENQ ACK BEL
-SW */
0x07, 0x07, 0x07, 0x07, 0x07, 0x05, 0x06, 0x07,
/* 0x30 ---- ---- SYN -IR -PP -TRN -NBS EOT */
0x07, 0x07, 0x16, 0x07, 0x07, 0x07, 0x07, 0x04,
/* 0x38 -SBS -IT -RFF -CU3 DC4 NAK ---- SUB */
0x07, 0x07, 0x07, 0x07, 0x14, 0x15, 0x07, 0x1A,
/* 0x40 SP RSP ä ---- */
0x20, 0xFF, 0x83, 0x84, 0x85, 0xA0, 0x07, 0x86,
/* 0x48 . < ( + | */
0x87, 0xA4, 0x9B, 0x2E, 0x3C, 0x28, 0x2B, 0x7C,
/* 0x50 & ---- */
0x26, 0x82, 0x88, 0x89, 0x8A, 0xA1, 0x8C, 0x07,
/* 0x58 ß ! $ * ) ; */
0x8D, 0xE1, 0x21, 0x24, 0x2A, 0x29, 0x3B, 0xAA,
/* 0x60 - / ---- Ä ---- ---- ---- */
0x2D, 0x2F, 0x07, 0x8E, 0x07, 0x07, 0x07, 0x8F,
/* 0x68 ---- , % _ > ? */
0x80, 0xA5, 0x07, 0x2C, 0x25, 0x5F, 0x3E, 0x3F,
/* 0x70 ---- ---- ---- ---- ---- ---- ---- */
0x07, 0x90, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
/* 0x78 * ` : # @ ' = " */
0x70, 0x60, 0x3A, 0x23, 0x40, 0x27, 0x3D, 0x22,
/* 0x80 * a b c d e f g */
0x07, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
/* 0x88 h i ---- ---- ---- */
0x68, 0x69, 0xAE, 0xAF, 0x07, 0x07, 0x07, 0xF1,
/* 0x90 ° j k l m n o p */
0xF8, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70,
/* 0x98 q r ---- ---- */
0x71, 0x72, 0xA6, 0xA7, 0x91, 0x07, 0x92, 0x07,
/* 0xA0 ~ s t u v w x */
0xE6, 0x7E, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
/* 0xA8 y z ---- ---- ---- ---- */
0x79, 0x7A, 0xAD, 0xAB, 0x07, 0x07, 0x07, 0x07,
/* 0xB0 ^ ---- § ---- */
0x5E, 0x9C, 0x9D, 0xFA, 0x07, 0x07, 0x07, 0xAC,
/* 0xB8 ---- [ ] ---- ---- ---- ---- */
0xAB, 0x07, 0x5B, 0x5D, 0x07, 0x07, 0x07, 0x07,
/* 0xC0 { A B C D E F G */
0x7B, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
/* 0xC8 H I ---- ö ---- */
0x48, 0x49, 0x07, 0x93, 0x94, 0x95, 0xA2, 0x07,
/* 0xD0 } J K L M N O P */
0x7D, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50,
/* 0xD8 Q R ---- ü */
0x51, 0x52, 0x07, 0x96, 0x81, 0x97, 0xA3, 0x98,
/* 0xE0 \ S T U V W X */
0x5C, 0xF6, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
/* 0xE8 Y Z ---- Ö ---- ---- ---- */
0x59, 0x5A, 0xFD, 0x07, 0x99, 0x07, 0x07, 0x07,
/* 0xF0 0 1 2 3 4 5 6 7 */
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
/* 0xF8 8 9 ---- ---- Ü ---- ---- ---- */
0x38, 0x39, 0x07, 0x07, 0x9A, 0x07, 0x07, 0x07
};
/*
* ASCII (IBM PC 437) -> EBCDIC 500
*/
__u8 _ascebc_500[256] =
{
/*00 NUL SOH STX ETX EOT ENQ ACK BEL */
0x00, 0x01, 0x02, 0x03, 0x37, 0x2D, 0x2E, 0x2F,
/*08 BS HT LF VT FF CR SO SI */
/* ->NL */
0x16, 0x05, 0x15, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
/*10 DLE DC1 DC2 DC3 DC4 NAK SYN ETB */
0x10, 0x11, 0x12, 0x13, 0x3C, 0x3D, 0x32, 0x26,
/*18 CAN EM SUB ESC FS GS RS US */
/* ->IGS ->IRS ->IUS */
0x18, 0x19, 0x3F, 0x27, 0x22, 0x1D, 0x1E, 0x1F,
/*20 SP ! " # $ % & ' */
0x40, 0x4F, 0x7F, 0x7B, 0x5B, 0x6C, 0x50, 0x7D,
/*28 ( ) * + , - . / */
0x4D, 0x5D, 0x5C, 0x4E, 0x6B, 0x60, 0x4B, 0x61,
/*30 0 1 2 3 4 5 6 7 */
0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
/*38 8 9 : ; < = > ? */
0xF8, 0xF9, 0x7A, 0x5E, 0x4C, 0x7E, 0x6E, 0x6F,
/*40 @ A B C D E F G */
0x7C, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
/*48 H I J K L M N O */
0xC8, 0xC9, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6,
/*50 P Q R S T U V W */
0xD7, 0xD8, 0xD9, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6,
/*58 X Y Z [ \ ] ^ _ */
0xE7, 0xE8, 0xE9, 0x4A, 0xE0, 0x5A, 0x5F, 0x6D,
/*60 ` a b c d e f g */
0x79, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
/*68 h i j k l m n o */
0x88, 0x89, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96,
/*70 p q r s t u v w */
0x97, 0x98, 0x99, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6,
/*78 x y z { | } ~ DL */
0xA7, 0xA8, 0xA9, 0xC0, 0xBB, 0xD0, 0xA1, 0x07,
/*80*/
0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
/*88*/
0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
/*90*/
0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
/*98*/
0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
/*A0*/
0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
/*A8*/
0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
/*B0*/
0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
/*B8*/
0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
/*C0*/
0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
/*C8*/
0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
/*D0*/
0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
/*D8*/
0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
/*E0 sz */
0x3F, 0x59, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
/*E8*/
0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
/*F0*/
0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
/*F8*/
0x90, 0x3F, 0x3F, 0x3F, 0x3F, 0xEA, 0x3F, 0xFF
};
/*
* EBCDIC 500 -> ASCII (IBM PC 437)
*/
__u8 _ebcasc_500[256] =
{
/* 0x00 NUL SOH STX ETX *SEL HT *RNL DEL */
0x00, 0x01, 0x02, 0x03, 0x07, 0x09, 0x07, 0x7F,
/* 0x08 -GE -SPS -RPT VT FF CR SO SI */
0x07, 0x07, 0x07, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
/* 0x10 DLE DC1 DC2 DC3 -RES -NL BS -POC
-ENP ->LF */
0x10, 0x11, 0x12, 0x13, 0x07, 0x0A, 0x08, 0x07,
/* 0x18 CAN EM -UBS -CU1 -IFS -IGS -IRS -ITB
-IUS */
0x18, 0x19, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
/* 0x20 -DS -SOS FS -WUS -BYP LF ETB ESC
-INP */
0x07, 0x07, 0x1C, 0x07, 0x07, 0x0A, 0x17, 0x1B,
/* 0x28 -SA -SFE -SM -CSP -MFA ENQ ACK BEL
-SW */
0x07, 0x07, 0x07, 0x07, 0x07, 0x05, 0x06, 0x07,
/* 0x30 ---- ---- SYN -IR -PP -TRN -NBS EOT */
0x07, 0x07, 0x16, 0x07, 0x07, 0x07, 0x07, 0x04,
/* 0x38 -SBS -IT -RFF -CU3 DC4 NAK ---- SUB */
0x07, 0x07, 0x07, 0x07, 0x14, 0x15, 0x07, 0x1A,
/* 0x40 SP RSP ä ---- */
0x20, 0xFF, 0x83, 0x84, 0x85, 0xA0, 0x07, 0x86,
/* 0x48 [ . < ( + ! */
0x87, 0xA4, 0x5B, 0x2E, 0x3C, 0x28, 0x2B, 0x21,
/* 0x50 & ---- */
0x26, 0x82, 0x88, 0x89, 0x8A, 0xA1, 0x8C, 0x07,
/* 0x58 ß ] $ * ) ; ^ */
0x8D, 0xE1, 0x5D, 0x24, 0x2A, 0x29, 0x3B, 0x5E,
/* 0x60 - / ---- Ä ---- ---- ---- */
0x2D, 0x2F, 0x07, 0x8E, 0x07, 0x07, 0x07, 0x8F,
/* 0x68 ---- , % _ > ? */
0x80, 0xA5, 0x07, 0x2C, 0x25, 0x5F, 0x3E, 0x3F,
/* 0x70 ---- ---- ---- ---- ---- ---- ---- */
0x07, 0x90, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
/* 0x78 * ` : # @ ' = " */
0x70, 0x60, 0x3A, 0x23, 0x40, 0x27, 0x3D, 0x22,
/* 0x80 * a b c d e f g */
0x07, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
/* 0x88 h i ---- ---- ---- */
0x68, 0x69, 0xAE, 0xAF, 0x07, 0x07, 0x07, 0xF1,
/* 0x90 ° j k l m n o p */
0xF8, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70,
/* 0x98 q r ---- ---- */
0x71, 0x72, 0xA6, 0xA7, 0x91, 0x07, 0x92, 0x07,
/* 0xA0 ~ s t u v w x */
0xE6, 0x7E, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
/* 0xA8 y z ---- ---- ---- ---- */
0x79, 0x7A, 0xAD, 0xAB, 0x07, 0x07, 0x07, 0x07,
/* 0xB0 ---- § ---- */
0x9B, 0x9C, 0x9D, 0xFA, 0x07, 0x07, 0x07, 0xAC,
/* 0xB8 ---- | ---- ---- ---- ---- */
0xAB, 0x07, 0xAA, 0x7C, 0x07, 0x07, 0x07, 0x07,
/* 0xC0 { A B C D E F G */
0x7B, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
/* 0xC8 H I ---- ö ---- */
0x48, 0x49, 0x07, 0x93, 0x94, 0x95, 0xA2, 0x07,
/* 0xD0 } J K L M N O P */
0x7D, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50,
/* 0xD8 Q R ---- ü */
0x51, 0x52, 0x07, 0x96, 0x81, 0x97, 0xA3, 0x98,
/* 0xE0 \ S T U V W X */
0x5C, 0xF6, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
/* 0xE8 Y Z ---- Ö ---- ---- ---- */
0x59, 0x5A, 0xFD, 0x07, 0x99, 0x07, 0x07, 0x07,
/* 0xF0 0 1 2 3 4 5 6 7 */
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
/* 0xF8 8 9 ---- ---- Ü ---- ---- ---- */
0x38, 0x39, 0x07, 0x07, 0x9A, 0x07, 0x07, 0x07
};
/*
* EBCDIC 037/500 conversion table:
* from upper to lower case
*/
__u8 _ebc_tolower[256] =
{
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F,
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F,
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0x3E, 0x3F,
0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F,
0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F,
0x60, 0x61, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
0x48, 0x49, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
0x70, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
0x58, 0x79, 0x7A, 0x7B, 0x7C, 0x7D, 0x7E, 0x7F,
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8A, 0x8B, 0x8C, 0x8D, 0x8E, 0x8F,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9A, 0x9B, 0x9C, 0x9D, 0x9C, 0x9F,
0xA0, 0xA1, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7,
0xA8, 0xA9, 0xAA, 0xAB, 0x8C, 0x8D, 0x8E, 0xAF,
0xB0, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7,
0xB8, 0xB9, 0xBA, 0xBB, 0xBC, 0xBD, 0xBE, 0xBF,
0xC0, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0xCA, 0xCB, 0xCC, 0xCD, 0xCE, 0xCF,
0xD0, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0xDA, 0xDB, 0xDC, 0xDD, 0xDE, 0xDF,
0xE0, 0xE1, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7,
0xA8, 0xA9, 0xEA, 0xCB, 0xCC, 0xCD, 0xCE, 0xCF,
0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
0xF8, 0xF9, 0xFA, 0xDB, 0xDC, 0xDD, 0xDE, 0xFF
};
/*
* EBCDIC 037/500 conversion table:
* from lower to upper case
*/
__u8 _ebc_toupper[256] =
{
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F,
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F,
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0x3E, 0x3F,
0x40, 0x41, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
0x68, 0x69, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F,
0x50, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
0x78, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F,
0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
0x78, 0x79, 0x7A, 0x7B, 0x7C, 0x7D, 0x7E, 0x7F,
0x80, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
0xC8, 0xC9, 0x8A, 0x8B, 0xAC, 0xAD, 0xAE, 0x8F,
0x90, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7,
0xD8, 0xD9, 0x9A, 0x9B, 0x9E, 0x9D, 0x9E, 0x9F,
0xA0, 0xA1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
0xE8, 0xE9, 0xAA, 0xAB, 0xAC, 0xAD, 0xAE, 0xAF,
0xB0, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7,
0xB8, 0xB9, 0xBA, 0xBB, 0xBC, 0xBD, 0xBE, 0xBF,
0xC0, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
0xC8, 0xC9, 0xCA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
0xD0, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7,
0xD8, 0xD9, 0xDA, 0xFB, 0xFC, 0xFD, 0xFE, 0xDF,
0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF
};
EXPORT_SYMBOL(_ascebc_500);
EXPORT_SYMBOL(_ebcasc_500);
EXPORT_SYMBOL(_ascebc);
EXPORT_SYMBOL(_ebcasc);
EXPORT_SYMBOL(_ebc_tolower);
EXPORT_SYMBOL(_ebc_toupper);
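#if 0
/*
* Minimal usage sketch (assuming the ASCEBC()/EBCASC() helpers declared in
* <asm/ebcdic.h>, which apply the tables above in place):
*/
static void ebcdic_example(void)
{
char buf[] = "HELLO";

ASCEBC(buf, sizeof(buf) - 1); /* ASCII -> EBCDIC 037, in place */
EBCASC(buf, sizeof(buf) - 1); /* EBCDIC 037 -> ASCII again */
}
#endif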
| linux-master | arch/s390/kernel/ebcdic.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/interrupt.h>
#include <asm/sections.h>
#include <asm/ptrace.h>
#include <asm/bitops.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>
unsigned long unwind_get_return_address(struct unwind_state *state)
{
if (unwind_done(state))
return 0;
return __kernel_text_address(state->ip) ? state->ip : 0;
}
EXPORT_SYMBOL_GPL(unwind_get_return_address);
static bool outside_of_stack(struct unwind_state *state, unsigned long sp)
{
return (sp <= state->sp) ||
(sp > state->stack_info.end - sizeof(struct stack_frame));
}
static bool update_stack_info(struct unwind_state *state, unsigned long sp)
{
struct stack_info *info = &state->stack_info;
unsigned long *mask = &state->stack_mask;
/* New stack pointer leaves the current stack */
if (get_stack_info(sp, state->task, info, mask) != 0 ||
!on_stack(info, sp, sizeof(struct stack_frame)))
/* 'sp' does not point to a valid stack */
return false;
return true;
}
static inline bool is_final_pt_regs(struct unwind_state *state,
struct pt_regs *regs)
{
/* user mode or kernel thread pt_regs at the bottom of task stack */
if (task_pt_regs(state->task) == regs)
return true;
/* user mode pt_regs at the bottom of irq stack */
return state->stack_info.type == STACK_TYPE_IRQ &&
state->stack_info.end - sizeof(struct pt_regs) == (unsigned long)regs &&
READ_ONCE_NOCHECK(regs->psw.mask) & PSW_MASK_PSTATE;
}
bool unwind_next_frame(struct unwind_state *state)
{
struct stack_info *info = &state->stack_info;
struct stack_frame *sf;
struct pt_regs *regs;
unsigned long sp, ip;
bool reliable;
regs = state->regs;
if (unlikely(regs)) {
sp = state->sp;
sf = (struct stack_frame *) sp;
ip = READ_ONCE_NOCHECK(sf->gprs[8]);
reliable = false;
regs = NULL;
/* skip bogus %r14 or if it is the same as regs->psw.addr */
if (!__kernel_text_address(ip) || state->ip == unwind_recover_ret_addr(state, ip)) {
state->regs = NULL;
return unwind_next_frame(state);
}
} else {
sf = (struct stack_frame *) state->sp;
sp = READ_ONCE_NOCHECK(sf->back_chain);
if (likely(sp)) {
/* Non-zero back-chain points to the previous frame */
if (unlikely(outside_of_stack(state, sp))) {
if (!update_stack_info(state, sp))
goto out_err;
}
sf = (struct stack_frame *) sp;
ip = READ_ONCE_NOCHECK(sf->gprs[8]);
reliable = true;
} else {
/* No back-chain, look for a pt_regs structure */
sp = state->sp + STACK_FRAME_OVERHEAD;
if (!on_stack(info, sp, sizeof(struct pt_regs)))
goto out_err;
regs = (struct pt_regs *) sp;
if (is_final_pt_regs(state, regs))
goto out_stop;
ip = READ_ONCE_NOCHECK(regs->psw.addr);
sp = READ_ONCE_NOCHECK(regs->gprs[15]);
if (unlikely(outside_of_stack(state, sp))) {
if (!update_stack_info(state, sp))
goto out_err;
}
reliable = true;
}
}
/* Sanity check: ABI requires SP to be aligned 8 bytes. */
if (sp & 0x7)
goto out_err;
/* Update unwind state */
state->sp = sp;
state->regs = regs;
state->reliable = reliable;
state->ip = unwind_recover_ret_addr(state, ip);
return true;
out_err:
state->error = true;
out_stop:
state->stack_info.type = STACK_TYPE_UNKNOWN;
return false;
}
EXPORT_SYMBOL_GPL(unwind_next_frame);
void __unwind_start(struct unwind_state *state, struct task_struct *task,
struct pt_regs *regs, unsigned long first_frame)
{
struct stack_info *info = &state->stack_info;
struct stack_frame *sf;
unsigned long ip, sp;
memset(state, 0, sizeof(*state));
state->task = task;
state->regs = regs;
/* Don't even attempt to start from user mode regs: */
if (regs && user_mode(regs)) {
info->type = STACK_TYPE_UNKNOWN;
return;
}
/* Get the instruction pointer from pt_regs or the stack frame */
if (regs) {
ip = regs->psw.addr;
sp = regs->gprs[15];
} else if (task == current) {
sp = current_frame_address();
} else {
sp = task->thread.ksp;
}
/* Get current stack pointer and initialize stack info */
if (!update_stack_info(state, sp)) {
/* Something is wrong with the stack pointer */
info->type = STACK_TYPE_UNKNOWN;
state->error = true;
return;
}
if (!regs) {
/* Stack frame is within valid stack */
sf = (struct stack_frame *)sp;
ip = READ_ONCE_NOCHECK(sf->gprs[8]);
}
/* Update unwind state */
state->sp = sp;
state->reliable = true;
state->ip = unwind_recover_ret_addr(state, ip);
if (!first_frame)
return;
/* Skip through the call chain to the specified starting frame */
while (!unwind_done(state)) {
if (on_stack(&state->stack_info, first_frame, sizeof(struct stack_frame))) {
if (state->sp >= first_frame)
break;
}
unwind_next_frame(state);
}
}
EXPORT_SYMBOL_GPL(__unwind_start);
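#if 0
/*
* Caller-side sketch (assuming the unwind_for_each_frame() helper from
* <asm/unwind.h>, which combines __unwind_start() and unwind_next_frame()):
*/
static void backtrace_example(struct task_struct *task)
{
struct unwind_state state;

unwind_for_each_frame(&state, task, NULL, 0)
printk("%pSR\n", (void *)unwind_get_return_address(&state));
}
#endif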
| linux-master | arch/s390/kernel/unwind_bc.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/elf.h>
#include <asm/kexec.h>
int arch_kexec_do_relocs(int r_type, void *loc, unsigned long val,
unsigned long addr)
{
switch (r_type) {
case R_390_NONE:
break;
case R_390_8: /* Direct 8 bit. */
*(u8 *)loc = val;
break;
case R_390_12: /* Direct 12 bit. */
*(u16 *)loc &= 0xf000;
*(u16 *)loc |= val & 0xfff;
break;
case R_390_16: /* Direct 16 bit. */
*(u16 *)loc = val;
break;
case R_390_20: /* Direct 20 bit. */
*(u32 *)loc &= 0xf00000ff;
*(u32 *)loc |= (val & 0xfff) << 16; /* DL */
*(u32 *)loc |= (val & 0xff000) >> 4; /* DH */
break;
case R_390_32: /* Direct 32 bit. */
*(u32 *)loc = val;
break;
case R_390_64: /* Direct 64 bit. */
case R_390_GLOB_DAT:
case R_390_JMP_SLOT:
*(u64 *)loc = val;
break;
case R_390_PC16: /* PC relative 16 bit. */
*(u16 *)loc = (val - addr);
break;
case R_390_PC16DBL: /* PC relative 16 bit shifted by 1. */
*(u16 *)loc = (val - addr) >> 1;
break;
case R_390_PC32DBL: /* PC relative 32 bit shifted by 1. */
*(u32 *)loc = (val - addr) >> 1;
break;
case R_390_PC32: /* PC relative 32 bit. */
*(u32 *)loc = (val - addr);
break;
case R_390_PC64: /* PC relative 64 bit. */
*(u64 *)loc = (val - addr);
break;
case R_390_RELATIVE:
*(unsigned long *) loc = val;
break;
default:
return 1;
}
return 0;
}
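/*
* Worked example (sketch) for the *DBL relocations above: s390
* branch-relative instructions encode their displacement in halfwords, so
* for R_390_PC32DBL a target val = 0x1000 patched at addr = 0x500 is stored
* as (0x1000 - 0x500) >> 1 = 0x580.
*/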
| linux-master | arch/s390/kernel/machine_kexec_reloc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* S390 version
* Copyright IBM Corp. 1999, 2000
* Author(s): Martin Schwidefsky ([email protected]),
* Thomas Spatzier ([email protected])
*
* Derived from "arch/i386/kernel/sys_i386.c"
*
* This file contains various random system calls that
* have a non-standard calling sequence on the Linux/s390
* platform.
*/
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/unistd.h>
#include <linux/ipc.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/thread_info.h>
#include <linux/entry-common.h>
#include <asm/ptrace.h>
#include <asm/vtime.h>
#include "entry.h"
/*
* Perform the mmap() system call. Linux for S/390 isn't able to handle more
* than 5 system call parameters, so this system call uses a memory block
* for parameter passing.
*/
struct s390_mmap_arg_struct {
unsigned long addr;
unsigned long len;
unsigned long prot;
unsigned long flags;
unsigned long fd;
unsigned long offset;
};
SYSCALL_DEFINE1(mmap2, struct s390_mmap_arg_struct __user *, arg)
{
struct s390_mmap_arg_struct a;
int error = -EFAULT;
if (copy_from_user(&a, arg, sizeof(a)))
goto out;
error = ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset);
out:
return error;
}
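/*
* Userspace-side sketch of the convention described above (illustrative
* only, normally hidden inside the C library): the six mmap arguments are
* stored in a struct s390_mmap_arg_struct, e.g.
*
* struct s390_mmap_arg_struct arg = {
* .addr = 0, .len = 4096, .prot = PROT_READ | PROT_WRITE,
* .flags = MAP_PRIVATE | MAP_ANONYMOUS, .fd = -1, .offset = 0,
* };
*
* and a pointer to this block is passed as the single system call argument.
*/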
#ifdef CONFIG_SYSVIPC
/*
* sys_ipc() is the de-multiplexer for the SysV IPC calls.
*/
SYSCALL_DEFINE5(s390_ipc, uint, call, int, first, unsigned long, second,
unsigned long, third, void __user *, ptr)
{
if (call >> 16)
return -EINVAL;
/* The s390 sys_ipc variant has only five parameters instead of six
* like the generic variant. The only difference is the handling of
* the SEMTIMEDOP subcall where on s390 the third parameter is used
* as a pointer to a struct timespec where the generic variant uses
* the fifth parameter.
* Therefore we can call the generic variant by simply passing the
* third parameter also as fifth parameter.
*/
return ksys_ipc(call, first, second, third, ptr, third);
}
#endif /* CONFIG_SYSVIPC */
SYSCALL_DEFINE1(s390_personality, unsigned int, personality)
{
unsigned int ret = current->personality;
if (personality(current->personality) == PER_LINUX32 &&
personality(personality) == PER_LINUX)
personality |= PER_LINUX32;
if (personality != 0xffffffff)
set_personality(personality);
if (personality(ret) == PER_LINUX32)
ret &= ~PER_LINUX32;
return ret;
}
SYSCALL_DEFINE0(ni_syscall)
{
return -ENOSYS;
}
static void do_syscall(struct pt_regs *regs)
{
unsigned long nr;
nr = regs->int_code & 0xffff;
if (!nr) {
nr = regs->gprs[1] & 0xffff;
regs->int_code &= ~0xffffUL;
regs->int_code |= nr;
}
regs->gprs[2] = nr;
if (nr == __NR_restart_syscall && !(current->restart_block.arch_data & 1)) {
regs->psw.addr = current->restart_block.arch_data;
current->restart_block.arch_data = 1;
}
nr = syscall_enter_from_user_mode_work(regs, nr);
/*
* In the s390 ptrace ABI, both the syscall number and the return value
* use gpr2. However, userspace puts the syscall number either in the
* svc instruction itself or in gpr1. To make at least skipping syscalls
* work, the ptrace code sets PIF_SYSCALL_RET_SET, which is checked here
* and if set, the syscall will be skipped.
*/
if (unlikely(test_and_clear_pt_regs_flag(regs, PIF_SYSCALL_RET_SET)))
goto out;
regs->gprs[2] = -ENOSYS;
if (likely(nr >= NR_syscalls))
goto out;
do {
regs->gprs[2] = current->thread.sys_call_table[nr](regs);
} while (test_and_clear_pt_regs_flag(regs, PIF_EXECVE_PGSTE_RESTART));
out:
syscall_exit_to_user_mode_work(regs);
}
void noinstr __do_syscall(struct pt_regs *regs, int per_trap)
{
add_random_kstack_offset();
enter_from_user_mode(regs);
regs->psw = S390_lowcore.svc_old_psw;
regs->int_code = S390_lowcore.svc_int_code;
update_timer_sys();
if (static_branch_likely(&cpu_has_bear))
current->thread.last_break = regs->last_break;
local_irq_enable();
regs->orig_gpr2 = regs->gprs[2];
if (per_trap)
set_thread_flag(TIF_PER_TRAP);
regs->flags = 0;
set_pt_regs_flag(regs, PIF_SYSCALL);
do_syscall(regs);
exit_to_user_mode();
}
| linux-master | arch/s390/kernel/syscall.c |
// SPDX-License-Identifier: GPL-2.0
/*
* OS info memory interface
*
* Copyright IBM Corp. 2012
* Author(s): Michael Holzheu <[email protected]>
*/
#define KMSG_COMPONENT "os_info"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/crash_dump.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <asm/checksum.h>
#include <asm/abs_lowcore.h>
#include <asm/os_info.h>
#include <asm/maccess.h>
#include <asm/asm-offsets.h>
/*
* OS info structure has to be page aligned
*/
static struct os_info os_info __page_aligned_data;
/*
* Compute checksum over OS info structure
*/
u32 os_info_csum(struct os_info *os_info)
{
int size = sizeof(*os_info) - offsetof(struct os_info, version_major);
return (__force u32)csum_partial(&os_info->version_major, size, 0);
}
/*
* Add crashkernel info to OS info and update checksum
*/
void os_info_crashkernel_add(unsigned long base, unsigned long size)
{
os_info.crashkernel_addr = (u64)(unsigned long)base;
os_info.crashkernel_size = (u64)(unsigned long)size;
os_info.csum = os_info_csum(&os_info);
}
/*
* Add OS info entry and update checksum
*/
void os_info_entry_add(int nr, void *ptr, u64 size)
{
os_info.entry[nr].addr = __pa(ptr);
os_info.entry[nr].size = size;
os_info.entry[nr].csum = (__force u32)csum_partial(ptr, size, 0);
os_info.csum = os_info_csum(&os_info);
}
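/*
* Usage sketch: callers publish a buffer that stays allocated, for example
* (buf and len being placeholders)
*
* os_info_entry_add(OS_INFO_VMCOREINFO, buf, len);
*
* so that a later kdump kernel can retrieve the data again through
* os_info_old_entry().
*/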
/*
* Initialize OS info structure and set lowcore pointer
*/
void __init os_info_init(void)
{
struct lowcore *abs_lc;
os_info.version_major = OS_INFO_VERSION_MAJOR;
os_info.version_minor = OS_INFO_VERSION_MINOR;
os_info.magic = OS_INFO_MAGIC;
os_info.csum = os_info_csum(&os_info);
abs_lc = get_abs_lowcore();
abs_lc->os_info = __pa(&os_info);
put_abs_lowcore(abs_lc);
}
#ifdef CONFIG_CRASH_DUMP
static struct os_info *os_info_old;
/*
* Allocate and copy OS info entry from oldmem
*/
static void os_info_old_alloc(int nr, int align)
{
unsigned long addr, size = 0;
char *buf, *buf_align, *msg;
u32 csum;
addr = os_info_old->entry[nr].addr;
if (!addr) {
msg = "not available";
goto fail;
}
size = os_info_old->entry[nr].size;
buf = kmalloc(size + align - 1, GFP_KERNEL);
if (!buf) {
msg = "alloc failed";
goto fail;
}
buf_align = PTR_ALIGN(buf, align);
if (copy_oldmem_kernel(buf_align, addr, size)) {
msg = "copy failed";
goto fail_free;
}
csum = (__force u32)csum_partial(buf_align, size, 0);
if (csum != os_info_old->entry[nr].csum) {
msg = "checksum failed";
goto fail_free;
}
os_info_old->entry[nr].addr = (u64)(unsigned long)buf_align;
msg = "copied";
goto out;
fail_free:
kfree(buf);
fail:
os_info_old->entry[nr].addr = 0;
out:
pr_info("entry %i: %s (addr=0x%lx size=%lu)\n",
nr, msg, addr, size);
}
/*
* Initialize os info and os info entries from oldmem
*/
static void os_info_old_init(void)
{
static int os_info_init;
unsigned long addr;
if (os_info_init)
return;
if (!oldmem_data.start)
goto fail;
if (copy_oldmem_kernel(&addr, __LC_OS_INFO, sizeof(addr)))
goto fail;
if (addr == 0 || addr % PAGE_SIZE)
goto fail;
os_info_old = kzalloc(sizeof(*os_info_old), GFP_KERNEL);
if (!os_info_old)
goto fail;
if (copy_oldmem_kernel(os_info_old, addr, sizeof(*os_info_old)))
goto fail_free;
if (os_info_old->magic != OS_INFO_MAGIC)
goto fail_free;
if (os_info_old->csum != os_info_csum(os_info_old))
goto fail_free;
if (os_info_old->version_major > OS_INFO_VERSION_MAJOR)
goto fail_free;
os_info_old_alloc(OS_INFO_VMCOREINFO, 1);
os_info_old_alloc(OS_INFO_REIPL_BLOCK, 1);
pr_info("crashkernel: addr=0x%lx size=%lu\n",
(unsigned long) os_info_old->crashkernel_addr,
(unsigned long) os_info_old->crashkernel_size);
os_info_init = 1;
return;
fail_free:
kfree(os_info_old);
fail:
os_info_init = 1;
os_info_old = NULL;
}
/*
* Return pointer to OS info entry and its size
*/
void *os_info_old_entry(int nr, unsigned long *size)
{
os_info_old_init();
if (!os_info_old)
return NULL;
if (!os_info_old->entry[nr].addr)
return NULL;
*size = (unsigned long) os_info_old->entry[nr].size;
return (void *)(unsigned long)os_info_old->entry[nr].addr;
}
#endif
| linux-master | arch/s390/kernel/os_info.c |
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/rethook.h>
#include <linux/kprobes.h>
#include "rethook.h"
void arch_rethook_prepare(struct rethook_node *rh, struct pt_regs *regs, bool mcount)
{
rh->ret_addr = regs->gprs[14];
rh->frame = regs->gprs[15];
/* Replace the return addr with trampoline addr */
regs->gprs[14] = (unsigned long)&arch_rethook_trampoline;
}
NOKPROBE_SYMBOL(arch_rethook_prepare);
void arch_rethook_fixup_return(struct pt_regs *regs,
unsigned long correct_ret_addr)
{
/* Replace fake return address with real one. */
regs->gprs[14] = correct_ret_addr;
}
NOKPROBE_SYMBOL(arch_rethook_fixup_return);
/*
* Called from arch_rethook_trampoline
*/
unsigned long arch_rethook_trampoline_callback(struct pt_regs *regs)
{
return rethook_trampoline_handler(regs, regs->gprs[15]);
}
NOKPROBE_SYMBOL(arch_rethook_trampoline_callback);
/* assembler function that handles the rethook must not be probed itself */
NOKPROBE_SYMBOL(arch_rethook_trampoline);
| linux-master | arch/s390/kernel/rethook.c |
// SPDX-License-Identifier: GPL-2.0
/*
* S390 version
* Copyright IBM Corp. 1999, 2012
* Author(s): Hartmut Penner ([email protected]),
* Martin Schwidefsky ([email protected])
*
* Derived from "arch/i386/kernel/setup.c"
* Copyright (C) 1995, Linus Torvalds
*/
/*
* This file handles the architecture-dependent parts of initialization
*/
#define KMSG_COMPONENT "setup"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/user.h>
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/root_dev.h>
#include <linux/console.h>
#include <linux/kernel_stat.h>
#include <linux/dma-map-ops.h>
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pfn.h>
#include <linux/ctype.h>
#include <linux/reboot.h>
#include <linux/topology.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/memory.h>
#include <linux/compat.h>
#include <linux/start_kernel.h>
#include <linux/hugetlb.h>
#include <linux/kmemleak.h>
#include <asm/archrandom.h>
#include <asm/boot_data.h>
#include <asm/ipl.h>
#include <asm/facility.h>
#include <asm/smp.h>
#include <asm/mmu_context.h>
#include <asm/cpcmd.h>
#include <asm/abs_lowcore.h>
#include <asm/nmi.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/ebcdic.h>
#include <asm/diag.h>
#include <asm/os_info.h>
#include <asm/sclp.h>
#include <asm/stacktrace.h>
#include <asm/sysinfo.h>
#include <asm/numa.h>
#include <asm/alternative.h>
#include <asm/nospec-branch.h>
#include <asm/physmem_info.h>
#include <asm/maccess.h>
#include <asm/uv.h>
#include <asm/asm-offsets.h>
#include "entry.h"
/*
* Machine setup..
*/
unsigned int console_mode = 0;
EXPORT_SYMBOL(console_mode);
unsigned int console_devno = -1;
EXPORT_SYMBOL(console_devno);
unsigned int console_irq = -1;
EXPORT_SYMBOL(console_irq);
/*
* Some code and data needs to stay below 2 GB, even when the kernel would be
* relocated above 2 GB, because it has to use 31 bit addresses.
* Such code and data is part of the .amode31 section.
*/
char __amode31_ref *__samode31 = _samode31;
char __amode31_ref *__eamode31 = _eamode31;
char __amode31_ref *__stext_amode31 = _stext_amode31;
char __amode31_ref *__etext_amode31 = _etext_amode31;
struct exception_table_entry __amode31_ref *__start_amode31_ex_table = _start_amode31_ex_table;
struct exception_table_entry __amode31_ref *__stop_amode31_ex_table = _stop_amode31_ex_table;
/*
* Control registers CR2, CR5 and CR15 are initialized with addresses
* of tables that must be placed below 2G which is handled by the AMODE31
* sections.
* Because the AMODE31 sections are relocated below 2G at startup,
* the content of control registers CR2, CR5 and CR15 must be updated
* with new addresses after the relocation. The initial initialization of
* control registers occurs in head64.S and then gets updated again after AMODE31
* relocation. We must access the relevant AMODE31 tables indirectly via
* pointers placed in the .amode31.refs linker section. Those pointers get
* updated automatically during AMODE31 relocation and always contain a valid
* address within AMODE31 sections.
*/
static __amode31_data u32 __ctl_duct_amode31[16] __aligned(64);
static __amode31_data u64 __ctl_aste_amode31[8] __aligned(64) = {
[1] = 0xffffffffffffffff
};
static __amode31_data u32 __ctl_duald_amode31[32] __aligned(128) = {
0x80000000, 0, 0, 0,
0x80000000, 0, 0, 0,
0x80000000, 0, 0, 0,
0x80000000, 0, 0, 0,
0x80000000, 0, 0, 0,
0x80000000, 0, 0, 0,
0x80000000, 0, 0, 0,
0x80000000, 0, 0, 0
};
static __amode31_data u32 __ctl_linkage_stack_amode31[8] __aligned(64) = {
0, 0, 0x89000000, 0,
0, 0, 0x8a000000, 0
};
static u64 __amode31_ref *__ctl_aste = __ctl_aste_amode31;
static u32 __amode31_ref *__ctl_duald = __ctl_duald_amode31;
static u32 __amode31_ref *__ctl_linkage_stack = __ctl_linkage_stack_amode31;
static u32 __amode31_ref *__ctl_duct = __ctl_duct_amode31;
unsigned long __bootdata_preserved(max_mappable);
unsigned long __bootdata(ident_map_size);
struct physmem_info __bootdata(physmem_info);
unsigned long __bootdata_preserved(__kaslr_offset);
int __bootdata_preserved(__kaslr_enabled);
unsigned int __bootdata_preserved(zlib_dfltcc_support);
EXPORT_SYMBOL(zlib_dfltcc_support);
u64 __bootdata_preserved(stfle_fac_list[16]);
EXPORT_SYMBOL(stfle_fac_list);
u64 __bootdata_preserved(alt_stfle_fac_list[16]);
struct oldmem_data __bootdata_preserved(oldmem_data);
unsigned long VMALLOC_START;
EXPORT_SYMBOL(VMALLOC_START);
unsigned long VMALLOC_END;
EXPORT_SYMBOL(VMALLOC_END);
struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);
unsigned long vmemmap_size;
unsigned long MODULES_VADDR;
unsigned long MODULES_END;
/* An array with a pointer to the lowcore of every CPU. */
struct lowcore *lowcore_ptr[NR_CPUS];
EXPORT_SYMBOL(lowcore_ptr);
DEFINE_STATIC_KEY_FALSE(cpu_has_bear);
/*
* The Write Back bit position in the physaddr is given by the SLPC PCI.
* Leaving the mask zero always uses write through which is safe
*/
unsigned long mio_wb_bit_mask __ro_after_init;
/*
* This is set up by the setup routine at boot time.
* For S390 we need to find out what we have to set up,
* using address 0x10400 ...
*/
#include <asm/setup.h>
/*
* condev= and conmode= setup parameter.
*/
static int __init condev_setup(char *str)
{
int vdev;
vdev = simple_strtoul(str, &str, 0);
if (vdev >= 0 && vdev < 65536) {
console_devno = vdev;
console_irq = -1;
}
return 1;
}
__setup("condev=", condev_setup);
static void __init set_preferred_console(void)
{
if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP)
add_preferred_console("ttyS", 0, NULL);
else if (CONSOLE_IS_3270)
add_preferred_console("tty3270", 0, NULL);
else if (CONSOLE_IS_VT220)
add_preferred_console("ttysclp", 0, NULL);
else if (CONSOLE_IS_HVC)
add_preferred_console("hvc", 0, NULL);
}
static int __init conmode_setup(char *str)
{
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
if (!strcmp(str, "hwc") || !strcmp(str, "sclp"))
SET_CONSOLE_SCLP;
#endif
#if defined(CONFIG_TN3215_CONSOLE)
if (!strcmp(str, "3215"))
SET_CONSOLE_3215;
#endif
#if defined(CONFIG_TN3270_CONSOLE)
if (!strcmp(str, "3270"))
SET_CONSOLE_3270;
#endif
set_preferred_console();
return 1;
}
__setup("conmode=", conmode_setup);
static void __init conmode_default(void)
{
char query_buffer[1024];
char *ptr;
if (MACHINE_IS_VM) {
cpcmd("QUERY CONSOLE", query_buffer, 1024, NULL);
console_devno = simple_strtoul(query_buffer + 5, NULL, 16);
ptr = strstr(query_buffer, "SUBCHANNEL =");
console_irq = simple_strtoul(ptr + 13, NULL, 16);
cpcmd("QUERY TERM", query_buffer, 1024, NULL);
ptr = strstr(query_buffer, "CONMODE");
/*
* Set the conmode to 3215 so that the device recognition
* will set the cu_type of the console to 3215. If the
* conmode is 3270 and we don't set it back then both
* 3215 and the 3270 driver will try to access the console
* device (3215 as console and 3270 as normal tty).
*/
cpcmd("TERM CONMODE 3215", NULL, 0, NULL);
if (ptr == NULL) {
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
SET_CONSOLE_SCLP;
#endif
return;
}
if (str_has_prefix(ptr + 8, "3270")) {
#if defined(CONFIG_TN3270_CONSOLE)
SET_CONSOLE_3270;
#elif defined(CONFIG_TN3215_CONSOLE)
SET_CONSOLE_3215;
#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
SET_CONSOLE_SCLP;
#endif
} else if (str_has_prefix(ptr + 8, "3215")) {
#if defined(CONFIG_TN3215_CONSOLE)
SET_CONSOLE_3215;
#elif defined(CONFIG_TN3270_CONSOLE)
SET_CONSOLE_3270;
#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
SET_CONSOLE_SCLP;
#endif
}
} else if (MACHINE_IS_KVM) {
if (sclp.has_vt220 && IS_ENABLED(CONFIG_SCLP_VT220_CONSOLE))
SET_CONSOLE_VT220;
else if (sclp.has_linemode && IS_ENABLED(CONFIG_SCLP_CONSOLE))
SET_CONSOLE_SCLP;
else
SET_CONSOLE_HVC;
} else {
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
SET_CONSOLE_SCLP;
#endif
}
}
#ifdef CONFIG_CRASH_DUMP
static void __init setup_zfcpdump(void)
{
if (!is_ipl_type_dump())
return;
if (oldmem_data.start)
return;
strcat(boot_command_line, " cio_ignore=all,!ipldev,!condev");
console_loglevel = 2;
}
#else
static inline void setup_zfcpdump(void) {}
#endif /* CONFIG_CRASH_DUMP */
/*
* Reboot, halt and power_off stubs. They just call _machine_restart,
* _machine_halt or _machine_power_off.
*/
void machine_restart(char *command)
{
if ((!in_interrupt() && !in_atomic()) || oops_in_progress)
/*
* Only unblank the console if we are called in enabled
* context or a bust_spinlocks cleared the way for us.
*/
console_unblank();
_machine_restart(command);
}
void machine_halt(void)
{
if (!in_interrupt() || oops_in_progress)
/*
* Only unblank the console if we are called in enabled
* context or a bust_spinlocks cleared the way for us.
*/
console_unblank();
_machine_halt();
}
void machine_power_off(void)
{
if (!in_interrupt() || oops_in_progress)
/*
* Only unblank the console if we are called in enabled
* context or a bust_spinlocks cleared the way for us.
*/
console_unblank();
_machine_power_off();
}
/*
* Dummy power off function.
*/
void (*pm_power_off)(void) = machine_power_off;
EXPORT_SYMBOL_GPL(pm_power_off);
void *restart_stack;
unsigned long stack_alloc(void)
{
#ifdef CONFIG_VMAP_STACK
void *ret;
ret = __vmalloc_node(THREAD_SIZE, THREAD_SIZE, THREADINFO_GFP,
NUMA_NO_NODE, __builtin_return_address(0));
kmemleak_not_leak(ret);
return (unsigned long)ret;
#else
return __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
#endif
}
void stack_free(unsigned long stack)
{
#ifdef CONFIG_VMAP_STACK
vfree((void *) stack);
#else
free_pages(stack, THREAD_SIZE_ORDER);
#endif
}
void __init __noreturn arch_call_rest_init(void)
{
smp_reinit_ipl_cpu();
rest_init();
}
static unsigned long __init stack_alloc_early(void)
{
unsigned long stack;
stack = (unsigned long)memblock_alloc(THREAD_SIZE, THREAD_SIZE);
if (!stack) {
panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
__func__, THREAD_SIZE, THREAD_SIZE);
}
return stack;
}
static void __init setup_lowcore(void)
{
struct lowcore *lc, *abs_lc;
/*
* Setup lowcore for boot cpu
*/
BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * PAGE_SIZE);
lc = memblock_alloc_low(sizeof(*lc), sizeof(*lc));
if (!lc)
panic("%s: Failed to allocate %zu bytes align=%zx\n",
__func__, sizeof(*lc), sizeof(*lc));
lc->restart_psw.mask = PSW_KERNEL_BITS & ~PSW_MASK_DAT;
lc->restart_psw.addr = __pa(restart_int_handler);
lc->external_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
lc->external_new_psw.addr = (unsigned long) ext_int_handler;
lc->svc_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
lc->svc_new_psw.addr = (unsigned long) system_call;
lc->program_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
lc->program_new_psw.addr = (unsigned long) pgm_check_handler;
lc->mcck_new_psw.mask = PSW_KERNEL_BITS;
lc->mcck_new_psw.addr = (unsigned long) mcck_int_handler;
lc->io_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
lc->io_new_psw.addr = (unsigned long) io_int_handler;
lc->clock_comparator = clock_comparator_max;
lc->current_task = (unsigned long)&init_task;
lc->lpp = LPP_MAGIC;
lc->machine_flags = S390_lowcore.machine_flags;
lc->preempt_count = S390_lowcore.preempt_count;
nmi_alloc_mcesa_early(&lc->mcesad);
lc->sys_enter_timer = S390_lowcore.sys_enter_timer;
lc->exit_timer = S390_lowcore.exit_timer;
lc->user_timer = S390_lowcore.user_timer;
lc->system_timer = S390_lowcore.system_timer;
lc->steal_timer = S390_lowcore.steal_timer;
lc->last_update_timer = S390_lowcore.last_update_timer;
lc->last_update_clock = S390_lowcore.last_update_clock;
/*
* Allocate the global restart stack which is the same for
* all CPUs in case *one* of them does a PSW restart.
*/
restart_stack = (void *)(stack_alloc_early() + STACK_INIT_OFFSET);
lc->mcck_stack = stack_alloc_early() + STACK_INIT_OFFSET;
lc->async_stack = stack_alloc_early() + STACK_INIT_OFFSET;
lc->nodat_stack = stack_alloc_early() + STACK_INIT_OFFSET;
lc->kernel_stack = S390_lowcore.kernel_stack;
/*
* Set up PSW restart to call ipl.c:do_restart(). Copy the relevant
* restart data to the absolute zero lowcore. This is necessary if
* PSW restart is done on an offline CPU that has lowcore zero.
*/
lc->restart_stack = (unsigned long) restart_stack;
lc->restart_fn = (unsigned long) do_restart;
lc->restart_data = 0;
lc->restart_source = -1U;
__ctl_store(lc->cregs_save_area, 0, 15);
lc->spinlock_lockval = arch_spin_lockval(0);
lc->spinlock_index = 0;
arch_spin_lock_setup(0);
lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
lc->preempt_count = PREEMPT_DISABLED;
lc->kernel_asce = S390_lowcore.kernel_asce;
lc->user_asce = S390_lowcore.user_asce;
abs_lc = get_abs_lowcore();
abs_lc->restart_stack = lc->restart_stack;
abs_lc->restart_fn = lc->restart_fn;
abs_lc->restart_data = lc->restart_data;
abs_lc->restart_source = lc->restart_source;
abs_lc->restart_psw = lc->restart_psw;
abs_lc->restart_flags = RESTART_FLAG_CTLREGS;
memcpy(abs_lc->cregs_save_area, lc->cregs_save_area, sizeof(abs_lc->cregs_save_area));
abs_lc->program_new_psw = lc->program_new_psw;
abs_lc->mcesad = lc->mcesad;
put_abs_lowcore(abs_lc);
set_prefix(__pa(lc));
lowcore_ptr[0] = lc;
if (abs_lowcore_map(0, lowcore_ptr[0], false))
panic("Couldn't setup absolute lowcore");
}
static struct resource code_resource = {
.name = "Kernel code",
.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};
static struct resource data_resource = {
.name = "Kernel data",
.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};
static struct resource bss_resource = {
.name = "Kernel bss",
.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};
static struct resource __initdata *standard_resources[] = {
&code_resource,
&data_resource,
&bss_resource,
};
static void __init setup_resources(void)
{
struct resource *res, *std_res, *sub_res;
phys_addr_t start, end;
int j;
u64 i;
code_resource.start = (unsigned long) _text;
code_resource.end = (unsigned long) _etext - 1;
data_resource.start = (unsigned long) _etext;
data_resource.end = (unsigned long) _edata - 1;
bss_resource.start = (unsigned long) __bss_start;
bss_resource.end = (unsigned long) __bss_stop - 1;
for_each_mem_range(i, &start, &end) {
res = memblock_alloc(sizeof(*res), 8);
if (!res)
panic("%s: Failed to allocate %zu bytes align=0x%x\n",
__func__, sizeof(*res), 8);
res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
res->name = "System RAM";
res->start = start;
/*
* In memblock, end points to the first byte after the
* range while in resources, end points to the last byte in
* the range.
*/
res->end = end - 1;
request_resource(&iomem_resource, res);
for (j = 0; j < ARRAY_SIZE(standard_resources); j++) {
std_res = standard_resources[j];
if (std_res->start < res->start ||
std_res->start > res->end)
continue;
if (std_res->end > res->end) {
sub_res = memblock_alloc(sizeof(*sub_res), 8);
if (!sub_res)
panic("%s: Failed to allocate %zu bytes align=0x%x\n",
__func__, sizeof(*sub_res), 8);
*sub_res = *std_res;
sub_res->end = res->end;
std_res->start = res->end + 1;
request_resource(res, sub_res);
} else {
request_resource(res, std_res);
}
}
}
#ifdef CONFIG_CRASH_DUMP
/*
* Re-add removed crash kernel memory as reserved memory. This makes
* sure it will be mapped with the identity mapping and struct pages
* will be created, so it can be resized later on.
* However add it later since the crash kernel resource should not be
* part of the System RAM resource.
*/
if (crashk_res.end) {
memblock_add_node(crashk_res.start, resource_size(&crashk_res),
0, MEMBLOCK_NONE);
memblock_reserve(crashk_res.start, resource_size(&crashk_res));
insert_resource(&iomem_resource, &crashk_res);
}
#endif
}
static void __init setup_memory_end(void)
{
max_pfn = max_low_pfn = PFN_DOWN(ident_map_size);
pr_notice("The maximum memory size is %luMB\n", ident_map_size >> 20);
}
#ifdef CONFIG_CRASH_DUMP
/*
* When kdump is enabled, we have to ensure that no memory from the area
* [0 - crashkernel memory size] is set offline - it will be exchanged with
* the crashkernel memory region when kdump is triggered. The crashkernel
* memory region can never get offlined (pages are unmovable).
*/
static int kdump_mem_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
struct memory_notify *arg = data;
if (action != MEM_GOING_OFFLINE)
return NOTIFY_OK;
if (arg->start_pfn < PFN_DOWN(resource_size(&crashk_res)))
return NOTIFY_BAD;
return NOTIFY_OK;
}
static struct notifier_block kdump_mem_nb = {
.notifier_call = kdump_mem_notifier,
};
#endif
/*
* Reserve page tables created by decompressor
*/
static void __init reserve_pgtables(void)
{
unsigned long start, end;
struct reserved_range *range;
for_each_physmem_reserved_type_range(RR_VMEM, range, &start, &end)
memblock_reserve(start, end - start);
}
/*
* Reserve memory for kdump kernel to be loaded with kexec
*/
static void __init reserve_crashkernel(void)
{
#ifdef CONFIG_CRASH_DUMP
unsigned long long crash_base, crash_size;
phys_addr_t low, high;
int rc;
rc = parse_crashkernel(boot_command_line, ident_map_size, &crash_size,
&crash_base);
crash_base = ALIGN(crash_base, KEXEC_CRASH_MEM_ALIGN);
crash_size = ALIGN(crash_size, KEXEC_CRASH_MEM_ALIGN);
if (rc || crash_size == 0)
return;
if (memblock.memory.regions[0].size < crash_size) {
pr_info("crashkernel reservation failed: %s\n",
"first memory chunk must be at least crashkernel size");
return;
}
low = crash_base ?: oldmem_data.start;
high = low + crash_size;
if (low >= oldmem_data.start && high <= oldmem_data.start + oldmem_data.size) {
/* The crashkernel fits into OLDMEM, reuse OLDMEM */
crash_base = low;
} else {
/* Find suitable area in free memory */
low = max_t(unsigned long, crash_size, sclp.hsa_size);
high = crash_base ? crash_base + crash_size : ULONG_MAX;
if (crash_base && crash_base < low) {
pr_info("crashkernel reservation failed: %s\n",
"crash_base too low");
return;
}
low = crash_base ?: low;
crash_base = memblock_phys_alloc_range(crash_size,
KEXEC_CRASH_MEM_ALIGN,
low, high);
}
if (!crash_base) {
pr_info("crashkernel reservation failed: %s\n",
"no suitable area found");
return;
}
if (register_memory_notifier(&kdump_mem_nb)) {
memblock_phys_free(crash_base, crash_size);
return;
}
if (!oldmem_data.start && MACHINE_IS_VM)
diag10_range(PFN_DOWN(crash_base), PFN_DOWN(crash_size));
crashk_res.start = crash_base;
crashk_res.end = crash_base + crash_size - 1;
memblock_remove(crash_base, crash_size);
pr_info("Reserving %lluMB of memory at %lluMB "
"for crashkernel (System RAM: %luMB)\n",
crash_size >> 20, crash_base >> 20,
(unsigned long)memblock.memory.total_size >> 20);
os_info_crashkernel_add(crash_base, crash_size);
#endif
}
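/*
* Example (sketch): booting with "crashkernel=256M" makes the code above
* reserve a 256 MB region (aligned to KEXEC_CRASH_MEM_ALIGN) for the kdump
* kernel; "crashkernel=256M@1G" additionally fixes the base address.
*/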
/*
* Reserve the initrd from being used by memblock
*/
static void __init reserve_initrd(void)
{
unsigned long addr, size;
if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD) || !get_physmem_reserved(RR_INITRD, &addr, &size))
return;
initrd_start = (unsigned long)__va(addr);
initrd_end = initrd_start + size;
memblock_reserve(addr, size);
}
/*
* Reserve the memory area used to pass the certificate lists
*/
static void __init reserve_certificate_list(void)
{
if (ipl_cert_list_addr)
memblock_reserve(ipl_cert_list_addr, ipl_cert_list_size);
}
static void __init reserve_physmem_info(void)
{
unsigned long addr, size;
if (get_physmem_reserved(RR_MEM_DETECT_EXTENDED, &addr, &size))
memblock_reserve(addr, size);
}
static void __init free_physmem_info(void)
{
unsigned long addr, size;
if (get_physmem_reserved(RR_MEM_DETECT_EXTENDED, &addr, &size))
memblock_phys_free(addr, size);
}
static void __init memblock_add_physmem_info(void)
{
unsigned long start, end;
int i;
pr_debug("physmem info source: %s (%hhd)\n",
get_physmem_info_source(), physmem_info.info_source);
/* keep memblock lists close to the kernel */
memblock_set_bottom_up(true);
for_each_physmem_usable_range(i, &start, &end)
memblock_add(start, end - start);
for_each_physmem_online_range(i, &start, &end)
memblock_physmem_add(start, end - start);
memblock_set_bottom_up(false);
memblock_set_node(0, ULONG_MAX, &memblock.memory, 0);
}
/*
* Reserve memory used for lowcore/command line/kernel image.
*/
static void __init reserve_kernel(void)
{
memblock_reserve(0, STARTUP_NORMAL_OFFSET);
memblock_reserve(OLDMEM_BASE, sizeof(unsigned long));
memblock_reserve(OLDMEM_SIZE, sizeof(unsigned long));
memblock_reserve(physmem_info.reserved[RR_AMODE31].start, __eamode31 - __samode31);
memblock_reserve(__pa(sclp_early_sccb), EXT_SCCB_READ_SCP);
memblock_reserve(__pa(_stext), _end - _stext);
}
static void __init setup_memory(void)
{
phys_addr_t start, end;
u64 i;
/*
* Init storage key for present memory
*/
for_each_mem_range(i, &start, &end)
storage_key_init_range(start, end);
psw_set_key(PAGE_DEFAULT_KEY);
}
static void __init relocate_amode31_section(void)
{
unsigned long amode31_size = __eamode31 - __samode31;
long amode31_offset, *ptr;
amode31_offset = physmem_info.reserved[RR_AMODE31].start - (unsigned long)__samode31;
pr_info("Relocating AMODE31 section of size 0x%08lx\n", amode31_size);
/* Move original AMODE31 section to the new one */
memmove((void *)physmem_info.reserved[RR_AMODE31].start, __samode31, amode31_size);
/* Zero out the old AMODE31 section to catch invalid accesses within it */
memset(__samode31, 0, amode31_size);
/* Update all AMODE31 region references */
for (ptr = _start_amode31_refs; ptr != _end_amode31_refs; ptr++)
*ptr += amode31_offset;
}
/* This must be called after AMODE31 relocation */
static void __init setup_cr(void)
{
union ctlreg2 cr2;
union ctlreg5 cr5;
union ctlreg15 cr15;
__ctl_duct[1] = (unsigned long)__ctl_aste;
__ctl_duct[2] = (unsigned long)__ctl_aste;
__ctl_duct[4] = (unsigned long)__ctl_duald;
/* Update control registers CR2, CR5 and CR15 */
__ctl_store(cr2.val, 2, 2);
__ctl_store(cr5.val, 5, 5);
__ctl_store(cr15.val, 15, 15);
cr2.ducto = (unsigned long)__ctl_duct >> 6;
cr5.pasteo = (unsigned long)__ctl_duct >> 6;
cr15.lsea = (unsigned long)__ctl_linkage_stack >> 3;
__ctl_load(cr2.val, 2, 2);
__ctl_load(cr5.val, 5, 5);
__ctl_load(cr15.val, 15, 15);
}
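/*
* Note (sketch): the >> 6 shifts store 64-byte aligned table origins in
* CR2 and CR5, matching the __aligned(64) definitions above, while the
* >> 3 in CR15 stores an 8-byte aligned linkage stack entry address.
*/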
/*
* Add system information as device randomness
*/
static void __init setup_randomness(void)
{
struct sysinfo_3_2_2 *vmms;
vmms = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
if (!vmms)
panic("Failed to allocate memory for sysinfo structure\n");
if (stsi(vmms, 3, 2, 2) == 0 && vmms->count)
add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count);
memblock_free(vmms, PAGE_SIZE);
if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG))
static_branch_enable(&s390_arch_random_available);
}
/*
* Find the correct size for the task_struct. This depends on
* the size of the struct fpu at the end of the thread_struct
* which is embedded in the task_struct.
*/
static void __init setup_task_size(void)
{
int task_size = sizeof(struct task_struct);
if (!MACHINE_HAS_VX) {
task_size -= sizeof(__vector128) * __NUM_VXRS;
task_size += sizeof(freg_t) * __NUM_FPRS;
}
arch_task_struct_size = task_size;
}
/*
* Issue diagnose 318 to set the control program name and
* version codes.
*/
static void __init setup_control_program_code(void)
{
union diag318_info diag318_info = {
.cpnc = CPNC_LINUX,
.cpvc = 0,
};
if (!sclp.has_diag318)
return;
diag_stat_inc(DIAG_STAT_X318);
asm volatile("diag %0,0,0x318\n" : : "d" (diag318_info.val));
}
/*
* Print the component list from the IPL report
*/
static void __init log_component_list(void)
{
struct ipl_rb_component_entry *ptr, *end;
char *str;
if (!early_ipl_comp_list_addr)
return;
if (ipl_block.hdr.flags & IPL_PL_FLAG_SIPL)
pr_info("Linux is running with Secure-IPL enabled\n");
else
pr_info("Linux is running with Secure-IPL disabled\n");
ptr = __va(early_ipl_comp_list_addr);
end = (void *) ptr + early_ipl_comp_list_size;
pr_info("The IPL report contains the following components:\n");
while (ptr < end) {
if (ptr->flags & IPL_RB_COMPONENT_FLAG_SIGNED) {
if (ptr->flags & IPL_RB_COMPONENT_FLAG_VERIFIED)
str = "signed, verified";
else
str = "signed, verification failed";
} else {
str = "not signed";
}
pr_info("%016llx - %016llx (%s)\n",
ptr->addr, ptr->addr + ptr->len, str);
ptr++;
}
}
/*
* Setup function called from init/main.c just after the banner
* was printed.
*/
void __init setup_arch(char **cmdline_p)
{
/*
* print what head.S has found out about the machine
*/
if (MACHINE_IS_VM)
pr_info("Linux is running as a z/VM "
"guest operating system in 64-bit mode\n");
else if (MACHINE_IS_KVM)
pr_info("Linux is running under KVM in 64-bit mode\n");
else if (MACHINE_IS_LPAR)
pr_info("Linux is running natively in 64-bit mode\n");
else
pr_info("Linux is running as a guest in 64-bit mode\n");
log_component_list();
/* Have one command line that is parsed and saved in /proc/cmdline */
/* boot_command_line has been already set up in early.c */
*cmdline_p = boot_command_line;
ROOT_DEV = Root_RAM0;
setup_initial_init_mm(_text, _etext, _edata, _end);
if (IS_ENABLED(CONFIG_EXPOLINE_AUTO))
nospec_auto_detect();
jump_label_init();
parse_early_param();
#ifdef CONFIG_CRASH_DUMP
/* Deactivate elfcorehdr= kernel parameter */
elfcorehdr_addr = ELFCORE_ADDR_MAX;
#endif
os_info_init();
setup_ipl();
setup_task_size();
setup_control_program_code();
/* Do some memory reservations *before* memory is added to memblock */
reserve_pgtables();
reserve_kernel();
reserve_initrd();
reserve_certificate_list();
reserve_physmem_info();
memblock_set_current_limit(ident_map_size);
memblock_allow_resize();
/* Get information about *all* installed memory */
memblock_add_physmem_info();
free_physmem_info();
setup_memory_end();
memblock_dump_all();
setup_memory();
relocate_amode31_section();
setup_cr();
setup_uv();
dma_contiguous_reserve(ident_map_size);
vmcp_cma_reserve();
if (MACHINE_HAS_EDAT2)
hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
reserve_crashkernel();
#ifdef CONFIG_CRASH_DUMP
/*
* Be aware that smp_save_dump_secondary_cpus() triggers a system reset.
* Therefore CPU and device initialization should be done afterwards.
*/
smp_save_dump_secondary_cpus();
#endif
setup_resources();
setup_lowcore();
smp_fill_possible_mask();
cpu_detect_mhz_feature();
cpu_init();
numa_setup();
smp_detect_cpus();
topology_init_early();
if (test_facility(193))
static_branch_enable(&cpu_has_bear);
/*
* Create kernel page tables.
*/
paging_init();
/*
* After paging_init created the kernel page table, the new PSWs
* in lowcore can now run with DAT enabled.
*/
#ifdef CONFIG_CRASH_DUMP
smp_save_dump_ipl_cpu();
#endif
/* Setup default console */
conmode_default();
set_preferred_console();
apply_alternative_instructions();
if (IS_ENABLED(CONFIG_EXPOLINE))
nospec_init_branches();
/* Setup zfcp/nvme dump support */
setup_zfcpdump();
/* Add system specific data to the random pool */
setup_randomness();
}
| linux-master | arch/s390/kernel/setup.c |
// SPDX-License-Identifier: GPL-2.0
/*
* S/390 debug facility
*
* Copyright IBM Corp. 1999, 2020
*
* Author(s): Michael Holzheu ([email protected]),
* Holger Smolinski ([email protected])
*
* Bugreports to: <[email protected]>
*/
#define KMSG_COMPONENT "s390dbf"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/sysctl.h>
#include <linux/uaccess.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/minmax.h>
#include <linux/debugfs.h>
#include <asm/debug.h>
#define DEBUG_PROLOG_ENTRY -1
#define ALL_AREAS 0 /* copy all debug areas */
#define NO_AREAS 1 /* copy no debug areas */
/* typedefs */
typedef struct file_private_info {
loff_t offset; /* offset of last read in file */
int act_area; /* number of last formatted area */
int act_page; /* act page in given area */
int act_entry; /* last formatted entry (offset */
/* relative to beginning of last */
/* formatted page) */
size_t act_entry_offset; /* up to this offset we copied */
/* in last read the last formatted */
/* entry to userland */
char temp_buf[2048]; /* buffer for output */
debug_info_t *debug_info_org; /* original debug information */
debug_info_t *debug_info_snap; /* snapshot of debug information */
struct debug_view *view; /* used view of debug info */
} file_private_info_t;
typedef struct {
char *string;
/*
* This assumes that all args are converted into longs
* on s390 this is the case for all types of parameters
* except for floats and long long (32 bit)
*
*/
long args[];
} debug_sprintf_entry_t;
/* internal function prototypes */
static int debug_init(void);
static ssize_t debug_output(struct file *file, char __user *user_buf,
size_t user_len, loff_t *offset);
static ssize_t debug_input(struct file *file, const char __user *user_buf,
size_t user_len, loff_t *offset);
static int debug_open(struct inode *inode, struct file *file);
static int debug_close(struct inode *inode, struct file *file);
static debug_info_t *debug_info_create(const char *name, int pages_per_area,
int nr_areas, int buf_size, umode_t mode);
static void debug_info_get(debug_info_t *);
static void debug_info_put(debug_info_t *);
static int debug_prolog_level_fn(debug_info_t *id,
struct debug_view *view, char *out_buf);
static int debug_input_level_fn(debug_info_t *id, struct debug_view *view,
struct file *file, const char __user *user_buf,
size_t user_buf_size, loff_t *offset);
static int debug_prolog_pages_fn(debug_info_t *id,
struct debug_view *view, char *out_buf);
static int debug_input_pages_fn(debug_info_t *id, struct debug_view *view,
struct file *file, const char __user *user_buf,
size_t user_buf_size, loff_t *offset);
static int debug_input_flush_fn(debug_info_t *id, struct debug_view *view,
struct file *file, const char __user *user_buf,
size_t user_buf_size, loff_t *offset);
static int debug_hex_ascii_format_fn(debug_info_t *id, struct debug_view *view,
char *out_buf, const char *in_buf);
static int debug_sprintf_format_fn(debug_info_t *id, struct debug_view *view,
char *out_buf, const char *inbuf);
static void debug_areas_swap(debug_info_t *a, debug_info_t *b);
static void debug_events_append(debug_info_t *dest, debug_info_t *src);
/* globals */
struct debug_view debug_hex_ascii_view = {
"hex_ascii",
NULL,
&debug_dflt_header_fn,
&debug_hex_ascii_format_fn,
NULL,
NULL
};
EXPORT_SYMBOL(debug_hex_ascii_view);
static struct debug_view debug_level_view = {
"level",
&debug_prolog_level_fn,
NULL,
NULL,
&debug_input_level_fn,
NULL
};
static struct debug_view debug_pages_view = {
"pages",
&debug_prolog_pages_fn,
NULL,
NULL,
&debug_input_pages_fn,
NULL
};
static struct debug_view debug_flush_view = {
"flush",
NULL,
NULL,
NULL,
&debug_input_flush_fn,
NULL
};
struct debug_view debug_sprintf_view = {
"sprintf",
NULL,
&debug_dflt_header_fn,
&debug_sprintf_format_fn,
NULL,
NULL
};
EXPORT_SYMBOL(debug_sprintf_view);
/* used by dump analysis tools to determine version of debug feature */
static unsigned int __used debug_feature_version = __DEBUG_FEATURE_VERSION;
/* static globals */
static debug_info_t *debug_area_first;
static debug_info_t *debug_area_last;
static DEFINE_MUTEX(debug_mutex);
static int initialized;
static int debug_critical;
static const struct file_operations debug_file_ops = {
.owner = THIS_MODULE,
.read = debug_output,
.write = debug_input,
.open = debug_open,
.release = debug_close,
.llseek = no_llseek,
};
static struct dentry *debug_debugfs_root_entry;
/* functions */
/*
* debug_areas_alloc
* - Debug areas are implemented as a three-dimensional array:
* areas[areanumber][pagenumber][pageoffset]
*/
static debug_entry_t ***debug_areas_alloc(int pages_per_area, int nr_areas)
{
debug_entry_t ***areas;
int i, j;
areas = kmalloc_array(nr_areas, sizeof(debug_entry_t **), GFP_KERNEL);
if (!areas)
goto fail_malloc_areas;
for (i = 0; i < nr_areas; i++) {
/* __GFP_NOWARN to avoid a user-triggerable WARN, allocation failures are handled */
areas[i] = kmalloc_array(pages_per_area,
sizeof(debug_entry_t *),
GFP_KERNEL | __GFP_NOWARN);
if (!areas[i])
goto fail_malloc_areas2;
for (j = 0; j < pages_per_area; j++) {
areas[i][j] = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!areas[i][j]) {
for (j--; j >= 0 ; j--)
kfree(areas[i][j]);
kfree(areas[i]);
goto fail_malloc_areas2;
}
}
}
return areas;
fail_malloc_areas2:
for (i--; i >= 0; i--) {
for (j = 0; j < pages_per_area; j++)
kfree(areas[i][j]);
kfree(areas[i]);
}
kfree(areas);
fail_malloc_areas:
return NULL;
}
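/*
 * Added illustration: with nr_areas == 2 and pages_per_area == 4 the array
 * returned above is indexed as areas[0..1][0..3], and each areas[i][j]
 * points to one zeroed page that is later filled with consecutive
 * debug_entry_t records of entry_size bytes each.
 */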
/*
* debug_info_alloc
* - alloc new debug-info
*/
static debug_info_t *debug_info_alloc(const char *name, int pages_per_area,
int nr_areas, int buf_size, int level,
int mode)
{
debug_info_t *rc;
/* alloc everything */
rc = kmalloc(sizeof(debug_info_t), GFP_KERNEL);
if (!rc)
goto fail_malloc_rc;
rc->active_entries = kcalloc(nr_areas, sizeof(int), GFP_KERNEL);
if (!rc->active_entries)
goto fail_malloc_active_entries;
rc->active_pages = kcalloc(nr_areas, sizeof(int), GFP_KERNEL);
if (!rc->active_pages)
goto fail_malloc_active_pages;
if ((mode == ALL_AREAS) && (pages_per_area != 0)) {
rc->areas = debug_areas_alloc(pages_per_area, nr_areas);
if (!rc->areas)
goto fail_malloc_areas;
} else {
rc->areas = NULL;
}
/* initialize members */
spin_lock_init(&rc->lock);
rc->pages_per_area = pages_per_area;
rc->nr_areas = nr_areas;
rc->active_area = 0;
rc->level = level;
rc->buf_size = buf_size;
rc->entry_size = sizeof(debug_entry_t) + buf_size;
strscpy(rc->name, name, sizeof(rc->name));
memset(rc->views, 0, DEBUG_MAX_VIEWS * sizeof(struct debug_view *));
memset(rc->debugfs_entries, 0, DEBUG_MAX_VIEWS * sizeof(struct dentry *));
refcount_set(&(rc->ref_count), 0);
return rc;
fail_malloc_areas:
kfree(rc->active_pages);
fail_malloc_active_pages:
kfree(rc->active_entries);
fail_malloc_active_entries:
kfree(rc);
fail_malloc_rc:
return NULL;
}
/*
* debug_areas_free
* - free all debug areas
*/
static void debug_areas_free(debug_info_t *db_info)
{
int i, j;
if (!db_info->areas)
return;
for (i = 0; i < db_info->nr_areas; i++) {
for (j = 0; j < db_info->pages_per_area; j++)
kfree(db_info->areas[i][j]);
kfree(db_info->areas[i]);
}
kfree(db_info->areas);
db_info->areas = NULL;
}
/*
* debug_info_free
* - free memory debug-info
*/
static void debug_info_free(debug_info_t *db_info)
{
debug_areas_free(db_info);
kfree(db_info->active_entries);
kfree(db_info->active_pages);
kfree(db_info);
}
/*
* debug_info_create
* - create new debug-info
*/
static debug_info_t *debug_info_create(const char *name, int pages_per_area,
int nr_areas, int buf_size, umode_t mode)
{
debug_info_t *rc;
rc = debug_info_alloc(name, pages_per_area, nr_areas, buf_size,
DEBUG_DEFAULT_LEVEL, ALL_AREAS);
if (!rc)
goto out;
rc->mode = mode & ~S_IFMT;
refcount_set(&rc->ref_count, 1);
out:
return rc;
}
/*
* debug_info_copy
* - copy debug-info
*/
static debug_info_t *debug_info_copy(debug_info_t *in, int mode)
{
unsigned long flags;
debug_info_t *rc;
int i, j;
/* get a consistent copy of the debug areas */
do {
rc = debug_info_alloc(in->name, in->pages_per_area,
in->nr_areas, in->buf_size, in->level, mode);
spin_lock_irqsave(&in->lock, flags);
if (!rc)
goto out;
/* has something changed in the meantime ? */
if ((rc->pages_per_area == in->pages_per_area) &&
(rc->nr_areas == in->nr_areas)) {
break;
}
spin_unlock_irqrestore(&in->lock, flags);
debug_info_free(rc);
} while (1);
if (mode == NO_AREAS)
goto out;
for (i = 0; i < in->nr_areas; i++) {
for (j = 0; j < in->pages_per_area; j++)
memcpy(rc->areas[i][j], in->areas[i][j], PAGE_SIZE);
}
out:
spin_unlock_irqrestore(&in->lock, flags);
return rc;
}
/*
* debug_info_get
* - increments reference count for debug-info
*/
static void debug_info_get(debug_info_t *db_info)
{
if (db_info)
refcount_inc(&db_info->ref_count);
}
/*
* debug_info_put:
* - decreases reference count for debug-info and frees it if necessary
*/
static void debug_info_put(debug_info_t *db_info)
{
if (!db_info)
return;
if (refcount_dec_and_test(&db_info->ref_count))
debug_info_free(db_info);
}
/*
* debug_format_entry:
* - format one debug entry and return size of formatted data
*/
static int debug_format_entry(file_private_info_t *p_info)
{
debug_info_t *id_snap = p_info->debug_info_snap;
struct debug_view *view = p_info->view;
debug_entry_t *act_entry;
size_t len = 0;
if (p_info->act_entry == DEBUG_PROLOG_ENTRY) {
/* print prolog */
if (view->prolog_proc)
len += view->prolog_proc(id_snap, view, p_info->temp_buf);
goto out;
}
if (!id_snap->areas) /* this is true, if we have a prolog only view */
goto out; /* or if 'pages_per_area' is 0 */
act_entry = (debug_entry_t *) ((char *)id_snap->areas[p_info->act_area]
[p_info->act_page] + p_info->act_entry);
if (act_entry->clock == 0LL)
goto out; /* empty entry */
if (view->header_proc)
len += view->header_proc(id_snap, view, p_info->act_area,
act_entry, p_info->temp_buf + len);
if (view->format_proc)
len += view->format_proc(id_snap, view, p_info->temp_buf + len,
DEBUG_DATA(act_entry));
out:
return len;
}
/*
* debug_next_entry:
* - goto next entry in p_info
*/
static inline int debug_next_entry(file_private_info_t *p_info)
{
debug_info_t *id;
id = p_info->debug_info_snap;
if (p_info->act_entry == DEBUG_PROLOG_ENTRY) {
p_info->act_entry = 0;
p_info->act_page = 0;
goto out;
}
if (!id->areas)
return 1;
p_info->act_entry += id->entry_size;
/* switch to next page, if we reached the end of the page */
if (p_info->act_entry > (PAGE_SIZE - id->entry_size)) {
/* next page */
p_info->act_entry = 0;
p_info->act_page += 1;
if ((p_info->act_page % id->pages_per_area) == 0) {
/* next area */
p_info->act_area++;
p_info->act_page = 0;
}
if (p_info->act_area >= id->nr_areas)
return 1;
}
out:
return 0;
}
/*
* debug_output:
* - called for user read()
* - copies formatted debug entries to the user buffer
*/
static ssize_t debug_output(struct file *file, /* file descriptor */
char __user *user_buf, /* user buffer */
size_t len, /* length of buffer */
loff_t *offset) /* offset in the file */
{
size_t count = 0;
size_t entry_offset;
file_private_info_t *p_info;
p_info = (file_private_info_t *) file->private_data;
if (*offset != p_info->offset)
return -EPIPE;
if (p_info->act_area >= p_info->debug_info_snap->nr_areas)
return 0;
entry_offset = p_info->act_entry_offset;
while (count < len) {
int formatted_line_residue;
int formatted_line_size;
int user_buf_residue;
size_t copy_size;
formatted_line_size = debug_format_entry(p_info);
formatted_line_residue = formatted_line_size - entry_offset;
user_buf_residue = len-count;
copy_size = min(user_buf_residue, formatted_line_residue);
if (copy_size) {
if (copy_to_user(user_buf + count, p_info->temp_buf
+ entry_offset, copy_size))
return -EFAULT;
count += copy_size;
entry_offset += copy_size;
}
if (copy_size == formatted_line_residue) {
entry_offset = 0;
if (debug_next_entry(p_info))
goto out;
}
}
out:
p_info->offset = *offset + count;
p_info->act_entry_offset = entry_offset;
*offset = p_info->offset;
return count;
}
/*
* debug_input:
* - called for user write()
* - calls input function of view
*/
static ssize_t debug_input(struct file *file, const char __user *user_buf,
size_t length, loff_t *offset)
{
file_private_info_t *p_info;
int rc = 0;
mutex_lock(&debug_mutex);
p_info = ((file_private_info_t *) file->private_data);
if (p_info->view->input_proc) {
rc = p_info->view->input_proc(p_info->debug_info_org,
p_info->view, file, user_buf,
length, offset);
} else {
rc = -EPERM;
}
mutex_unlock(&debug_mutex);
return rc; /* number of input characters */
}
/*
* debug_open:
* - called for user open()
* - copies formatted output to private_data area of the file
* handle
*/
static int debug_open(struct inode *inode, struct file *file)
{
debug_info_t *debug_info, *debug_info_snapshot;
file_private_info_t *p_info;
int i, rc = 0;
mutex_lock(&debug_mutex);
debug_info = file_inode(file)->i_private;
/* find debug view */
for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
if (!debug_info->views[i])
continue;
else if (debug_info->debugfs_entries[i] == file->f_path.dentry)
goto found; /* found view ! */
}
/* no entry found */
rc = -EINVAL;
goto out;
found:
/* Make a snapshot of the current debug areas to get a consistent view. */
/* Copying all areas is only needed if we have a view which */
/* formats the debug areas. */
if (!debug_info->views[i]->format_proc && !debug_info->views[i]->header_proc)
debug_info_snapshot = debug_info_copy(debug_info, NO_AREAS);
else
debug_info_snapshot = debug_info_copy(debug_info, ALL_AREAS);
if (!debug_info_snapshot) {
rc = -ENOMEM;
goto out;
}
p_info = kmalloc(sizeof(file_private_info_t), GFP_KERNEL);
if (!p_info) {
debug_info_free(debug_info_snapshot);
rc = -ENOMEM;
goto out;
}
p_info->offset = 0;
p_info->debug_info_snap = debug_info_snapshot;
p_info->debug_info_org = debug_info;
p_info->view = debug_info->views[i];
p_info->act_area = 0;
p_info->act_page = 0;
p_info->act_entry = DEBUG_PROLOG_ENTRY;
p_info->act_entry_offset = 0;
file->private_data = p_info;
debug_info_get(debug_info);
nonseekable_open(inode, file);
out:
mutex_unlock(&debug_mutex);
return rc;
}
/*
* debug_close:
* - called for user close()
* - deletes private_data area of the file handle
*/
static int debug_close(struct inode *inode, struct file *file)
{
file_private_info_t *p_info;
p_info = (file_private_info_t *) file->private_data;
if (p_info->debug_info_snap)
debug_info_free(p_info->debug_info_snap);
debug_info_put(p_info->debug_info_org);
kfree(file->private_data);
return 0; /* success */
}
/* Create debugfs entries and add to internal list. */
static void _debug_register(debug_info_t *id)
{
/* create root directory */
id->debugfs_root_entry = debugfs_create_dir(id->name,
debug_debugfs_root_entry);
/* append new element to linked list */
if (!debug_area_first) {
/* first element in list */
debug_area_first = id;
id->prev = NULL;
} else {
/* append element to end of list */
debug_area_last->next = id;
id->prev = debug_area_last;
}
debug_area_last = id;
id->next = NULL;
debug_register_view(id, &debug_level_view);
debug_register_view(id, &debug_flush_view);
debug_register_view(id, &debug_pages_view);
}
/**
* debug_register_mode() - creates and initializes debug area.
*
* @name: Name of debug log (e.g. used for debugfs entry)
* @pages_per_area: Number of pages, which will be allocated per area
* @nr_areas: Number of debug areas
* @buf_size: Size of data area in each debug entry
* @mode: File mode for debugfs files. E.g. S_IRWXUGO
* @uid: User ID for debugfs files. Currently only 0 is supported.
* @gid: Group ID for debugfs files. Currently only 0 is supported.
*
* Return:
* - Handle for generated debug area
* - %NULL if register failed
*
* Allocates memory for a debug log.
* Must not be called within an interrupt handler.
*/
debug_info_t *debug_register_mode(const char *name, int pages_per_area,
int nr_areas, int buf_size, umode_t mode,
uid_t uid, gid_t gid)
{
debug_info_t *rc = NULL;
/* Since debugfs currently does not support uid/gid other than root, */
/* we do not allow gid/uid != 0 until we get support for that. */
if ((uid != 0) || (gid != 0))
pr_warn("Root becomes the owner of all s390dbf files in sysfs\n");
BUG_ON(!initialized);
/* create new debug_info */
rc = debug_info_create(name, pages_per_area, nr_areas, buf_size, mode);
if (rc) {
mutex_lock(&debug_mutex);
_debug_register(rc);
mutex_unlock(&debug_mutex);
} else {
pr_err("Registering debug feature %s failed\n", name);
}
return rc;
}
EXPORT_SYMBOL(debug_register_mode);
/**
* debug_register() - creates and initializes debug area with default file mode.
*
* @name: Name of debug log (e.g. used for debugfs entry)
* @pages_per_area: Number of pages, which will be allocated per area
* @nr_areas: Number of debug areas
* @buf_size: Size of data area in each debug entry
*
* Return:
* - Handle for generated debug area
* - %NULL if register failed
*
* Allocates memory for a debug log.
* The debugfs file mode access permissions are read and write for user.
* Must not be called within an interrupt handler.
*/
debug_info_t *debug_register(const char *name, int pages_per_area,
int nr_areas, int buf_size)
{
return debug_register_mode(name, pages_per_area, nr_areas, buf_size,
S_IRUSR | S_IWUSR, 0, 0);
}
EXPORT_SYMBOL(debug_register);
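/*
 * Illustrative sketch (added, not part of the original source): typical
 * lifecycle of a debug log as used by a device driver. The "mydrv" names
 * and the buffer sizing are hypothetical; the debug_* calls are the API
 * exported by this file and by the wrappers in <asm/debug.h>.
 */
#if 0
static debug_info_t *mydrv_dbf;

static int mydrv_dbf_init(void)
{
	/* 4 pages per area, 1 area, room for format string plus 3 longs */
	mydrv_dbf = debug_register("mydrv", 4, 1, 4 * sizeof(long));
	if (!mydrv_dbf)
		return -ENOMEM;
	debug_register_view(mydrv_dbf, &debug_sprintf_view);
	debug_set_level(mydrv_dbf, 3);
	debug_sprintf_event(mydrv_dbf, 2, "init done, rc=%d\n", 0);
	return 0;
}

static void mydrv_dbf_exit(void)
{
	debug_unregister(mydrv_dbf);
}
#endif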
/**
* debug_register_static() - registers a static debug area
*
* @id: Handle for static debug area
* @pages_per_area: Number of pages per area
* @nr_areas: Number of debug areas
*
* Register debug_info_t defined using DEFINE_STATIC_DEBUG_INFO.
*
* Note: This function is called automatically via an initcall generated by
* DEFINE_STATIC_DEBUG_INFO.
*/
void debug_register_static(debug_info_t *id, int pages_per_area, int nr_areas)
{
unsigned long flags;
debug_info_t *copy;
if (!initialized) {
pr_err("Tried to register debug feature %s too early\n",
id->name);
return;
}
copy = debug_info_alloc("", pages_per_area, nr_areas, id->buf_size,
id->level, ALL_AREAS);
if (!copy) {
pr_err("Registering debug feature %s failed\n", id->name);
/* Clear pointers to prevent tracing into released initdata. */
spin_lock_irqsave(&id->lock, flags);
id->areas = NULL;
id->active_pages = NULL;
id->active_entries = NULL;
spin_unlock_irqrestore(&id->lock, flags);
return;
}
/* Replace static trace area with dynamic copy. */
spin_lock_irqsave(&id->lock, flags);
debug_events_append(copy, id);
debug_areas_swap(id, copy);
spin_unlock_irqrestore(&id->lock, flags);
/* Clear pointers to initdata and discard copy. */
copy->areas = NULL;
copy->active_pages = NULL;
copy->active_entries = NULL;
debug_info_free(copy);
mutex_lock(&debug_mutex);
_debug_register(id);
mutex_unlock(&debug_mutex);
}
/* Remove debugfs entries and remove from internal list. */
static void _debug_unregister(debug_info_t *id)
{
int i;
for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
if (!id->views[i])
continue;
debugfs_remove(id->debugfs_entries[i]);
}
debugfs_remove(id->debugfs_root_entry);
if (id == debug_area_first)
debug_area_first = id->next;
if (id == debug_area_last)
debug_area_last = id->prev;
if (id->prev)
id->prev->next = id->next;
if (id->next)
id->next->prev = id->prev;
}
/**
* debug_unregister() - give back debug area.
*
* @id: handle for debug log
*
* Return:
* none
*/
void debug_unregister(debug_info_t *id)
{
if (!id)
return;
mutex_lock(&debug_mutex);
_debug_unregister(id);
mutex_unlock(&debug_mutex);
debug_info_put(id);
}
EXPORT_SYMBOL(debug_unregister);
/*
* debug_set_size:
* - set area size (number of pages) and number of areas
*/
static int debug_set_size(debug_info_t *id, int nr_areas, int pages_per_area)
{
debug_info_t *new_id;
unsigned long flags;
if (!id || (nr_areas <= 0) || (pages_per_area < 0))
return -EINVAL;
new_id = debug_info_alloc("", pages_per_area, nr_areas, id->buf_size,
id->level, ALL_AREAS);
if (!new_id) {
pr_info("Allocating memory for %i pages failed\n",
pages_per_area);
return -ENOMEM;
}
spin_lock_irqsave(&id->lock, flags);
debug_events_append(new_id, id);
debug_areas_swap(new_id, id);
debug_info_free(new_id);
spin_unlock_irqrestore(&id->lock, flags);
pr_info("%s: set new size (%i pages)\n", id->name, pages_per_area);
return 0;
}
/**
* debug_set_level() - Sets new actual debug level if new_level is valid.
*
* @id: handle for debug log
* @new_level: new debug level
*
* Return:
* none
*/
void debug_set_level(debug_info_t *id, int new_level)
{
unsigned long flags;
if (!id)
return;
if (new_level == DEBUG_OFF_LEVEL) {
pr_info("%s: switched off\n", id->name);
} else if ((new_level > DEBUG_MAX_LEVEL) || (new_level < 0)) {
pr_info("%s: level %i is out of range (%i - %i)\n",
id->name, new_level, 0, DEBUG_MAX_LEVEL);
return;
}
spin_lock_irqsave(&id->lock, flags);
id->level = new_level;
spin_unlock_irqrestore(&id->lock, flags);
}
EXPORT_SYMBOL(debug_set_level);
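/*
 * Usage note (added): the level set here acts as a threshold checked by the
 * wrappers in <asm/debug.h>; entries traced with a level numerically greater
 * than id->level are dropped, and DEBUG_OFF_LEVEL suppresses all of them.
 * For example, after debug_set_level(id, 2) an event traced with level 3 is
 * discarded while one traced with level 1 is recorded.
 */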
/*
* proceed_active_entry:
* - set active entry to next in the ring buffer
*/
static inline void proceed_active_entry(debug_info_t *id)
{
if ((id->active_entries[id->active_area] += id->entry_size)
> (PAGE_SIZE - id->entry_size)) {
id->active_entries[id->active_area] = 0;
id->active_pages[id->active_area] =
(id->active_pages[id->active_area] + 1) %
id->pages_per_area;
}
}
/*
* proceed_active_area:
* - set active area to next in the ring buffer
*/
static inline void proceed_active_area(debug_info_t *id)
{
id->active_area++;
id->active_area = id->active_area % id->nr_areas;
}
/*
* get_active_entry:
*/
static inline debug_entry_t *get_active_entry(debug_info_t *id)
{
return (debug_entry_t *) (((char *) id->areas[id->active_area]
[id->active_pages[id->active_area]]) +
id->active_entries[id->active_area]);
}
/* Swap debug areas of a and b. */
static void debug_areas_swap(debug_info_t *a, debug_info_t *b)
{
swap(a->nr_areas, b->nr_areas);
swap(a->pages_per_area, b->pages_per_area);
swap(a->areas, b->areas);
swap(a->active_area, b->active_area);
swap(a->active_pages, b->active_pages);
swap(a->active_entries, b->active_entries);
}
/* Append all debug events in active area from source to destination log. */
static void debug_events_append(debug_info_t *dest, debug_info_t *src)
{
debug_entry_t *from, *to, *last;
if (!src->areas || !dest->areas)
return;
/* Loop over all entries in src, starting with oldest. */
from = get_active_entry(src);
last = from;
do {
if (from->clock != 0LL) {
to = get_active_entry(dest);
memset(to, 0, dest->entry_size);
memcpy(to, from, min(src->entry_size,
dest->entry_size));
proceed_active_entry(dest);
}
proceed_active_entry(src);
from = get_active_entry(src);
} while (from != last);
}
/*
* debug_finish_entry:
* - set timestamp, caller address, cpu number etc.
*/
static inline void debug_finish_entry(debug_info_t *id, debug_entry_t *active,
int level, int exception)
{
unsigned long timestamp;
union tod_clock clk;
store_tod_clock_ext(&clk);
timestamp = clk.us;
timestamp -= TOD_UNIX_EPOCH >> 12;
active->clock = timestamp;
active->cpu = smp_processor_id();
active->caller = __builtin_return_address(0);
active->exception = exception;
active->level = level;
proceed_active_entry(id);
if (exception)
proceed_active_area(id);
}
static int debug_stoppable = 1;
static int debug_active = 1;
#define CTL_S390DBF_STOPPABLE 5678
#define CTL_S390DBF_ACTIVE 5679
/*
* proc handler for the running debug_active sysctl:
* always allow read; allow write only if debug_stoppable is set or
* if debug_active is already off
*/
static int s390dbf_procactive(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
if (!write || debug_stoppable || !debug_active)
return proc_dointvec(table, write, buffer, lenp, ppos);
else
return 0;
}
static struct ctl_table s390dbf_table[] = {
{
.procname = "debug_stoppable",
.data = &debug_stoppable,
.maxlen = sizeof(int),
.mode = S_IRUGO | S_IWUSR,
.proc_handler = proc_dointvec,
},
{
.procname = "debug_active",
.data = &debug_active,
.maxlen = sizeof(int),
.mode = S_IRUGO | S_IWUSR,
.proc_handler = s390dbf_procactive,
},
{ }
};
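/*
 * Usage sketch (added; paths assume the usual /proc/sys layout created by
 * register_sysctl("s390dbf", ...) in debug_init() below):
 *
 *	echo 0 > /proc/sys/s390dbf/debug_stoppable  # an oops no longer stops tracing
 *	echo 0 > /proc/sys/s390dbf/debug_active     # stop all debug logs now
 */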
static struct ctl_table_header *s390dbf_sysctl_header;
/**
* debug_stop_all() - stops the debug feature if stopping is allowed.
*
* Return:
* - none
*
* Currently used in case of a kernel oops.
*/
void debug_stop_all(void)
{
if (debug_stoppable)
debug_active = 0;
}
EXPORT_SYMBOL(debug_stop_all);
/**
* debug_set_critical() - event/exception functions try lock instead of spin.
*
* Return:
* - none
*
* Currently used in case of stopping all CPUs but the current one.
* Once in this state, functions to write a debug entry for an
* event or exception no longer spin on the debug area lock,
* but only try to get it and fail if they do not get the lock.
*/
void debug_set_critical(void)
{
debug_critical = 1;
}
/*
* debug_event_common:
* - write debug entry with given size
*/
debug_entry_t *debug_event_common(debug_info_t *id, int level, const void *buf,
int len)
{
debug_entry_t *active;
unsigned long flags;
if (!debug_active || !id->areas)
return NULL;
if (debug_critical) {
if (!spin_trylock_irqsave(&id->lock, flags))
return NULL;
} else {
spin_lock_irqsave(&id->lock, flags);
}
do {
active = get_active_entry(id);
memcpy(DEBUG_DATA(active), buf, min(len, id->buf_size));
if (len < id->buf_size)
memset((DEBUG_DATA(active)) + len, 0, id->buf_size - len);
debug_finish_entry(id, active, level, 0);
len -= id->buf_size;
buf += id->buf_size;
} while (len > 0);
spin_unlock_irqrestore(&id->lock, flags);
return active;
}
EXPORT_SYMBOL(debug_event_common);
/*
* debug_exception_common:
* - write debug entry with given size and switch to next debug area
*/
debug_entry_t *debug_exception_common(debug_info_t *id, int level,
const void *buf, int len)
{
debug_entry_t *active;
unsigned long flags;
if (!debug_active || !id->areas)
return NULL;
if (debug_critical) {
if (!spin_trylock_irqsave(&id->lock, flags))
return NULL;
} else {
spin_lock_irqsave(&id->lock, flags);
}
do {
active = get_active_entry(id);
memcpy(DEBUG_DATA(active), buf, min(len, id->buf_size));
if (len < id->buf_size)
memset((DEBUG_DATA(active)) + len, 0, id->buf_size - len);
debug_finish_entry(id, active, level, len <= id->buf_size);
len -= id->buf_size;
buf += id->buf_size;
} while (len > 0);
spin_unlock_irqrestore(&id->lock, flags);
return active;
}
EXPORT_SYMBOL(debug_exception_common);
/*
* counts arguments in format string for sprintf view
*/
static inline int debug_count_numargs(char *string)
{
int numargs = 0;
while (*string) {
if (*string++ == '%')
numargs++;
}
return numargs;
}
/*
* debug_sprintf_event:
*/
debug_entry_t *__debug_sprintf_event(debug_info_t *id, int level, char *string, ...)
{
debug_sprintf_entry_t *curr_event;
debug_entry_t *active;
unsigned long flags;
int numargs, idx;
va_list ap;
if (!debug_active || !id->areas)
return NULL;
numargs = debug_count_numargs(string);
if (debug_critical) {
if (!spin_trylock_irqsave(&id->lock, flags))
return NULL;
} else {
spin_lock_irqsave(&id->lock, flags);
}
active = get_active_entry(id);
curr_event = (debug_sprintf_entry_t *) DEBUG_DATA(active);
va_start(ap, string);
curr_event->string = string;
for (idx = 0; idx < min(numargs, (int)(id->buf_size / sizeof(long)) - 1); idx++)
curr_event->args[idx] = va_arg(ap, long);
va_end(ap);
debug_finish_entry(id, active, level, 0);
spin_unlock_irqrestore(&id->lock, flags);
return active;
}
EXPORT_SYMBOL(__debug_sprintf_event);
/*
* debug_sprintf_exception:
*/
debug_entry_t *__debug_sprintf_exception(debug_info_t *id, int level, char *string, ...)
{
debug_sprintf_entry_t *curr_event;
debug_entry_t *active;
unsigned long flags;
int numargs, idx;
va_list ap;
if (!debug_active || !id->areas)
return NULL;
numargs = debug_count_numargs(string);
if (debug_critical) {
if (!spin_trylock_irqsave(&id->lock, flags))
return NULL;
} else {
spin_lock_irqsave(&id->lock, flags);
}
active = get_active_entry(id);
curr_event = (debug_sprintf_entry_t *)DEBUG_DATA(active);
va_start(ap, string);
curr_event->string = string;
for (idx = 0; idx < min(numargs, (int)(id->buf_size / sizeof(long)) - 1); idx++)
curr_event->args[idx] = va_arg(ap, long);
va_end(ap);
debug_finish_entry(id, active, level, 1);
spin_unlock_irqrestore(&id->lock, flags);
return active;
}
EXPORT_SYMBOL(__debug_sprintf_exception);
/**
* debug_register_view() - registers new debug view and creates debugfs
* dir entry
*
* @id: handle for debug log
* @view: pointer to debug view struct
*
* Return:
* - 0 : ok
* - < 0: Error
*/
int debug_register_view(debug_info_t *id, struct debug_view *view)
{
unsigned long flags;
struct dentry *pde;
umode_t mode;
int rc = 0;
int i;
if (!id)
goto out;
mode = (id->mode | S_IFREG) & ~S_IXUGO;
if (!(view->prolog_proc || view->format_proc || view->header_proc))
mode &= ~(S_IRUSR | S_IRGRP | S_IROTH);
if (!view->input_proc)
mode &= ~(S_IWUSR | S_IWGRP | S_IWOTH);
pde = debugfs_create_file(view->name, mode, id->debugfs_root_entry,
id, &debug_file_ops);
spin_lock_irqsave(&id->lock, flags);
for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
if (!id->views[i])
break;
}
if (i == DEBUG_MAX_VIEWS) {
rc = -1;
} else {
id->views[i] = view;
id->debugfs_entries[i] = pde;
}
spin_unlock_irqrestore(&id->lock, flags);
if (rc) {
pr_err("Registering view %s/%s would exceed the maximum "
"number of views %i\n", id->name, view->name, i);
debugfs_remove(pde);
}
out:
return rc;
}
EXPORT_SYMBOL(debug_register_view);
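/*
 * Usage note (added): registering one of the exported views creates a file
 * below the log's directory, typically /sys/kernel/debug/s390dbf/<name>/
 * when debugfs is mounted in the usual place, e.g.
 *
 *	debug_register_view(id, &debug_hex_ascii_view);
 *
 * makes a readable "hex_ascii" file appear next to the "level", "pages" and
 * "flush" files that _debug_register() sets up automatically.
 */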
/**
* debug_unregister_view() - unregisters debug view and removes debugfs
* dir entry
*
* @id: handle for debug log
* @view: pointer to debug view struct
*
* Return:
* - 0 : ok
* - < 0: Error
*/
int debug_unregister_view(debug_info_t *id, struct debug_view *view)
{
struct dentry *dentry = NULL;
unsigned long flags;
int i, rc = 0;
if (!id)
goto out;
spin_lock_irqsave(&id->lock, flags);
for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
if (id->views[i] == view)
break;
}
if (i == DEBUG_MAX_VIEWS) {
rc = -1;
} else {
dentry = id->debugfs_entries[i];
id->views[i] = NULL;
id->debugfs_entries[i] = NULL;
}
spin_unlock_irqrestore(&id->lock, flags);
debugfs_remove(dentry);
out:
return rc;
}
EXPORT_SYMBOL(debug_unregister_view);
static inline char *debug_get_user_string(const char __user *user_buf,
size_t user_len)
{
char *buffer;
buffer = kmalloc(user_len + 1, GFP_KERNEL);
if (!buffer)
return ERR_PTR(-ENOMEM);
if (copy_from_user(buffer, user_buf, user_len) != 0) {
kfree(buffer);
return ERR_PTR(-EFAULT);
}
/* got the string, now strip linefeed. */
if (buffer[user_len - 1] == '\n')
buffer[user_len - 1] = 0;
else
buffer[user_len] = 0;
return buffer;
}
static inline int debug_get_uint(char *buf)
{
int rc;
buf = skip_spaces(buf);
rc = simple_strtoul(buf, &buf, 10);
if (*buf)
rc = -EINVAL;
return rc;
}
/*
* functions for debug-views
***********************************
*/
/*
* prints out the current number of pages per debug area
*/
static int debug_prolog_pages_fn(debug_info_t *id, struct debug_view *view,
char *out_buf)
{
return sprintf(out_buf, "%i\n", id->pages_per_area);
}
/*
* reads new size (number of pages per debug area)
*/
static int debug_input_pages_fn(debug_info_t *id, struct debug_view *view,
struct file *file, const char __user *user_buf,
size_t user_len, loff_t *offset)
{
int rc, new_pages;
char *str;
if (user_len > 0x10000)
user_len = 0x10000;
if (*offset != 0) {
rc = -EPIPE;
goto out;
}
str = debug_get_user_string(user_buf, user_len);
if (IS_ERR(str)) {
rc = PTR_ERR(str);
goto out;
}
new_pages = debug_get_uint(str);
if (new_pages < 0) {
rc = -EINVAL;
goto free_str;
}
rc = debug_set_size(id, id->nr_areas, new_pages);
if (rc != 0) {
rc = -EINVAL;
goto free_str;
}
rc = user_len;
free_str:
kfree(str);
out:
*offset += user_len;
return rc; /* number of input characters */
}
/*
* prints out actual debug level
*/
static int debug_prolog_level_fn(debug_info_t *id, struct debug_view *view,
char *out_buf)
{
int rc = 0;
if (id->level == DEBUG_OFF_LEVEL)
rc = sprintf(out_buf, "-\n");
else
rc = sprintf(out_buf, "%i\n", id->level);
return rc;
}
/*
* reads new debug level
*/
static int debug_input_level_fn(debug_info_t *id, struct debug_view *view,
struct file *file, const char __user *user_buf,
size_t user_len, loff_t *offset)
{
int rc, new_level;
char *str;
if (user_len > 0x10000)
user_len = 0x10000;
if (*offset != 0) {
rc = -EPIPE;
goto out;
}
str = debug_get_user_string(user_buf, user_len);
if (IS_ERR(str)) {
rc = PTR_ERR(str);
goto out;
}
if (str[0] == '-') {
debug_set_level(id, DEBUG_OFF_LEVEL);
rc = user_len;
goto free_str;
} else {
new_level = debug_get_uint(str);
}
if (new_level < 0) {
pr_warn("%s is not a valid level for a debug feature\n", str);
rc = -EINVAL;
} else {
debug_set_level(id, new_level);
rc = user_len;
}
free_str:
kfree(str);
out:
*offset += user_len;
return rc; /* number of input characters */
}
/*
* flushes debug areas
*/
static void debug_flush(debug_info_t *id, int area)
{
unsigned long flags;
int i, j;
if (!id || !id->areas)
return;
spin_lock_irqsave(&id->lock, flags);
if (area == DEBUG_FLUSH_ALL) {
id->active_area = 0;
memset(id->active_entries, 0, id->nr_areas * sizeof(int));
for (i = 0; i < id->nr_areas; i++) {
id->active_pages[i] = 0;
for (j = 0; j < id->pages_per_area; j++)
memset(id->areas[i][j], 0, PAGE_SIZE);
}
} else if (area >= 0 && area < id->nr_areas) {
id->active_entries[area] = 0;
id->active_pages[area] = 0;
for (i = 0; i < id->pages_per_area; i++)
memset(id->areas[area][i], 0, PAGE_SIZE);
}
spin_unlock_irqrestore(&id->lock, flags);
}
/*
* view function: flushes debug areas
*/
static int debug_input_flush_fn(debug_info_t *id, struct debug_view *view,
struct file *file, const char __user *user_buf,
size_t user_len, loff_t *offset)
{
char input_buf[1];
int rc = user_len;
if (user_len > 0x10000)
user_len = 0x10000;
if (*offset != 0) {
rc = -EPIPE;
goto out;
}
if (copy_from_user(input_buf, user_buf, 1)) {
rc = -EFAULT;
goto out;
}
if (input_buf[0] == '-') {
debug_flush(id, DEBUG_FLUSH_ALL);
goto out;
}
if (isdigit(input_buf[0])) {
int area = ((int) input_buf[0] - (int) '0');
debug_flush(id, area);
goto out;
}
pr_info("Flushing debug data failed because %c is not a valid "
"area\n", input_buf[0]);
out:
*offset += user_len;
return rc; /* number of input characters */
}
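/*
 * Usage sketch for the input views above (added; the path assumes debugfs
 * is mounted at /sys/kernel/debug):
 *
 *	echo 4 > /sys/kernel/debug/s390dbf/<name>/pages   # resize each area
 *	echo 6 > /sys/kernel/debug/s390dbf/<name>/level   # raise debug level
 *	echo - > /sys/kernel/debug/s390dbf/<name>/level   # switch log off
 *	echo - > /sys/kernel/debug/s390dbf/<name>/flush   # flush all areas
 *	echo 0 > /sys/kernel/debug/s390dbf/<name>/flush   # flush area 0 only
 */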
/*
* prints debug data in hex/ascii format
*/
static int debug_hex_ascii_format_fn(debug_info_t *id, struct debug_view *view,
char *out_buf, const char *in_buf)
{
int i, rc = 0;
for (i = 0; i < id->buf_size; i++)
rc += sprintf(out_buf + rc, "%02x ", ((unsigned char *) in_buf)[i]);
rc += sprintf(out_buf + rc, "| ");
for (i = 0; i < id->buf_size; i++) {
unsigned char c = in_buf[i];
if (isascii(c) && isprint(c))
rc += sprintf(out_buf + rc, "%c", c);
else
rc += sprintf(out_buf + rc, ".");
}
rc += sprintf(out_buf + rc, "\n");
return rc;
}
/*
* prints header for debug entry
*/
int debug_dflt_header_fn(debug_info_t *id, struct debug_view *view,
int area, debug_entry_t *entry, char *out_buf)
{
unsigned long sec, usec;
unsigned long caller;
unsigned int level;
char *except_str;
int rc = 0;
level = entry->level;
sec = entry->clock;
usec = do_div(sec, USEC_PER_SEC);
if (entry->exception)
except_str = "*";
else
except_str = "-";
caller = (unsigned long) entry->caller;
rc += sprintf(out_buf, "%02i %011ld:%06lu %1u %1s %04u %px ",
area, sec, usec, level, except_str,
entry->cpu, (void *)caller);
return rc;
}
EXPORT_SYMBOL(debug_dflt_header_fn);
/*
* prints debug data sprintf-formatted:
* debug_sprintf_event/exception calls must be used together with this view
*/
#define DEBUG_SPRINTF_MAX_ARGS 10
static int debug_sprintf_format_fn(debug_info_t *id, struct debug_view *view,
char *out_buf, const char *inbuf)
{
debug_sprintf_entry_t *curr_event = (debug_sprintf_entry_t *)inbuf;
int num_longs, num_used_args = 0, i, rc = 0;
int index[DEBUG_SPRINTF_MAX_ARGS];
/* count of longs that fit into one entry */
num_longs = id->buf_size / sizeof(long);
if (num_longs < 1)
goto out; /* bufsize of entry too small */
if (num_longs == 1) {
/* no args, we use only the string */
strcpy(out_buf, curr_event->string);
rc = strlen(curr_event->string);
goto out;
}
/* number of arguments used for sprintf (without the format string) */
num_used_args = min(DEBUG_SPRINTF_MAX_ARGS, (num_longs - 1));
memset(index, 0, DEBUG_SPRINTF_MAX_ARGS * sizeof(int));
for (i = 0; i < num_used_args; i++)
index[i] = i;
rc = sprintf(out_buf, curr_event->string, curr_event->args[index[0]],
curr_event->args[index[1]], curr_event->args[index[2]],
curr_event->args[index[3]], curr_event->args[index[4]],
curr_event->args[index[5]], curr_event->args[index[6]],
curr_event->args[index[7]], curr_event->args[index[8]],
curr_event->args[index[9]]);
out:
return rc;
}
/*
* debug_init:
* - is called exactly once to initialize the debug feature
*/
static int __init debug_init(void)
{
s390dbf_sysctl_header = register_sysctl("s390dbf", s390dbf_table);
mutex_lock(&debug_mutex);
debug_debugfs_root_entry = debugfs_create_dir(DEBUG_DIR_ROOT, NULL);
initialized = 1;
mutex_unlock(&debug_mutex);
return 0;
}
postcore_initcall(debug_init);
| linux-master | arch/s390/kernel/debug.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <asm/unistd.h>
#include "audit.h"
static unsigned dir_class[] = {
#include <asm-generic/audit_dir_write.h>
~0U
};
static unsigned read_class[] = {
#include <asm-generic/audit_read.h>
~0U
};
static unsigned write_class[] = {
#include <asm-generic/audit_write.h>
~0U
};
static unsigned chattr_class[] = {
#include <asm-generic/audit_change_attr.h>
~0U
};
static unsigned signal_class[] = {
#include <asm-generic/audit_signal.h>
~0U
};
int audit_classify_arch(int arch)
{
#ifdef CONFIG_COMPAT
if (arch == AUDIT_ARCH_S390)
return 1;
#endif
return 0;
}
int audit_classify_syscall(int abi, unsigned syscall)
{
#ifdef CONFIG_COMPAT
if (abi == AUDIT_ARCH_S390)
return s390_classify_syscall(syscall);
#endif
switch(syscall) {
case __NR_open:
return AUDITSC_OPEN;
case __NR_openat:
return AUDITSC_OPENAT;
case __NR_socketcall:
return AUDITSC_SOCKETCALL;
case __NR_execve:
return AUDITSC_EXECVE;
case __NR_openat2:
return AUDITSC_OPENAT2;
default:
return AUDITSC_NATIVE;
}
}
static int __init audit_classes_init(void)
{
#ifdef CONFIG_COMPAT
audit_register_class(AUDIT_CLASS_WRITE_32, s390_write_class);
audit_register_class(AUDIT_CLASS_READ_32, s390_read_class);
audit_register_class(AUDIT_CLASS_DIR_WRITE_32, s390_dir_class);
audit_register_class(AUDIT_CLASS_CHATTR_32, s390_chattr_class);
audit_register_class(AUDIT_CLASS_SIGNAL_32, s390_signal_class);
#endif
audit_register_class(AUDIT_CLASS_WRITE, write_class);
audit_register_class(AUDIT_CLASS_READ, read_class);
audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class);
audit_register_class(AUDIT_CLASS_CHATTR, chattr_class);
audit_register_class(AUDIT_CLASS_SIGNAL, signal_class);
return 0;
}
__initcall(audit_classes_init);
| linux-master | arch/s390/kernel/audit.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Time of day based timer functions.
*
* S390 version
* Copyright IBM Corp. 1999, 2008
* Author(s): Hartmut Penner ([email protected]),
* Martin Schwidefsky ([email protected]),
* Denis Joseph Barrow ([email protected],[email protected])
*
* Derived from "arch/i386/kernel/time.c"
* Copyright (C) 1991, 1992, 1995 Linus Torvalds
*/
#define KMSG_COMPONENT "time"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel_stat.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/stop_machine.h>
#include <linux/time.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/profile.h>
#include <linux/timex.h>
#include <linux/notifier.h>
#include <linux/timekeeper_internal.h>
#include <linux/clockchips.h>
#include <linux/gfp.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <vdso/vsyscall.h>
#include <vdso/clocksource.h>
#include <vdso/helpers.h>
#include <asm/facility.h>
#include <asm/delay.h>
#include <asm/div64.h>
#include <asm/vdso.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/vtimer.h>
#include <asm/stp.h>
#include <asm/cio.h>
#include "entry.h"
union tod_clock tod_clock_base __section(".data");
EXPORT_SYMBOL_GPL(tod_clock_base);
u64 clock_comparator_max = -1ULL;
EXPORT_SYMBOL_GPL(clock_comparator_max);
static DEFINE_PER_CPU(struct clock_event_device, comparators);
ATOMIC_NOTIFIER_HEAD(s390_epoch_delta_notifier);
EXPORT_SYMBOL(s390_epoch_delta_notifier);
unsigned char ptff_function_mask[16];
static unsigned long lpar_offset;
static unsigned long initial_leap_seconds;
static unsigned long tod_steering_end;
static long tod_steering_delta;
/*
* Get time offsets with PTFF
*/
void __init time_early_init(void)
{
struct ptff_qto qto;
struct ptff_qui qui;
int cs;
/* Initialize TOD steering parameters */
tod_steering_end = tod_clock_base.tod;
for (cs = 0; cs < CS_BASES; cs++)
vdso_data[cs].arch_data.tod_steering_end = tod_steering_end;
if (!test_facility(28))
return;
ptff(&ptff_function_mask, sizeof(ptff_function_mask), PTFF_QAF);
/* get LPAR offset */
if (ptff_query(PTFF_QTO) && ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
lpar_offset = qto.tod_epoch_difference;
/* get initial leap seconds */
if (ptff_query(PTFF_QUI) && ptff(&qui, sizeof(qui), PTFF_QUI) == 0)
initial_leap_seconds = (unsigned long)
((long) qui.old_leap * 4096000000L);
}
unsigned long long noinstr sched_clock_noinstr(void)
{
return tod_to_ns(__get_tod_clock_monotonic());
}
/*
* Scheduler clock - returns current time in nanosec units.
*/
unsigned long long notrace sched_clock(void)
{
return tod_to_ns(get_tod_clock_monotonic());
}
NOKPROBE_SYMBOL(sched_clock);
static void ext_to_timespec64(union tod_clock *clk, struct timespec64 *xt)
{
unsigned long rem, sec, nsec;
sec = clk->us;
rem = do_div(sec, 1000000);
nsec = ((clk->sus + (rem << 12)) * 125) >> 9;
xt->tv_sec = sec;
xt->tv_nsec = nsec;
}
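/*
 * Added note on the conversion above: clk->us holds the TOD clock in
 * microseconds and clk->sus the sub-microsecond part in units of 2^-12
 * microseconds. After do_div() splits off the remainder in microseconds,
 * (clk->sus + (rem << 12)) is the fractional part in 1/4096 us units, and
 * multiplying by 125 and shifting right by 9 equals multiplying by
 * 1000/4096, which converts those units to nanoseconds.
 */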
void clock_comparator_work(void)
{
struct clock_event_device *cd;
S390_lowcore.clock_comparator = clock_comparator_max;
cd = this_cpu_ptr(&comparators);
cd->event_handler(cd);
}
static int s390_next_event(unsigned long delta,
struct clock_event_device *evt)
{
S390_lowcore.clock_comparator = get_tod_clock() + delta;
set_clock_comparator(S390_lowcore.clock_comparator);
return 0;
}
/*
* Set up lowcore and control register of the current cpu to
* enable TOD clock and clock comparator interrupts.
*/
void init_cpu_timer(void)
{
struct clock_event_device *cd;
int cpu;
S390_lowcore.clock_comparator = clock_comparator_max;
set_clock_comparator(S390_lowcore.clock_comparator);
cpu = smp_processor_id();
cd = &per_cpu(comparators, cpu);
cd->name = "comparator";
cd->features = CLOCK_EVT_FEAT_ONESHOT;
cd->mult = 16777;
cd->shift = 12;
cd->min_delta_ns = 1;
cd->min_delta_ticks = 1;
cd->max_delta_ns = LONG_MAX;
cd->max_delta_ticks = ULONG_MAX;
cd->rating = 400;
cd->cpumask = cpumask_of(cpu);
cd->set_next_event = s390_next_event;
clockevents_register_device(cd);
/* Enable clock comparator timer interrupt. */
__ctl_set_bit(0,11);
/* Always allow the timing alert external interrupt. */
__ctl_set_bit(0, 4);
}
static void clock_comparator_interrupt(struct ext_code ext_code,
unsigned int param32,
unsigned long param64)
{
inc_irq_stat(IRQEXT_CLK);
if (S390_lowcore.clock_comparator == clock_comparator_max)
set_clock_comparator(S390_lowcore.clock_comparator);
}
static void stp_timing_alert(struct stp_irq_parm *);
static void timing_alert_interrupt(struct ext_code ext_code,
unsigned int param32, unsigned long param64)
{
inc_irq_stat(IRQEXT_TLA);
if (param32 & 0x00038000)
stp_timing_alert((struct stp_irq_parm *) &param32);
}
static void stp_reset(void);
void read_persistent_clock64(struct timespec64 *ts)
{
union tod_clock clk;
u64 delta;
delta = initial_leap_seconds + TOD_UNIX_EPOCH;
store_tod_clock_ext(&clk);
clk.eitod -= delta;
ext_to_timespec64(&clk, ts);
}
void __init read_persistent_wall_and_boot_offset(struct timespec64 *wall_time,
struct timespec64 *boot_offset)
{
struct timespec64 boot_time;
union tod_clock clk;
u64 delta;
delta = initial_leap_seconds + TOD_UNIX_EPOCH;
clk = tod_clock_base;
clk.eitod -= delta;
ext_to_timespec64(&clk, &boot_time);
read_persistent_clock64(wall_time);
*boot_offset = timespec64_sub(*wall_time, boot_time);
}
static u64 read_tod_clock(struct clocksource *cs)
{
unsigned long now, adj;
preempt_disable(); /* protect from changes to steering parameters */
now = get_tod_clock();
adj = tod_steering_end - now;
if (unlikely((s64) adj > 0))
/*
* manually steer by 1 cycle every 2^15 cycles. This
* corresponds to shifting the tod delta by 15. 1s is
* therefore steered in ~9h. The adjust will decrease
* over time, until it finally reaches 0.
*/
now += (tod_steering_delta < 0) ? (adj >> 15) : -(adj >> 15);
preempt_enable();
return now;
}
static struct clocksource clocksource_tod = {
.name = "tod",
.rating = 400,
.read = read_tod_clock,
.mask = CLOCKSOURCE_MASK(64),
.mult = 1000,
.shift = 12,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
.vdso_clock_mode = VDSO_CLOCKMODE_TOD,
};
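/*
 * Added note on the mult/shift pair above: the clocksource core computes
 * ns = (cycles * mult) >> shift = cycles * 1000 / 4096, i.e. one TOD clock
 * unit corresponds to 2^-12 microseconds (about 244 ps), consistent with
 * the tod_to_ns() conversion used by sched_clock() above.
 */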
struct clocksource * __init clocksource_default_clock(void)
{
return &clocksource_tod;
}
/*
* Initialize the TOD clock and the CPU timer of
* the boot cpu.
*/
void __init time_init(void)
{
/* Reset time synchronization interfaces. */
stp_reset();
/* request the clock comparator external interrupt */
if (register_external_irq(EXT_IRQ_CLK_COMP, clock_comparator_interrupt))
panic("Couldn't request external interrupt 0x1004");
/* request the timing alert external interrupt */
if (register_external_irq(EXT_IRQ_TIMING_ALERT, timing_alert_interrupt))
panic("Couldn't request external interrupt 0x1406");
if (__clocksource_register(&clocksource_tod) != 0)
panic("Could not register TOD clock source");
/* Enable TOD clock interrupts on the boot cpu. */
init_cpu_timer();
/* Enable cpu timer interrupts on the boot cpu. */
vtime_init();
}
static DEFINE_PER_CPU(atomic_t, clock_sync_word);
static DEFINE_MUTEX(stp_mutex);
static unsigned long clock_sync_flags;
#define CLOCK_SYNC_HAS_STP 0
#define CLOCK_SYNC_STP 1
#define CLOCK_SYNC_STPINFO_VALID 2
/*
* The get_clock function for the physical clock. It will get the current
* TOD clock, subtract the LPAR offset and write the result to *clock.
* The function returns 0 if the clock is in sync with the external time
* source. It returns -EOPNOTSUPP if the clock runs in local mode (no STP
* facility), -EACCES if STP is available but not used for synchronization,
* and -EAGAIN if the clock is not in sync with the external reference.
*/
int get_phys_clock(unsigned long *clock)
{
atomic_t *sw_ptr;
unsigned int sw0, sw1;
sw_ptr = &get_cpu_var(clock_sync_word);
sw0 = atomic_read(sw_ptr);
*clock = get_tod_clock() - lpar_offset;
sw1 = atomic_read(sw_ptr);
put_cpu_var(clock_sync_word);
if (sw0 == sw1 && (sw0 & 0x80000000U))
/* Success: time is in sync. */
return 0;
if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
return -EOPNOTSUPP;
if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags))
return -EACCES;
return -EAGAIN;
}
EXPORT_SYMBOL(get_phys_clock);
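/*
 * Illustrative sketch (added, not part of the original source): how a
 * caller such as a PTP clock driver might consume get_phys_clock(). The
 * function and variable names are hypothetical; only the return-code
 * handling follows the function above.
 */
#if 0
static void mydrv_sample_clock(void)
{
	unsigned long tod;

	switch (get_phys_clock(&tod)) {
	case 0:			/* tod holds the synchronized physical clock */
		break;
	case -EOPNOTSUPP:	/* no STP facility, clock runs in local mode */
		break;
	case -EACCES:		/* STP available but not used for syncing */
		break;
	default:		/* -EAGAIN: currently out of sync, retry later */
		break;
	}
}
#endif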
/*
* Make get_phys_clock() return -EAGAIN.
*/
static void disable_sync_clock(void *dummy)
{
atomic_t *sw_ptr = this_cpu_ptr(&clock_sync_word);
/*
* Clear the in-sync bit 2^31. All get_phys_clock calls will
* fail until the sync bit is turned back on. In addition
* increase the "sequence" counter to avoid the race of an
* stp event and the complete recovery against get_phys_clock.
*/
atomic_andnot(0x80000000, sw_ptr);
atomic_inc(sw_ptr);
}
/*
* Make get_phys_clock() return 0 again.
* Needs to be called from a context disabled for preemption.
*/
static void enable_sync_clock(void)
{
atomic_t *sw_ptr = this_cpu_ptr(&clock_sync_word);
atomic_or(0x80000000, sw_ptr);
}
/*
* Function to check if the clock is in sync.
*/
static inline int check_sync_clock(void)
{
atomic_t *sw_ptr;
int rc;
sw_ptr = &get_cpu_var(clock_sync_word);
rc = (atomic_read(sw_ptr) & 0x80000000U) != 0;
put_cpu_var(clock_sync_word);
return rc;
}
/*
* Apply clock delta to the global data structures.
* This is called once on the CPU that performed the clock sync.
*/
static void clock_sync_global(long delta)
{
unsigned long now, adj;
struct ptff_qto qto;
int cs;
/* Fixup the monotonic sched clock. */
tod_clock_base.eitod += delta;
/* Adjust TOD steering parameters. */
now = get_tod_clock();
adj = tod_steering_end - now;
if (unlikely((s64) adj >= 0))
/* Calculate how much of the old adjustment is left. */
tod_steering_delta = (tod_steering_delta < 0) ?
-(adj >> 15) : (adj >> 15);
tod_steering_delta += delta;
if ((abs(tod_steering_delta) >> 48) != 0)
panic("TOD clock sync offset %li is too large to drift\n",
tod_steering_delta);
tod_steering_end = now + (abs(tod_steering_delta) << 15);
for (cs = 0; cs < CS_BASES; cs++) {
vdso_data[cs].arch_data.tod_steering_end = tod_steering_end;
vdso_data[cs].arch_data.tod_steering_delta = tod_steering_delta;
}
/* Update LPAR offset. */
if (ptff_query(PTFF_QTO) && ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
lpar_offset = qto.tod_epoch_difference;
/* Call the TOD clock change notifier. */
atomic_notifier_call_chain(&s390_epoch_delta_notifier, 0, &delta);
}
/*
* Apply clock delta to the per-CPU data structures of this CPU.
* This is called for each online CPU after the call to clock_sync_global.
*/
static void clock_sync_local(long delta)
{
/* Add the delta to the clock comparator. */
if (S390_lowcore.clock_comparator != clock_comparator_max) {
S390_lowcore.clock_comparator += delta;
set_clock_comparator(S390_lowcore.clock_comparator);
}
/* Adjust the last_update_clock time-stamp. */
S390_lowcore.last_update_clock += delta;
}
/* Single threaded workqueue used for stp sync events */
static struct workqueue_struct *time_sync_wq;
static void __init time_init_wq(void)
{
if (time_sync_wq)
return;
time_sync_wq = create_singlethread_workqueue("timesync");
}
struct clock_sync_data {
atomic_t cpus;
int in_sync;
long clock_delta;
};
/*
* Server Time Protocol (STP) code.
*/
static bool stp_online;
static struct stp_sstpi stp_info;
static void *stp_page;
static void stp_work_fn(struct work_struct *work);
static DECLARE_WORK(stp_work, stp_work_fn);
static struct timer_list stp_timer;
static int __init early_parse_stp(char *p)
{
return kstrtobool(p, &stp_online);
}
early_param("stp", early_parse_stp);
/*
* Reset STP attachment.
*/
static void __init stp_reset(void)
{
int rc;
stp_page = (void *) get_zeroed_page(GFP_ATOMIC);
rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000, NULL);
if (rc == 0)
set_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags);
else if (stp_online) {
pr_warn("The real or virtual hardware system does not provide an STP interface\n");
free_page((unsigned long) stp_page);
stp_page = NULL;
stp_online = false;
}
}
static void stp_timeout(struct timer_list *unused)
{
queue_work(time_sync_wq, &stp_work);
}
static int __init stp_init(void)
{
if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
return 0;
timer_setup(&stp_timer, stp_timeout, 0);
time_init_wq();
if (!stp_online)
return 0;
queue_work(time_sync_wq, &stp_work);
return 0;
}
arch_initcall(stp_init);
/*
* STP timing alert. There are three causes:
* 1) timing status change
* 2) link availability change
* 3) time control parameter change
* In all three cases we are only interested in the clock source state.
* If a STP clock source is now available use it.
*/
static void stp_timing_alert(struct stp_irq_parm *intparm)
{
if (intparm->tsc || intparm->lac || intparm->tcpc)
queue_work(time_sync_wq, &stp_work);
}
/*
* STP sync check machine check. This is called when the timing state
* changes from the synchronized state to the unsynchronized state.
* After a STP sync check the clock is not in sync. The machine check
* is broadcasted to all cpus at the same time.
*/
int stp_sync_check(void)
{
disable_sync_clock(NULL);
return 1;
}
/*
* STP island condition machine check. This is called when an attached
* server attempts to communicate over an STP link and the servers
* have matching CTN ids and have a valid stratum-1 configuration
* but the configurations do not match.
*/
int stp_island_check(void)
{
disable_sync_clock(NULL);
return 1;
}
void stp_queue_work(void)
{
queue_work(time_sync_wq, &stp_work);
}
static int __store_stpinfo(void)
{
int rc = chsc_sstpi(stp_page, &stp_info, sizeof(struct stp_sstpi));
if (rc)
clear_bit(CLOCK_SYNC_STPINFO_VALID, &clock_sync_flags);
else
set_bit(CLOCK_SYNC_STPINFO_VALID, &clock_sync_flags);
return rc;
}
static int stpinfo_valid(void)
{
return stp_online && test_bit(CLOCK_SYNC_STPINFO_VALID, &clock_sync_flags);
}
static int stp_sync_clock(void *data)
{
struct clock_sync_data *sync = data;
long clock_delta, flags;
static int first;
int rc;
enable_sync_clock();
if (xchg(&first, 1) == 0) {
/* Wait until all other cpus entered the sync function. */
while (atomic_read(&sync->cpus) != 0)
cpu_relax();
rc = 0;
if (stp_info.todoff || stp_info.tmd != 2) {
flags = vdso_update_begin();
rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0,
&clock_delta);
if (rc == 0) {
sync->clock_delta = clock_delta;
clock_sync_global(clock_delta);
rc = __store_stpinfo();
if (rc == 0 && stp_info.tmd != 2)
rc = -EAGAIN;
}
vdso_update_end(flags);
}
sync->in_sync = rc ? -EAGAIN : 1;
xchg(&first, 0);
} else {
/* Slave */
atomic_dec(&sync->cpus);
/* Wait for in_sync to be set. */
while (READ_ONCE(sync->in_sync) == 0)
__udelay(1);
}
if (sync->in_sync != 1)
/* Didn't work. Clear per-cpu in sync bit again. */
disable_sync_clock(NULL);
/* Apply clock delta to per-CPU fields of this CPU. */
clock_sync_local(sync->clock_delta);
return 0;
}
static int stp_clear_leap(void)
{
struct __kernel_timex txc;
int ret;
memset(&txc, 0, sizeof(txc));
ret = do_adjtimex(&txc);
if (ret < 0)
return ret;
txc.modes = ADJ_STATUS;
txc.status &= ~(STA_INS|STA_DEL);
return do_adjtimex(&txc);
}
static void stp_check_leap(void)
{
struct stp_stzi stzi;
struct stp_lsoib *lsoib = &stzi.lsoib;
struct __kernel_timex txc;
int64_t timediff;
int leapdiff, ret;
if (!stp_info.lu || !check_sync_clock()) {
/*
* Either a scheduled leap second was removed by the operator,
* or STP is out of sync. In both cases, clear the leap second
* kernel flags.
*/
if (stp_clear_leap() < 0)
pr_err("failed to clear leap second flags\n");
return;
}
if (chsc_stzi(stp_page, &stzi, sizeof(stzi))) {
pr_err("stzi failed\n");
return;
}
timediff = tod_to_ns(lsoib->nlsout - get_tod_clock()) / NSEC_PER_SEC;
leapdiff = lsoib->nlso - lsoib->also;
if (leapdiff != 1 && leapdiff != -1) {
pr_err("Cannot schedule %d leap seconds\n", leapdiff);
return;
}
if (timediff < 0) {
if (stp_clear_leap() < 0)
pr_err("failed to clear leap second flags\n");
} else if (timediff < 7200) {
memset(&txc, 0, sizeof(txc));
ret = do_adjtimex(&txc);
if (ret < 0)
return;
txc.modes = ADJ_STATUS;
if (leapdiff > 0)
txc.status |= STA_INS;
else
txc.status |= STA_DEL;
ret = do_adjtimex(&txc);
if (ret < 0)
pr_err("failed to set leap second flags\n");
/* arm Timer to clear leap second flags */
mod_timer(&stp_timer, jiffies + msecs_to_jiffies(14400 * MSEC_PER_SEC));
} else {
/* The day the leap second is scheduled for hasn't been reached. Retry
* in one hour.
*/
mod_timer(&stp_timer, jiffies + msecs_to_jiffies(3600 * MSEC_PER_SEC));
}
}
/*
* STP work. Check for the STP state and take over the clock
* synchronization if the STP clock source is usable.
*/
static void stp_work_fn(struct work_struct *work)
{
struct clock_sync_data stp_sync;
int rc;
/* prevent multiple execution. */
mutex_lock(&stp_mutex);
if (!stp_online) {
chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000, NULL);
del_timer_sync(&stp_timer);
goto out_unlock;
}
rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0xf0e0, NULL);
if (rc)
goto out_unlock;
rc = __store_stpinfo();
if (rc || stp_info.c == 0)
goto out_unlock;
/* Skip synchronization if the clock is already in sync. */
if (!check_sync_clock()) {
memset(&stp_sync, 0, sizeof(stp_sync));
cpus_read_lock();
atomic_set(&stp_sync.cpus, num_online_cpus() - 1);
stop_machine_cpuslocked(stp_sync_clock, &stp_sync, cpu_online_mask);
cpus_read_unlock();
}
if (!check_sync_clock())
/*
* There is a usable clock but the synchronization failed.
* Retry after a second.
*/
mod_timer(&stp_timer, jiffies + msecs_to_jiffies(MSEC_PER_SEC));
else if (stp_info.lu)
stp_check_leap();
out_unlock:
mutex_unlock(&stp_mutex);
}
/*
* STP subsys sysfs interface functions
*/
static struct bus_type stp_subsys = {
.name = "stp",
.dev_name = "stp",
};
static ssize_t ctn_id_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
ssize_t ret = -ENODATA;
mutex_lock(&stp_mutex);
if (stpinfo_valid())
ret = sprintf(buf, "%016lx\n",
*(unsigned long *) stp_info.ctnid);
mutex_unlock(&stp_mutex);
return ret;
}
static DEVICE_ATTR_RO(ctn_id);
static ssize_t ctn_type_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
ssize_t ret = -ENODATA;
mutex_lock(&stp_mutex);
if (stpinfo_valid())
ret = sprintf(buf, "%i\n", stp_info.ctn);
mutex_unlock(&stp_mutex);
return ret;
}
static DEVICE_ATTR_RO(ctn_type);
static ssize_t dst_offset_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
ssize_t ret = -ENODATA;
mutex_lock(&stp_mutex);
if (stpinfo_valid() && (stp_info.vbits & 0x2000))
ret = sprintf(buf, "%i\n", (int)(s16) stp_info.dsto);
mutex_unlock(&stp_mutex);
return ret;
}
static DEVICE_ATTR_RO(dst_offset);
static ssize_t leap_seconds_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
ssize_t ret = -ENODATA;
mutex_lock(&stp_mutex);
if (stpinfo_valid() && (stp_info.vbits & 0x8000))
ret = sprintf(buf, "%i\n", (int)(s16) stp_info.leaps);
mutex_unlock(&stp_mutex);
return ret;
}
static DEVICE_ATTR_RO(leap_seconds);
static ssize_t leap_seconds_scheduled_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct stp_stzi stzi;
ssize_t ret;
mutex_lock(&stp_mutex);
if (!stpinfo_valid() || !(stp_info.vbits & 0x8000) || !stp_info.lu) {
mutex_unlock(&stp_mutex);
return -ENODATA;
}
ret = chsc_stzi(stp_page, &stzi, sizeof(stzi));
mutex_unlock(&stp_mutex);
if (ret < 0)
return ret;
if (!stzi.lsoib.p)
return sprintf(buf, "0,0\n");
return sprintf(buf, "%lu,%d\n",
tod_to_ns(stzi.lsoib.nlsout - TOD_UNIX_EPOCH) / NSEC_PER_SEC,
stzi.lsoib.nlso - stzi.lsoib.also);
}
static DEVICE_ATTR_RO(leap_seconds_scheduled);
static ssize_t stratum_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
ssize_t ret = -ENODATA;
mutex_lock(&stp_mutex);
if (stpinfo_valid())
ret = sprintf(buf, "%i\n", (int)(s16) stp_info.stratum);
mutex_unlock(&stp_mutex);
return ret;
}
static DEVICE_ATTR_RO(stratum);
static ssize_t time_offset_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
ssize_t ret = -ENODATA;
mutex_lock(&stp_mutex);
if (stpinfo_valid() && (stp_info.vbits & 0x0800))
ret = sprintf(buf, "%i\n", (int) stp_info.tto);
mutex_unlock(&stp_mutex);
return ret;
}
static DEVICE_ATTR_RO(time_offset);
static ssize_t time_zone_offset_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
ssize_t ret = -ENODATA;
mutex_lock(&stp_mutex);
if (stpinfo_valid() && (stp_info.vbits & 0x4000))
ret = sprintf(buf, "%i\n", (int)(s16) stp_info.tzo);
mutex_unlock(&stp_mutex);
return ret;
}
static DEVICE_ATTR_RO(time_zone_offset);
static ssize_t timing_mode_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
ssize_t ret = -ENODATA;
mutex_lock(&stp_mutex);
if (stpinfo_valid())
ret = sprintf(buf, "%i\n", stp_info.tmd);
mutex_unlock(&stp_mutex);
return ret;
}
static DEVICE_ATTR_RO(timing_mode);
static ssize_t timing_state_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
ssize_t ret = -ENODATA;
mutex_lock(&stp_mutex);
if (stpinfo_valid())
ret = sprintf(buf, "%i\n", stp_info.tst);
mutex_unlock(&stp_mutex);
return ret;
}
static DEVICE_ATTR_RO(timing_state);
static ssize_t online_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%i\n", stp_online);
}
static ssize_t online_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
unsigned int value;
value = simple_strtoul(buf, NULL, 0);
if (value != 0 && value != 1)
return -EINVAL;
if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
return -EOPNOTSUPP;
mutex_lock(&stp_mutex);
stp_online = value;
if (stp_online)
set_bit(CLOCK_SYNC_STP, &clock_sync_flags);
else
clear_bit(CLOCK_SYNC_STP, &clock_sync_flags);
queue_work(time_sync_wq, &stp_work);
mutex_unlock(&stp_mutex);
return count;
}
/*
* Can't use DEVICE_ATTR because the attribute should be named
* stp/online but dev_attr_online already exists in this file.
*/
static DEVICE_ATTR_RW(online);
static struct attribute *stp_dev_attrs[] = {
&dev_attr_ctn_id.attr,
&dev_attr_ctn_type.attr,
&dev_attr_dst_offset.attr,
&dev_attr_leap_seconds.attr,
&dev_attr_online.attr,
&dev_attr_leap_seconds_scheduled.attr,
&dev_attr_stratum.attr,
&dev_attr_time_offset.attr,
&dev_attr_time_zone_offset.attr,
&dev_attr_timing_mode.attr,
&dev_attr_timing_state.attr,
NULL
};
ATTRIBUTE_GROUPS(stp_dev);
static int __init stp_init_sysfs(void)
{
return subsys_system_register(&stp_subsys, stp_dev_groups);
}
device_initcall(stp_init_sysfs);
| linux-master | arch/s390/kernel/time.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2005, 2011
*
* Author(s): Rolf Adelsberger,
* Michael Holzheu <[email protected]>
*/
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/ftrace.h>
#include <linux/debug_locks.h>
#include <asm/pfault.h>
#include <asm/cio.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/ipl.h>
#include <asm/diag.h>
#include <asm/elf.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/abs_lowcore.h>
#include <asm/os_info.h>
#include <asm/set_memory.h>
#include <asm/stacktrace.h>
#include <asm/switch_to.h>
#include <asm/nmi.h>
#include <asm/sclp.h>
typedef void (*relocate_kernel_t)(unsigned long, unsigned long, unsigned long);
typedef int (*purgatory_t)(int);
extern const unsigned char relocate_kernel[];
extern const unsigned long long relocate_kernel_len;
#ifdef CONFIG_CRASH_DUMP
/*
* Reset the system, copy boot CPU registers to absolute zero,
* and jump to the kdump image
*/
static void __do_machine_kdump(void *data)
{
struct kimage *image = data;
purgatory_t purgatory;
unsigned long prefix;
purgatory = (purgatory_t)image->start;
/* store_status() saved the prefix register to lowcore */
prefix = (unsigned long) S390_lowcore.prefixreg_save_area;
/* Now do the reset */
s390_reset_system();
/*
* Copy dump CPU store status info to absolute zero.
* This needs to be done *after* s390_reset_system() has set the
* prefix register of this CPU to zero.
*/
memcpy(absolute_pointer(__LC_FPREGS_SAVE_AREA),
phys_to_virt(prefix + __LC_FPREGS_SAVE_AREA), 512);
call_nodat(1, int, purgatory, int, 1);
/* Die if kdump returns */
disabled_wait();
}
/*
* Start kdump: create a LGR log entry, store status of all CPUs and
* branch to __do_machine_kdump.
*/
static noinline void __machine_kdump(void *image)
{
struct mcesa *mcesa;
union ctlreg2 cr2_old, cr2_new;
int this_cpu, cpu;
lgr_info_log();
/* Get status of the other CPUs */
this_cpu = smp_find_processor_id(stap());
for_each_online_cpu(cpu) {
if (cpu == this_cpu)
continue;
if (smp_store_status(cpu))
continue;
}
/* Store status of the boot CPU */
mcesa = __va(S390_lowcore.mcesad & MCESA_ORIGIN_MASK);
if (MACHINE_HAS_VX)
save_vx_regs((__vector128 *) mcesa->vector_save_area);
if (MACHINE_HAS_GS) {
__ctl_store(cr2_old.val, 2, 2);
cr2_new = cr2_old;
cr2_new.gse = 1;
__ctl_load(cr2_new.val, 2, 2);
save_gs_cb((struct gs_cb *) mcesa->guarded_storage_save_area);
__ctl_load(cr2_old.val, 2, 2);
}
/*
* To create a good backchain for this CPU in the dump, store_status()
* is passed the address of a function. The address is saved into
* the PSW save area of the boot CPU and the function is invoked as
* a tail call of store_status. The backchain in the dump will look
* like this:
* restart_int_handler -> __machine_kexec -> __do_machine_kdump
* The call to store_status() will not return.
*/
store_status(__do_machine_kdump, image);
}
#endif /* CONFIG_CRASH_DUMP */
/*
* Check if kdump checksums are valid: We call purgatory with parameter "0"
*/
static bool kdump_csum_valid(struct kimage *image)
{
#ifdef CONFIG_CRASH_DUMP
purgatory_t purgatory = (purgatory_t)image->start;
int rc;
rc = call_nodat(1, int, purgatory, int, 0);
return rc == 0;
#else
return false;
#endif
}
#ifdef CONFIG_CRASH_DUMP
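/*
 * Free the given part of the crashkernel memory range page by page and
 * record the remaining reserved area (if any) in the os_info block.
 */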
void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
{
unsigned long addr, size;
for (addr = begin; addr < end; addr += PAGE_SIZE)
free_reserved_page(pfn_to_page(addr >> PAGE_SHIFT));
size = begin - crashk_res.start;
if (size)
os_info_crashkernel_add(crashk_res.start, size);
else
os_info_crashkernel_add(0, 0);
}
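/* Toggle read-only protection of the crashkernel memory range. */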
static void crash_protect_pages(int protect)
{
unsigned long size;
if (!crashk_res.end)
return;
size = resource_size(&crashk_res);
if (protect)
set_memory_ro(crashk_res.start, size >> PAGE_SHIFT);
else
set_memory_rw(crashk_res.start, size >> PAGE_SHIFT);
}
void arch_kexec_protect_crashkres(void)
{
crash_protect_pages(1);
}
void arch_kexec_unprotect_crashkres(void)
{
crash_protect_pages(0);
}
#endif
/*
* Give back memory to hypervisor before new kdump is loaded
*/
static int machine_kexec_prepare_kdump(void)
{
#ifdef CONFIG_CRASH_DUMP
if (MACHINE_IS_VM)
diag10_range(PFN_DOWN(crashk_res.start),
PFN_DOWN(crashk_res.end - crashk_res.start + 1));
return 0;
#else
return -EINVAL;
#endif
}
int machine_kexec_prepare(struct kimage *image)
{
void *reboot_code_buffer;
if (image->type == KEXEC_TYPE_CRASH)
return machine_kexec_prepare_kdump();
/* We don't support anything but the default image type for now. */
if (image->type != KEXEC_TYPE_DEFAULT)
return -EINVAL;
/* Get the destination where the assembler code should be copied to.*/
reboot_code_buffer = page_to_virt(image->control_code_page);
/* Then copy it */
memcpy(reboot_code_buffer, relocate_kernel, relocate_kernel_len);
return 0;
}
void machine_kexec_cleanup(struct kimage *image)
{
}
void arch_crash_save_vmcoreinfo(void)
{
struct lowcore *abs_lc;
VMCOREINFO_SYMBOL(lowcore_ptr);
VMCOREINFO_SYMBOL(high_memory);
VMCOREINFO_LENGTH(lowcore_ptr, NR_CPUS);
vmcoreinfo_append_str("SAMODE31=%lx\n", (unsigned long)__samode31);
vmcoreinfo_append_str("EAMODE31=%lx\n", (unsigned long)__eamode31);
vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
abs_lc = get_abs_lowcore();
abs_lc->vmcore_info = paddr_vmcoreinfo_note();
put_abs_lowcore(abs_lc);
}
void machine_shutdown(void)
{
}
void machine_crash_shutdown(struct pt_regs *regs)
{
set_os_info_reipl_block();
}
/*
* Do normal kexec
*/
static void __do_machine_kexec(void *data)
{
unsigned long data_mover, entry, diag308_subcode;
struct kimage *image = data;
data_mover = page_to_phys(image->control_code_page);
entry = virt_to_phys(&image->head);
diag308_subcode = DIAG308_CLEAR_RESET;
if (sclp.has_iplcc)
diag308_subcode |= DIAG308_FLAG_EI;
s390_reset_system();
call_nodat(3, void, (relocate_kernel_t)data_mover,
unsigned long, entry,
unsigned long, image->start,
unsigned long, diag308_subcode);
/* Die if kexec returns */
disabled_wait();
}
/*
* Reset system and call either kdump or normal kexec
*/
static void __machine_kexec(void *data)
{
pfault_fini();
tracing_off();
debug_locks_off();
#ifdef CONFIG_CRASH_DUMP
if (((struct kimage *) data)->type == KEXEC_TYPE_CRASH)
__machine_kdump(data);
#endif
__do_machine_kexec(data);
}
/*
* Do either kdump or normal kexec. In case of kdump we first ask
* purgatory, if kdump checksums are valid.
*/
void machine_kexec(struct kimage *image)
{
if (image->type == KEXEC_TYPE_CRASH && !kdump_csum_valid(image))
return;
tracer_disable();
smp_send_stop();
smp_call_ipl_cpu(__machine_kexec, image);
}
| linux-master | arch/s390/kernel/machine_kexec.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Performance event support for s390x
*
* Copyright IBM Corp. 2012, 2013
* Author(s): Hendrik Brueckner <[email protected]>
*/
#define KMSG_COMPONENT "perf"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/kvm_host.h>
#include <linux/percpu.h>
#include <linux/export.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/sysfs.h>
#include <asm/irq.h>
#include <asm/cpu_mf.h>
#include <asm/lowcore.h>
#include <asm/processor.h>
#include <asm/sysinfo.h>
#include <asm/unwind.h>
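/* Return the SIE control block stored in the current kernel stack frame. */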
static struct kvm_s390_sie_block *sie_block(struct pt_regs *regs)
{
struct stack_frame *stack = (struct stack_frame *) regs->gprs[15];
if (!stack)
return NULL;
return (struct kvm_s390_sie_block *)stack->sie_control_block;
}
static bool is_in_guest(struct pt_regs *regs)
{
if (user_mode(regs))
return false;
#if IS_ENABLED(CONFIG_KVM)
return instruction_pointer(regs) == (unsigned long) &sie_exit;
#else
return false;
#endif
}
static unsigned long guest_is_user_mode(struct pt_regs *regs)
{
return sie_block(regs)->gpsw.mask & PSW_MASK_PSTATE;
}
static unsigned long instruction_pointer_guest(struct pt_regs *regs)
{
return sie_block(regs)->gpsw.addr;
}
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
return is_in_guest(regs) ? instruction_pointer_guest(regs)
: instruction_pointer(regs);
}
static unsigned long perf_misc_guest_flags(struct pt_regs *regs)
{
return guest_is_user_mode(regs) ? PERF_RECORD_MISC_GUEST_USER
: PERF_RECORD_MISC_GUEST_KERNEL;
}
static unsigned long perf_misc_flags_sf(struct pt_regs *regs)
{
struct perf_sf_sde_regs *sde_regs;
unsigned long flags;
sde_regs = (struct perf_sf_sde_regs *) &regs->int_parm_long;
if (sde_regs->in_guest)
flags = user_mode(regs) ? PERF_RECORD_MISC_GUEST_USER
: PERF_RECORD_MISC_GUEST_KERNEL;
else
flags = user_mode(regs) ? PERF_RECORD_MISC_USER
: PERF_RECORD_MISC_KERNEL;
return flags;
}
unsigned long perf_misc_flags(struct pt_regs *regs)
{
/* Check if the cpum_sf PMU has created the pt_regs structure.
* In this case, perf misc flags can be easily extracted. Otherwise,
* do regular checks on the pt_regs content.
*/
if (regs->int_code == 0x1407 && regs->int_parm == CPU_MF_INT_SF_PRA)
if (!regs->gprs[15])
return perf_misc_flags_sf(regs);
if (is_in_guest(regs))
return perf_misc_guest_flags(regs);
return user_mode(regs) ? PERF_RECORD_MISC_USER
: PERF_RECORD_MISC_KERNEL;
}
static void print_debug_cf(void)
{
struct cpumf_ctr_info cf_info;
int cpu = smp_processor_id();
memset(&cf_info, 0, sizeof(cf_info));
if (!qctri(&cf_info))
pr_info("CPU[%i] CPUM_CF: ver=%u.%u A=%04x E=%04x C=%04x\n",
cpu, cf_info.cfvn, cf_info.csvn,
cf_info.auth_ctl, cf_info.enable_ctl, cf_info.act_ctl);
}
static void print_debug_sf(void)
{
struct hws_qsi_info_block si;
int cpu = smp_processor_id();
memset(&si, 0, sizeof(si));
if (qsi(&si))
return;
pr_info("CPU[%i] CPUM_SF: basic=%i diag=%i min=%lu max=%lu cpu_speed=%u\n",
cpu, si.as, si.ad, si.min_sampl_rate, si.max_sampl_rate,
si.cpu_speed);
if (si.as)
pr_info("CPU[%i] CPUM_SF: Basic-sampling: a=%i e=%i c=%i"
" bsdes=%i tear=%016lx dear=%016lx\n", cpu,
si.as, si.es, si.cs, si.bsdes, si.tear, si.dear);
if (si.ad)
pr_info("CPU[%i] CPUM_SF: Diagnostic-sampling: a=%i e=%i c=%i"
" dsdes=%i tear=%016lx dear=%016lx\n", cpu,
si.ad, si.ed, si.cd, si.dsdes, si.tear, si.dear);
}
void perf_event_print_debug(void)
{
unsigned long flags;
local_irq_save(flags);
if (cpum_cf_avail())
print_debug_cf();
if (cpum_sf_avail())
print_debug_sf();
local_irq_restore(flags);
}
/* Service level infrastructure */
static void sl_print_counter(struct seq_file *m)
{
struct cpumf_ctr_info ci;
memset(&ci, 0, sizeof(ci));
if (qctri(&ci))
return;
seq_printf(m, "CPU-MF: Counter facility: version=%u.%u "
"authorization=%04x\n", ci.cfvn, ci.csvn, ci.auth_ctl);
}
static void sl_print_sampling(struct seq_file *m)
{
struct hws_qsi_info_block si;
memset(&si, 0, sizeof(si));
if (qsi(&si))
return;
if (!si.as && !si.ad)
return;
seq_printf(m, "CPU-MF: Sampling facility: min_rate=%lu max_rate=%lu"
" cpu_speed=%u\n", si.min_sampl_rate, si.max_sampl_rate,
si.cpu_speed);
if (si.as)
seq_printf(m, "CPU-MF: Sampling facility: mode=basic"
" sample_size=%u\n", si.bsdes);
if (si.ad)
seq_printf(m, "CPU-MF: Sampling facility: mode=diagnostic"
" sample_size=%u\n", si.dsdes);
}
static void service_level_perf_print(struct seq_file *m,
struct service_level *sl)
{
if (cpum_cf_avail())
sl_print_counter(m);
if (cpum_sf_avail())
sl_print_sampling(m);
}
static struct service_level service_level_perf = {
.seq_print = service_level_perf_print,
};
static int __init service_level_perf_register(void)
{
return register_service_level(&service_level_perf);
}
arch_initcall(service_level_perf_register);
void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
struct unwind_state state;
unsigned long addr;
unwind_for_each_frame(&state, current, regs, 0) {
addr = unwind_get_return_address(&state);
if (!addr || perf_callchain_store(entry, addr))
return;
}
}
/* Perf definitions for PMU event attributes in sysfs */
ssize_t cpumf_events_sysfs_show(struct device *dev,
struct device_attribute *attr, char *page)
{
struct perf_pmu_events_attr *pmu_attr;
pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
return sprintf(page, "event=0x%04llx\n", pmu_attr->id);
}
| linux-master | arch/s390/kernel/perf_event.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2012
* Author(s): Jan Glauber <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/sched/task_stack.h>
#include <asm/runtime_instr.h>
#include <asm/cpu_mf.h>
#include <asm/irq.h>
#include "entry.h"
/* empty control block to disable RI by loading it */
struct runtime_instr_cb runtime_instr_empty_cb;
void runtime_instr_release(struct task_struct *tsk)
{
kfree(tsk->thread.ri_cb);
}
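/*
 * Disable runtime instrumentation for the current task: load the empty
 * control block, free the task's control block and remove the RI bit
 * from the PSW.
 */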
static void disable_runtime_instr(void)
{
struct task_struct *task = current;
struct pt_regs *regs;
if (!task->thread.ri_cb)
return;
regs = task_pt_regs(task);
preempt_disable();
load_runtime_instr_cb(&runtime_instr_empty_cb);
kfree(task->thread.ri_cb);
task->thread.ri_cb = NULL;
preempt_enable();
/*
* Make sure the RI bit is deleted from the PSW. If the user did not
* switch off RI before the system call the process will get a
* specification exception otherwise.
*/
regs->psw.mask &= ~PSW_MASK_RI;
}
static void init_runtime_instr_cb(struct runtime_instr_cb *cb)
{
cb->rla = 0xfff;
cb->s = 1;
cb->k = 1;
cb->ps = 1;
cb->pc = 1;
cb->key = PAGE_DEFAULT_KEY >> 4;
cb->v = 1;
}
/*
* The signum argument is unused. In older kernels it was used to
* specify a real-time signal. For backwards compatibility user space
* should pass a valid real-time signal number (the signum argument
* was checked in older kernels).
*/
SYSCALL_DEFINE2(s390_runtime_instr, int, command, int, signum)
{
struct runtime_instr_cb *cb;
if (!test_facility(64))
return -EOPNOTSUPP;
if (command == S390_RUNTIME_INSTR_STOP) {
disable_runtime_instr();
return 0;
}
if (command != S390_RUNTIME_INSTR_START)
return -EINVAL;
if (!current->thread.ri_cb) {
cb = kzalloc(sizeof(*cb), GFP_KERNEL);
if (!cb)
return -ENOMEM;
} else {
cb = current->thread.ri_cb;
memset(cb, 0, sizeof(*cb));
}
init_runtime_instr_cb(cb);
/* now load the control block to make it available */
preempt_disable();
current->thread.ri_cb = cb;
load_runtime_instr_cb(cb);
preempt_enable();
return 0;
}
| linux-master | arch/s390/kernel/runtime_instr.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Jump label s390 support
*
* Copyright IBM Corp. 2011
* Author(s): Jan Glauber <[email protected]>
*/
#include <linux/uaccess.h>
#include <linux/jump_label.h>
#include <linux/module.h>
#include <asm/text-patching.h>
#include <asm/ipl.h>
struct insn {
u16 opcode;
s32 offset;
} __packed;
static void jump_label_make_nop(struct jump_entry *entry, struct insn *insn)
{
/* brcl 0,offset */
insn->opcode = 0xc004;
insn->offset = (jump_entry_target(entry) - jump_entry_code(entry)) >> 1;
}
static void jump_label_make_branch(struct jump_entry *entry, struct insn *insn)
{
/* brcl 15,offset */
insn->opcode = 0xc0f4;
insn->offset = (jump_entry_target(entry) - jump_entry_code(entry)) >> 1;
}
static void jump_label_bug(struct jump_entry *entry, struct insn *expected,
struct insn *new)
{
unsigned char *ipc = (unsigned char *)jump_entry_code(entry);
unsigned char *ipe = (unsigned char *)expected;
unsigned char *ipn = (unsigned char *)new;
pr_emerg("Jump label code mismatch at %pS [%px]\n", ipc, ipc);
pr_emerg("Found: %6ph\n", ipc);
pr_emerg("Expected: %6ph\n", ipe);
pr_emerg("New: %6ph\n", ipn);
panic("Corrupted kernel text");
}
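/*
 * Patch the jump entry to either a taken branch (brcl 15) or a nop
 * (brcl 0), after verifying that the expected old instruction is
 * present at the patch site.
 */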
static void jump_label_transform(struct jump_entry *entry,
enum jump_label_type type)
{
void *code = (void *)jump_entry_code(entry);
struct insn old, new;
if (type == JUMP_LABEL_JMP) {
jump_label_make_nop(entry, &old);
jump_label_make_branch(entry, &new);
} else {
jump_label_make_branch(entry, &old);
jump_label_make_nop(entry, &new);
}
if (memcmp(code, &old, sizeof(old)))
jump_label_bug(entry, &old, &new);
s390_kernel_write(code, &new, sizeof(new));
}
void arch_jump_label_transform(struct jump_entry *entry,
enum jump_label_type type)
{
jump_label_transform(entry, type);
text_poke_sync();
}
bool arch_jump_label_transform_queue(struct jump_entry *entry,
enum jump_label_type type)
{
jump_label_transform(entry, type);
return true;
}
void arch_jump_label_transform_apply(void)
{
text_poke_sync();
}
| linux-master | arch/s390/kernel/jump_label.c |
// SPDX-License-Identifier: GPL-2.0
/*
* store hypervisor information instruction emulation functions.
*
* Copyright IBM Corp. 2016
* Author(s): Janosch Frank <[email protected]>
*/
#include <linux/errno.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/syscalls.h>
#include <linux/mutex.h>
#include <asm/asm-offsets.h>
#include <asm/sclp.h>
#include <asm/diag.h>
#include <asm/sysinfo.h>
#include <asm/ebcdic.h>
#include <asm/facility.h>
#include <asm/sthyi.h>
#include "entry.h"
#define DED_WEIGHT 0xffff
/*
* CP and IFL as EBCDIC strings, SP/0x40 determines the end of string
* as they are justified with spaces.
*/
#define CP 0xc3d7404040404040UL
#define IFL 0xc9c6d34040404040UL
enum hdr_flags {
HDR_NOT_LPAR = 0x10,
HDR_STACK_INCM = 0x20,
HDR_STSI_UNAV = 0x40,
HDR_PERF_UNAV = 0x80,
};
enum mac_validity {
MAC_NAME_VLD = 0x20,
MAC_ID_VLD = 0x40,
MAC_CNT_VLD = 0x80,
};
enum par_flag {
PAR_MT_EN = 0x80,
};
enum par_validity {
PAR_GRP_VLD = 0x08,
PAR_ID_VLD = 0x10,
PAR_ABS_VLD = 0x20,
PAR_WGHT_VLD = 0x40,
PAR_PCNT_VLD = 0x80,
};
struct hdr_sctn {
u8 infhflg1;
u8 infhflg2; /* reserved */
u8 infhval1; /* reserved */
u8 infhval2; /* reserved */
u8 reserved[3];
u8 infhygct;
u16 infhtotl;
u16 infhdln;
u16 infmoff;
u16 infmlen;
u16 infpoff;
u16 infplen;
u16 infhoff1;
u16 infhlen1;
u16 infgoff1;
u16 infglen1;
u16 infhoff2;
u16 infhlen2;
u16 infgoff2;
u16 infglen2;
u16 infhoff3;
u16 infhlen3;
u16 infgoff3;
u16 infglen3;
u8 reserved2[4];
} __packed;
struct mac_sctn {
u8 infmflg1; /* reserved */
u8 infmflg2; /* reserved */
u8 infmval1;
u8 infmval2; /* reserved */
u16 infmscps;
u16 infmdcps;
u16 infmsifl;
u16 infmdifl;
char infmname[8];
char infmtype[4];
char infmmanu[16];
char infmseq[16];
char infmpman[4];
u8 reserved[4];
} __packed;
struct par_sctn {
u8 infpflg1;
u8 infpflg2; /* reserved */
u8 infpval1;
u8 infpval2; /* reserved */
u16 infppnum;
u16 infpscps;
u16 infpdcps;
u16 infpsifl;
u16 infpdifl;
u16 reserved;
char infppnam[8];
u32 infpwbcp;
u32 infpabcp;
u32 infpwbif;
u32 infpabif;
char infplgnm[8];
u32 infplgcp;
u32 infplgif;
} __packed;
struct sthyi_sctns {
struct hdr_sctn hdr;
struct mac_sctn mac;
struct par_sctn par;
} __packed;
struct cpu_inf {
u64 lpar_cap;
u64 lpar_grp_cap;
u64 lpar_weight;
u64 all_weight;
int cpu_num_ded;
int cpu_num_shd;
};
struct lpar_cpu_inf {
struct cpu_inf cp;
struct cpu_inf ifl;
};
/*
* STHYI requires extensive locking in the higher hypervisors
* and is computationally and memory intensive. Therefore we
* cache the retrieved data; it remains valid for one second.
*/
#define CACHE_VALID_JIFFIES HZ
struct sthyi_info {
void *info;
unsigned long end;
};
static DEFINE_MUTEX(sthyi_mutex);
static struct sthyi_info sthyi_cache;
static inline u64 cpu_id(u8 ctidx, void *diag224_buf)
{
return *((u64 *)(diag224_buf + (ctidx + 1) * DIAG204_CPU_NAME_LEN));
}
/*
* Scales the cpu capping from the lpar range to the one expected in
* sthyi data.
*
* diag204 reports a cap in hundredths of processor units.
* z/VM's range for one core is 0 - 0x10000.
*/
static u32 scale_cap(u32 in)
{
return (0x10000 * in) / 100;
}
static void fill_hdr(struct sthyi_sctns *sctns)
{
sctns->hdr.infhdln = sizeof(sctns->hdr);
sctns->hdr.infmoff = sizeof(sctns->hdr);
sctns->hdr.infmlen = sizeof(sctns->mac);
sctns->hdr.infplen = sizeof(sctns->par);
sctns->hdr.infpoff = sctns->hdr.infhdln + sctns->hdr.infmlen;
sctns->hdr.infhtotl = sctns->hdr.infpoff + sctns->hdr.infplen;
}
static void fill_stsi_mac(struct sthyi_sctns *sctns,
struct sysinfo_1_1_1 *sysinfo)
{
sclp_ocf_cpc_name_copy(sctns->mac.infmname);
if (*(u64 *)sctns->mac.infmname != 0)
sctns->mac.infmval1 |= MAC_NAME_VLD;
if (stsi(sysinfo, 1, 1, 1))
return;
memcpy(sctns->mac.infmtype, sysinfo->type, sizeof(sctns->mac.infmtype));
memcpy(sctns->mac.infmmanu, sysinfo->manufacturer, sizeof(sctns->mac.infmmanu));
memcpy(sctns->mac.infmpman, sysinfo->plant, sizeof(sctns->mac.infmpman));
memcpy(sctns->mac.infmseq, sysinfo->sequence, sizeof(sctns->mac.infmseq));
sctns->mac.infmval1 |= MAC_ID_VLD;
}
static void fill_stsi_par(struct sthyi_sctns *sctns,
struct sysinfo_2_2_2 *sysinfo)
{
if (stsi(sysinfo, 2, 2, 2))
return;
sctns->par.infppnum = sysinfo->lpar_number;
memcpy(sctns->par.infppnam, sysinfo->name, sizeof(sctns->par.infppnam));
sctns->par.infpval1 |= PAR_ID_VLD;
}
static void fill_stsi(struct sthyi_sctns *sctns)
{
void *sysinfo;
/* Errors are handled through the validity bits in the response. */
sysinfo = (void *)__get_free_page(GFP_KERNEL);
if (!sysinfo)
return;
fill_stsi_mac(sctns, sysinfo);
fill_stsi_par(sctns, sysinfo);
free_pages((unsigned long)sysinfo, 0);
}
static void fill_diag_mac(struct sthyi_sctns *sctns,
struct diag204_x_phys_block *block,
void *diag224_buf)
{
int i;
for (i = 0; i < block->hdr.cpus; i++) {
switch (cpu_id(block->cpus[i].ctidx, diag224_buf)) {
case CP:
if (block->cpus[i].weight == DED_WEIGHT)
sctns->mac.infmdcps++;
else
sctns->mac.infmscps++;
break;
case IFL:
if (block->cpus[i].weight == DED_WEIGHT)
sctns->mac.infmdifl++;
else
sctns->mac.infmsifl++;
break;
}
}
sctns->mac.infmval1 |= MAC_CNT_VLD;
}
/* Returns a pointer to the next partition block. */
static struct diag204_x_part_block *lpar_cpu_inf(struct lpar_cpu_inf *part_inf,
bool this_lpar,
void *diag224_buf,
struct diag204_x_part_block *block)
{
int i, capped = 0, weight_cp = 0, weight_ifl = 0;
struct cpu_inf *cpu_inf;
for (i = 0; i < block->hdr.rcpus; i++) {
if (!(block->cpus[i].cflag & DIAG204_CPU_ONLINE))
continue;
switch (cpu_id(block->cpus[i].ctidx, diag224_buf)) {
case CP:
cpu_inf = &part_inf->cp;
if (block->cpus[i].cur_weight < DED_WEIGHT)
weight_cp |= block->cpus[i].cur_weight;
break;
case IFL:
cpu_inf = &part_inf->ifl;
if (block->cpus[i].cur_weight < DED_WEIGHT)
weight_ifl |= block->cpus[i].cur_weight;
break;
default:
continue;
}
if (!this_lpar)
continue;
capped |= block->cpus[i].cflag & DIAG204_CPU_CAPPED;
cpu_inf->lpar_cap |= block->cpus[i].cpu_type_cap;
cpu_inf->lpar_grp_cap |= block->cpus[i].group_cpu_type_cap;
if (block->cpus[i].weight == DED_WEIGHT)
cpu_inf->cpu_num_ded += 1;
else
cpu_inf->cpu_num_shd += 1;
}
if (this_lpar && capped) {
part_inf->cp.lpar_weight = weight_cp;
part_inf->ifl.lpar_weight = weight_ifl;
}
part_inf->cp.all_weight += weight_cp;
part_inf->ifl.all_weight += weight_ifl;
return (struct diag204_x_part_block *)&block->cpus[i];
}
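/*
 * Fill the machine and partition sections with CPU count, weight and
 * capping data obtained via diagnose 204 and diagnose 224.
 */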
static void fill_diag(struct sthyi_sctns *sctns)
{
int i, r, pages;
bool this_lpar;
void *diag204_buf;
void *diag224_buf = NULL;
struct diag204_x_info_blk_hdr *ti_hdr;
struct diag204_x_part_block *part_block;
struct diag204_x_phys_block *phys_block;
struct lpar_cpu_inf lpar_inf = {};
/* Errors are handled through the validity bits in the response. */
pages = diag204((unsigned long)DIAG204_SUBC_RSI |
(unsigned long)DIAG204_INFO_EXT, 0, NULL);
if (pages <= 0)
return;
diag204_buf = __vmalloc_node(array_size(pages, PAGE_SIZE),
PAGE_SIZE, GFP_KERNEL, NUMA_NO_NODE,
__builtin_return_address(0));
if (!diag204_buf)
return;
r = diag204((unsigned long)DIAG204_SUBC_STIB7 |
(unsigned long)DIAG204_INFO_EXT, pages, diag204_buf);
if (r < 0)
goto out;
diag224_buf = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
if (!diag224_buf || diag224(diag224_buf))
goto out;
ti_hdr = diag204_buf;
part_block = diag204_buf + sizeof(*ti_hdr);
for (i = 0; i < ti_hdr->npar; i++) {
/*
* For the calling lpar we also need to get the cpu
* caps and weights. The time information block header
* specifies the offset to the partition block of the
* caller lpar, so we know when we process its data.
*/
this_lpar = (void *)part_block - diag204_buf == ti_hdr->this_part;
part_block = lpar_cpu_inf(&lpar_inf, this_lpar, diag224_buf,
part_block);
}
phys_block = (struct diag204_x_phys_block *)part_block;
part_block = diag204_buf + ti_hdr->this_part;
if (part_block->hdr.mtid)
sctns->par.infpflg1 = PAR_MT_EN;
sctns->par.infpval1 |= PAR_GRP_VLD;
sctns->par.infplgcp = scale_cap(lpar_inf.cp.lpar_grp_cap);
sctns->par.infplgif = scale_cap(lpar_inf.ifl.lpar_grp_cap);
memcpy(sctns->par.infplgnm, part_block->hdr.hardware_group_name,
sizeof(sctns->par.infplgnm));
sctns->par.infpscps = lpar_inf.cp.cpu_num_shd;
sctns->par.infpdcps = lpar_inf.cp.cpu_num_ded;
sctns->par.infpsifl = lpar_inf.ifl.cpu_num_shd;
sctns->par.infpdifl = lpar_inf.ifl.cpu_num_ded;
sctns->par.infpval1 |= PAR_PCNT_VLD;
sctns->par.infpabcp = scale_cap(lpar_inf.cp.lpar_cap);
sctns->par.infpabif = scale_cap(lpar_inf.ifl.lpar_cap);
sctns->par.infpval1 |= PAR_ABS_VLD;
/*
* Everything below needs global performance data to be
* meaningful.
*/
if (!(ti_hdr->flags & DIAG204_LPAR_PHYS_FLG)) {
sctns->hdr.infhflg1 |= HDR_PERF_UNAV;
goto out;
}
fill_diag_mac(sctns, phys_block, diag224_buf);
if (lpar_inf.cp.lpar_weight) {
sctns->par.infpwbcp = sctns->mac.infmscps * 0x10000 *
lpar_inf.cp.lpar_weight / lpar_inf.cp.all_weight;
}
if (lpar_inf.ifl.lpar_weight) {
sctns->par.infpwbif = sctns->mac.infmsifl * 0x10000 *
lpar_inf.ifl.lpar_weight / lpar_inf.ifl.all_weight;
}
sctns->par.infpval1 |= PAR_WGHT_VLD;
out:
free_page((unsigned long)diag224_buf);
vfree(diag204_buf);
}
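/*
 * Execute the STHYI instruction for the given destination page and
 * return its condition code; the return code from register R2 + 1 is
 * passed back via *rc.
 */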
static int sthyi(u64 vaddr, u64 *rc)
{
union register_pair r1 = { .even = 0, }; /* subcode */
union register_pair r2 = { .even = vaddr, };
int cc;
asm volatile(
".insn rre,0xB2560000,%[r1],%[r2]\n"
"ipm %[cc]\n"
"srl %[cc],28\n"
: [cc] "=&d" (cc), [r2] "+&d" (r2.pair)
: [r1] "d" (r1.pair)
: "memory", "cc");
*rc = r2.odd;
return cc;
}
static int fill_dst(void *dst, u64 *rc)
{
struct sthyi_sctns *sctns = (struct sthyi_sctns *)dst;
/*
* If the facility is on, we don't want to emulate the instruction.
* We ask the hypervisor to provide the data.
*/
if (test_facility(74))
return sthyi((u64)dst, rc);
fill_hdr(sctns);
fill_stsi(sctns);
fill_diag(sctns);
*rc = 0;
return 0;
}
static int sthyi_init_cache(void)
{
if (sthyi_cache.info)
return 0;
sthyi_cache.info = (void *)get_zeroed_page(GFP_KERNEL);
if (!sthyi_cache.info)
return -ENOMEM;
sthyi_cache.end = jiffies - 1; /* expired */
return 0;
}
static int sthyi_update_cache(u64 *rc)
{
int r;
memset(sthyi_cache.info, 0, PAGE_SIZE);
r = fill_dst(sthyi_cache.info, rc);
if (r)
return r;
sthyi_cache.end = jiffies + CACHE_VALID_JIFFIES;
return r;
}
/*
* sthyi_fill - Fill page with data returned by the STHYI instruction
*
* @dst: Pointer to zeroed page
* @rc: Pointer for storing the return code of the instruction
*
* Fills the destination with system information returned by the STHYI
* instruction. The data is generated by emulation or execution of STHYI,
* if available. The return value is either a negative error value or
* the condition code that would be returned, the rc parameter is the
* return code which is passed in register R2 + 1.
*/
int sthyi_fill(void *dst, u64 *rc)
{
int r;
mutex_lock(&sthyi_mutex);
r = sthyi_init_cache();
if (r)
goto out;
if (time_is_before_jiffies(sthyi_cache.end)) {
/* cache expired */
r = sthyi_update_cache(rc);
if (r)
goto out;
}
*rc = 0;
memcpy(dst, sthyi_cache.info, PAGE_SIZE);
out:
mutex_unlock(&sthyi_mutex);
return r;
}
EXPORT_SYMBOL_GPL(sthyi_fill);
SYSCALL_DEFINE4(s390_sthyi, unsigned long, function_code, void __user *, buffer,
u64 __user *, return_code, unsigned long, flags)
{
u64 sthyi_rc;
void *info;
int r;
if (flags)
return -EINVAL;
if (function_code != STHYI_FC_CP_IFL_CAP)
return -EOPNOTSUPP;
info = (void *)get_zeroed_page(GFP_KERNEL);
if (!info)
return -ENOMEM;
r = sthyi_fill(info, &sthyi_rc);
if (r < 0)
goto out;
if (return_code && put_user(sthyi_rc, return_code)) {
r = -EFAULT;
goto out;
}
if (copy_to_user(buffer, info, PAGE_SIZE))
r = -EFAULT;
out:
free_page((unsigned long)info);
return r;
}
| linux-master | arch/s390/kernel/sthyi.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2008
* Author(s): Martin Schwidefsky ([email protected])
*/
#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/stop_machine.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/sched/mm.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/mm_types.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <asm/diag.h>
#include <asm/facility.h>
#include <asm/elf.h>
#include <asm/lowcore.h>
#include <asm/param.h>
#include <asm/sclp.h>
#include <asm/smp.h>
unsigned long __read_mostly elf_hwcap;
char elf_platform[ELF_PLATFORM_SIZE];
struct cpu_info {
unsigned int cpu_mhz_dynamic;
unsigned int cpu_mhz_static;
struct cpuid cpu_id;
};
static DEFINE_PER_CPU(struct cpu_info, cpu_info);
static DEFINE_PER_CPU(int, cpu_relax_retry);
static bool machine_has_cpu_mhz;
void __init cpu_detect_mhz_feature(void)
{
if (test_facility(34) && __ecag(ECAG_CPU_ATTRIBUTE, 0) != -1UL)
machine_has_cpu_mhz = true;
}
static void update_cpu_mhz(void *arg)
{
unsigned long mhz;
struct cpu_info *c;
mhz = __ecag(ECAG_CPU_ATTRIBUTE, 0);
c = this_cpu_ptr(&cpu_info);
c->cpu_mhz_dynamic = mhz >> 32;
c->cpu_mhz_static = mhz & 0xffffffff;
}
void s390_update_cpu_mhz(void)
{
s390_adjust_jiffies();
if (machine_has_cpu_mhz)
on_each_cpu(update_cpu_mhz, NULL, 0);
}
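/*
 * Called while spinning in stop_machine(): after spin_retry iterations
 * without progress, yield the CPU to the next CPU in the mask if its
 * vCPU is currently preempted by the hypervisor.
 */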
void notrace stop_machine_yield(const struct cpumask *cpumask)
{
int cpu, this_cpu;
this_cpu = smp_processor_id();
if (__this_cpu_inc_return(cpu_relax_retry) >= spin_retry) {
__this_cpu_write(cpu_relax_retry, 0);
cpu = cpumask_next_wrap(this_cpu, cpumask, this_cpu, false);
if (cpu >= nr_cpu_ids)
return;
if (arch_vcpu_is_preempted(cpu))
smp_yield_cpu(cpu);
}
}
/*
* cpu_init - initializes state that is per-CPU.
*/
void cpu_init(void)
{
struct cpuid *id = this_cpu_ptr(&cpu_info.cpu_id);
get_cpu_id(id);
if (machine_has_cpu_mhz)
update_cpu_mhz(NULL);
mmgrab(&init_mm);
current->active_mm = &init_mm;
BUG_ON(current->mm);
enter_lazy_tlb(&init_mm, current);
}
static void show_facilities(struct seq_file *m)
{
unsigned int bit;
seq_puts(m, "facilities :");
for_each_set_bit_inv(bit, (long *)&stfle_fac_list, MAX_FACILITY_BIT)
seq_printf(m, " %d", bit);
seq_putc(m, '\n');
}
static void show_cpu_summary(struct seq_file *m, void *v)
{
static const char *hwcap_str[] = {
[HWCAP_NR_ESAN3] = "esan3",
[HWCAP_NR_ZARCH] = "zarch",
[HWCAP_NR_STFLE] = "stfle",
[HWCAP_NR_MSA] = "msa",
[HWCAP_NR_LDISP] = "ldisp",
[HWCAP_NR_EIMM] = "eimm",
[HWCAP_NR_DFP] = "dfp",
[HWCAP_NR_HPAGE] = "edat",
[HWCAP_NR_ETF3EH] = "etf3eh",
[HWCAP_NR_HIGH_GPRS] = "highgprs",
[HWCAP_NR_TE] = "te",
[HWCAP_NR_VXRS] = "vx",
[HWCAP_NR_VXRS_BCD] = "vxd",
[HWCAP_NR_VXRS_EXT] = "vxe",
[HWCAP_NR_GS] = "gs",
[HWCAP_NR_VXRS_EXT2] = "vxe2",
[HWCAP_NR_VXRS_PDE] = "vxp",
[HWCAP_NR_SORT] = "sort",
[HWCAP_NR_DFLT] = "dflt",
[HWCAP_NR_VXRS_PDE2] = "vxp2",
[HWCAP_NR_NNPA] = "nnpa",
[HWCAP_NR_PCI_MIO] = "pcimio",
[HWCAP_NR_SIE] = "sie",
};
int i, cpu;
BUILD_BUG_ON(ARRAY_SIZE(hwcap_str) != HWCAP_NR_MAX);
seq_printf(m, "vendor_id : IBM/S390\n"
"# processors : %i\n"
"bogomips per cpu: %lu.%02lu\n",
num_online_cpus(), loops_per_jiffy/(500000/HZ),
(loops_per_jiffy/(5000/HZ))%100);
seq_printf(m, "max thread id : %d\n", smp_cpu_mtid);
seq_puts(m, "features\t: ");
for (i = 0; i < ARRAY_SIZE(hwcap_str); i++)
if (hwcap_str[i] && (elf_hwcap & (1UL << i)))
seq_printf(m, "%s ", hwcap_str[i]);
seq_puts(m, "\n");
show_facilities(m);
show_cacheinfo(m);
for_each_online_cpu(cpu) {
struct cpuid *id = &per_cpu(cpu_info.cpu_id, cpu);
seq_printf(m, "processor %d: "
"version = %02X, "
"identification = %06X, "
"machine = %04X\n",
cpu, id->version, id->ident, id->machine);
}
}
static int __init setup_hwcaps(void)
{
/* instructions named N3, "backported" to esa-mode */
elf_hwcap |= HWCAP_ESAN3;
/* z/Architecture mode active */
elf_hwcap |= HWCAP_ZARCH;
/* store-facility-list-extended */
if (test_facility(7))
elf_hwcap |= HWCAP_STFLE;
/* message-security assist */
if (test_facility(17))
elf_hwcap |= HWCAP_MSA;
/* long-displacement */
if (test_facility(19))
elf_hwcap |= HWCAP_LDISP;
/* extended-immediate */
elf_hwcap |= HWCAP_EIMM;
/* extended-translation facility 3 enhancement */
if (test_facility(22) && test_facility(30))
elf_hwcap |= HWCAP_ETF3EH;
/* decimal floating point & perform floating point operation */
if (test_facility(42) && test_facility(44))
elf_hwcap |= HWCAP_DFP;
/* huge page support */
if (MACHINE_HAS_EDAT1)
elf_hwcap |= HWCAP_HPAGE;
/* 64-bit register support for 31-bit processes */
elf_hwcap |= HWCAP_HIGH_GPRS;
/* transactional execution */
if (MACHINE_HAS_TE)
elf_hwcap |= HWCAP_TE;
/*
* Vector extension can be disabled with the "novx" parameter.
* Use MACHINE_HAS_VX instead of facility bit 129.
*/
if (MACHINE_HAS_VX) {
elf_hwcap |= HWCAP_VXRS;
if (test_facility(134))
elf_hwcap |= HWCAP_VXRS_BCD;
if (test_facility(135))
elf_hwcap |= HWCAP_VXRS_EXT;
if (test_facility(148))
elf_hwcap |= HWCAP_VXRS_EXT2;
if (test_facility(152))
elf_hwcap |= HWCAP_VXRS_PDE;
if (test_facility(192))
elf_hwcap |= HWCAP_VXRS_PDE2;
}
if (test_facility(150))
elf_hwcap |= HWCAP_SORT;
if (test_facility(151))
elf_hwcap |= HWCAP_DFLT;
if (test_facility(165))
elf_hwcap |= HWCAP_NNPA;
/* guarded storage */
if (MACHINE_HAS_GS)
elf_hwcap |= HWCAP_GS;
if (MACHINE_HAS_PCI_MIO)
elf_hwcap |= HWCAP_PCI_MIO;
/* virtualization support */
if (sclp.has_sief2)
elf_hwcap |= HWCAP_SIE;
return 0;
}
arch_initcall(setup_hwcaps);
static int __init setup_elf_platform(void)
{
struct cpuid cpu_id;
get_cpu_id(&cpu_id);
add_device_randomness(&cpu_id, sizeof(cpu_id));
switch (cpu_id.machine) {
default: /* Use "z10" as default. */
strcpy(elf_platform, "z10");
break;
case 0x2817:
case 0x2818:
strcpy(elf_platform, "z196");
break;
case 0x2827:
case 0x2828:
strcpy(elf_platform, "zEC12");
break;
case 0x2964:
case 0x2965:
strcpy(elf_platform, "z13");
break;
case 0x3906:
case 0x3907:
strcpy(elf_platform, "z14");
break;
case 0x8561:
case 0x8562:
strcpy(elf_platform, "z15");
break;
case 0x3931:
case 0x3932:
strcpy(elf_platform, "z16");
break;
}
return 0;
}
arch_initcall(setup_elf_platform);
static void show_cpu_topology(struct seq_file *m, unsigned long n)
{
#ifdef CONFIG_SCHED_TOPOLOGY
seq_printf(m, "physical id : %d\n", topology_physical_package_id(n));
seq_printf(m, "core id : %d\n", topology_core_id(n));
seq_printf(m, "book id : %d\n", topology_book_id(n));
seq_printf(m, "drawer id : %d\n", topology_drawer_id(n));
seq_printf(m, "dedicated : %d\n", topology_cpu_dedicated(n));
seq_printf(m, "address : %d\n", smp_cpu_get_cpu_address(n));
seq_printf(m, "siblings : %d\n", cpumask_weight(topology_core_cpumask(n)));
seq_printf(m, "cpu cores : %d\n", topology_booted_cores(n));
#endif /* CONFIG_SCHED_TOPOLOGY */
}
static void show_cpu_ids(struct seq_file *m, unsigned long n)
{
struct cpuid *id = &per_cpu(cpu_info.cpu_id, n);
seq_printf(m, "version : %02X\n", id->version);
seq_printf(m, "identification : %06X\n", id->ident);
seq_printf(m, "machine : %04X\n", id->machine);
}
static void show_cpu_mhz(struct seq_file *m, unsigned long n)
{
struct cpu_info *c = per_cpu_ptr(&cpu_info, n);
if (!machine_has_cpu_mhz)
return;
seq_printf(m, "cpu MHz dynamic : %d\n", c->cpu_mhz_dynamic);
seq_printf(m, "cpu MHz static : %d\n", c->cpu_mhz_static);
}
/*
* show_cpuinfo - Get information on one CPU for use by procfs.
*/
static int show_cpuinfo(struct seq_file *m, void *v)
{
unsigned long n = (unsigned long) v - 1;
unsigned long first = cpumask_first(cpu_online_mask);
if (n == first)
show_cpu_summary(m, v);
seq_printf(m, "\ncpu number : %ld\n", n);
show_cpu_topology(m, n);
show_cpu_ids(m, n);
show_cpu_mhz(m, n);
return 0;
}
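/* Map the seq_file position to the next online CPU, or return NULL at the end. */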
static inline void *c_update(loff_t *pos)
{
if (*pos)
*pos = cpumask_next(*pos - 1, cpu_online_mask);
else
*pos = cpumask_first(cpu_online_mask);
return *pos < nr_cpu_ids ? (void *)*pos + 1 : NULL;
}
static void *c_start(struct seq_file *m, loff_t *pos)
{
cpus_read_lock();
return c_update(pos);
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
++*pos;
return c_update(pos);
}
static void c_stop(struct seq_file *m, void *v)
{
cpus_read_unlock();
}
const struct seq_operations cpuinfo_op = {
.start = c_start,
.next = c_next,
.stop = c_stop,
.show = show_cpuinfo,
};
| linux-master | arch/s390/kernel/processor.c |
// SPDX-License-Identifier: GPL-2.0
/*
* User-space Probes (UProbes) for s390
*
* Copyright IBM Corp. 2014
* Author(s): Jan Willeke,
*/
#include <linux/uaccess.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/kdebug.h>
#include <linux/sched/task_stack.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/kprobes.h>
#include <asm/dis.h>
#include "entry.h"
#define UPROBE_TRAP_NR UINT_MAX
int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
unsigned long addr)
{
return probe_is_prohibited_opcode(auprobe->insn);
}
int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
if (psw_bits(regs->psw).eaba == PSW_BITS_AMODE_24BIT)
return -EINVAL;
if (!is_compat_task() && psw_bits(regs->psw).eaba == PSW_BITS_AMODE_31BIT)
return -EINVAL;
clear_thread_flag(TIF_PER_TRAP);
auprobe->saved_per = psw_bits(regs->psw).per;
auprobe->saved_int_code = regs->int_code;
regs->int_code = UPROBE_TRAP_NR;
regs->psw.addr = current->utask->xol_vaddr;
set_tsk_thread_flag(current, TIF_UPROBE_SINGLESTEP);
update_cr_regs(current);
return 0;
}
bool arch_uprobe_xol_was_trapped(struct task_struct *tsk)
{
struct pt_regs *regs = task_pt_regs(tsk);
if (regs->int_code != UPROBE_TRAP_NR)
return true;
return false;
}
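/*
 * Check whether the PER cause and control bits of a single-stepped
 * instruction should result in a PER event being reported to user space.
 */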
static int check_per_event(unsigned short cause, unsigned long control,
struct pt_regs *regs)
{
if (!(regs->psw.mask & PSW_MASK_PER))
return 0;
/* user space single step */
if (control == 0)
return 1;
/* over indication for storage alteration */
if ((control & 0x20200000) && (cause & 0x2000))
return 1;
if (cause & 0x8000) {
/* all branches */
if ((control & 0x80800000) == 0x80000000)
return 1;
/* branch into selected range */
if (((control & 0x80800000) == 0x80800000) &&
regs->psw.addr >= current->thread.per_user.start &&
regs->psw.addr <= current->thread.per_user.end)
return 1;
}
return 0;
}
int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
int fixup = probe_get_fixup_type(auprobe->insn);
struct uprobe_task *utask = current->utask;
clear_tsk_thread_flag(current, TIF_UPROBE_SINGLESTEP);
update_cr_regs(current);
psw_bits(regs->psw).per = auprobe->saved_per;
regs->int_code = auprobe->saved_int_code;
if (fixup & FIXUP_PSW_NORMAL)
regs->psw.addr += utask->vaddr - utask->xol_vaddr;
if (fixup & FIXUP_RETURN_REGISTER) {
int reg = (auprobe->insn[0] & 0xf0) >> 4;
regs->gprs[reg] += utask->vaddr - utask->xol_vaddr;
}
if (fixup & FIXUP_BRANCH_NOT_TAKEN) {
int ilen = insn_length(auprobe->insn[0] >> 8);
if (regs->psw.addr - utask->xol_vaddr == ilen)
regs->psw.addr = utask->vaddr + ilen;
}
if (check_per_event(current->thread.per_event.cause,
current->thread.per_user.control, regs)) {
/* fix per address */
current->thread.per_event.address = utask->vaddr;
/* trigger per event */
set_thread_flag(TIF_PER_TRAP);
}
return 0;
}
int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
void *data)
{
struct die_args *args = data;
struct pt_regs *regs = args->regs;
if (!user_mode(regs))
return NOTIFY_DONE;
if (regs->int_code & 0x200) /* Trap during transaction */
return NOTIFY_DONE;
switch (val) {
case DIE_BPT:
if (uprobe_pre_sstep_notifier(regs))
return NOTIFY_STOP;
break;
case DIE_SSTEP:
if (uprobe_post_sstep_notifier(regs))
return NOTIFY_STOP;
break;
default:
break;
}
return NOTIFY_DONE;
}
void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
clear_thread_flag(TIF_UPROBE_SINGLESTEP);
regs->int_code = auprobe->saved_int_code;
regs->psw.addr = current->utask->vaddr;
current->thread.per_event.address = current->utask->vaddr;
}
unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline,
struct pt_regs *regs)
{
unsigned long orig;
orig = regs->gprs[14];
regs->gprs[14] = trampoline;
return orig;
}
bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
struct pt_regs *regs)
{
if (ctx == RP_CHECK_CHAIN_CALL)
return user_stack_pointer(regs) <= ret->stack;
else
return user_stack_pointer(regs) < ret->stack;
}
/* Instruction Emulation */
static void adjust_psw_addr(psw_t *psw, unsigned long len)
{
psw->addr = __rewind_psw(*psw, -len);
}
#define EMU_ILLEGAL_OP 1
#define EMU_SPECIFICATION 2
#define EMU_ADDRESSING 3
#define emu_load_ril(ptr, output) \
({ \
unsigned int mask = sizeof(*(ptr)) - 1; \
__typeof__(*(ptr)) input; \
int __rc = 0; \
\
if ((u64 __force)ptr & mask) \
__rc = EMU_SPECIFICATION; \
else if (get_user(input, ptr)) \
__rc = EMU_ADDRESSING; \
else \
*(output) = input; \
__rc; \
})
#define emu_store_ril(regs, ptr, input) \
({ \
unsigned int mask = sizeof(*(ptr)) - 1; \
__typeof__(ptr) __ptr = (ptr); \
int __rc = 0; \
\
if ((u64 __force)__ptr & mask) \
__rc = EMU_SPECIFICATION; \
else if (put_user(*(input), __ptr)) \
__rc = EMU_ADDRESSING; \
if (__rc == 0) \
sim_stor_event(regs, \
(void __force *)__ptr, \
mask + 1); \
__rc; \
})
#define emu_cmp_ril(regs, ptr, cmp) \
({ \
unsigned int mask = sizeof(*(ptr)) - 1; \
__typeof__(*(ptr)) input; \
int __rc = 0; \
\
if ((u64 __force)ptr & mask) \
__rc = EMU_SPECIFICATION; \
else if (get_user(input, ptr)) \
__rc = EMU_ADDRESSING; \
else if (input > *(cmp)) \
psw_bits((regs)->psw).cc = 1; \
else if (input < *(cmp)) \
psw_bits((regs)->psw).cc = 2; \
else \
psw_bits((regs)->psw).cc = 0; \
__rc; \
})
struct insn_ril {
u8 opc0;
u8 reg : 4;
u8 opc1 : 4;
s32 disp;
} __packed;
union split_register {
u64 u64;
u32 u32[2];
u16 u16[4];
s64 s64;
s32 s32[2];
s16 s16[4];
};
/*
* If the user PER registers are set up to trace storage alterations and an
* emulated store hit a matching address, a user trap is generated.
*/
static void sim_stor_event(struct pt_regs *regs, void *addr, int len)
{
if (!(regs->psw.mask & PSW_MASK_PER))
return;
if (!(current->thread.per_user.control & PER_EVENT_STORE))
return;
if ((void *)current->thread.per_user.start > (addr + len))
return;
if ((void *)current->thread.per_user.end < addr)
return;
current->thread.per_event.address = regs->psw.addr;
current->thread.per_event.cause = PER_EVENT_STORE >> 16;
set_thread_flag(TIF_PER_TRAP);
}
/*
* PC-relative instructions are emulated, since parameters may not be
* accessible from the xol area due to range limitations.
*/
static void handle_insn_ril(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
union split_register *rx;
struct insn_ril *insn;
unsigned int ilen;
void *uptr;
int rc = 0;
insn = (struct insn_ril *) &auprobe->insn;
rx = (union split_register *) &regs->gprs[insn->reg];
uptr = (void *)(regs->psw.addr + (insn->disp * 2));
ilen = insn_length(insn->opc0);
switch (insn->opc0) {
case 0xc0:
switch (insn->opc1) {
case 0x00: /* larl */
rx->u64 = (unsigned long)uptr;
break;
}
break;
case 0xc4:
switch (insn->opc1) {
case 0x02: /* llhrl */
rc = emu_load_ril((u16 __user *)uptr, &rx->u32[1]);
break;
case 0x04: /* lghrl */
rc = emu_load_ril((s16 __user *)uptr, &rx->u64);
break;
case 0x05: /* lhrl */
rc = emu_load_ril((s16 __user *)uptr, &rx->u32[1]);
break;
case 0x06: /* llghrl */
rc = emu_load_ril((u16 __user *)uptr, &rx->u64);
break;
case 0x08: /* lgrl */
rc = emu_load_ril((u64 __user *)uptr, &rx->u64);
break;
case 0x0c: /* lgfrl */
rc = emu_load_ril((s32 __user *)uptr, &rx->u64);
break;
case 0x0d: /* lrl */
rc = emu_load_ril((u32 __user *)uptr, &rx->u32[1]);
break;
case 0x0e: /* llgfrl */
rc = emu_load_ril((u32 __user *)uptr, &rx->u64);
break;
case 0x07: /* sthrl */
rc = emu_store_ril(regs, (u16 __user *)uptr, &rx->u16[3]);
break;
case 0x0b: /* stgrl */
rc = emu_store_ril(regs, (u64 __user *)uptr, &rx->u64);
break;
case 0x0f: /* strl */
rc = emu_store_ril(regs, (u32 __user *)uptr, &rx->u32[1]);
break;
}
break;
case 0xc6:
switch (insn->opc1) {
case 0x04: /* cghrl */
rc = emu_cmp_ril(regs, (s16 __user *)uptr, &rx->s64);
break;
case 0x05: /* chrl */
rc = emu_cmp_ril(regs, (s16 __user *)uptr, &rx->s32[1]);
break;
case 0x06: /* clghrl */
rc = emu_cmp_ril(regs, (u16 __user *)uptr, &rx->u64);
break;
case 0x07: /* clhrl */
rc = emu_cmp_ril(regs, (u16 __user *)uptr, &rx->u32[1]);
break;
case 0x08: /* cgrl */
rc = emu_cmp_ril(regs, (s64 __user *)uptr, &rx->s64);
break;
case 0x0a: /* clgrl */
rc = emu_cmp_ril(regs, (u64 __user *)uptr, &rx->u64);
break;
case 0x0c: /* cgfrl */
rc = emu_cmp_ril(regs, (s32 __user *)uptr, &rx->s64);
break;
case 0x0d: /* crl */
rc = emu_cmp_ril(regs, (s32 __user *)uptr, &rx->s32[1]);
break;
case 0x0e: /* clgfrl */
rc = emu_cmp_ril(regs, (u32 __user *)uptr, &rx->u64);
break;
case 0x0f: /* clrl */
rc = emu_cmp_ril(regs, (u32 __user *)uptr, &rx->u32[1]);
break;
}
break;
}
adjust_psw_addr(&regs->psw, ilen);
switch (rc) {
case EMU_ILLEGAL_OP:
regs->int_code = ilen << 16 | 0x0001;
do_report_trap(regs, SIGILL, ILL_ILLOPC, NULL);
break;
case EMU_SPECIFICATION:
regs->int_code = ilen << 16 | 0x0006;
do_report_trap(regs, SIGILL, ILL_ILLOPC, NULL);
break;
case EMU_ADDRESSING:
regs->int_code = ilen << 16 | 0x0005;
do_report_trap(regs, SIGSEGV, SEGV_MAPERR, NULL);
break;
}
}
bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
if ((psw_bits(regs->psw).eaba == PSW_BITS_AMODE_24BIT) ||
((psw_bits(regs->psw).eaba == PSW_BITS_AMODE_31BIT) &&
!is_compat_task())) {
regs->psw.addr = __rewind_psw(regs->psw, UPROBE_SWBP_INSN_SIZE);
do_report_trap(regs, SIGILL, ILL_ILLADR, NULL);
return true;
}
if (probe_is_insn_relative_long(auprobe->insn)) {
handle_insn_ril(auprobe, regs);
return true;
}
return false;
}
| linux-master | arch/s390/kernel/uprobes.c |
// SPDX-License-Identifier: GPL-2.0
/*
* S390 version
* Copyright IBM Corp. 1999, 2007
* Author(s): Martin Schwidefsky ([email protected]),
* Christian Borntraeger ([email protected]),
*/
#define KMSG_COMPONENT "cpcmd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <asm/diag.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
static DEFINE_SPINLOCK(cpcmd_lock);
static char cpcmd_buf[241];
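/* Issue diagnose 0x8 (CP command) without requesting a response buffer. */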
static int diag8_noresponse(int cmdlen)
{
asm volatile(
" diag %[rx],%[ry],0x8\n"
: [ry] "+&d" (cmdlen)
: [rx] "d" (__pa(cpcmd_buf))
: "cc");
return cmdlen;
}
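/* Issue diagnose 0x8 and store the CP response in the supplied buffer. */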
static int diag8_response(int cmdlen, char *response, int *rlen)
{
union register_pair rx, ry;
int cc;
rx.even = __pa(cpcmd_buf);
rx.odd = __pa(response);
ry.even = cmdlen | 0x40000000L;
ry.odd = *rlen;
asm volatile(
" diag %[rx],%[ry],0x8\n"
" ipm %[cc]\n"
" srl %[cc],28\n"
: [cc] "=&d" (cc), [ry] "+&d" (ry.pair)
: [rx] "d" (rx.pair)
: "cc");
if (cc)
*rlen += ry.odd;
else
*rlen = ry.odd;
return ry.even;
}
/*
* __cpcmd has some restrictions over cpcmd
* - __cpcmd is unlocked and therefore not SMP-safe
*/
int __cpcmd(const char *cmd, char *response, int rlen, int *response_code)
{
int cmdlen;
int rc;
int response_len;
cmdlen = strlen(cmd);
BUG_ON(cmdlen > 240);
memcpy(cpcmd_buf, cmd, cmdlen);
ASCEBC(cpcmd_buf, cmdlen);
diag_stat_inc(DIAG_STAT_X008);
if (response) {
memset(response, 0, rlen);
response_len = rlen;
rc = diag8_response(cmdlen, response, &rlen);
EBCASC(response, response_len);
} else {
rc = diag8_noresponse(cmdlen);
}
if (response_code)
*response_code = rc;
return rlen;
}
EXPORT_SYMBOL(__cpcmd);
int cpcmd(const char *cmd, char *response, int rlen, int *response_code)
{
unsigned long flags;
char *lowbuf;
int len;
if (is_vmalloc_or_module_addr(response)) {
lowbuf = kmalloc(rlen, GFP_KERNEL);
if (!lowbuf) {
pr_warn("The cpcmd kernel function failed to allocate a response buffer\n");
return -ENOMEM;
}
spin_lock_irqsave(&cpcmd_lock, flags);
len = __cpcmd(cmd, lowbuf, rlen, response_code);
spin_unlock_irqrestore(&cpcmd_lock, flags);
memcpy(response, lowbuf, rlen);
kfree(lowbuf);
} else {
spin_lock_irqsave(&cpcmd_lock, flags);
len = __cpcmd(cmd, response, rlen, response_code);
spin_unlock_irqrestore(&cpcmd_lock, flags);
}
return len;
}
EXPORT_SYMBOL(cpcmd);
| linux-master | arch/s390/kernel/cpcmd.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Disassemble s390 instructions.
*
* Copyright IBM Corp. 2007
* Author(s): Martin Schwidefsky ([email protected]),
*/
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/reboot.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <linux/atomic.h>
#include <linux/io.h>
#include <asm/dis.h>
#include <asm/cpcmd.h>
#include <asm/lowcore.h>
#include <asm/debug.h>
#include <asm/irq.h>
/* Type of operand */
#define OPERAND_GPR 0x1 /* Operand printed as %rx */
#define OPERAND_FPR 0x2 /* Operand printed as %fx */
#define OPERAND_AR 0x4 /* Operand printed as %ax */
#define OPERAND_CR 0x8 /* Operand printed as %cx */
#define OPERAND_VR 0x10 /* Operand printed as %vx */
#define OPERAND_DISP 0x20 /* Operand printed as displacement */
#define OPERAND_BASE 0x40 /* Operand printed as base register */
#define OPERAND_INDEX 0x80 /* Operand printed as index register */
#define OPERAND_PCREL 0x100 /* Operand printed as pc-relative symbol */
#define OPERAND_SIGNED 0x200 /* Operand printed as signed value */
#define OPERAND_LENGTH 0x400 /* Operand printed as length (+1) */
struct s390_operand {
unsigned char bits; /* The number of bits in the operand. */
unsigned char shift; /* The number of bits to shift. */
unsigned short flags; /* One bit syntax flags. */
};
struct s390_insn {
union {
const char name[5];
struct {
unsigned char zero;
unsigned int offset;
} __packed;
};
unsigned char opfrag;
unsigned char format;
};
struct s390_opcode_offset {
unsigned char opcode;
unsigned char mask;
unsigned char byte;
unsigned short offset;
unsigned short count;
} __packed;
enum {
UNUSED,
A_8, /* Access reg. starting at position 8 */
A_12, /* Access reg. starting at position 12 */
A_24, /* Access reg. starting at position 24 */
A_28, /* Access reg. starting at position 28 */
B_16, /* Base register starting at position 16 */
B_32, /* Base register starting at position 32 */
C_8, /* Control reg. starting at position 8 */
C_12, /* Control reg. starting at position 12 */
D20_20, /* 20 bit displacement starting at 20 */
D_20, /* Displacement starting at position 20 */
D_36, /* Displacement starting at position 36 */
F_8, /* FPR starting at position 8 */
F_12, /* FPR starting at position 12 */
F_16, /* FPR starting at position 16 */
F_24, /* FPR starting at position 24 */
F_28, /* FPR starting at position 28 */
F_32, /* FPR starting at position 32 */
I8_8, /* 8 bit signed value starting at 8 */
I8_32, /* 8 bit signed value starting at 32 */
I16_16, /* 16 bit signed value starting at 16 */
I16_32, /* 16 bit signed value starting at 32 */
I32_16, /* 32 bit signed value starting at 16 */
J12_12, /* 12 bit PC relative offset at 12 */
J16_16, /* 16 bit PC relative offset at 16 */
J16_32, /* 16 bit PC relative offset at 32 */
J24_24, /* 24 bit PC relative offset at 24 */
J32_16, /* 32 bit PC relative offset at 16 */
L4_8, /* 4 bit length starting at position 8 */
L4_12, /* 4 bit length starting at position 12 */
L8_8, /* 8 bit length starting at position 8 */
R_8, /* GPR starting at position 8 */
R_12, /* GPR starting at position 12 */
R_16, /* GPR starting at position 16 */
R_24, /* GPR starting at position 24 */
R_28, /* GPR starting at position 28 */
U4_8, /* 4 bit unsigned value starting at 8 */
U4_12, /* 4 bit unsigned value starting at 12 */
U4_16, /* 4 bit unsigned value starting at 16 */
U4_20, /* 4 bit unsigned value starting at 20 */
U4_24, /* 4 bit unsigned value starting at 24 */
U4_28, /* 4 bit unsigned value starting at 28 */
U4_32, /* 4 bit unsigned value starting at 32 */
U4_36, /* 4 bit unsigned value starting at 36 */
U8_8, /* 8 bit unsigned value starting at 8 */
U8_16, /* 8 bit unsigned value starting at 16 */
U8_24, /* 8 bit unsigned value starting at 24 */
U8_28, /* 8 bit unsigned value starting at 28 */
U8_32, /* 8 bit unsigned value starting at 32 */
U12_16, /* 12 bit unsigned value starting at 16 */
U16_16, /* 16 bit unsigned value starting at 16 */
U16_32, /* 16 bit unsigned value starting at 32 */
U32_16, /* 32 bit unsigned value starting at 16 */
VX_12, /* Vector index register starting at position 12 */
V_8, /* Vector reg. starting at position 8 */
V_12, /* Vector reg. starting at position 12 */
V_16, /* Vector reg. starting at position 16 */
V_32, /* Vector reg. starting at position 32 */
X_12, /* Index register starting at position 12 */
};
static const struct s390_operand operands[] = {
[UNUSED] = { 0, 0, 0 },
[A_8] = { 4, 8, OPERAND_AR },
[A_12] = { 4, 12, OPERAND_AR },
[A_24] = { 4, 24, OPERAND_AR },
[A_28] = { 4, 28, OPERAND_AR },
[B_16] = { 4, 16, OPERAND_BASE | OPERAND_GPR },
[B_32] = { 4, 32, OPERAND_BASE | OPERAND_GPR },
[C_8] = { 4, 8, OPERAND_CR },
[C_12] = { 4, 12, OPERAND_CR },
[D20_20] = { 20, 20, OPERAND_DISP | OPERAND_SIGNED },
[D_20] = { 12, 20, OPERAND_DISP },
[D_36] = { 12, 36, OPERAND_DISP },
[F_8] = { 4, 8, OPERAND_FPR },
[F_12] = { 4, 12, OPERAND_FPR },
[F_16] = { 4, 16, OPERAND_FPR },
[F_24] = { 4, 24, OPERAND_FPR },
[F_28] = { 4, 28, OPERAND_FPR },
[F_32] = { 4, 32, OPERAND_FPR },
[I8_8] = { 8, 8, OPERAND_SIGNED },
[I8_32] = { 8, 32, OPERAND_SIGNED },
[I16_16] = { 16, 16, OPERAND_SIGNED },
[I16_32] = { 16, 32, OPERAND_SIGNED },
[I32_16] = { 32, 16, OPERAND_SIGNED },
[J12_12] = { 12, 12, OPERAND_PCREL },
[J16_16] = { 16, 16, OPERAND_PCREL },
[J16_32] = { 16, 32, OPERAND_PCREL },
[J24_24] = { 24, 24, OPERAND_PCREL },
[J32_16] = { 32, 16, OPERAND_PCREL },
[L4_8] = { 4, 8, OPERAND_LENGTH },
[L4_12] = { 4, 12, OPERAND_LENGTH },
[L8_8] = { 8, 8, OPERAND_LENGTH },
[R_8] = { 4, 8, OPERAND_GPR },
[R_12] = { 4, 12, OPERAND_GPR },
[R_16] = { 4, 16, OPERAND_GPR },
[R_24] = { 4, 24, OPERAND_GPR },
[R_28] = { 4, 28, OPERAND_GPR },
[U4_8] = { 4, 8, 0 },
[U4_12] = { 4, 12, 0 },
[U4_16] = { 4, 16, 0 },
[U4_20] = { 4, 20, 0 },
[U4_24] = { 4, 24, 0 },
[U4_28] = { 4, 28, 0 },
[U4_32] = { 4, 32, 0 },
[U4_36] = { 4, 36, 0 },
[U8_8] = { 8, 8, 0 },
[U8_16] = { 8, 16, 0 },
[U8_24] = { 8, 24, 0 },
[U8_28] = { 8, 28, 0 },
[U8_32] = { 8, 32, 0 },
[U12_16] = { 12, 16, 0 },
[U16_16] = { 16, 16, 0 },
[U16_32] = { 16, 32, 0 },
[U32_16] = { 32, 16, 0 },
[VX_12] = { 4, 12, OPERAND_INDEX | OPERAND_VR },
[V_8] = { 4, 8, OPERAND_VR },
[V_12] = { 4, 12, OPERAND_VR },
[V_16] = { 4, 16, OPERAND_VR },
[V_32] = { 4, 32, OPERAND_VR },
[X_12] = { 4, 12, OPERAND_INDEX | OPERAND_GPR },
};
static const unsigned char formats[][6] = {
[INSTR_E] = { 0, 0, 0, 0, 0, 0 },
[INSTR_IE_UU] = { U4_24, U4_28, 0, 0, 0, 0 },
[INSTR_MII_UPP] = { U4_8, J12_12, J24_24 },
[INSTR_RIE_R0IU] = { R_8, I16_16, U4_32, 0, 0, 0 },
[INSTR_RIE_R0UU] = { R_8, U16_16, U4_32, 0, 0, 0 },
[INSTR_RIE_RRI0] = { R_8, R_12, I16_16, 0, 0, 0 },
[INSTR_RIE_RRP] = { R_8, R_12, J16_16, 0, 0, 0 },
[INSTR_RIE_RRPU] = { R_8, R_12, U4_32, J16_16, 0, 0 },
[INSTR_RIE_RRUUU] = { R_8, R_12, U8_16, U8_24, U8_32, 0 },
[INSTR_RIE_RUI0] = { R_8, I16_16, U4_12, 0, 0, 0 },
[INSTR_RIE_RUPI] = { R_8, I8_32, U4_12, J16_16, 0, 0 },
[INSTR_RIE_RUPU] = { R_8, U8_32, U4_12, J16_16, 0, 0 },
[INSTR_RIL_RI] = { R_8, I32_16, 0, 0, 0, 0 },
[INSTR_RIL_RP] = { R_8, J32_16, 0, 0, 0, 0 },
[INSTR_RIL_RU] = { R_8, U32_16, 0, 0, 0, 0 },
[INSTR_RIL_UP] = { U4_8, J32_16, 0, 0, 0, 0 },
[INSTR_RIS_RURDI] = { R_8, I8_32, U4_12, D_20, B_16, 0 },
[INSTR_RIS_RURDU] = { R_8, U8_32, U4_12, D_20, B_16, 0 },
[INSTR_RI_RI] = { R_8, I16_16, 0, 0, 0, 0 },
[INSTR_RI_RP] = { R_8, J16_16, 0, 0, 0, 0 },
[INSTR_RI_RU] = { R_8, U16_16, 0, 0, 0, 0 },
[INSTR_RI_UP] = { U4_8, J16_16, 0, 0, 0, 0 },
[INSTR_RRE_00] = { 0, 0, 0, 0, 0, 0 },
[INSTR_RRE_AA] = { A_24, A_28, 0, 0, 0, 0 },
[INSTR_RRE_AR] = { A_24, R_28, 0, 0, 0, 0 },
[INSTR_RRE_F0] = { F_24, 0, 0, 0, 0, 0 },
[INSTR_RRE_FF] = { F_24, F_28, 0, 0, 0, 0 },
[INSTR_RRE_FR] = { F_24, R_28, 0, 0, 0, 0 },
[INSTR_RRE_R0] = { R_24, 0, 0, 0, 0, 0 },
[INSTR_RRE_RA] = { R_24, A_28, 0, 0, 0, 0 },
[INSTR_RRE_RF] = { R_24, F_28, 0, 0, 0, 0 },
[INSTR_RRE_RR] = { R_24, R_28, 0, 0, 0, 0 },
[INSTR_RRF_0UFF] = { F_24, F_28, U4_20, 0, 0, 0 },
[INSTR_RRF_0URF] = { R_24, F_28, U4_20, 0, 0, 0 },
[INSTR_RRF_F0FF] = { F_16, F_24, F_28, 0, 0, 0 },
[INSTR_RRF_F0FF2] = { F_24, F_16, F_28, 0, 0, 0 },
[INSTR_RRF_F0FR] = { F_24, F_16, R_28, 0, 0, 0 },
[INSTR_RRF_FFRU] = { F_24, F_16, R_28, U4_20, 0, 0 },
[INSTR_RRF_FUFF] = { F_24, F_16, F_28, U4_20, 0, 0 },
[INSTR_RRF_FUFF2] = { F_24, F_28, F_16, U4_20, 0, 0 },
[INSTR_RRF_R0RR] = { R_24, R_16, R_28, 0, 0, 0 },
[INSTR_RRF_R0RR2] = { R_24, R_28, R_16, 0, 0, 0 },
[INSTR_RRF_RURR] = { R_24, R_28, R_16, U4_20, 0, 0 },
[INSTR_RRF_RURR2] = { R_24, R_16, R_28, U4_20, 0, 0 },
[INSTR_RRF_U0FF] = { F_24, U4_16, F_28, 0, 0, 0 },
[INSTR_RRF_U0RF] = { R_24, U4_16, F_28, 0, 0, 0 },
[INSTR_RRF_U0RR] = { R_24, R_28, U4_16, 0, 0, 0 },
[INSTR_RRF_URR] = { R_24, R_28, U8_16, 0, 0, 0 },
[INSTR_RRF_UUFF] = { F_24, U4_16, F_28, U4_20, 0, 0 },
[INSTR_RRF_UUFR] = { F_24, U4_16, R_28, U4_20, 0, 0 },
[INSTR_RRF_UURF] = { R_24, U4_16, F_28, U4_20, 0, 0 },
[INSTR_RRS_RRRDU] = { R_8, R_12, U4_32, D_20, B_16 },
[INSTR_RR_FF] = { F_8, F_12, 0, 0, 0, 0 },
[INSTR_RR_R0] = { R_8, 0, 0, 0, 0, 0 },
[INSTR_RR_RR] = { R_8, R_12, 0, 0, 0, 0 },
[INSTR_RR_U0] = { U8_8, 0, 0, 0, 0, 0 },
[INSTR_RR_UR] = { U4_8, R_12, 0, 0, 0, 0 },
[INSTR_RSI_RRP] = { R_8, R_12, J16_16, 0, 0, 0 },
[INSTR_RSL_LRDFU] = { F_32, D_20, L8_8, B_16, U4_36, 0 },
[INSTR_RSL_R0RD] = { D_20, L4_8, B_16, 0, 0, 0 },
[INSTR_RSY_AARD] = { A_8, A_12, D20_20, B_16, 0, 0 },
[INSTR_RSY_CCRD] = { C_8, C_12, D20_20, B_16, 0, 0 },
[INSTR_RSY_RDRU] = { R_8, D20_20, B_16, U4_12, 0, 0 },
[INSTR_RSY_RRRD] = { R_8, R_12, D20_20, B_16, 0, 0 },
[INSTR_RSY_RURD] = { R_8, U4_12, D20_20, B_16, 0, 0 },
[INSTR_RSY_RURD2] = { R_8, D20_20, B_16, U4_12, 0, 0 },
[INSTR_RS_AARD] = { A_8, A_12, D_20, B_16, 0, 0 },
[INSTR_RS_CCRD] = { C_8, C_12, D_20, B_16, 0, 0 },
[INSTR_RS_R0RD] = { R_8, D_20, B_16, 0, 0, 0 },
[INSTR_RS_RRRD] = { R_8, R_12, D_20, B_16, 0, 0 },
[INSTR_RS_RURD] = { R_8, U4_12, D_20, B_16, 0, 0 },
[INSTR_RXE_FRRD] = { F_8, D_20, X_12, B_16, 0, 0 },
[INSTR_RXE_RRRDU] = { R_8, D_20, X_12, B_16, U4_32, 0 },
[INSTR_RXF_FRRDF] = { F_32, F_8, D_20, X_12, B_16, 0 },
[INSTR_RXY_FRRD] = { F_8, D20_20, X_12, B_16, 0, 0 },
[INSTR_RXY_RRRD] = { R_8, D20_20, X_12, B_16, 0, 0 },
[INSTR_RXY_URRD] = { U4_8, D20_20, X_12, B_16, 0, 0 },
[INSTR_RX_FRRD] = { F_8, D_20, X_12, B_16, 0, 0 },
[INSTR_RX_RRRD] = { R_8, D_20, X_12, B_16, 0, 0 },
[INSTR_RX_URRD] = { U4_8, D_20, X_12, B_16, 0, 0 },
[INSTR_SIL_RDI] = { D_20, B_16, I16_32, 0, 0, 0 },
[INSTR_SIL_RDU] = { D_20, B_16, U16_32, 0, 0, 0 },
[INSTR_SIY_IRD] = { D20_20, B_16, I8_8, 0, 0, 0 },
[INSTR_SIY_RD] = { D20_20, B_16, 0, 0, 0, 0 },
[INSTR_SIY_URD] = { D20_20, B_16, U8_8, 0, 0, 0 },
[INSTR_SI_RD] = { D_20, B_16, 0, 0, 0, 0 },
[INSTR_SI_URD] = { D_20, B_16, U8_8, 0, 0, 0 },
[INSTR_SMI_U0RDP] = { U4_8, J16_32, D_20, B_16, 0, 0 },
[INSTR_SSE_RDRD] = { D_20, B_16, D_36, B_32, 0, 0 },
[INSTR_SSF_RRDRD] = { D_20, B_16, D_36, B_32, R_8, 0 },
[INSTR_SSF_RRDRD2] = { R_8, D_20, B_16, D_36, B_32, 0 },
[INSTR_SS_L0RDRD] = { D_20, L8_8, B_16, D_36, B_32, 0 },
[INSTR_SS_L2RDRD] = { D_20, B_16, D_36, L8_8, B_32, 0 },
[INSTR_SS_LIRDRD] = { D_20, L4_8, B_16, D_36, B_32, U4_12 },
[INSTR_SS_LLRDRD] = { D_20, L4_8, B_16, D_36, L4_12, B_32 },
[INSTR_SS_RRRDRD] = { D_20, R_8, B_16, D_36, B_32, R_12 },
[INSTR_SS_RRRDRD2] = { R_8, D_20, B_16, R_12, D_36, B_32 },
[INSTR_SS_RRRDRD3] = { R_8, R_12, D_20, B_16, D_36, B_32 },
[INSTR_S_00] = { 0, 0, 0, 0, 0, 0 },
[INSTR_S_RD] = { D_20, B_16, 0, 0, 0, 0 },
[INSTR_VRI_V0IU] = { V_8, I16_16, U4_32, 0, 0, 0 },
[INSTR_VRI_V0U] = { V_8, U16_16, 0, 0, 0, 0 },
[INSTR_VRI_V0UU2] = { V_8, U16_16, U4_32, 0, 0, 0 },
[INSTR_VRI_V0UUU] = { V_8, U8_16, U8_24, U4_32, 0, 0 },
[INSTR_VRI_VR0UU] = { V_8, R_12, U8_28, U4_24, 0, 0 },
[INSTR_VRI_VVUU] = { V_8, V_12, U16_16, U4_32, 0, 0 },
[INSTR_VRI_VVUUU] = { V_8, V_12, U12_16, U4_32, U4_28, 0 },
[INSTR_VRI_VVUUU2] = { V_8, V_12, U8_28, U8_16, U4_24, 0 },
[INSTR_VRI_VVV0U] = { V_8, V_12, V_16, U8_24, 0, 0 },
[INSTR_VRI_VVV0UU] = { V_8, V_12, V_16, U8_24, U4_32, 0 },
[INSTR_VRI_VVV0UU2] = { V_8, V_12, V_16, U8_28, U4_24, 0 },
[INSTR_VRR_0V] = { V_12, 0, 0, 0, 0, 0 },
[INSTR_VRR_0VV0U] = { V_12, V_16, U4_24, 0, 0, 0 },
[INSTR_VRR_RV0UU] = { R_8, V_12, U4_24, U4_28, 0, 0 },
[INSTR_VRR_VRR] = { V_8, R_12, R_16, 0, 0, 0 },
[INSTR_VRR_VV] = { V_8, V_12, 0, 0, 0, 0 },
[INSTR_VRR_VV0U] = { V_8, V_12, U4_32, 0, 0, 0 },
[INSTR_VRR_VV0U0U] = { V_8, V_12, U4_32, U4_24, 0, 0 },
[INSTR_VRR_VV0U2] = { V_8, V_12, U4_24, 0, 0, 0 },
[INSTR_VRR_VV0UU2] = { V_8, V_12, U4_32, U4_28, 0, 0 },
[INSTR_VRR_VV0UUU] = { V_8, V_12, U4_32, U4_28, U4_24, 0 },
[INSTR_VRR_VVV] = { V_8, V_12, V_16, 0, 0, 0 },
[INSTR_VRR_VVV0U] = { V_8, V_12, V_16, U4_32, 0, 0 },
[INSTR_VRR_VVV0U0] = { V_8, V_12, V_16, U4_24, 0, 0 },
[INSTR_VRR_VVV0U0U] = { V_8, V_12, V_16, U4_32, U4_24, 0 },
[INSTR_VRR_VVV0UU] = { V_8, V_12, V_16, U4_32, U4_28, 0 },
[INSTR_VRR_VVV0UUU] = { V_8, V_12, V_16, U4_32, U4_28, U4_24 },
[INSTR_VRR_VVV0V] = { V_8, V_12, V_16, V_32, 0, 0 },
[INSTR_VRR_VVVU0UV] = { V_8, V_12, V_16, V_32, U4_28, U4_20 },
[INSTR_VRR_VVVU0V] = { V_8, V_12, V_16, V_32, U4_20, 0 },
[INSTR_VRR_VVVUU0V] = { V_8, V_12, V_16, V_32, U4_20, U4_24 },
[INSTR_VRS_RRDV] = { V_32, R_12, D_20, B_16, 0, 0 },
[INSTR_VRS_RVRDU] = { R_8, V_12, D_20, B_16, U4_32, 0 },
[INSTR_VRS_VRRD] = { V_8, R_12, D_20, B_16, 0, 0 },
[INSTR_VRS_VRRDU] = { V_8, R_12, D_20, B_16, U4_32, 0 },
[INSTR_VRS_VVRDU] = { V_8, V_12, D_20, B_16, U4_32, 0 },
[INSTR_VRV_VVXRDU] = { V_8, D_20, VX_12, B_16, U4_32, 0 },
[INSTR_VRX_VRRDU] = { V_8, D_20, X_12, B_16, U4_32, 0 },
[INSTR_VRX_VV] = { V_8, V_12, 0, 0, 0, 0 },
[INSTR_VSI_URDV] = { V_32, D_20, B_16, U8_8, 0, 0 },
};
static char long_insn_name[][7] = LONG_INSN_INITIALIZER;
static struct s390_insn opcode[] = OPCODE_TABLE_INITIALIZER;
static struct s390_opcode_offset opcode_offset[] = OPCODE_OFFSET_INITIALIZER;
/* Extracts an operand value from an instruction. */
static unsigned int extract_operand(unsigned char *code,
const struct s390_operand *operand)
{
unsigned char *cp;
unsigned int val;
int bits;
/* Extract fragments of the operand byte by byte. */
cp = code + operand->shift / 8;
bits = (operand->shift & 7) + operand->bits;
val = 0;
do {
val <<= 8;
val |= (unsigned int) *cp++;
bits -= 8;
} while (bits > 0);
val >>= -bits;
val &= ((1U << (operand->bits - 1)) << 1) - 1;
/* Check for special long displacement case. */
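/* The 20 extracted bits contain DL (12 bits) followed by DH (8 bits);
* the architected displacement is the signed value DH:DL, hence the
* swap below.
*/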
if (operand->bits == 20 && operand->shift == 20)
val = (val & 0xff) << 12 | (val & 0xfff00) >> 8;
/* Check for register extension bits for vector registers. */
if (operand->flags & OPERAND_VR) {
if (operand->shift == 8)
val |= (code[4] & 8) << 1;
else if (operand->shift == 12)
val |= (code[4] & 4) << 2;
else if (operand->shift == 16)
val |= (code[4] & 2) << 3;
else if (operand->shift == 32)
val |= (code[4] & 1) << 4;
}
/* Sign extend value if the operand is signed or pc relative. */
if ((operand->flags & (OPERAND_SIGNED | OPERAND_PCREL)) &&
(val & (1U << (operand->bits - 1))))
val |= (-1U << (operand->bits - 1)) << 1;
/* Double value if the operand is pc relative. */
if (operand->flags & OPERAND_PCREL)
val <<= 1;
/* Length x in an instruction has real length x + 1. */
if (operand->flags & OPERAND_LENGTH)
val++;
return val;
}
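/* Look up the decode table entry for an instruction. The first opcode
* byte selects an entry in opcode_offset[], which describes where the
* second opcode fragment is located and which slice of the opcode[]
* table covers it. Returns the matching entry or NULL if the opcode
* fragment is unknown.
*/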
struct s390_insn *find_insn(unsigned char *code)
{
struct s390_opcode_offset *entry;
struct s390_insn *insn;
unsigned char opfrag;
int i;
/* Search the opcode offset table to find an entry which
* matches the beginning of the opcode. If there is no match
* the last entry will be used, which is the default entry for
* unknown instructions as well as 1-byte opcode instructions.
*/
for (i = 0; i < ARRAY_SIZE(opcode_offset); i++) {
entry = &opcode_offset[i];
if (entry->opcode == code[0])
break;
}
opfrag = *(code + entry->byte) & entry->mask;
insn = &opcode[entry->offset];
for (i = 0; i < entry->count; i++) {
if (insn->opfrag == opfrag)
return insn;
insn++;
}
return NULL;
}
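/* Disassemble a single instruction into the given buffer: print the
* mnemonic (short names are embedded in the table entry, long names
* come from long_insn_name[]) followed by the operands described by
* formats[insn->format]. Returns the number of characters written.
*/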
static int print_insn(char *buffer, unsigned char *code, unsigned long addr)
{
struct s390_insn *insn;
const unsigned char *ops;
const struct s390_operand *operand;
unsigned int value;
char separator;
char *ptr;
int i;
ptr = buffer;
insn = find_insn(code);
if (insn) {
if (insn->zero == 0)
ptr += sprintf(ptr, "%.7s\t",
long_insn_name[insn->offset]);
else
ptr += sprintf(ptr, "%.5s\t", insn->name);
/* Extract the operands. */
separator = 0;
for (ops = formats[insn->format], i = 0;
*ops != 0 && i < 6; ops++, i++) {
operand = operands + *ops;
value = extract_operand(code, operand);
if ((operand->flags & OPERAND_INDEX) && value == 0)
continue;
if ((operand->flags & OPERAND_BASE) &&
value == 0 && separator == '(') {
separator = ',';
continue;
}
if (separator)
ptr += sprintf(ptr, "%c", separator);
if (operand->flags & OPERAND_GPR)
ptr += sprintf(ptr, "%%r%i", value);
else if (operand->flags & OPERAND_FPR)
ptr += sprintf(ptr, "%%f%i", value);
else if (operand->flags & OPERAND_AR)
ptr += sprintf(ptr, "%%a%i", value);
else if (operand->flags & OPERAND_CR)
ptr += sprintf(ptr, "%%c%i", value);
else if (operand->flags & OPERAND_VR)
ptr += sprintf(ptr, "%%v%i", value);
else if (operand->flags & OPERAND_PCREL) {
void *pcrel = (void *)((int)value + addr);
ptr += sprintf(ptr, "%px", pcrel);
} else if (operand->flags & OPERAND_SIGNED)
ptr += sprintf(ptr, "%i", value);
else
ptr += sprintf(ptr, "%u", value);
if (operand->flags & OPERAND_DISP)
separator = '(';
else if (operand->flags & OPERAND_BASE) {
ptr += sprintf(ptr, ")");
separator = ',';
} else
separator = ',';
}
} else
ptr += sprintf(ptr, "unknown");
return (int) (ptr - buffer);
}
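/* Copy bytes from the address space the interrupted context was
* running in: user space if the PSW indicates user mode, otherwise
* kernel space via a non-faulting access. Returns 0 or -EFAULT.
*/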
static int copy_from_regs(struct pt_regs *regs, void *dst, void *src, int len)
{
if (user_mode(regs)) {
if (copy_from_user(dst, (char __user *)src, len))
return -EFAULT;
} else {
if (copy_from_kernel_nofault(dst, src, len))
return -EFAULT;
}
return 0;
}
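/* Dump the code surrounding the faulting PSW address: take a 64-byte
* snapshot centred on regs->psw.addr, search for a plausible
* instruction boundary and disassemble up to eight instructions. The
* instruction at the PSW is marked with '>', its predecessor with '#'.
*/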
void show_code(struct pt_regs *regs)
{
char *mode = user_mode(regs) ? "User" : "Krnl";
unsigned char code[64];
char buffer[128], *ptr;
unsigned long addr;
int start, end, opsize, hops, i;
/* Get a snapshot of the 64 bytes surrounding the fault address. */
for (start = 32; start && regs->psw.addr >= 34 - start; start -= 2) {
addr = regs->psw.addr - 34 + start;
if (copy_from_regs(regs, code + start - 2, (void *)addr, 2))
break;
}
for (end = 32; end < 64; end += 2) {
addr = regs->psw.addr + end - 32;
if (copy_from_regs(regs, code + end, (void *)addr, 2))
break;
}
/* Code snapshot usable ? */
if ((regs->psw.addr & 1) || start >= end) {
printk("%s Code: Bad PSW.\n", mode);
return;
}
/* Find a starting point for the disassembly. */
while (start < 32) {
for (i = 0, hops = 0; start + i < 32 && hops < 3; hops++) {
if (!find_insn(code + start + i))
break;
i += insn_length(code[start + i]);
}
if (start + i == 32)
/* Looks good, sequence ends at PSW. */
break;
start += 2;
}
/* Decode the instructions. */
ptr = buffer;
ptr += sprintf(ptr, "%s Code:", mode);
hops = 0;
while (start < end && hops < 8) {
opsize = insn_length(code[start]);
if (start + opsize == 32)
*ptr++ = '#';
else if (start == 32)
*ptr++ = '>';
else
*ptr++ = ' ';
addr = regs->psw.addr + start - 32;
ptr += sprintf(ptr, "%px: ", (void *)addr);
if (start + opsize >= end)
break;
for (i = 0; i < opsize; i++)
ptr += sprintf(ptr, "%02x", code[start + i]);
*ptr++ = '\t';
if (i < 6)
*ptr++ = '\t';
ptr += print_insn(ptr, code + start, addr);
start += opsize;
pr_cont("%s", buffer);
ptr = buffer;
ptr += sprintf(ptr, "\n ");
hops++;
}
pr_cont("\n");
}
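/* Disassemble and print a range of kernel code, one instruction per
* line. Stops early when the next instruction would extend past the
* given length.
*/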
void print_fn_code(unsigned char *code, unsigned long len)
{
char buffer[128], *ptr;
int opsize, i;
while (len) {
ptr = buffer;
opsize = insn_length(*code);
if (opsize > len)
break;
ptr += sprintf(ptr, "%px: ", code);
for (i = 0; i < opsize; i++)
ptr += sprintf(ptr, "%02x", code[i]);
*ptr++ = '\t';
if (i < 4)
*ptr++ = '\t';
ptr += print_insn(ptr, code, (unsigned long) code);
*ptr++ = '\n';
*ptr++ = 0;
printk("%s", buffer);
code += opsize;
len -= opsize;
}
}
| linux-master | arch/s390/kernel/dis.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Stack dumping functions
*
* Copyright IBM Corp. 1999, 2013
*/
#include <linux/kallsyms.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/utsname.h>
#include <linux/export.h>
#include <linux/kdebug.h>
#include <linux/ptrace.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <asm/processor.h>
#include <asm/debug.h>
#include <asm/dis.h>
#include <asm/ipl.h>
#include <asm/unwind.h>
const char *stack_type_name(enum stack_type type)
{
switch (type) {
case STACK_TYPE_TASK:
return "task";
case STACK_TYPE_IRQ:
return "irq";
case STACK_TYPE_NODAT:
return "nodat";
case STACK_TYPE_RESTART:
return "restart";
case STACK_TYPE_MCCK:
return "mcck";
default:
return "unknown";
}
}
EXPORT_SYMBOL_GPL(stack_type_name);
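/* Check whether the stack pointer lies within the given stack area
* and, if so, record the stack type and boundaries for the unwinder.
*/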
static inline bool in_stack(unsigned long sp, struct stack_info *info,
enum stack_type type, unsigned long stack)
{
if (sp < stack || sp >= stack + THREAD_SIZE)
return false;
info->type = type;
info->begin = stack;
info->end = stack + THREAD_SIZE;
return true;
}
static bool in_task_stack(unsigned long sp, struct task_struct *task,
struct stack_info *info)
{
unsigned long stack = (unsigned long)task_stack_page(task);
return in_stack(sp, info, STACK_TYPE_TASK, stack);
}
static bool in_irq_stack(unsigned long sp, struct stack_info *info)
{
unsigned long stack = S390_lowcore.async_stack - STACK_INIT_OFFSET;
return in_stack(sp, info, STACK_TYPE_IRQ, stack);
}
static bool in_nodat_stack(unsigned long sp, struct stack_info *info)
{
unsigned long stack = S390_lowcore.nodat_stack - STACK_INIT_OFFSET;
return in_stack(sp, info, STACK_TYPE_NODAT, stack);
}
static bool in_mcck_stack(unsigned long sp, struct stack_info *info)
{
unsigned long stack = S390_lowcore.mcck_stack - STACK_INIT_OFFSET;
return in_stack(sp, info, STACK_TYPE_MCCK, stack);
}
static bool in_restart_stack(unsigned long sp, struct stack_info *info)
{
unsigned long stack = S390_lowcore.restart_stack - STACK_INIT_OFFSET;
return in_stack(sp, info, STACK_TYPE_RESTART, stack);
}
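/* Classify the stack a stack pointer belongs to: try the task stack
* first and, for the current task, also the per-CPU irq, nodat,
* restart and machine check stacks. The visit_mask ensures each stack
* type is entered at most once, so a corrupted backchain cannot make
* the unwinder loop forever.
*/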
int get_stack_info(unsigned long sp, struct task_struct *task,
struct stack_info *info, unsigned long *visit_mask)
{
if (!sp)
goto unknown;
/* Sanity check: ABI requires SP to be aligned 8 bytes. */
if (sp & 0x7)
goto unknown;
/* Check per-task stack */
if (in_task_stack(sp, task, info))
goto recursion_check;
if (task != current)
goto unknown;
/* Check per-cpu stacks */
if (!in_irq_stack(sp, info) &&
!in_nodat_stack(sp, info) &&
!in_restart_stack(sp, info) &&
!in_mcck_stack(sp, info))
goto unknown;
recursion_check:
/*
* Make sure we don't iterate through any given stack more than once.
* If it comes up a second time then there's something wrong going on:
* just break out and report an unknown stack type.
*/
if (*visit_mask & (1UL << info->type))
goto unknown;
*visit_mask |= 1UL << info->type;
return 0;
unknown:
info->type = STACK_TYPE_UNKNOWN;
return -EINVAL;
}
void show_stack(struct task_struct *task, unsigned long *stack,
const char *loglvl)
{
struct unwind_state state;
printk("%sCall Trace:\n", loglvl);
unwind_for_each_frame(&state, task, NULL, (unsigned long) stack)
printk(state.reliable ? "%s [<%016lx>] %pSR \n" :
"%s([<%016lx>] %pSR)\n",
loglvl, state.ip, (void *) state.ip);
debug_show_held_locks(task ? : current);
}
static void show_last_breaking_event(struct pt_regs *regs)
{
printk("Last Breaking-Event-Address:\n");
printk(" [<%016lx>] ", regs->last_break);
if (user_mode(regs)) {
print_vma_addr(KERN_CONT, regs->last_break);
pr_cont("\n");
} else {
pr_cont("%pSR\n", (void *)regs->last_break);
}
}
void show_registers(struct pt_regs *regs)
{
struct psw_bits *psw = &psw_bits(regs->psw);
char *mode;
mode = user_mode(regs) ? "User" : "Krnl";
printk("%s PSW : %px %px", mode, (void *)regs->psw.mask, (void *)regs->psw.addr);
if (!user_mode(regs))
pr_cont(" (%pSR)", (void *)regs->psw.addr);
pr_cont("\n");
printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
"P:%x AS:%x CC:%x PM:%x", psw->per, psw->dat, psw->io, psw->ext,
psw->key, psw->mcheck, psw->wait, psw->pstate, psw->as, psw->cc, psw->pm);
pr_cont(" RI:%x EA:%x\n", psw->ri, psw->eaba);
printk("%s GPRS: %016lx %016lx %016lx %016lx\n", mode,
regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
printk(" %016lx %016lx %016lx %016lx\n",
regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
printk(" %016lx %016lx %016lx %016lx\n",
regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
printk(" %016lx %016lx %016lx %016lx\n",
regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
show_code(regs);
}
void show_regs(struct pt_regs *regs)
{
show_regs_print_info(KERN_DEFAULT);
show_registers(regs);
/* Show stack backtrace if pt_regs is from kernel mode */
if (!user_mode(regs))
show_stack(NULL, (unsigned long *) regs->gprs[15], KERN_DEFAULT);
show_last_breaking_event(regs);
}
static DEFINE_SPINLOCK(die_lock);
void __noreturn die(struct pt_regs *regs, const char *str)
{
static int die_counter;
oops_enter();
lgr_info_log();
debug_stop_all();
console_verbose();
spin_lock_irq(&die_lock);
bust_spinlocks(1);
printk("%s: %04x ilc:%d [#%d] ", str, regs->int_code & 0xffff,
regs->int_code >> 17, ++die_counter);
#ifdef CONFIG_PREEMPT
pr_cont("PREEMPT ");
#elif defined(CONFIG_PREEMPT_RT)
pr_cont("PREEMPT_RT ");
#endif
pr_cont("SMP ");
if (debug_pagealloc_enabled())
pr_cont("DEBUG_PAGEALLOC");
pr_cont("\n");
notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV);
print_modules();
show_regs(regs);
bust_spinlocks(0);
add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
spin_unlock_irq(&die_lock);
if (in_interrupt())
panic("Fatal exception in interrupt");
if (panic_on_oops)
panic("Fatal exception: panic_on_oops");
oops_exit();
make_task_dead(SIGSEGV);
}
| linux-master | arch/s390/kernel/dumpstack.c |
// SPDX-License-Identifier: GPL-2.0
/*
* NUMA support for s390
*
* Implement NUMA core code.
*
* Copyright IBM Corp. 2015
*/
#include <linux/kernel.h>
#include <linux/mmzone.h>
#include <linux/cpumask.h>
#include <linux/memblock.h>
#include <linux/node.h>
#include <asm/numa.h>
struct pglist_data *node_data[MAX_NUMNODES];
EXPORT_SYMBOL(node_data);
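/* Minimal (flat) NUMA setup: a pg_data_t is allocated from memblock
* for every possible node id, but only node 0 is marked possible and
* online, and it spans all of memory.
*/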
void __init numa_setup(void)
{
int nid;
nodes_clear(node_possible_map);
node_set(0, node_possible_map);
node_set_online(0);
for (nid = 0; nid < MAX_NUMNODES; nid++) {
NODE_DATA(nid) = memblock_alloc(sizeof(pg_data_t), 8);
if (!NODE_DATA(nid))
panic("%s: Failed to allocate %zu bytes align=0x%x\n",
__func__, sizeof(pg_data_t), 8);
}
NODE_DATA(0)->node_spanned_pages = memblock_end_of_DRAM() >> PAGE_SHIFT;
NODE_DATA(0)->node_id = 0;
}
| linux-master | arch/s390/kernel/numa.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Stack trace management functions
*
* Copyright IBM Corp. 2006
*/
#include <linux/stacktrace.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>
#include <asm/kprobes.h>
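/* Walk the stack with the s390 unwinder and feed each return address
* to the generic stacktrace consumer until it asks to stop or the
* unwind terminates.
*/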
void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
struct task_struct *task, struct pt_regs *regs)
{
struct unwind_state state;
unsigned long addr;
unwind_for_each_frame(&state, task, regs, 0) {
addr = unwind_get_return_address(&state);
if (!addr || !consume_entry(cookie, addr))
break;
}
}
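/* Reliable variant (needed e.g. by live patching): the trace is
* rejected if it leaves the task stack, crosses an exception frame,
* yields a zero return address, runs through the rethook trampoline
* or ends with an unwinder error.
*/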
int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
void *cookie, struct task_struct *task)
{
struct unwind_state state;
unsigned long addr;
unwind_for_each_frame(&state, task, NULL, 0) {
if (state.stack_info.type != STACK_TYPE_TASK)
return -EINVAL;
if (state.regs)
return -EINVAL;
addr = unwind_get_return_address(&state);
if (!addr)
return -EINVAL;
#ifdef CONFIG_RETHOOK
/*
* Mark stacktraces with krethook functions on them
* as unreliable.
*/
if (state.ip == (unsigned long)arch_rethook_trampoline)
return -EINVAL;
#endif
if (!consume_entry(cookie, addr))
return -EINVAL;
}
/* Check for stack corruption */
if (unwind_error(&state))
return -EINVAL;
return 0;
}
| linux-master | arch/s390/kernel/stacktrace.c |
// SPDX-License-Identifier: GPL-2.0
/*
* s390 code for kexec_file_load system call
*
* Copyright IBM Corp. 2018
*
* Author(s): Philipp Rudo <[email protected]>
*/
#define pr_fmt(fmt) "kexec: " fmt
#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/module_signature.h>
#include <linux/verification.h>
#include <linux/vmalloc.h>
#include <asm/boot_data.h>
#include <asm/ipl.h>
#include <asm/setup.h>
const struct kexec_file_ops * const kexec_file_loaders[] = {
&s390_kexec_elf_ops,
&s390_kexec_image_ops,
NULL,
};
#ifdef CONFIG_KEXEC_SIG
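/* Verify the PKCS#7 signature appended to the kernel image when the
* system was IPLed in secure mode. Seen from the end of the image the
* layout is: MODULE_SIG_STRING marker, struct module_signature,
* PKCS#7 signature blob, kernel data.
*/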
int s390_verify_sig(const char *kernel, unsigned long kernel_len)
{
const unsigned long marker_len = sizeof(MODULE_SIG_STRING) - 1;
struct module_signature *ms;
unsigned long sig_len;
int ret;
/* Skip signature verification when not secure IPLed. */
if (!ipl_secure_flag)
return 0;
if (marker_len > kernel_len)
return -EKEYREJECTED;
if (memcmp(kernel + kernel_len - marker_len, MODULE_SIG_STRING,
marker_len))
return -EKEYREJECTED;
kernel_len -= marker_len;
ms = (void *)kernel + kernel_len - sizeof(*ms);
kernel_len -= sizeof(*ms);
sig_len = be32_to_cpu(ms->sig_len);
if (sig_len >= kernel_len)
return -EKEYREJECTED;
kernel_len -= sig_len;
if (ms->id_type != PKEY_ID_PKCS7)
return -EKEYREJECTED;
if (ms->algo != 0 ||
ms->hash != 0 ||
ms->signer_len != 0 ||
ms->key_id_len != 0 ||
ms->__pad[0] != 0 ||
ms->__pad[1] != 0 ||
ms->__pad[2] != 0) {
return -EBADMSG;
}
ret = verify_pkcs7_signature(kernel, kernel_len,
kernel + kernel_len, sig_len,
VERIFY_USE_SECONDARY_KEYRING,
VERIFYING_MODULE_SIGNATURE,
NULL, NULL);
if (ret == -ENOKEY && IS_ENABLED(CONFIG_INTEGRITY_PLATFORM_KEYRING))
ret = verify_pkcs7_signature(kernel, kernel_len,
kernel + kernel_len, sig_len,
VERIFY_USE_PLATFORM_KEYRING,
VERIFYING_MODULE_SIGNATURE,
NULL, NULL);
return ret;
}
#endif /* CONFIG_KEXEC_SIG */
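/* Tell the purgatory code where to jump to: pass the kernel entry
* point, the image type and, for crash kernels, the location and size
* of the crash memory range via the purgatory's exported symbols.
*/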
static int kexec_file_update_purgatory(struct kimage *image,
struct s390_load_data *data)
{
u64 entry, type;
int ret;
if (image->type == KEXEC_TYPE_CRASH) {
entry = STARTUP_KDUMP_OFFSET;
type = KEXEC_TYPE_CRASH;
} else {
entry = STARTUP_NORMAL_OFFSET;
type = KEXEC_TYPE_DEFAULT;
}
ret = kexec_purgatory_get_set_symbol(image, "kernel_entry", &entry,
sizeof(entry), false);
if (ret)
return ret;
ret = kexec_purgatory_get_set_symbol(image, "kernel_type", &type,
sizeof(type), false);
if (ret)
return ret;
if (image->type == KEXEC_TYPE_CRASH) {
u64 crash_size;
ret = kexec_purgatory_get_set_symbol(image, "crash_start",
&crashk_res.start,
sizeof(crashk_res.start),
false);
if (ret)
return ret;
crash_size = crashk_res.end - crashk_res.start + 1;
ret = kexec_purgatory_get_set_symbol(image, "crash_size",
&crash_size,
sizeof(crash_size),
false);
}
return ret;
}
static int kexec_file_add_purgatory(struct kimage *image,
struct s390_load_data *data)
{
struct kexec_buf buf;
int ret;
buf.image = image;
data->memsz = ALIGN(data->memsz, PAGE_SIZE);
buf.mem = data->memsz;
if (image->type == KEXEC_TYPE_CRASH)
buf.mem += crashk_res.start;
ret = kexec_load_purgatory(image, &buf);
if (ret)
return ret;
data->memsz += buf.memsz;
return kexec_file_update_purgatory(image, data);
}
static int kexec_file_add_initrd(struct kimage *image,
struct s390_load_data *data)
{
struct kexec_buf buf;
int ret;
buf.image = image;
buf.buffer = image->initrd_buf;
buf.bufsz = image->initrd_buf_len;
data->memsz = ALIGN(data->memsz, PAGE_SIZE);
buf.mem = data->memsz;
if (image->type == KEXEC_TYPE_CRASH)
buf.mem += crashk_res.start;
buf.memsz = buf.bufsz;
data->parm->initrd_start = data->memsz;
data->parm->initrd_size = buf.memsz;
data->memsz += buf.memsz;
ret = kexec_add_buffer(&buf);
if (ret)
return ret;
return ipl_report_add_component(data->report, &buf, 0, 0);
}
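/* Append the IPL report: walk the certificate list stored at boot,
* add each certificate to the report, place the finished report
* behind the already loaded components and patch its address into the
* ipl_parmblock_ptr slot of the new kernel's lowcore.
*/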
static int kexec_file_add_ipl_report(struct kimage *image,
struct s390_load_data *data)
{
__u32 *lc_ipl_parmblock_ptr;
unsigned int len, ncerts;
struct kexec_buf buf;
unsigned long addr;
void *ptr, *end;
int ret;
buf.image = image;
data->memsz = ALIGN(data->memsz, PAGE_SIZE);
buf.mem = data->memsz;
ptr = __va(ipl_cert_list_addr);
end = ptr + ipl_cert_list_size;
ncerts = 0;
while (ptr < end) {
ncerts++;
len = *(unsigned int *)ptr;
ptr += sizeof(len);
ptr += len;
}
addr = data->memsz + data->report->size;
addr += ncerts * sizeof(struct ipl_rb_certificate_entry);
ptr = __va(ipl_cert_list_addr);
while (ptr < end) {
len = *(unsigned int *)ptr;
ptr += sizeof(len);
ipl_report_add_certificate(data->report, ptr, addr, len);
addr += len;
ptr += len;
}
ret = -ENOMEM;
buf.buffer = ipl_report_finish(data->report);
if (!buf.buffer)
goto out;
buf.bufsz = data->report->size;
buf.memsz = buf.bufsz;
image->arch.ipl_buf = buf.buffer;
data->memsz += buf.memsz;
lc_ipl_parmblock_ptr =
data->kernel_buf + offsetof(struct lowcore, ipl_parmblock_ptr);
*lc_ipl_parmblock_ptr = (__u32)buf.mem;
if (image->type == KEXEC_TYPE_CRASH)
buf.mem += crashk_res.start;
ret = kexec_add_buffer(&buf);
out:
return ret;
}
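/* Common path for the ELF and raw image loaders: let the caller add
* the kernel, validate and copy the command line, add the optional
* initrd, the purgatory and the IPL report, and store a restart PSW
* at address 0 if the kernel is loaded there.
*/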
void *kexec_file_add_components(struct kimage *image,
int (*add_kernel)(struct kimage *image,
struct s390_load_data *data))
{
unsigned long max_command_line_size = LEGACY_COMMAND_LINE_SIZE;
struct s390_load_data data = {0};
unsigned long minsize;
int ret;
data.report = ipl_report_init(&ipl_block);
if (IS_ERR(data.report))
return data.report;
ret = add_kernel(image, &data);
if (ret)
goto out;
ret = -EINVAL;
minsize = PARMAREA + offsetof(struct parmarea, command_line);
if (image->kernel_buf_len < minsize)
goto out;
if (data.parm->max_command_line_size)
max_command_line_size = data.parm->max_command_line_size;
if (minsize + max_command_line_size < minsize)
goto out;
if (image->kernel_buf_len < minsize + max_command_line_size)
goto out;
if (image->cmdline_buf_len >= max_command_line_size)
goto out;
memcpy(data.parm->command_line, image->cmdline_buf,
image->cmdline_buf_len);
if (image->type == KEXEC_TYPE_CRASH) {
data.parm->oldmem_base = crashk_res.start;
data.parm->oldmem_size = crashk_res.end - crashk_res.start + 1;
}
if (image->initrd_buf) {
ret = kexec_file_add_initrd(image, &data);
if (ret)
goto out;
}
ret = kexec_file_add_purgatory(image, &data);
if (ret)
goto out;
if (data.kernel_mem == 0) {
unsigned long restart_psw = 0x0008000080000000UL;
restart_psw += image->start;
memcpy(data.kernel_buf, &restart_psw, sizeof(restart_psw));
image->start = 0;
}
ret = kexec_file_add_ipl_report(image, &data);
out:
ipl_report_free(data.report);
return ERR_PTR(ret);
}
int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
Elf_Shdr *section,
const Elf_Shdr *relsec,
const Elf_Shdr *symtab)
{
const char *strtab, *name, *shstrtab;
const Elf_Shdr *sechdrs;
Elf_Rela *relas;
int i, r_type;
int ret;
/* String & section header string table */
sechdrs = (void *)pi->ehdr + pi->ehdr->e_shoff;
strtab = (char *)pi->ehdr + sechdrs[symtab->sh_link].sh_offset;
shstrtab = (char *)pi->ehdr + sechdrs[pi->ehdr->e_shstrndx].sh_offset;
relas = (void *)pi->ehdr + relsec->sh_offset;
for (i = 0; i < relsec->sh_size / sizeof(*relas); i++) {
const Elf_Sym *sym; /* symbol to relocate */
unsigned long addr; /* final location after relocation */
unsigned long val; /* relocated symbol value */
void *loc; /* tmp location to modify */
sym = (void *)pi->ehdr + symtab->sh_offset;
sym += ELF64_R_SYM(relas[i].r_info);
if (sym->st_name)
name = strtab + sym->st_name;
else
name = shstrtab + sechdrs[sym->st_shndx].sh_name;
if (sym->st_shndx == SHN_UNDEF) {
pr_err("Undefined symbol: %s\n", name);
return -ENOEXEC;
}
if (sym->st_shndx == SHN_COMMON) {
pr_err("symbol '%s' in common section\n", name);
return -ENOEXEC;
}
if (sym->st_shndx >= pi->ehdr->e_shnum &&
sym->st_shndx != SHN_ABS) {
pr_err("Invalid section %d for symbol %s\n",
sym->st_shndx, name);
return -ENOEXEC;
}
loc = pi->purgatory_buf;
loc += section->sh_offset;
loc += relas[i].r_offset;
val = sym->st_value;
if (sym->st_shndx != SHN_ABS)
val += pi->sechdrs[sym->st_shndx].sh_addr;
val += relas[i].r_addend;
addr = section->sh_addr + relas[i].r_offset;
r_type = ELF64_R_TYPE(relas[i].r_info);
if (r_type == R_390_PLT32DBL)
r_type = R_390_PC32DBL;
ret = arch_kexec_do_relocs(r_type, loc, val, addr);
if (ret) {
pr_err("Unknown rela relocation: %d\n", r_type);
return -ENOEXEC;
}
}
return 0;
}
int arch_kimage_file_post_load_cleanup(struct kimage *image)
{
vfree(image->arch.ipl_buf);
image->arch.ipl_buf = NULL;
return kexec_image_post_load_cleanup_default(image);
}
| linux-master | arch/s390/kernel/machine_kexec_file.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Performance event support - Processor Activity Instrumentation Facility
*
* Copyright IBM Corp. 2022
* Author(s): Thomas Richter <[email protected]>
*/
#define KMSG_COMPONENT "pai_crypto"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/perf_event.h>
#include <asm/ctl_reg.h>
#include <asm/pai.h>
#include <asm/debug.h>
static debug_info_t *cfm_dbg;
static unsigned int paicrypt_cnt; /* Size of the mapped counter sets */
/* extracted with QPACI instruction */
DEFINE_STATIC_KEY_FALSE(pai_key);
struct pai_userdata {
u16 num;
u64 value;
} __packed;
struct paicrypt_map {
unsigned long *page; /* Page for CPU to store counters */
struct pai_userdata *save; /* Page to store no-zero counters */
unsigned int active_events; /* # of PAI crypto users */
refcount_t refcnt; /* Reference count mapped buffers */
enum paievt_mode mode; /* Type of event */
struct perf_event *event; /* Perf event for sampling */
};
static DEFINE_PER_CPU(struct paicrypt_map, paicrypt_map);
/* Release the PMU if event is the last perf event */
static DEFINE_MUTEX(pai_reserve_mutex);
/* Adjust usage counters and remove allocated memory when all users are
* gone.
*/
static void paicrypt_event_destroy(struct perf_event *event)
{
struct paicrypt_map *cpump = per_cpu_ptr(&paicrypt_map, event->cpu);
cpump->event = NULL;
static_branch_dec(&pai_key);
mutex_lock(&pai_reserve_mutex);
debug_sprintf_event(cfm_dbg, 5, "%s event %#llx cpu %d users %d"
" mode %d refcnt %u\n", __func__,
event->attr.config, event->cpu,
cpump->active_events, cpump->mode,
refcount_read(&cpump->refcnt));
if (refcount_dec_and_test(&cpump->refcnt)) {
debug_sprintf_event(cfm_dbg, 4, "%s page %#lx save %p\n",
__func__, (unsigned long)cpump->page,
cpump->save);
free_page((unsigned long)cpump->page);
cpump->page = NULL;
kvfree(cpump->save);
cpump->save = NULL;
cpump->mode = PAI_MODE_NONE;
}
mutex_unlock(&pai_reserve_mutex);
}
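/* Read one counter from the mapped counter page. The page holds the
* user space counters in its first half and the kernel space counters
* starting at offset PAI_CRYPTO_MAXCTR in its second half.
*/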
static u64 paicrypt_getctr(struct paicrypt_map *cpump, int nr, bool kernel)
{
if (kernel)
nr += PAI_CRYPTO_MAXCTR;
return cpump->page[nr];
}
/* Read the counter values. Return the value from its location in the
* mapped counter page. For event CRYPTO_ALL sum up all events.
*/
static u64 paicrypt_getdata(struct perf_event *event, bool kernel)
{
struct paicrypt_map *cpump = this_cpu_ptr(&paicrypt_map);
u64 sum = 0;
int i;
if (event->attr.config != PAI_CRYPTO_BASE) {
return paicrypt_getctr(cpump,
event->attr.config - PAI_CRYPTO_BASE,
kernel);
}
for (i = 1; i <= paicrypt_cnt; i++) {
u64 val = paicrypt_getctr(cpump, i, kernel);
if (!val)
continue;
sum += val;
}
return sum;
}
static u64 paicrypt_getall(struct perf_event *event)
{
u64 sum = 0;
if (!event->attr.exclude_kernel)
sum += paicrypt_getdata(event, true);
if (!event->attr.exclude_user)
sum += paicrypt_getdata(event, false);
return sum;
}
/* Used to avoid races in checking concurrent access of counting and
* sampling for crypto events
*
* Only one instance of event pai_crypto/CRYPTO_ALL/ for sampling is
* allowed and when this event is running, no counting event is allowed.
* Several counting events are allowed in parallel, but no sampling event
* is allowed while one (or more) counting events are running.
*
* This function is called in process context and it is safe to block.
* When the event initialization function fails, no other callback will
* be invoked.
*
* Allocate the memory for the event.
*/
static int paicrypt_busy(struct perf_event_attr *a, struct paicrypt_map *cpump)
{
int rc = 0;
mutex_lock(&pai_reserve_mutex);
if (a->sample_period) { /* Sampling requested */
if (cpump->mode != PAI_MODE_NONE)
rc = -EBUSY; /* ... sampling/counting active */
} else { /* Counting requested */
if (cpump->mode == PAI_MODE_SAMPLING)
rc = -EBUSY; /* ... and sampling active */
}
if (rc)
goto unlock;
/* Allocate memory for counter page and counter extraction.
* Only the first counting event has to allocate a page.
*/
if (cpump->page) {
refcount_inc(&cpump->refcnt);
goto unlock;
}
rc = -ENOMEM;
cpump->page = (unsigned long *)get_zeroed_page(GFP_KERNEL);
if (!cpump->page)
goto unlock;
cpump->save = kvmalloc_array(paicrypt_cnt + 1,
sizeof(struct pai_userdata), GFP_KERNEL);
if (!cpump->save) {
free_page((unsigned long)cpump->page);
cpump->page = NULL;
goto unlock;
}
rc = 0;
refcount_set(&cpump->refcnt, 1);
unlock:
/* If rc is non-zero, do not set mode and reference count */
if (!rc) {
cpump->mode = a->sample_period ? PAI_MODE_SAMPLING
: PAI_MODE_COUNTING;
}
debug_sprintf_event(cfm_dbg, 5, "%s sample_period %#llx users %d"
" mode %d refcnt %u page %#lx save %p rc %d\n",
__func__, a->sample_period, cpump->active_events,
cpump->mode, refcount_read(&cpump->refcnt),
(unsigned long)cpump->page, cpump->save, rc);
mutex_unlock(&pai_reserve_mutex);
return rc;
}
/* Might be called on different CPU than the one the event is intended for. */
static int paicrypt_event_init(struct perf_event *event)
{
struct perf_event_attr *a = &event->attr;
struct paicrypt_map *cpump;
int rc;
/* PAI crypto PMU registered as PERF_TYPE_RAW, check event type */
if (a->type != PERF_TYPE_RAW && event->pmu->type != a->type)
return -ENOENT;
/* PAI crypto event must be in valid range */
if (a->config < PAI_CRYPTO_BASE ||
a->config > PAI_CRYPTO_BASE + paicrypt_cnt)
return -EINVAL;
/* Allow only CPU wide operation, no process context for now. */
if (event->hw.target || event->cpu == -1)
return -ENOENT;
/* Allow only CRYPTO_ALL for sampling. */
if (a->sample_period && a->config != PAI_CRYPTO_BASE)
return -EINVAL;
cpump = per_cpu_ptr(&paicrypt_map, event->cpu);
rc = paicrypt_busy(a, cpump);
if (rc)
return rc;
/* Event initialization sets last_tag to 0. When the event is later
* deleted and re-added, do not reset the event count value to zero.
* Events are deleted and re-added when two or more events are active
* at the same time.
*/
event->hw.last_tag = 0;
cpump->event = event;
event->destroy = paicrypt_event_destroy;
if (a->sample_period) {
a->sample_period = 1;
a->freq = 0;
/* Register for paicrypt_sched_task() to be called */
event->attach_state |= PERF_ATTACH_SCHED_CB;
/* Add raw data which contain the memory mapped counters */
a->sample_type |= PERF_SAMPLE_RAW;
/* Turn off inheritance */
a->inherit = 0;
}
static_branch_inc(&pai_key);
return 0;
}
static void paicrypt_read(struct perf_event *event)
{
u64 prev, new, delta;
prev = local64_read(&event->hw.prev_count);
new = paicrypt_getall(event);
local64_set(&event->hw.prev_count, new);
delta = (prev <= new) ? new - prev
: (-1ULL - prev) + new + 1; /* overflow */
local64_add(delta, &event->count);
}
static void paicrypt_start(struct perf_event *event, int flags)
{
u64 sum;
if (!event->hw.last_tag) {
event->hw.last_tag = 1;
sum = paicrypt_getall(event); /* Get current value */
local64_set(&event->count, 0);
local64_set(&event->hw.prev_count, sum);
}
}
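/* Install the event on this CPU. The first event to become active
* publishes the counter page address in the lowcore and enables the
* facility via control register 0 bit 50; sampling events also
* register for the context switch callback.
*/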
static int paicrypt_add(struct perf_event *event, int flags)
{
struct paicrypt_map *cpump = this_cpu_ptr(&paicrypt_map);
unsigned long ccd;
if (++cpump->active_events == 1) {
ccd = virt_to_phys(cpump->page) | PAI_CRYPTO_KERNEL_OFFSET;
WRITE_ONCE(S390_lowcore.ccd, ccd);
__ctl_set_bit(0, 50);
}
cpump->event = event;
if (flags & PERF_EF_START && !event->attr.sample_period) {
/* Only counting needs initial counter value */
paicrypt_start(event, PERF_EF_RELOAD);
}
event->hw.state = 0;
if (event->attr.sample_period)
perf_sched_cb_inc(event->pmu);
return 0;
}
static void paicrypt_stop(struct perf_event *event, int flags)
{
paicrypt_read(event);
event->hw.state = PERF_HES_STOPPED;
}
static void paicrypt_del(struct perf_event *event, int flags)
{
struct paicrypt_map *cpump = this_cpu_ptr(&paicrypt_map);
if (event->attr.sample_period)
perf_sched_cb_dec(event->pmu);
if (!event->attr.sample_period)
/* Only counting needs to read counter */
paicrypt_stop(event, PERF_EF_UPDATE);
if (--cpump->active_events == 0) {
__ctl_clear_bit(0, 50);
WRITE_ONCE(S390_lowcore.ccd, 0);
}
}
/* Create raw data and save it in buffer. Returns number of bytes copied.
* Saves only positive counter entries of the form
* 2 bytes: Number of counter
* 8 bytes: Value of counter
*/
static size_t paicrypt_copy(struct pai_userdata *userdata,
struct paicrypt_map *cpump,
bool exclude_user, bool exclude_kernel)
{
int i, outidx = 0;
for (i = 1; i <= paicrypt_cnt; i++) {
u64 val = 0;
if (!exclude_kernel)
val += paicrypt_getctr(cpump, i, true);
if (!exclude_user)
val += paicrypt_getctr(cpump, i, false);
if (val) {
userdata[outidx].num = i;
userdata[outidx].value = val;
outidx++;
}
}
return outidx * sizeof(struct pai_userdata);
}
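/* Emit one perf sample that carries all counters incremented since
* the last context switch as raw data, then clear the counter page.
*/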
static int paicrypt_push_sample(void)
{
struct paicrypt_map *cpump = this_cpu_ptr(&paicrypt_map);
struct perf_event *event = cpump->event;
struct perf_sample_data data;
struct perf_raw_record raw;
struct pt_regs regs;
size_t rawsize;
int overflow;
if (!cpump->event) /* No event active */
return 0;
rawsize = paicrypt_copy(cpump->save, cpump,
cpump->event->attr.exclude_user,
cpump->event->attr.exclude_kernel);
if (!rawsize) /* No incremented counters */
return 0;
/* Setup perf sample */
memset(&regs, 0, sizeof(regs));
memset(&raw, 0, sizeof(raw));
memset(&data, 0, sizeof(data));
perf_sample_data_init(&data, 0, event->hw.last_period);
if (event->attr.sample_type & PERF_SAMPLE_TID) {
data.tid_entry.pid = task_tgid_nr(current);
data.tid_entry.tid = task_pid_nr(current);
}
if (event->attr.sample_type & PERF_SAMPLE_TIME)
data.time = event->clock();
if (event->attr.sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
data.id = event->id;
if (event->attr.sample_type & PERF_SAMPLE_CPU) {
data.cpu_entry.cpu = smp_processor_id();
data.cpu_entry.reserved = 0;
}
if (event->attr.sample_type & PERF_SAMPLE_RAW) {
raw.frag.size = rawsize;
raw.frag.data = cpump->save;
perf_sample_save_raw_data(&data, &raw);
}
overflow = perf_event_overflow(event, &data, &regs);
perf_event_update_userpage(event);
/* Clear lowcore page after read */
memset(cpump->page, 0, PAGE_SIZE);
return overflow;
}
/* Called on schedule-in and schedule-out. No access to event structure,
* but for sampling only event CRYPTO_ALL is allowed.
*/
static void paicrypt_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
{
/* We started with a clean page on event installation. So read out
* results on schedule_out and if page was dirty, clear values.
*/
if (!sched_in)
paicrypt_push_sample();
}
/* Attribute definitions for paicrypt interface. As with other CPU
* Measurement Facilities, there is one attribute per mapped counter.
* The number of mapped counters may vary per machine generation. Use
* the QUERY PROCESSOR ACTIVITY COUNTER INFORMATION (QPACI) instruction
* to determine the number of mapped counters. The instruction returns
* a positive number, which is the highest number of supported counters.
* All counters less than this number are also supported, there are no
* holes. A returned number of zero means no support for mapped counters.
*
* The identification of the counter is a unique number. The chosen range
* is 0x1000 + offset in mapped kernel page.
* All CPU Measurement Facility counter identifiers must be unique and
* the numbers from 0 to 496 are already used for the CPU Measurement
* Counter facility. Numbers 0xb0000, 0xbc000 and 0xbd000 are already
* used for the CPU Measurement Sampling facility.
*/
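/* Note: once the PMU is registered, the counters below show up as
* sysfs event attributes of the "pai_crypto" PMU (typically under
* /sys/bus/event_source/devices/pai_crypto/events/), so perf tooling
* can select them by name, for instance as pai_crypto/KM_AES_128/.
* The exact command line syntax depends on the perf tool in use and
* is not dictated by this file.
*/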
PMU_FORMAT_ATTR(event, "config:0-63");
static struct attribute *paicrypt_format_attr[] = {
&format_attr_event.attr,
NULL,
};
static struct attribute_group paicrypt_events_group = {
.name = "events",
.attrs = NULL /* Filled in attr_event_init() */
};
static struct attribute_group paicrypt_format_group = {
.name = "format",
.attrs = paicrypt_format_attr,
};
static const struct attribute_group *paicrypt_attr_groups[] = {
&paicrypt_events_group,
&paicrypt_format_group,
NULL,
};
/* Performance monitoring unit for mapped counters */
static struct pmu paicrypt = {
.task_ctx_nr = perf_invalid_context,
.event_init = paicrypt_event_init,
.add = paicrypt_add,
.del = paicrypt_del,
.start = paicrypt_start,
.stop = paicrypt_stop,
.read = paicrypt_read,
.sched_task = paicrypt_sched_task,
.attr_groups = paicrypt_attr_groups
};
/* List of symbolic PAI counter names. */
static const char * const paicrypt_ctrnames[] = {
[0] = "CRYPTO_ALL",
[1] = "KM_DEA",
[2] = "KM_TDEA_128",
[3] = "KM_TDEA_192",
[4] = "KM_ENCRYPTED_DEA",
[5] = "KM_ENCRYPTED_TDEA_128",
[6] = "KM_ENCRYPTED_TDEA_192",
[7] = "KM_AES_128",
[8] = "KM_AES_192",
[9] = "KM_AES_256",
[10] = "KM_ENCRYPTED_AES_128",
[11] = "KM_ENCRYPTED_AES_192",
[12] = "KM_ENCRYPTED_AES_256",
[13] = "KM_XTS_AES_128",
[14] = "KM_XTS_AES_256",
[15] = "KM_XTS_ENCRYPTED_AES_128",
[16] = "KM_XTS_ENCRYPTED_AES_256",
[17] = "KMC_DEA",
[18] = "KMC_TDEA_128",
[19] = "KMC_TDEA_192",
[20] = "KMC_ENCRYPTED_DEA",
[21] = "KMC_ENCRYPTED_TDEA_128",
[22] = "KMC_ENCRYPTED_TDEA_192",
[23] = "KMC_AES_128",
[24] = "KMC_AES_192",
[25] = "KMC_AES_256",
[26] = "KMC_ENCRYPTED_AES_128",
[27] = "KMC_ENCRYPTED_AES_192",
[28] = "KMC_ENCRYPTED_AES_256",
[29] = "KMC_PRNG",
[30] = "KMA_GCM_AES_128",
[31] = "KMA_GCM_AES_192",
[32] = "KMA_GCM_AES_256",
[33] = "KMA_GCM_ENCRYPTED_AES_128",
[34] = "KMA_GCM_ENCRYPTED_AES_192",
[35] = "KMA_GCM_ENCRYPTED_AES_256",
[36] = "KMF_DEA",
[37] = "KMF_TDEA_128",
[38] = "KMF_TDEA_192",
[39] = "KMF_ENCRYPTED_DEA",
[40] = "KMF_ENCRYPTED_TDEA_128",
[41] = "KMF_ENCRYPTED_TDEA_192",
[42] = "KMF_AES_128",
[43] = "KMF_AES_192",
[44] = "KMF_AES_256",
[45] = "KMF_ENCRYPTED_AES_128",
[46] = "KMF_ENCRYPTED_AES_192",
[47] = "KMF_ENCRYPTED_AES_256",
[48] = "KMCTR_DEA",
[49] = "KMCTR_TDEA_128",
[50] = "KMCTR_TDEA_192",
[51] = "KMCTR_ENCRYPTED_DEA",
[52] = "KMCTR_ENCRYPTED_TDEA_128",
[53] = "KMCTR_ENCRYPTED_TDEA_192",
[54] = "KMCTR_AES_128",
[55] = "KMCTR_AES_192",
[56] = "KMCTR_AES_256",
[57] = "KMCTR_ENCRYPTED_AES_128",
[58] = "KMCTR_ENCRYPTED_AES_192",
[59] = "KMCTR_ENCRYPTED_AES_256",
[60] = "KMO_DEA",
[61] = "KMO_TDEA_128",
[62] = "KMO_TDEA_192",
[63] = "KMO_ENCRYPTED_DEA",
[64] = "KMO_ENCRYPTED_TDEA_128",
[65] = "KMO_ENCRYPTED_TDEA_192",
[66] = "KMO_AES_128",
[67] = "KMO_AES_192",
[68] = "KMO_AES_256",
[69] = "KMO_ENCRYPTED_AES_128",
[70] = "KMO_ENCRYPTED_AES_192",
[71] = "KMO_ENCRYPTED_AES_256",
[72] = "KIMD_SHA_1",
[73] = "KIMD_SHA_256",
[74] = "KIMD_SHA_512",
[75] = "KIMD_SHA3_224",
[76] = "KIMD_SHA3_256",
[77] = "KIMD_SHA3_384",
[78] = "KIMD_SHA3_512",
[79] = "KIMD_SHAKE_128",
[80] = "KIMD_SHAKE_256",
[81] = "KIMD_GHASH",
[82] = "KLMD_SHA_1",
[83] = "KLMD_SHA_256",
[84] = "KLMD_SHA_512",
[85] = "KLMD_SHA3_224",
[86] = "KLMD_SHA3_256",
[87] = "KLMD_SHA3_384",
[88] = "KLMD_SHA3_512",
[89] = "KLMD_SHAKE_128",
[90] = "KLMD_SHAKE_256",
[91] = "KMAC_DEA",
[92] = "KMAC_TDEA_128",
[93] = "KMAC_TDEA_192",
[94] = "KMAC_ENCRYPTED_DEA",
[95] = "KMAC_ENCRYPTED_TDEA_128",
[96] = "KMAC_ENCRYPTED_TDEA_192",
[97] = "KMAC_AES_128",
[98] = "KMAC_AES_192",
[99] = "KMAC_AES_256",
[100] = "KMAC_ENCRYPTED_AES_128",
[101] = "KMAC_ENCRYPTED_AES_192",
[102] = "KMAC_ENCRYPTED_AES_256",
[103] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_DEA",
[104] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_128",
[105] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_192",
[106] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_DEA",
[107] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_128",
[108] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_192",
[109] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_128",
[110] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_192",
[111] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_256",
[112] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_128",
[113] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_192",
[114] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_256A",
[115] = "PCC_COMPUTE_XTS_PARAMETER_USING_AES_128",
[116] = "PCC_COMPUTE_XTS_PARAMETER_USING_AES_256",
[117] = "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_128",
[118] = "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_256",
[119] = "PCC_SCALAR_MULTIPLY_P256",
[120] = "PCC_SCALAR_MULTIPLY_P384",
[121] = "PCC_SCALAR_MULTIPLY_P521",
[122] = "PCC_SCALAR_MULTIPLY_ED25519",
[123] = "PCC_SCALAR_MULTIPLY_ED448",
[124] = "PCC_SCALAR_MULTIPLY_X25519",
[125] = "PCC_SCALAR_MULTIPLY_X448",
[126] = "PRNO_SHA_512_DRNG",
[127] = "PRNO_TRNG_QUERY_RAW_TO_CONDITIONED_RATIO",
[128] = "PRNO_TRNG",
[129] = "KDSA_ECDSA_VERIFY_P256",
[130] = "KDSA_ECDSA_VERIFY_P384",
[131] = "KDSA_ECDSA_VERIFY_P521",
[132] = "KDSA_ECDSA_SIGN_P256",
[133] = "KDSA_ECDSA_SIGN_P384",
[134] = "KDSA_ECDSA_SIGN_P521",
[135] = "KDSA_ENCRYPTED_ECDSA_SIGN_P256",
[136] = "KDSA_ENCRYPTED_ECDSA_SIGN_P384",
[137] = "KDSA_ENCRYPTED_ECDSA_SIGN_P521",
[138] = "KDSA_EDDSA_VERIFY_ED25519",
[139] = "KDSA_EDDSA_VERIFY_ED448",
[140] = "KDSA_EDDSA_SIGN_ED25519",
[141] = "KDSA_EDDSA_SIGN_ED448",
[142] = "KDSA_ENCRYPTED_EDDSA_SIGN_ED25519",
[143] = "KDSA_ENCRYPTED_EDDSA_SIGN_ED448",
[144] = "PCKMO_ENCRYPT_DEA_KEY",
[145] = "PCKMO_ENCRYPT_TDEA_128_KEY",
[146] = "PCKMO_ENCRYPT_TDEA_192_KEY",
[147] = "PCKMO_ENCRYPT_AES_128_KEY",
[148] = "PCKMO_ENCRYPT_AES_192_KEY",
[149] = "PCKMO_ENCRYPT_AES_256_KEY",
[150] = "PCKMO_ENCRYPT_ECC_P256_KEY",
[151] = "PCKMO_ENCRYPT_ECC_P384_KEY",
[152] = "PCKMO_ENCRYPT_ECC_P521_KEY",
[153] = "PCKMO_ENCRYPT_ECC_ED25519_KEY",
[154] = "PCKMO_ENCRYPT_ECC_ED448_KEY",
[155] = "IBM_RESERVED_155",
[156] = "IBM_RESERVED_156",
};
static void __init attr_event_free(struct attribute **attrs, int num)
{
struct perf_pmu_events_attr *pa;
int i;
for (i = 0; i < num; i++) {
struct device_attribute *dap;
dap = container_of(attrs[i], struct device_attribute, attr);
pa = container_of(dap, struct perf_pmu_events_attr, attr);
kfree(pa);
}
kfree(attrs);
}
static int __init attr_event_init_one(struct attribute **attrs, int num)
{
struct perf_pmu_events_attr *pa;
pa = kzalloc(sizeof(*pa), GFP_KERNEL);
if (!pa)
return -ENOMEM;
sysfs_attr_init(&pa->attr.attr);
pa->id = PAI_CRYPTO_BASE + num;
pa->attr.attr.name = paicrypt_ctrnames[num];
pa->attr.attr.mode = 0444;
pa->attr.show = cpumf_events_sysfs_show;
pa->attr.store = NULL;
attrs[num] = &pa->attr.attr;
return 0;
}
/* Create PMU sysfs event attributes on the fly. */
static int __init attr_event_init(void)
{
struct attribute **attrs;
int ret, i;
attrs = kmalloc_array(ARRAY_SIZE(paicrypt_ctrnames) + 1, sizeof(*attrs),
GFP_KERNEL);
if (!attrs)
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(paicrypt_ctrnames); i++) {
ret = attr_event_init_one(attrs, i);
if (ret) {
/* Free the attributes set up so far, i.e. indices 0 .. i - 1 */
attr_event_free(attrs, i);
return ret;
}
}
attrs[i] = NULL;
paicrypt_events_group.attrs = attrs;
return 0;
}
static int __init paicrypt_init(void)
{
struct qpaci_info_block ib;
int rc;
if (!test_facility(196))
return 0;
qpaci(&ib);
paicrypt_cnt = ib.num_cc;
if (paicrypt_cnt == 0)
return 0;
if (paicrypt_cnt >= PAI_CRYPTO_MAXCTR)
paicrypt_cnt = PAI_CRYPTO_MAXCTR - 1;
rc = attr_event_init(); /* Export known PAI crypto events */
if (rc) {
pr_err("Creation of PMU pai_crypto /sysfs failed\n");
return rc;
}
/* Setup s390dbf facility */
cfm_dbg = debug_register(KMSG_COMPONENT, 2, 256, 128);
if (!cfm_dbg) {
pr_err("Registration of s390dbf pai_crypto failed\n");
return -ENOMEM;
}
debug_register_view(cfm_dbg, &debug_sprintf_view);
rc = perf_pmu_register(&paicrypt, "pai_crypto", -1);
if (rc) {
pr_err("Registering the pai_crypto PMU failed with rc=%i\n",
rc);
debug_unregister_view(cfm_dbg, &debug_sprintf_view);
debug_unregister(cfm_dbg);
return rc;
}
return 0;
}
device_initcall(paicrypt_init);
| linux-master | arch/s390/kernel/perf_pai_crypto.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 1999, 2006
* Author(s): Denis Joseph Barrow ([email protected],[email protected])
*
* Based on Intel version
*
* Copyright (C) 1991, 1992 Linus Torvalds
*
* 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
*/
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/entry-common.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/tty.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <asm/ucontext.h>
#include <linux/uaccess.h>
#include <asm/lowcore.h>
#include <asm/switch_to.h>
#include <asm/vdso.h>
#include "entry.h"
/*
* Layout of an old-style signal-frame:
* -----------------------------------------
* | save area (_SIGNAL_FRAMESIZE) |
* -----------------------------------------
* | struct sigcontext |
* | oldmask |
* | _sigregs * |
* -----------------------------------------
* | _sigregs with |
* | _s390_regs_common |
* | _s390_fp_regs |
* -----------------------------------------
* | int signo |
* -----------------------------------------
* | _sigregs_ext with |
* | gprs_high 64 byte (opt) |
* | vxrs_low 128 byte (opt) |
* | vxrs_high 256 byte (opt) |
* | reserved 128 byte (opt) |
* -----------------------------------------
* | __u16 svc_insn |
* -----------------------------------------
* The svc_insn entry with the sigreturn system call opcode does not
* have a fixed position and moves if gprs_high or vxrs exist.
* Future extensions will be added to _sigregs_ext.
*/
struct sigframe
{
__u8 callee_used_stack[__SIGNAL_FRAMESIZE];
struct sigcontext sc;
_sigregs sregs;
int signo;
_sigregs_ext sregs_ext;
__u16 svc_insn; /* Offset of svc_insn is NOT fixed! */
};
/*
* Layout of an rt signal-frame:
* -----------------------------------------
* | save area (_SIGNAL_FRAMESIZE) |
* -----------------------------------------
* | svc __NR_rt_sigreturn 2 byte |
* -----------------------------------------
* | struct siginfo |
* -----------------------------------------
* | struct ucontext_extended with |
* | unsigned long uc_flags |
* | struct ucontext *uc_link |
* | stack_t uc_stack |
* | _sigregs uc_mcontext with |
* | _s390_regs_common |
* | _s390_fp_regs |
* | sigset_t uc_sigmask |
* | _sigregs_ext uc_mcontext_ext |
* | gprs_high 64 byte (opt) |
* | vxrs_low 128 byte (opt) |
* | vxrs_high 256 byte (opt)|
* | reserved 128 byte (opt) |
* -----------------------------------------
* Future extensions will be added to _sigregs_ext.
*/
struct rt_sigframe
{
__u8 callee_used_stack[__SIGNAL_FRAMESIZE];
__u16 svc_insn;
struct siginfo info;
struct ucontext_extended uc;
};
/* Store registers needed to create the signal frame */
static void store_sigregs(void)
{
save_access_regs(current->thread.acrs);
save_fpu_regs();
}
/* Load registers after signal return */
static void load_sigregs(void)
{
restore_access_regs(current->thread.acrs);
}
/* Returns non-zero on fault. */
static int save_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
{
_sigregs user_sregs;
/* Copy a 'clean' PSW mask to the user to avoid leaking
information about whether PER is currently on. */
user_sregs.regs.psw.mask = PSW_USER_BITS |
(regs->psw.mask & (PSW_MASK_USER | PSW_MASK_RI));
user_sregs.regs.psw.addr = regs->psw.addr;
memcpy(&user_sregs.regs.gprs, &regs->gprs, sizeof(sregs->regs.gprs));
memcpy(&user_sregs.regs.acrs, current->thread.acrs,
sizeof(user_sregs.regs.acrs));
fpregs_store(&user_sregs.fpregs, &current->thread.fpu);
if (__copy_to_user(sregs, &user_sregs, sizeof(_sigregs)))
return -EFAULT;
return 0;
}
static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
{
_sigregs user_sregs;
/* Always make any pending restarted system call return -EINTR */
current->restart_block.fn = do_no_restart_syscall;
if (__copy_from_user(&user_sregs, sregs, sizeof(user_sregs)))
return -EFAULT;
if (!is_ri_task(current) && (user_sregs.regs.psw.mask & PSW_MASK_RI))
return -EINVAL;
/* Test the floating-point-control word. */
if (test_fp_ctl(user_sregs.fpregs.fpc))
return -EINVAL;
/* Use regs->psw.mask instead of PSW_USER_BITS to preserve PER bit. */
regs->psw.mask = (regs->psw.mask & ~(PSW_MASK_USER | PSW_MASK_RI)) |
(user_sregs.regs.psw.mask & (PSW_MASK_USER | PSW_MASK_RI));
/* Check for invalid user address space control. */
if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_HOME)
regs->psw.mask = PSW_ASC_PRIMARY |
(regs->psw.mask & ~PSW_MASK_ASC);
/* Check for invalid amode */
if (regs->psw.mask & PSW_MASK_EA)
regs->psw.mask |= PSW_MASK_BA;
regs->psw.addr = user_sregs.regs.psw.addr;
memcpy(&regs->gprs, &user_sregs.regs.gprs, sizeof(sregs->regs.gprs));
memcpy(&current->thread.acrs, &user_sregs.regs.acrs,
sizeof(current->thread.acrs));
fpregs_load(&user_sregs.fpregs, &current->thread.fpu);
clear_pt_regs_flag(regs, PIF_SYSCALL); /* No longer in a system call */
return 0;
}
/* Returns non-zero on fault. */
static int save_sigregs_ext(struct pt_regs *regs,
_sigregs_ext __user *sregs_ext)
{
__u64 vxrs[__NUM_VXRS_LOW];
int i;
/* Save vector registers to signal stack */
if (MACHINE_HAS_VX) {
for (i = 0; i < __NUM_VXRS_LOW; i++)
vxrs[i] = current->thread.fpu.vxrs[i].low;
if (__copy_to_user(&sregs_ext->vxrs_low, vxrs,
sizeof(sregs_ext->vxrs_low)) ||
__copy_to_user(&sregs_ext->vxrs_high,
current->thread.fpu.vxrs + __NUM_VXRS_LOW,
sizeof(sregs_ext->vxrs_high)))
return -EFAULT;
}
return 0;
}
static int restore_sigregs_ext(struct pt_regs *regs,
_sigregs_ext __user *sregs_ext)
{
__u64 vxrs[__NUM_VXRS_LOW];
int i;
/* Restore vector registers from signal stack */
if (MACHINE_HAS_VX) {
if (__copy_from_user(vxrs, &sregs_ext->vxrs_low,
sizeof(sregs_ext->vxrs_low)) ||
__copy_from_user(current->thread.fpu.vxrs + __NUM_VXRS_LOW,
&sregs_ext->vxrs_high,
sizeof(sregs_ext->vxrs_high)))
return -EFAULT;
for (i = 0; i < __NUM_VXRS_LOW; i++)
current->thread.fpu.vxrs[i].low = vxrs[i];
}
return 0;
}
SYSCALL_DEFINE0(sigreturn)
{
struct pt_regs *regs = task_pt_regs(current);
struct sigframe __user *frame =
(struct sigframe __user *) regs->gprs[15];
sigset_t set;
if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE))
goto badframe;
set_current_blocked(&set);
save_fpu_regs();
if (restore_sigregs(regs, &frame->sregs))
goto badframe;
if (restore_sigregs_ext(regs, &frame->sregs_ext))
goto badframe;
load_sigregs();
return regs->gprs[2];
badframe:
force_sig(SIGSEGV);
return 0;
}
SYSCALL_DEFINE0(rt_sigreturn)
{
struct pt_regs *regs = task_pt_regs(current);
struct rt_sigframe __user *frame =
(struct rt_sigframe __user *)regs->gprs[15];
sigset_t set;
if (__copy_from_user(&set.sig, &frame->uc.uc_sigmask, sizeof(set)))
goto badframe;
set_current_blocked(&set);
if (restore_altstack(&frame->uc.uc_stack))
goto badframe;
save_fpu_regs();
if (restore_sigregs(regs, &frame->uc.uc_mcontext))
goto badframe;
if (restore_sigregs_ext(regs, &frame->uc.uc_mcontext_ext))
goto badframe;
load_sigregs();
return regs->gprs[2];
badframe:
force_sig(SIGSEGV);
return 0;
}
/*
* Determine which stack to use..
*/
static inline void __user *
get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
{
unsigned long sp;
/* Default to using normal stack */
sp = regs->gprs[15];
/* Overflow on alternate signal stack gives SIGSEGV. */
if (on_sig_stack(sp) && !on_sig_stack((sp - frame_size) & -8UL))
return (void __user *) -1UL;
/* This is the X/Open sanctioned signal stack switching. */
if (ka->sa.sa_flags & SA_ONSTACK) {
if (! sas_ss_flags(sp))
sp = current->sas_ss_sp + current->sas_ss_size;
}
return (void __user *)((sp - frame_size) & -8ul);
}
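/*
 * Illustrative note (not part of the original source): the final
 * "& -8UL" rounds the frame address down to an 8-byte boundary.
 * With hypothetical values sp = 0x3ffffffe2d6 and frame_size = 0x218:
 * sp - frame_size          = 0x3ffffffe0be
 * (sp - frame_size) & -8UL = 0x3ffffffe0b8
 * so the signal frame always starts on a doubleword boundary below
 * the chosen stack pointer.
 */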
static int setup_frame(int sig, struct k_sigaction *ka,
sigset_t *set, struct pt_regs * regs)
{
struct sigframe __user *frame;
struct sigcontext sc;
unsigned long restorer;
size_t frame_size;
/*
* gprs_high are only present for a 31-bit task running on
* a 64-bit kernel (see compat_signal.c) but the space for
* gprs_high needs to be allocated if vector registers are
* included in the signal frame on a 31-bit system.
*/
frame_size = sizeof(*frame) - sizeof(frame->sregs_ext);
if (MACHINE_HAS_VX)
frame_size += sizeof(frame->sregs_ext);
frame = get_sigframe(ka, regs, frame_size);
if (frame == (void __user *) -1UL)
return -EFAULT;
/* Set up backchain. */
if (__put_user(regs->gprs[15], (addr_t __user *) frame))
return -EFAULT;
/* Create struct sigcontext on the signal stack */
memcpy(&sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE);
sc.sregs = (_sigregs __user __force *) &frame->sregs;
if (__copy_to_user(&frame->sc, &sc, sizeof(frame->sc)))
return -EFAULT;
/* Store registers needed to create the signal frame */
store_sigregs();
/* Create _sigregs on the signal stack */
if (save_sigregs(regs, &frame->sregs))
return -EFAULT;
/* Place signal number on stack to allow backtrace from handler. */
if (__put_user(regs->gprs[2], (int __user *) &frame->signo))
return -EFAULT;
/* Create _sigregs_ext on the signal stack */
if (save_sigregs_ext(regs, &frame->sregs_ext))
return -EFAULT;
/* Set up to return from userspace. If provided, use a stub
already in userspace. */
if (ka->sa.sa_flags & SA_RESTORER)
restorer = (unsigned long) ka->sa.sa_restorer;
else
restorer = VDSO64_SYMBOL(current, sigreturn);
/* Set up registers for signal handler */
regs->gprs[14] = restorer;
regs->gprs[15] = (unsigned long) frame;
/* Force default amode and default user address space control. */
regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA |
(PSW_USER_BITS & PSW_MASK_ASC) |
(regs->psw.mask & ~PSW_MASK_ASC);
regs->psw.addr = (unsigned long) ka->sa.sa_handler;
regs->gprs[2] = sig;
regs->gprs[3] = (unsigned long) &frame->sc;
/* We forgot to include these in the sigcontext.
To avoid breaking binary compatibility, they are passed as args. */
if (sig == SIGSEGV || sig == SIGBUS || sig == SIGILL ||
sig == SIGTRAP || sig == SIGFPE) {
/* set extra registers only for synchronous signals */
regs->gprs[4] = regs->int_code & 127;
regs->gprs[5] = regs->int_parm_long;
regs->gprs[6] = current->thread.last_break;
}
return 0;
}
static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
struct pt_regs *regs)
{
struct rt_sigframe __user *frame;
unsigned long uc_flags, restorer;
size_t frame_size;
frame_size = sizeof(struct rt_sigframe) - sizeof(_sigregs_ext);
/*
* gprs_high are only present for a 31-bit task running on
* a 64-bit kernel (see compat_signal.c) but the space for
* gprs_high needs to be allocated if vector registers are
* included in the signal frame on a 31-bit system.
*/
uc_flags = 0;
if (MACHINE_HAS_VX) {
frame_size += sizeof(_sigregs_ext);
uc_flags |= UC_VXRS;
}
frame = get_sigframe(&ksig->ka, regs, frame_size);
if (frame == (void __user *) -1UL)
return -EFAULT;
/* Set up backchain. */
if (__put_user(regs->gprs[15], (addr_t __user *) frame))
return -EFAULT;
/* Set up to return from userspace. If provided, use a stub
already in userspace. */
if (ksig->ka.sa.sa_flags & SA_RESTORER)
restorer = (unsigned long) ksig->ka.sa.sa_restorer;
else
restorer = VDSO64_SYMBOL(current, rt_sigreturn);
/* Create siginfo on the signal stack */
if (copy_siginfo_to_user(&frame->info, &ksig->info))
return -EFAULT;
/* Store registers needed to create the signal frame */
store_sigregs();
/* Create ucontext on the signal stack. */
if (__put_user(uc_flags, &frame->uc.uc_flags) ||
__put_user(NULL, &frame->uc.uc_link) ||
__save_altstack(&frame->uc.uc_stack, regs->gprs[15]) ||
save_sigregs(regs, &frame->uc.uc_mcontext) ||
__copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)) ||
save_sigregs_ext(regs, &frame->uc.uc_mcontext_ext))
return -EFAULT;
/* Set up registers for signal handler */
regs->gprs[14] = restorer;
regs->gprs[15] = (unsigned long) frame;
/* Force default amode and default user address space control. */
regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA |
(PSW_USER_BITS & PSW_MASK_ASC) |
(regs->psw.mask & ~PSW_MASK_ASC);
regs->psw.addr = (unsigned long) ksig->ka.sa.sa_handler;
regs->gprs[2] = ksig->sig;
regs->gprs[3] = (unsigned long) &frame->info;
regs->gprs[4] = (unsigned long) &frame->uc;
regs->gprs[5] = current->thread.last_break;
return 0;
}
static void handle_signal(struct ksignal *ksig, sigset_t *oldset,
struct pt_regs *regs)
{
int ret;
/* Set up the stack frame */
if (ksig->ka.sa.sa_flags & SA_SIGINFO)
ret = setup_rt_frame(ksig, oldset, regs);
else
ret = setup_frame(ksig->sig, &ksig->ka, oldset, regs);
signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLE_STEP));
}
/*
* Note that 'init' is a special process: it doesn't get signals it doesn't
* want to handle. Thus you cannot kill init even with a SIGKILL even by
* mistake.
*
* Note that we go through the signals twice: once to check the signals that
* the kernel can handle, and then we build all the user-level signal handling
* stack-frames in one go after that.
*/
void arch_do_signal_or_restart(struct pt_regs *regs)
{
struct ksignal ksig;
sigset_t *oldset = sigmask_to_save();
/*
* Get signal to deliver. When running under ptrace, at this point
* the debugger may change all our registers, including the system
* call information.
*/
current->thread.system_call =
test_pt_regs_flag(regs, PIF_SYSCALL) ? regs->int_code : 0;
if (get_signal(&ksig)) {
/* Whee! Actually deliver the signal. */
if (current->thread.system_call) {
regs->int_code = current->thread.system_call;
/* Check for system call restarting. */
switch (regs->gprs[2]) {
case -ERESTART_RESTARTBLOCK:
case -ERESTARTNOHAND:
regs->gprs[2] = -EINTR;
break;
case -ERESTARTSYS:
if (!(ksig.ka.sa.sa_flags & SA_RESTART)) {
regs->gprs[2] = -EINTR;
break;
}
fallthrough;
case -ERESTARTNOINTR:
regs->gprs[2] = regs->orig_gpr2;
regs->psw.addr =
__rewind_psw(regs->psw,
regs->int_code >> 16);
break;
}
}
/* No longer in a system call */
clear_pt_regs_flag(regs, PIF_SYSCALL);
rseq_signal_deliver(&ksig, regs);
if (is_compat_task())
handle_signal32(&ksig, oldset, regs);
else
handle_signal(&ksig, oldset, regs);
return;
}
/* No handlers present - check for system call restart */
clear_pt_regs_flag(regs, PIF_SYSCALL);
if (current->thread.system_call) {
regs->int_code = current->thread.system_call;
switch (regs->gprs[2]) {
case -ERESTART_RESTARTBLOCK:
/* Restart with sys_restart_syscall */
regs->gprs[2] = regs->orig_gpr2;
current->restart_block.arch_data = regs->psw.addr;
if (is_compat_task())
regs->psw.addr = VDSO32_SYMBOL(current, restart_syscall);
else
regs->psw.addr = VDSO64_SYMBOL(current, restart_syscall);
if (test_thread_flag(TIF_SINGLE_STEP))
clear_thread_flag(TIF_PER_TRAP);
break;
case -ERESTARTNOHAND:
case -ERESTARTSYS:
case -ERESTARTNOINTR:
regs->gprs[2] = regs->orig_gpr2;
regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
if (test_thread_flag(TIF_SINGLE_STEP))
clear_thread_flag(TIF_PER_TRAP);
break;
}
}
/*
* If there's no signal to deliver, we just put the saved sigmask back.
*/
restore_saved_sigmask();
}
| linux-master | arch/s390/kernel/signal.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Extract CPU cache information and expose them via sysfs.
*
* Copyright IBM Corp. 2012
*/
#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/cacheinfo.h>
#include <asm/facility.h>
enum {
CACHE_SCOPE_NOTEXISTS,
CACHE_SCOPE_PRIVATE,
CACHE_SCOPE_SHARED,
CACHE_SCOPE_RESERVED,
};
enum {
CTYPE_SEPARATE,
CTYPE_DATA,
CTYPE_INSTRUCTION,
CTYPE_UNIFIED,
};
enum {
EXTRACT_TOPOLOGY,
EXTRACT_LINE_SIZE,
EXTRACT_SIZE,
EXTRACT_ASSOCIATIVITY,
};
enum {
CACHE_TI_UNIFIED = 0,
CACHE_TI_DATA = 0,
CACHE_TI_INSTRUCTION,
};
struct cache_info {
unsigned char : 4;
unsigned char scope : 2;
unsigned char type : 2;
};
#define CACHE_MAX_LEVEL 8
union cache_topology {
struct cache_info ci[CACHE_MAX_LEVEL];
unsigned long raw;
};
static const char * const cache_type_string[] = {
"",
"Instruction",
"Data",
"",
"Unified",
};
static const enum cache_type cache_type_map[] = {
[CTYPE_SEPARATE] = CACHE_TYPE_SEPARATE,
[CTYPE_DATA] = CACHE_TYPE_DATA,
[CTYPE_INSTRUCTION] = CACHE_TYPE_INST,
[CTYPE_UNIFIED] = CACHE_TYPE_UNIFIED,
};
void show_cacheinfo(struct seq_file *m)
{
struct cpu_cacheinfo *this_cpu_ci;
struct cacheinfo *cache;
int idx;
this_cpu_ci = get_cpu_cacheinfo(cpumask_any(cpu_online_mask));
for (idx = 0; idx < this_cpu_ci->num_leaves; idx++) {
cache = this_cpu_ci->info_list + idx;
seq_printf(m, "cache%-11d: ", idx);
seq_printf(m, "level=%d ", cache->level);
seq_printf(m, "type=%s ", cache_type_string[cache->type]);
seq_printf(m, "scope=%s ",
cache->disable_sysfs ? "Shared" : "Private");
seq_printf(m, "size=%dK ", cache->size >> 10);
seq_printf(m, "line_size=%u ", cache->coherency_line_size);
seq_printf(m, "associativity=%d", cache->ways_of_associativity);
seq_puts(m, "\n");
}
}
static inline enum cache_type get_cache_type(struct cache_info *ci, int level)
{
if (level >= CACHE_MAX_LEVEL)
return CACHE_TYPE_NOCACHE;
ci += level;
if (ci->scope != CACHE_SCOPE_SHARED && ci->scope != CACHE_SCOPE_PRIVATE)
return CACHE_TYPE_NOCACHE;
return cache_type_map[ci->type];
}
static inline unsigned long ecag(int ai, int li, int ti)
{
return __ecag(ECAG_CACHE_ATTRIBUTE, ai << 4 | li << 1 | ti);
}
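/*
 * Worked example (illustrative only): extracting the line size
 * (ai = EXTRACT_LINE_SIZE = 1) of the second-level instruction cache
 * (li = 1, ti = CACHE_TI_INSTRUCTION = 1) packs the ECAG attribute
 * index as 1 << 4 | 1 << 1 | 1 = 0x13.
 */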
static void ci_leaf_init(struct cacheinfo *this_leaf, int private,
enum cache_type type, unsigned int level, int cpu)
{
int ti, num_sets;
if (type == CACHE_TYPE_INST)
ti = CACHE_TI_INSTRUCTION;
else
ti = CACHE_TI_UNIFIED;
this_leaf->level = level + 1;
this_leaf->type = type;
this_leaf->coherency_line_size = ecag(EXTRACT_LINE_SIZE, level, ti);
this_leaf->ways_of_associativity = ecag(EXTRACT_ASSOCIATIVITY, level, ti);
this_leaf->size = ecag(EXTRACT_SIZE, level, ti);
num_sets = this_leaf->size / this_leaf->coherency_line_size;
num_sets /= this_leaf->ways_of_associativity;
this_leaf->number_of_sets = num_sets;
cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
if (!private)
this_leaf->disable_sysfs = true;
}
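/*
 * Worked example with made-up numbers: a 2 MiB cache with 256-byte
 * lines and 8-way associativity yields
 * num_sets = (2097152 / 256) / 8 = 1024.
 */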
int init_cache_level(unsigned int cpu)
{
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
unsigned int level = 0, leaves = 0;
union cache_topology ct;
enum cache_type ctype;
if (!this_cpu_ci)
return -EINVAL;
ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
do {
ctype = get_cache_type(&ct.ci[0], level);
if (ctype == CACHE_TYPE_NOCACHE)
break;
/* Separate instruction and data caches */
leaves += (ctype == CACHE_TYPE_SEPARATE) ? 2 : 1;
} while (++level < CACHE_MAX_LEVEL);
this_cpu_ci->num_levels = level;
this_cpu_ci->num_leaves = leaves;
return 0;
}
int populate_cache_leaves(unsigned int cpu)
{
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
struct cacheinfo *this_leaf = this_cpu_ci->info_list;
unsigned int level, idx, pvt;
union cache_topology ct;
enum cache_type ctype;
ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
for (idx = 0, level = 0; level < this_cpu_ci->num_levels &&
idx < this_cpu_ci->num_leaves; idx++, level++) {
if (!this_leaf)
return -EINVAL;
pvt = (ct.ci[level].scope == CACHE_SCOPE_PRIVATE) ? 1 : 0;
ctype = get_cache_type(&ct.ci[0], level);
if (ctype == CACHE_TYPE_SEPARATE) {
ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_DATA, level, cpu);
ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_INST, level, cpu);
} else {
ci_leaf_init(this_leaf++, pvt, ctype, level, cpu);
}
}
return 0;
}
| linux-master | arch/s390/kernel/cache.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Common Ultravisor functions and initialization
*
* Copyright IBM Corp. 2019, 2020
*/
#define KMSG_COMPONENT "prot_virt"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sizes.h>
#include <linux/bitmap.h>
#include <linux/memblock.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/uv.h>
/* the bootdata_preserved fields come from ones in arch/s390/boot/uv.c */
#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
int __bootdata_preserved(prot_virt_guest);
#endif
/*
* uv_info contains both host and guest information, but it is currently only
* expected to be used from modules, namely the KVM module or
* any PV guest module.
*
* The kernel itself will write these values once in uv_query_info()
* and then make some of them readable via a sysfs interface.
*/
struct uv_info __bootdata_preserved(uv_info);
EXPORT_SYMBOL(uv_info);
#if IS_ENABLED(CONFIG_KVM)
int __bootdata_preserved(prot_virt_host);
EXPORT_SYMBOL(prot_virt_host);
static int __init uv_init(phys_addr_t stor_base, unsigned long stor_len)
{
struct uv_cb_init uvcb = {
.header.cmd = UVC_CMD_INIT_UV,
.header.len = sizeof(uvcb),
.stor_origin = stor_base,
.stor_len = stor_len,
};
if (uv_call(0, (uint64_t)&uvcb)) {
pr_err("Ultravisor init failed with rc: 0x%x rrc: 0%x\n",
uvcb.header.rc, uvcb.header.rrc);
return -1;
}
return 0;
}
void __init setup_uv(void)
{
void *uv_stor_base;
if (!is_prot_virt_host())
return;
uv_stor_base = memblock_alloc_try_nid(
uv_info.uv_base_stor_len, SZ_1M, SZ_2G,
MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
if (!uv_stor_base) {
pr_warn("Failed to reserve %lu bytes for ultravisor base storage\n",
uv_info.uv_base_stor_len);
goto fail;
}
if (uv_init(__pa(uv_stor_base), uv_info.uv_base_stor_len)) {
memblock_free(uv_stor_base, uv_info.uv_base_stor_len);
goto fail;
}
pr_info("Reserving %luMB as ultravisor base storage\n",
uv_info.uv_base_stor_len >> 20);
return;
fail:
pr_info("Disabling support for protected virtualization");
prot_virt_host = 0;
}
/*
* Requests the Ultravisor to pin the page in the shared state. This will
* cause an intercept when the guest attempts to unshare the pinned page.
*/
int uv_pin_shared(unsigned long paddr)
{
struct uv_cb_cfs uvcb = {
.header.cmd = UVC_CMD_PIN_PAGE_SHARED,
.header.len = sizeof(uvcb),
.paddr = paddr,
};
if (uv_call(0, (u64)&uvcb))
return -EINVAL;
return 0;
}
EXPORT_SYMBOL_GPL(uv_pin_shared);
/*
* Requests the Ultravisor to destroy a guest page and make it
* accessible to the host. The destroy clears the page instead of
* exporting.
*
* @paddr: Absolute host address of page to be destroyed
*/
static int uv_destroy_page(unsigned long paddr)
{
struct uv_cb_cfs uvcb = {
.header.cmd = UVC_CMD_DESTR_SEC_STOR,
.header.len = sizeof(uvcb),
.paddr = paddr
};
if (uv_call(0, (u64)&uvcb)) {
/*
* Older firmware uses 107/d as an indication of a non-secure
* page. Let us emulate the newer variant (no-op).
*/
if (uvcb.header.rc == 0x107 && uvcb.header.rrc == 0xd)
return 0;
return -EINVAL;
}
return 0;
}
/*
* The caller must already hold a reference to the page
*/
int uv_destroy_owned_page(unsigned long paddr)
{
struct page *page = phys_to_page(paddr);
int rc;
get_page(page);
rc = uv_destroy_page(paddr);
if (!rc)
clear_bit(PG_arch_1, &page->flags);
put_page(page);
return rc;
}
/*
* Requests the Ultravisor to encrypt a guest page and make it
* accessible to the host for paging (export).
*
* @paddr: Absolute host address of page to be exported
*/
int uv_convert_from_secure(unsigned long paddr)
{
struct uv_cb_cfs uvcb = {
.header.cmd = UVC_CMD_CONV_FROM_SEC_STOR,
.header.len = sizeof(uvcb),
.paddr = paddr
};
if (uv_call(0, (u64)&uvcb))
return -EINVAL;
return 0;
}
/*
* The caller must already hold a reference to the page
*/
int uv_convert_owned_from_secure(unsigned long paddr)
{
struct page *page = phys_to_page(paddr);
int rc;
get_page(page);
rc = uv_convert_from_secure(paddr);
if (!rc)
clear_bit(PG_arch_1, &page->flags);
put_page(page);
return rc;
}
/*
* Calculate the expected ref_count for a page that would otherwise have no
* further pins. This was cribbed from similar functions in other places in
* the kernel, but with some slight modifications. We know that a secure
* page can not be a huge page for example.
*/
static int expected_page_refs(struct page *page)
{
int res;
res = page_mapcount(page);
if (PageSwapCache(page)) {
res++;
} else if (page_mapping(page)) {
res++;
if (page_has_private(page))
res++;
}
return res;
}
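/*
 * Illustrative example: a page mapped into two address spaces that also
 * has a mapping with private (buffer) data attached is expected to hold
 * page_mapcount() == 2, plus one reference for the mapping and one for
 * the private data, i.e. an expected refcount of 4 by this calculation.
 */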
static int make_page_secure(struct page *page, struct uv_cb_header *uvcb)
{
int expected, cc = 0;
if (PageWriteback(page))
return -EAGAIN;
expected = expected_page_refs(page);
if (!page_ref_freeze(page, expected))
return -EBUSY;
set_bit(PG_arch_1, &page->flags);
/*
* If the UVC does not succeed or fail immediately, we don't want to
* loop for long, or we might get stall notifications.
* On the other hand, this is a complex scenario and we are holding a lot of
* locks, so we can't easily sleep and reschedule. We try only once,
* and if the UVC returned busy or partial completion, we return
* -EAGAIN and we let the callers deal with it.
*/
cc = __uv_call(0, (u64)uvcb);
page_ref_unfreeze(page, expected);
/*
* Return -ENXIO if the page was not mapped, -EINVAL for other errors.
* If busy or partially completed, return -EAGAIN.
*/
if (cc == UVC_CC_OK)
return 0;
else if (cc == UVC_CC_BUSY || cc == UVC_CC_PARTIAL)
return -EAGAIN;
return uvcb->rc == 0x10a ? -ENXIO : -EINVAL;
}
/**
* should_export_before_import - Determine whether an export is needed
* before an import-like operation
* @uvcb: the Ultravisor control block of the UVC to be performed
* @mm: the mm of the process
*
* Returns whether an export is needed before every import-like operation.
* This is needed for shared pages, which don't trigger a secure storage
* exception when accessed from a different guest.
*
* Although considered as one, the Unpin Page UVC is not an actual import,
* so it is not affected.
*
* No export is needed also when there is only one protected VM, because the
* page cannot belong to the wrong VM in that case (there is no "other VM"
* it can belong to).
*
* Return: true if an export is needed before every import, otherwise false.
*/
static bool should_export_before_import(struct uv_cb_header *uvcb, struct mm_struct *mm)
{
/*
* The misc feature indicates, among other things, that importing a
* shared page from a different protected VM will automatically also
* transfer its ownership.
*/
if (uv_has_feature(BIT_UV_FEAT_MISC))
return false;
if (uvcb->cmd == UVC_CMD_UNPIN_PAGE_SHARED)
return false;
return atomic_read(&mm->context.protected_count) > 1;
}
/*
* Requests the Ultravisor to make a page accessible to a guest.
* If it's brought in the first time, it will be cleared. If
* it has been exported before, it will be decrypted and integrity
* checked.
*/
int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
{
struct vm_area_struct *vma;
bool local_drain = false;
spinlock_t *ptelock;
unsigned long uaddr;
struct page *page;
pte_t *ptep;
int rc;
again:
rc = -EFAULT;
mmap_read_lock(gmap->mm);
uaddr = __gmap_translate(gmap, gaddr);
if (IS_ERR_VALUE(uaddr))
goto out;
vma = vma_lookup(gmap->mm, uaddr);
if (!vma)
goto out;
/*
* Secure pages cannot be huge and userspace should not combine both.
* In case userspace does it anyway this will result in an -EFAULT for
* the unpack. The guest thus never reaches secure mode. If
* userspace plays dirty tricks with mapping huge pages later
* on, this will result in a segmentation fault.
*/
if (is_vm_hugetlb_page(vma))
goto out;
rc = -ENXIO;
ptep = get_locked_pte(gmap->mm, uaddr, &ptelock);
if (!ptep)
goto out;
if (pte_present(*ptep) && !(pte_val(*ptep) & _PAGE_INVALID) && pte_write(*ptep)) {
page = pte_page(*ptep);
rc = -EAGAIN;
if (trylock_page(page)) {
if (should_export_before_import(uvcb, gmap->mm))
uv_convert_from_secure(page_to_phys(page));
rc = make_page_secure(page, uvcb);
unlock_page(page);
}
}
pte_unmap_unlock(ptep, ptelock);
out:
mmap_read_unlock(gmap->mm);
if (rc == -EAGAIN) {
/*
* If we are here because the UVC returned busy or partial
* completion, this is just a useless check, but it is safe.
*/
wait_on_page_writeback(page);
} else if (rc == -EBUSY) {
/*
* If we have tried a local drain and the page refcount
* still does not match our expected safe value, try with a
* system wide drain. This is needed if the pagevecs holding
* the page are on a different CPU.
*/
if (local_drain) {
lru_add_drain_all();
/* We give up here, and let the caller try again */
return -EAGAIN;
}
/*
* We are here if the page refcount does not match the
* expected safe value. The main culprits are usually
* pagevecs. With lru_add_drain() we drain the pagevecs
* on the local CPU so that hopefully the refcount will
* reach the expected safe value.
*/
lru_add_drain();
local_drain = true;
/* And now we try again immediately after draining */
goto again;
} else if (rc == -ENXIO) {
if (gmap_fault(gmap, gaddr, FAULT_FLAG_WRITE))
return -EFAULT;
return -EAGAIN;
}
return rc;
}
EXPORT_SYMBOL_GPL(gmap_make_secure);
int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr)
{
struct uv_cb_cts uvcb = {
.header.cmd = UVC_CMD_CONV_TO_SEC_STOR,
.header.len = sizeof(uvcb),
.guest_handle = gmap->guest_handle,
.gaddr = gaddr,
};
return gmap_make_secure(gmap, gaddr, &uvcb);
}
EXPORT_SYMBOL_GPL(gmap_convert_to_secure);
/**
* gmap_destroy_page - Destroy a guest page.
* @gmap: the gmap of the guest
* @gaddr: the guest address to destroy
*
* An attempt will be made to destroy the given guest page. If the attempt
* fails, an attempt is made to export the page. If both attempts fail, an
* appropriate error is returned.
*/
int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr)
{
struct vm_area_struct *vma;
unsigned long uaddr;
struct page *page;
int rc;
rc = -EFAULT;
mmap_read_lock(gmap->mm);
uaddr = __gmap_translate(gmap, gaddr);
if (IS_ERR_VALUE(uaddr))
goto out;
vma = vma_lookup(gmap->mm, uaddr);
if (!vma)
goto out;
/*
* Huge pages should not be able to become secure
*/
if (is_vm_hugetlb_page(vma))
goto out;
rc = 0;
/* we take an extra reference here */
page = follow_page(vma, uaddr, FOLL_WRITE | FOLL_GET);
if (IS_ERR_OR_NULL(page))
goto out;
rc = uv_destroy_owned_page(page_to_phys(page));
/*
* Fault handlers can race; it is possible that two CPUs will fault
* on the same secure page. One CPU can destroy the page, reboot,
* re-enter secure mode and import it, while the second CPU was
* stuck at the beginning of the handler. At some point the second
* CPU will be able to progress, and it will not be able to destroy
* the page. In that case we do not want to terminate the process,
* we instead try to export the page.
*/
if (rc)
rc = uv_convert_owned_from_secure(page_to_phys(page));
put_page(page);
out:
mmap_read_unlock(gmap->mm);
return rc;
}
EXPORT_SYMBOL_GPL(gmap_destroy_page);
/*
* To be called with the page locked or with an extra reference! This will
* prevent gmap_make_secure from touching the page concurrently. Having 2
* parallel make_page_accessible is fine, as the UV calls will become a
* no-op if the page is already exported.
*/
int arch_make_page_accessible(struct page *page)
{
int rc = 0;
/* Hugepage cannot be protected, so nothing to do */
if (PageHuge(page))
return 0;
/*
* PG_arch_1 is used in 3 places:
* 1. for kernel page tables during early boot
* 2. for storage keys of huge pages and KVM
* 3. As an indication that this page might be secure. This can
* overindicate, e.g. we set the bit before calling
* convert_to_secure.
* As secure pages are never huge, all 3 variants can co-exist.
*/
if (!test_bit(PG_arch_1, &page->flags))
return 0;
rc = uv_pin_shared(page_to_phys(page));
if (!rc) {
clear_bit(PG_arch_1, &page->flags);
return 0;
}
rc = uv_convert_from_secure(page_to_phys(page));
if (!rc) {
clear_bit(PG_arch_1, &page->flags);
return 0;
}
return rc;
}
EXPORT_SYMBOL_GPL(arch_make_page_accessible);
#endif
#if defined(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) || IS_ENABLED(CONFIG_KVM)
static ssize_t uv_query_facilities(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%lx\n%lx\n%lx\n%lx\n",
uv_info.inst_calls_list[0],
uv_info.inst_calls_list[1],
uv_info.inst_calls_list[2],
uv_info.inst_calls_list[3]);
}
static struct kobj_attribute uv_query_facilities_attr =
__ATTR(facilities, 0444, uv_query_facilities, NULL);
static ssize_t uv_query_supp_se_hdr_ver(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%lx\n", uv_info.supp_se_hdr_ver);
}
static struct kobj_attribute uv_query_supp_se_hdr_ver_attr =
__ATTR(supp_se_hdr_ver, 0444, uv_query_supp_se_hdr_ver, NULL);
static ssize_t uv_query_supp_se_hdr_pcf(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%lx\n", uv_info.supp_se_hdr_pcf);
}
static struct kobj_attribute uv_query_supp_se_hdr_pcf_attr =
__ATTR(supp_se_hdr_pcf, 0444, uv_query_supp_se_hdr_pcf, NULL);
static ssize_t uv_query_dump_cpu_len(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%lx\n", uv_info.guest_cpu_stor_len);
}
static struct kobj_attribute uv_query_dump_cpu_len_attr =
__ATTR(uv_query_dump_cpu_len, 0444, uv_query_dump_cpu_len, NULL);
static ssize_t uv_query_dump_storage_state_len(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%lx\n", uv_info.conf_dump_storage_state_len);
}
static struct kobj_attribute uv_query_dump_storage_state_len_attr =
__ATTR(dump_storage_state_len, 0444, uv_query_dump_storage_state_len, NULL);
static ssize_t uv_query_dump_finalize_len(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%lx\n", uv_info.conf_dump_finalize_len);
}
static struct kobj_attribute uv_query_dump_finalize_len_attr =
__ATTR(dump_finalize_len, 0444, uv_query_dump_finalize_len, NULL);
static ssize_t uv_query_feature_indications(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%lx\n", uv_info.uv_feature_indications);
}
static struct kobj_attribute uv_query_feature_indications_attr =
__ATTR(feature_indications, 0444, uv_query_feature_indications, NULL);
static ssize_t uv_query_max_guest_cpus(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%d\n", uv_info.max_guest_cpu_id + 1);
}
static struct kobj_attribute uv_query_max_guest_cpus_attr =
__ATTR(max_cpus, 0444, uv_query_max_guest_cpus, NULL);
static ssize_t uv_query_max_guest_vms(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%d\n", uv_info.max_num_sec_conf);
}
static struct kobj_attribute uv_query_max_guest_vms_attr =
__ATTR(max_guests, 0444, uv_query_max_guest_vms, NULL);
static ssize_t uv_query_max_guest_addr(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%lx\n", uv_info.max_sec_stor_addr);
}
static struct kobj_attribute uv_query_max_guest_addr_attr =
__ATTR(max_address, 0444, uv_query_max_guest_addr, NULL);
static ssize_t uv_query_supp_att_req_hdr_ver(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%lx\n", uv_info.supp_att_req_hdr_ver);
}
static struct kobj_attribute uv_query_supp_att_req_hdr_ver_attr =
__ATTR(supp_att_req_hdr_ver, 0444, uv_query_supp_att_req_hdr_ver, NULL);
static ssize_t uv_query_supp_att_pflags(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%lx\n", uv_info.supp_att_pflags);
}
static struct kobj_attribute uv_query_supp_att_pflags_attr =
__ATTR(supp_att_pflags, 0444, uv_query_supp_att_pflags, NULL);
static ssize_t uv_query_supp_add_secret_req_ver(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%lx\n", uv_info.supp_add_secret_req_ver);
}
static struct kobj_attribute uv_query_supp_add_secret_req_ver_attr =
__ATTR(supp_add_secret_req_ver, 0444, uv_query_supp_add_secret_req_ver, NULL);
static ssize_t uv_query_supp_add_secret_pcf(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%lx\n", uv_info.supp_add_secret_pcf);
}
static struct kobj_attribute uv_query_supp_add_secret_pcf_attr =
__ATTR(supp_add_secret_pcf, 0444, uv_query_supp_add_secret_pcf, NULL);
static ssize_t uv_query_supp_secret_types(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%lx\n", uv_info.supp_secret_types);
}
static struct kobj_attribute uv_query_supp_secret_types_attr =
__ATTR(supp_secret_types, 0444, uv_query_supp_secret_types, NULL);
static ssize_t uv_query_max_secrets(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%d\n", uv_info.max_secrets);
}
static struct kobj_attribute uv_query_max_secrets_attr =
__ATTR(max_secrets, 0444, uv_query_max_secrets, NULL);
static struct attribute *uv_query_attrs[] = {
&uv_query_facilities_attr.attr,
&uv_query_feature_indications_attr.attr,
&uv_query_max_guest_cpus_attr.attr,
&uv_query_max_guest_vms_attr.attr,
&uv_query_max_guest_addr_attr.attr,
&uv_query_supp_se_hdr_ver_attr.attr,
&uv_query_supp_se_hdr_pcf_attr.attr,
&uv_query_dump_storage_state_len_attr.attr,
&uv_query_dump_finalize_len_attr.attr,
&uv_query_dump_cpu_len_attr.attr,
&uv_query_supp_att_req_hdr_ver_attr.attr,
&uv_query_supp_att_pflags_attr.attr,
&uv_query_supp_add_secret_req_ver_attr.attr,
&uv_query_supp_add_secret_pcf_attr.attr,
&uv_query_supp_secret_types_attr.attr,
&uv_query_max_secrets_attr.attr,
NULL,
};
static struct attribute_group uv_query_attr_group = {
.attrs = uv_query_attrs,
};
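/*
 * Usage sketch (userspace, illustrative only): uv_info_init() below
 * registers these attributes under /sys/firmware/uv/query/. A minimal
 * reader, assuming the max_cpus attribute is present on the running
 * system:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char buf[32];
 *		FILE *f = fopen("/sys/firmware/uv/query/max_cpus", "r");
 *
 *		if (!f)
 *			return 1;
 *		if (fgets(buf, sizeof(buf), f))
 *			printf("max guest cpus: %s", buf);
 *		fclose(f);
 *		return 0;
 *	}
 */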
static ssize_t uv_is_prot_virt_guest(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
int val = 0;
#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
val = prot_virt_guest;
#endif
return sysfs_emit(buf, "%d\n", val);
}
static ssize_t uv_is_prot_virt_host(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
int val = 0;
#if IS_ENABLED(CONFIG_KVM)
val = prot_virt_host;
#endif
return sysfs_emit(buf, "%d\n", val);
}
static struct kobj_attribute uv_prot_virt_guest =
__ATTR(prot_virt_guest, 0444, uv_is_prot_virt_guest, NULL);
static struct kobj_attribute uv_prot_virt_host =
__ATTR(prot_virt_host, 0444, uv_is_prot_virt_host, NULL);
static const struct attribute *uv_prot_virt_attrs[] = {
&uv_prot_virt_guest.attr,
&uv_prot_virt_host.attr,
NULL,
};
static struct kset *uv_query_kset;
static struct kobject *uv_kobj;
static int __init uv_info_init(void)
{
int rc = -ENOMEM;
if (!test_facility(158))
return 0;
uv_kobj = kobject_create_and_add("uv", firmware_kobj);
if (!uv_kobj)
return -ENOMEM;
rc = sysfs_create_files(uv_kobj, uv_prot_virt_attrs);
if (rc)
goto out_kobj;
uv_query_kset = kset_create_and_add("query", NULL, uv_kobj);
if (!uv_query_kset) {
rc = -ENOMEM;
goto out_ind_files;
}
rc = sysfs_create_group(&uv_query_kset->kobj, &uv_query_attr_group);
if (!rc)
return 0;
kset_unregister(uv_query_kset);
out_ind_files:
sysfs_remove_files(uv_kobj, uv_prot_virt_attrs);
out_kobj:
kobject_del(uv_kobj);
kobject_put(uv_kobj);
return rc;
}
device_initcall(uv_info_init);
#endif
| linux-master | arch/s390/kernel/uv.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/perf_event.h>
#include <linux/perf_regs.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/bug.h>
#include <asm/ptrace.h>
#include <asm/fpu/api.h>
#include <asm/fpu/types.h>
u64 perf_reg_value(struct pt_regs *regs, int idx)
{
freg_t fp;
if (idx >= PERF_REG_S390_R0 && idx <= PERF_REG_S390_R15)
return regs->gprs[idx];
if (idx >= PERF_REG_S390_FP0 && idx <= PERF_REG_S390_FP15) {
if (!user_mode(regs))
return 0;
idx -= PERF_REG_S390_FP0;
fp = MACHINE_HAS_VX ? *(freg_t *)(current->thread.fpu.vxrs + idx)
: current->thread.fpu.fprs[idx];
return fp.ui;
}
if (idx == PERF_REG_S390_MASK)
return regs->psw.mask;
if (idx == PERF_REG_S390_PC)
return regs->psw.addr;
WARN_ON_ONCE((u32)idx >= PERF_REG_S390_MAX);
return 0;
}
#define REG_RESERVED (~((1UL << PERF_REG_S390_MAX) - 1))
int perf_reg_validate(u64 mask)
{
if (!mask || mask & REG_RESERVED)
return -EINVAL;
return 0;
}
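/*
 * Illustrative example: REG_RESERVED has every bit at or above
 * PERF_REG_S390_MAX set. A request for the first two GPRs only,
 * mask = (1UL << PERF_REG_S390_R0) | (1UL << PERF_REG_S390_R1),
 * has no reserved bit set and is accepted; a mask with any higher
 * bit set is rejected with -EINVAL.
 */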
u64 perf_reg_abi(struct task_struct *task)
{
if (test_tsk_thread_flag(task, TIF_31BIT))
return PERF_SAMPLE_REGS_ABI_32;
return PERF_SAMPLE_REGS_ABI_64;
}
void perf_get_regs_user(struct perf_regs *regs_user,
struct pt_regs *regs)
{
/*
* Use the regs from the first interruption and let
* perf_sample_regs_intr() handle interrupts (regs == get_irq_regs()).
*
* Also save FPU registers for user-space tasks only.
*/
regs_user->regs = task_pt_regs(current);
if (user_mode(regs_user->regs))
save_fpu_regs();
regs_user->abi = perf_reg_abi(current);
}
| linux-master | arch/s390/kernel/perf_regs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Machine check handler
*
* Copyright IBM Corp. 2000, 2009
* Author(s): Ingo Adlung <[email protected]>,
* Martin Schwidefsky <[email protected]>,
* Cornelia Huck <[email protected]>,
*/
#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/entry-common.h>
#include <linux/hardirq.h>
#include <linux/log2.h>
#include <linux/kprobes.h>
#include <linux/kmemleak.h>
#include <linux/time.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/kvm_host.h>
#include <linux/export.h>
#include <asm/lowcore.h>
#include <asm/smp.h>
#include <asm/stp.h>
#include <asm/cputime.h>
#include <asm/nmi.h>
#include <asm/crw.h>
#include <asm/switch_to.h>
#include <asm/ctl_reg.h>
#include <asm/asm-offsets.h>
#include <asm/pai.h>
#include <asm/vx-insn.h>
struct mcck_struct {
unsigned int kill_task : 1;
unsigned int channel_report : 1;
unsigned int warning : 1;
unsigned int stp_queue : 1;
unsigned long mcck_code;
};
static DEFINE_PER_CPU(struct mcck_struct, cpu_mcck);
static inline int nmi_needs_mcesa(void)
{
return MACHINE_HAS_VX || MACHINE_HAS_GS;
}
/*
* The initial machine check extended save area for the boot CPU.
* It will be replaced on the boot CPU reinit with an allocated
* structure. The structure is required for machine check happening
* early in the boot process.
*/
static struct mcesa boot_mcesa __aligned(MCESA_MAX_SIZE);
void __init nmi_alloc_mcesa_early(u64 *mcesad)
{
if (!nmi_needs_mcesa())
return;
*mcesad = __pa(&boot_mcesa);
if (MACHINE_HAS_GS)
*mcesad |= ilog2(MCESA_MAX_SIZE);
}
int nmi_alloc_mcesa(u64 *mcesad)
{
unsigned long size;
void *origin;
*mcesad = 0;
if (!nmi_needs_mcesa())
return 0;
size = MACHINE_HAS_GS ? MCESA_MAX_SIZE : MCESA_MIN_SIZE;
origin = kmalloc(size, GFP_KERNEL);
if (!origin)
return -ENOMEM;
/* The pointer is stored with mcesa_bits ORed in */
kmemleak_not_leak(origin);
*mcesad = __pa(origin);
if (MACHINE_HAS_GS)
*mcesad |= ilog2(MCESA_MAX_SIZE);
return 0;
}
void nmi_free_mcesa(u64 *mcesad)
{
if (!nmi_needs_mcesa())
return;
kfree(__va(*mcesad & MCESA_ORIGIN_MASK));
}
static __always_inline char *nmi_puts(char *dest, const char *src)
{
while (*src)
*dest++ = *src++;
*dest = 0;
return dest;
}
static __always_inline char *u64_to_hex(char *dest, u64 val)
{
int i, num;
for (i = 1; i <= 16; i++) {
num = (val >> (64 - 4 * i)) & 0xf;
if (num >= 10)
*dest++ = 'A' + num - 10;
else
*dest++ = '0' + num;
}
*dest = 0;
return dest;
}
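/*
 * Illustrative example: u64_to_hex() always emits 16 upper-case hex
 * digits plus a trailing NUL, so u64_to_hex(buf, 0xdeadbeef) stores
 * "00000000DEADBEEF" in buf. s390_handle_damage() below uses it to
 * append the machine check interruption code to its stop message.
 */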
static notrace void s390_handle_damage(void)
{
union ctlreg0 cr0, cr0_new;
char message[100];
psw_t psw_save;
char *ptr;
smp_emergency_stop();
diag_amode31_ops.diag308_reset();
ptr = nmi_puts(message, "System stopped due to unrecoverable machine check, code: 0x");
u64_to_hex(ptr, S390_lowcore.mcck_interruption_code);
/*
* Disable low address protection and make machine check new PSW a
* disabled wait PSW. Any additional machine check cannot be handled.
*/
__ctl_store(cr0.val, 0, 0);
cr0_new = cr0;
cr0_new.lap = 0;
__ctl_load(cr0_new.val, 0, 0);
psw_save = S390_lowcore.mcck_new_psw;
psw_bits(S390_lowcore.mcck_new_psw).io = 0;
psw_bits(S390_lowcore.mcck_new_psw).ext = 0;
psw_bits(S390_lowcore.mcck_new_psw).wait = 1;
sclp_emergency_printk(message);
/*
* Restore machine check new PSW and control register 0 to original
* values. This makes analysis of a possible system dump easier.
*/
S390_lowcore.mcck_new_psw = psw_save;
__ctl_load(cr0.val, 0, 0);
disabled_wait();
while (1);
}
NOKPROBE_SYMBOL(s390_handle_damage);
/*
* Main machine check handler function. Will be called with interrupts disabled
* and machine checks enabled.
*/
void s390_handle_mcck(void)
{
struct mcck_struct mcck;
/*
* Disable machine checks and get the current state of accumulated
* machine checks. Afterwards delete the old state and enable machine
* checks again.
*/
local_mcck_disable();
mcck = *this_cpu_ptr(&cpu_mcck);
memset(this_cpu_ptr(&cpu_mcck), 0, sizeof(mcck));
local_mcck_enable();
if (mcck.channel_report)
crw_handle_channel_report();
/*
* A warning may remain for a prolonged period on the bare iron.
* (actually until the machine is powered off, or the problem is gone)
* So we just stop listening for the WARNING MCH and avoid continuously
* being interrupted. One caveat, however, is that we must do this per
* processor and cannot use the smp version of ctl_clear_bit().
* On VM we only get one interrupt per virtually presented machine check.
* Though one suffices, we may get one interrupt per (virtual) cpu.
*/
if (mcck.warning) { /* WARNING pending ? */
static int mchchk_wng_posted = 0;
/* Use single cpu clear, as we cannot handle smp here. */
__ctl_clear_bit(14, 24); /* Disable WARNING MCH */
if (xchg(&mchchk_wng_posted, 1) == 0)
kill_cad_pid(SIGPWR, 1);
}
if (mcck.stp_queue)
stp_queue_work();
if (mcck.kill_task) {
printk(KERN_EMERG "mcck: Terminating task because of machine "
"malfunction (code 0x%016lx).\n", mcck.mcck_code);
printk(KERN_EMERG "mcck: task: %s, pid: %d.\n",
current->comm, current->pid);
if (is_global_init(current))
panic("mcck: Attempting to kill init!\n");
do_send_sig_info(SIGKILL, SEND_SIG_PRIV, current, PIDTYPE_PID);
}
}
/*
* returns 0 if register contents could be validated
* returns 1 otherwise
*/
static int notrace s390_validate_registers(union mci mci)
{
struct mcesa *mcesa;
void *fpt_save_area;
union ctlreg2 cr2;
int kill_task;
u64 zero;
kill_task = 0;
zero = 0;
if (!mci.gr || !mci.fp)
kill_task = 1;
fpt_save_area = &S390_lowcore.floating_pt_save_area;
if (!mci.fc) {
kill_task = 1;
asm volatile(
" lfpc %0\n"
:
: "Q" (zero));
} else {
asm volatile(
" lfpc %0\n"
:
: "Q" (S390_lowcore.fpt_creg_save_area));
}
mcesa = __va(S390_lowcore.mcesad & MCESA_ORIGIN_MASK);
if (!MACHINE_HAS_VX) {
/* Validate floating point registers */
asm volatile(
" ld 0,0(%0)\n"
" ld 1,8(%0)\n"
" ld 2,16(%0)\n"
" ld 3,24(%0)\n"
" ld 4,32(%0)\n"
" ld 5,40(%0)\n"
" ld 6,48(%0)\n"
" ld 7,56(%0)\n"
" ld 8,64(%0)\n"
" ld 9,72(%0)\n"
" ld 10,80(%0)\n"
" ld 11,88(%0)\n"
" ld 12,96(%0)\n"
" ld 13,104(%0)\n"
" ld 14,112(%0)\n"
" ld 15,120(%0)\n"
:
: "a" (fpt_save_area)
: "memory");
} else {
/* Validate vector registers */
union ctlreg0 cr0;
/*
* The vector validity must only be checked if not running a
* KVM guest. For KVM guests the machine check is forwarded by
* KVM and it is the responsibility of the guest to take
* appropriate actions. The host vector or FPU values have been
* saved by KVM and will be restored by KVM.
*/
if (!mci.vr && !test_cpu_flag(CIF_MCCK_GUEST))
kill_task = 1;
cr0.val = S390_lowcore.cregs_save_area[0];
cr0.afp = cr0.vx = 1;
__ctl_load(cr0.val, 0, 0);
asm volatile(
" la 1,%0\n"
" VLM 0,15,0,1\n"
" VLM 16,31,256,1\n"
:
: "Q" (*(struct vx_array *)mcesa->vector_save_area)
: "1");
__ctl_load(S390_lowcore.cregs_save_area[0], 0, 0);
}
/* Validate access registers */
asm volatile(
" lam 0,15,0(%0)\n"
:
: "a" (&S390_lowcore.access_regs_save_area)
: "memory");
if (!mci.ar)
kill_task = 1;
/* Validate guarded storage registers */
cr2.val = S390_lowcore.cregs_save_area[2];
if (cr2.gse) {
if (!mci.gs) {
/*
* 2 cases:
* - machine check in kernel or userspace
* - machine check while running SIE (KVM guest)
* For kernel or userspace the userspace values of
* guarded storage control can not be recreated, the
* process must be terminated.
* For SIE the guest values of guarded storage can not
* be recreated. This is either due to a bug or due to
* GS being disabled in the guest. The guest will be
* notified by KVM code and the guests machine check
* handling must take care of this. The host values
* are saved by KVM and are not affected.
*/
if (!test_cpu_flag(CIF_MCCK_GUEST))
kill_task = 1;
} else {
load_gs_cb((struct gs_cb *)mcesa->guarded_storage_save_area);
}
}
/*
* The getcpu vdso syscall reads CPU number from the programmable
* field of the TOD clock. Disregard the TOD programmable register
* validity bit and load the CPU number into the TOD programmable
* field unconditionally.
*/
set_tod_programmable_field(raw_smp_processor_id());
/* Validate clock comparator register */
set_clock_comparator(S390_lowcore.clock_comparator);
if (!mci.ms || !mci.pm || !mci.ia)
kill_task = 1;
return kill_task;
}
NOKPROBE_SYMBOL(s390_validate_registers);
/*
* Backup the guest's machine check info to its description block
*/
static void notrace s390_backup_mcck_info(struct pt_regs *regs)
{
struct mcck_volatile_info *mcck_backup;
struct sie_page *sie_page;
/* r14 contains the sie block, which was set in sie64a */
struct kvm_s390_sie_block *sie_block = phys_to_virt(regs->gprs[14]);
if (sie_block == NULL)
/* Something's seriously wrong, stop system. */
s390_handle_damage();
sie_page = container_of(sie_block, struct sie_page, sie_block);
mcck_backup = &sie_page->mcck_info;
mcck_backup->mcic = S390_lowcore.mcck_interruption_code &
~(MCCK_CODE_CP | MCCK_CODE_EXT_DAMAGE);
mcck_backup->ext_damage_code = S390_lowcore.external_damage_code;
mcck_backup->failing_storage_address
= S390_lowcore.failing_storage_address;
}
NOKPROBE_SYMBOL(s390_backup_mcck_info);
#define MAX_IPD_COUNT 29
#define MAX_IPD_TIME (5 * 60 * USEC_PER_SEC) /* 5 minutes */
#define ED_STP_ISLAND 6 /* External damage STP island check */
#define ED_STP_SYNC 7 /* External damage STP sync check */
#define MCCK_CODE_NO_GUEST (MCCK_CODE_CP | MCCK_CODE_EXT_DAMAGE)
/*
* machine check handler.
*/
void notrace s390_do_machine_check(struct pt_regs *regs)
{
static int ipd_count;
static DEFINE_SPINLOCK(ipd_lock);
static unsigned long long last_ipd;
struct mcck_struct *mcck;
unsigned long long tmp;
irqentry_state_t irq_state;
union mci mci;
unsigned long mcck_dam_code;
int mcck_pending = 0;
irq_state = irqentry_nmi_enter(regs);
if (user_mode(regs))
update_timer_mcck();
inc_irq_stat(NMI_NMI);
mci.val = S390_lowcore.mcck_interruption_code;
mcck = this_cpu_ptr(&cpu_mcck);
/*
* Reinject the instruction processing damages' machine checks
* including Delayed Access Exception into the guest
* instead of damaging the host if they happen in the guest.
*/
if (mci.pd && !test_cpu_flag(CIF_MCCK_GUEST)) {
if (mci.b) {
/* Processing backup -> verify if we can survive this */
u64 z_mcic, o_mcic, t_mcic;
z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<29);
o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 |
1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 |
1ULL<<30 | 1ULL<<21 | 1ULL<<20 | 1ULL<<17 |
1ULL<<16);
t_mcic = mci.val;
if (((t_mcic & z_mcic) != 0) ||
((t_mcic & o_mcic) != o_mcic)) {
s390_handle_damage();
}
/*
* Nullifying exigent condition, therefore we might
* retry this instruction.
*/
spin_lock(&ipd_lock);
tmp = get_tod_clock();
if (((tmp - last_ipd) >> 12) < MAX_IPD_TIME)
ipd_count++;
else
ipd_count = 1;
last_ipd = tmp;
if (ipd_count == MAX_IPD_COUNT)
s390_handle_damage();
spin_unlock(&ipd_lock);
} else {
/* Processing damage -> stopping machine */
s390_handle_damage();
}
}
if (s390_validate_registers(mci)) {
if (!user_mode(regs))
s390_handle_damage();
/*
* Couldn't restore all register contents for the
* user space process -> mark task for termination.
*/
mcck->kill_task = 1;
mcck->mcck_code = mci.val;
mcck_pending = 1;
}
/*
* Backup the machine check's info if it happens when the guest
* is running.
*/
if (test_cpu_flag(CIF_MCCK_GUEST))
s390_backup_mcck_info(regs);
if (mci.cd) {
/* Timing facility damage */
s390_handle_damage();
}
if (mci.ed && mci.ec) {
/* External damage */
if (S390_lowcore.external_damage_code & (1U << ED_STP_SYNC))
mcck->stp_queue |= stp_sync_check();
if (S390_lowcore.external_damage_code & (1U << ED_STP_ISLAND))
mcck->stp_queue |= stp_island_check();
mcck_pending = 1;
}
/*
* Reinject storage related machine checks into the guest if they
* happen when the guest is running.
*/
if (!test_cpu_flag(CIF_MCCK_GUEST)) {
/* Storage error uncorrected */
if (mci.se)
s390_handle_damage();
/* Storage key-error uncorrected */
if (mci.ke)
s390_handle_damage();
/* Storage degradation */
if (mci.ds && mci.fa)
s390_handle_damage();
}
if (mci.cp) {
/* Channel report word pending */
mcck->channel_report = 1;
mcck_pending = 1;
}
if (mci.w) {
/* Warning pending */
mcck->warning = 1;
mcck_pending = 1;
}
/*
* If there are only Channel Report Pending and External Damage
* machine checks, they will not be reinjected into the guest
* because they refer to host conditions only.
*/
mcck_dam_code = (mci.val & MCIC_SUBCLASS_MASK);
if (test_cpu_flag(CIF_MCCK_GUEST) &&
(mcck_dam_code & MCCK_CODE_NO_GUEST) != mcck_dam_code) {
/* Set exit reason code for host's later handling */
*((long *)(regs->gprs[15] + __SF_SIE_REASON)) = -EINTR;
}
clear_cpu_flag(CIF_MCCK_GUEST);
if (mcck_pending)
schedule_mcck_handler();
irqentry_nmi_exit(regs, irq_state);
}
NOKPROBE_SYMBOL(s390_do_machine_check);
static int __init machine_check_init(void)
{
ctl_set_bit(14, 25); /* enable external damage MCH */
ctl_set_bit(14, 27); /* enable system recovery MCH */
ctl_set_bit(14, 24); /* enable warning MCH */
return 0;
}
early_initcall(machine_check_init);
| linux-master | arch/s390/kernel/nmi.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2001, 2009
* Author(s): Ulrich Weigand <[email protected]>,
* Martin Schwidefsky <[email protected]>,
*/
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <asm/asm-extable.h>
#include <asm/ebcdic.h>
#include <asm/debug.h>
#include <asm/sysinfo.h>
#include <asm/cpcmd.h>
#include <asm/topology.h>
#include <asm/fpu/api.h>
int topology_max_mnest;
static inline int __stsi(void *sysinfo, int fc, int sel1, int sel2, int *lvl)
{
int r0 = (fc << 28) | sel1;
int rc = 0;
asm volatile(
" lr 0,%[r0]\n"
" lr 1,%[r1]\n"
" stsi 0(%[sysinfo])\n"
"0: jz 2f\n"
"1: lhi %[rc],%[retval]\n"
"2: lr %[r0],0\n"
EX_TABLE(0b, 1b)
: [r0] "+d" (r0), [rc] "+d" (rc)
: [r1] "d" (sel2),
[sysinfo] "a" (sysinfo),
[retval] "K" (-EOPNOTSUPP)
: "cc", "0", "1", "memory");
*lvl = ((unsigned int) r0) >> 28;
return rc;
}
/*
* stsi - store system information
*
* Returns the current configuration level if function code 0 was specified.
* Otherwise returns 0 on success or a negative value on error.
*/
int stsi(void *sysinfo, int fc, int sel1, int sel2)
{
int lvl, rc;
rc = __stsi(sysinfo, fc, sel1, sel2, &lvl);
if (rc)
return rc;
return fc ? 0 : lvl;
}
EXPORT_SYMBOL(stsi);
#ifdef CONFIG_PROC_FS
static bool convert_ext_name(unsigned char encoding, char *name, size_t len)
{
switch (encoding) {
case 1: /* EBCDIC */
EBCASC(name, len);
break;
case 2: /* UTF-8 */
break;
default:
return false;
}
return true;
}
static void stsi_1_1_1(struct seq_file *m, struct sysinfo_1_1_1 *info)
{
int i;
if (stsi(info, 1, 1, 1))
return;
EBCASC(info->manufacturer, sizeof(info->manufacturer));
EBCASC(info->type, sizeof(info->type));
EBCASC(info->model, sizeof(info->model));
EBCASC(info->sequence, sizeof(info->sequence));
EBCASC(info->plant, sizeof(info->plant));
EBCASC(info->model_capacity, sizeof(info->model_capacity));
EBCASC(info->model_perm_cap, sizeof(info->model_perm_cap));
EBCASC(info->model_temp_cap, sizeof(info->model_temp_cap));
seq_printf(m, "Manufacturer: %-16.16s\n", info->manufacturer);
seq_printf(m, "Type: %-4.4s\n", info->type);
if (info->lic)
seq_printf(m, "LIC Identifier: %016lx\n", info->lic);
/*
* Sigh: the model field has been renamed with System z9
* to model_capacity and a new model field has been added
* after the plant field. To avoid confusing older programs
* the "Model:" prints "model_capacity model" or just
* "model_capacity" if the model string is empty .
*/
seq_printf(m, "Model: %-16.16s", info->model_capacity);
if (info->model[0] != '\0')
seq_printf(m, " %-16.16s", info->model);
seq_putc(m, '\n');
seq_printf(m, "Sequence Code: %-16.16s\n", info->sequence);
seq_printf(m, "Plant: %-4.4s\n", info->plant);
seq_printf(m, "Model Capacity: %-16.16s %08u\n",
info->model_capacity, info->model_cap_rating);
if (info->model_perm_cap_rating)
seq_printf(m, "Model Perm. Capacity: %-16.16s %08u\n",
info->model_perm_cap,
info->model_perm_cap_rating);
if (info->model_temp_cap_rating)
seq_printf(m, "Model Temp. Capacity: %-16.16s %08u\n",
info->model_temp_cap,
info->model_temp_cap_rating);
if (info->ncr)
seq_printf(m, "Nominal Cap. Rating: %08u\n", info->ncr);
if (info->npr)
seq_printf(m, "Nominal Perm. Rating: %08u\n", info->npr);
if (info->ntr)
seq_printf(m, "Nominal Temp. Rating: %08u\n", info->ntr);
if (info->cai) {
seq_printf(m, "Capacity Adj. Ind.: %d\n", info->cai);
seq_printf(m, "Capacity Ch. Reason: %d\n", info->ccr);
seq_printf(m, "Capacity Transient: %d\n", info->t);
}
if (info->p) {
for (i = 1; i <= ARRAY_SIZE(info->typepct); i++) {
seq_printf(m, "Type %d Percentage: %d\n",
i, info->typepct[i - 1]);
}
}
}
static void stsi_15_1_x(struct seq_file *m, struct sysinfo_15_1_x *info)
{
int i;
seq_putc(m, '\n');
if (!MACHINE_HAS_TOPOLOGY)
return;
if (stsi(info, 15, 1, topology_max_mnest))
return;
seq_printf(m, "CPU Topology HW: ");
for (i = 0; i < TOPOLOGY_NR_MAG; i++)
seq_printf(m, " %d", info->mag[i]);
seq_putc(m, '\n');
#ifdef CONFIG_SCHED_TOPOLOGY
store_topology(info);
seq_printf(m, "CPU Topology SW: ");
for (i = 0; i < TOPOLOGY_NR_MAG; i++)
seq_printf(m, " %d", info->mag[i]);
seq_putc(m, '\n');
#endif
}
static void stsi_1_2_2(struct seq_file *m, struct sysinfo_1_2_2 *info)
{
struct sysinfo_1_2_2_extension *ext;
int i;
if (stsi(info, 1, 2, 2))
return;
ext = (struct sysinfo_1_2_2_extension *)
((unsigned long) info + info->acc_offset);
seq_printf(m, "CPUs Total: %d\n", info->cpus_total);
seq_printf(m, "CPUs Configured: %d\n", info->cpus_configured);
seq_printf(m, "CPUs Standby: %d\n", info->cpus_standby);
seq_printf(m, "CPUs Reserved: %d\n", info->cpus_reserved);
if (info->mt_installed) {
seq_printf(m, "CPUs G-MTID: %d\n", info->mt_gtid);
seq_printf(m, "CPUs S-MTID: %d\n", info->mt_stid);
}
/*
* Sigh 2. According to the specification the alternate
* capability field is a 32 bit floating point number
* if the higher order 8 bits are not zero. Printing
* a floating point number in the kernel is a no-no, so we
* always print the number as a 32 bit unsigned integer.
* User space needs to know about the strange
* encoding of the alternate cpu capability.
*/
seq_printf(m, "Capability: %u", info->capability);
if (info->format == 1)
seq_printf(m, " %u", ext->alt_capability);
seq_putc(m, '\n');
if (info->nominal_cap)
seq_printf(m, "Nominal Capability: %d\n", info->nominal_cap);
if (info->secondary_cap)
seq_printf(m, "Secondary Capability: %d\n", info->secondary_cap);
for (i = 2; i <= info->cpus_total; i++) {
seq_printf(m, "Adjustment %02d-way: %u",
i, info->adjustment[i-2]);
if (info->format == 1)
seq_printf(m, " %u", ext->alt_adjustment[i-2]);
seq_putc(m, '\n');
}
}
static void stsi_2_2_2(struct seq_file *m, struct sysinfo_2_2_2 *info)
{
if (stsi(info, 2, 2, 2))
return;
EBCASC(info->name, sizeof(info->name));
seq_putc(m, '\n');
seq_printf(m, "LPAR Number: %d\n", info->lpar_number);
seq_printf(m, "LPAR Characteristics: ");
if (info->characteristics & LPAR_CHAR_DEDICATED)
seq_printf(m, "Dedicated ");
if (info->characteristics & LPAR_CHAR_SHARED)
seq_printf(m, "Shared ");
if (info->characteristics & LPAR_CHAR_LIMITED)
seq_printf(m, "Limited ");
seq_putc(m, '\n');
seq_printf(m, "LPAR Name: %-8.8s\n", info->name);
seq_printf(m, "LPAR Adjustment: %d\n", info->caf);
seq_printf(m, "LPAR CPUs Total: %d\n", info->cpus_total);
seq_printf(m, "LPAR CPUs Configured: %d\n", info->cpus_configured);
seq_printf(m, "LPAR CPUs Standby: %d\n", info->cpus_standby);
seq_printf(m, "LPAR CPUs Reserved: %d\n", info->cpus_reserved);
seq_printf(m, "LPAR CPUs Dedicated: %d\n", info->cpus_dedicated);
seq_printf(m, "LPAR CPUs Shared: %d\n", info->cpus_shared);
if (info->mt_installed) {
seq_printf(m, "LPAR CPUs G-MTID: %d\n", info->mt_gtid);
seq_printf(m, "LPAR CPUs S-MTID: %d\n", info->mt_stid);
seq_printf(m, "LPAR CPUs PS-MTID: %d\n", info->mt_psmtid);
}
if (convert_ext_name(info->vsne, info->ext_name, sizeof(info->ext_name))) {
seq_printf(m, "LPAR Extended Name: %-.256s\n", info->ext_name);
seq_printf(m, "LPAR UUID: %pUb\n", &info->uuid);
}
}
static void print_ext_name(struct seq_file *m, int lvl,
struct sysinfo_3_2_2 *info)
{
size_t len = sizeof(info->ext_names[lvl]);
if (!convert_ext_name(info->vm[lvl].evmne, info->ext_names[lvl], len))
return;
seq_printf(m, "VM%02d Extended Name: %-.256s\n", lvl,
info->ext_names[lvl]);
}
static void print_uuid(struct seq_file *m, int i, struct sysinfo_3_2_2 *info)
{
if (uuid_is_null(&info->vm[i].uuid))
return;
seq_printf(m, "VM%02d UUID: %pUb\n", i, &info->vm[i].uuid);
}
static void stsi_3_2_2(struct seq_file *m, struct sysinfo_3_2_2 *info)
{
int i;
if (stsi(info, 3, 2, 2))
return;
for (i = 0; i < info->count; i++) {
EBCASC(info->vm[i].name, sizeof(info->vm[i].name));
EBCASC(info->vm[i].cpi, sizeof(info->vm[i].cpi));
seq_putc(m, '\n');
seq_printf(m, "VM%02d Name: %-8.8s\n", i, info->vm[i].name);
seq_printf(m, "VM%02d Control Program: %-16.16s\n", i, info->vm[i].cpi);
seq_printf(m, "VM%02d Adjustment: %d\n", i, info->vm[i].caf);
seq_printf(m, "VM%02d CPUs Total: %d\n", i, info->vm[i].cpus_total);
seq_printf(m, "VM%02d CPUs Configured: %d\n", i, info->vm[i].cpus_configured);
seq_printf(m, "VM%02d CPUs Standby: %d\n", i, info->vm[i].cpus_standby);
seq_printf(m, "VM%02d CPUs Reserved: %d\n", i, info->vm[i].cpus_reserved);
print_ext_name(m, i, info);
print_uuid(m, i, info);
}
}
static int sysinfo_show(struct seq_file *m, void *v)
{
void *info = (void *)get_zeroed_page(GFP_KERNEL);
int level;
if (!info)
return 0;
level = stsi(NULL, 0, 0, 0);
if (level >= 1)
stsi_1_1_1(m, info);
if (level >= 1)
stsi_15_1_x(m, info);
if (level >= 1)
stsi_1_2_2(m, info);
if (level >= 2)
stsi_2_2_2(m, info);
if (level >= 3)
stsi_3_2_2(m, info);
free_page((unsigned long)info);
return 0;
}
static int __init sysinfo_create_proc(void)
{
proc_create_single("sysinfo", 0444, NULL, sysinfo_show);
return 0;
}
device_initcall(sysinfo_create_proc);
#endif /* CONFIG_PROC_FS */
/*
* Service levels interface.
*/
static DECLARE_RWSEM(service_level_sem);
static LIST_HEAD(service_level_list);
int register_service_level(struct service_level *slr)
{
struct service_level *ptr;
down_write(&service_level_sem);
list_for_each_entry(ptr, &service_level_list, list)
if (ptr == slr) {
up_write(&service_level_sem);
return -EEXIST;
}
list_add_tail(&slr->list, &service_level_list);
up_write(&service_level_sem);
return 0;
}
EXPORT_SYMBOL(register_service_level);
int unregister_service_level(struct service_level *slr)
{
struct service_level *ptr, *next;
int rc = -ENOENT;
down_write(&service_level_sem);
list_for_each_entry_safe(ptr, next, &service_level_list, list) {
if (ptr != slr)
continue;
list_del(&ptr->list);
rc = 0;
break;
}
up_write(&service_level_sem);
return rc;
}
EXPORT_SYMBOL(unregister_service_level);
static void *service_level_start(struct seq_file *m, loff_t *pos)
{
down_read(&service_level_sem);
return seq_list_start(&service_level_list, *pos);
}
static void *service_level_next(struct seq_file *m, void *p, loff_t *pos)
{
return seq_list_next(p, &service_level_list, pos);
}
static void service_level_stop(struct seq_file *m, void *p)
{
up_read(&service_level_sem);
}
static int service_level_show(struct seq_file *m, void *p)
{
struct service_level *slr;
slr = list_entry(p, struct service_level, list);
slr->seq_print(m, slr);
return 0;
}
static const struct seq_operations service_level_seq_ops = {
.start = service_level_start,
.next = service_level_next,
.stop = service_level_stop,
.show = service_level_show
};
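/*
 * Default z/VM entry for /proc/service_levels: print the first line of
 * the CP "QUERY CPLEVEL" response, prefixed with "VM: ".
 */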
static void service_level_vm_print(struct seq_file *m,
struct service_level *slr)
{
char *query_buffer, *str;
query_buffer = kmalloc(1024, GFP_KERNEL | GFP_DMA);
if (!query_buffer)
return;
cpcmd("QUERY CPLEVEL", query_buffer, 1024, NULL);
str = strchr(query_buffer, '\n');
if (str)
*str = 0;
seq_printf(m, "VM: %s\n", query_buffer);
kfree(query_buffer);
}
static struct service_level service_level_vm = {
.seq_print = service_level_vm_print
};
static __init int create_proc_service_level(void)
{
proc_create_seq("service_levels", 0, NULL, &service_level_seq_ops);
if (MACHINE_IS_VM)
register_service_level(&service_level_vm);
return 0;
}
subsys_initcall(create_proc_service_level);
/*
* CPU capability might have changed. Therefore recalculate loops_per_jiffy.
*/
void s390_adjust_jiffies(void)
{
struct sysinfo_1_2_2 *info;
unsigned long capability;
struct kernel_fpu fpu;
info = (void *) get_zeroed_page(GFP_KERNEL);
if (!info)
return;
if (stsi(info, 1, 2, 2) == 0) {
/*
* Major sigh. The cpu capability encoding is "special".
* If the first 9 bits of info->capability are 0 then it
* is a 32 bit unsigned integer in the range 0 .. 2^23.
* If the first 9 bits are != 0 then it is a 32 bit float.
* In addition a lower value indicates a proportionally
* higher cpu capacity. Bogomips are the other way round.
* To get to a halfway suitable number we divide 1e7
* by the cpu capability number. Yes, that means a floating
* point division ..
*/
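/*
 * Illustrative example (made-up number, not taken from the architecture
 * documentation): an integer-encoded cpu capability of 1000 yields
 * 10000000 / 1000 = 10000, so calibrate_delay() below reports
 * 10000.00 BogoMIPS, independent of HZ.
 */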
kernel_fpu_begin(&fpu, KERNEL_FPR);
asm volatile(
" sfpc %3\n"
" l %0,%1\n"
" tmlh %0,0xff80\n"
" jnz 0f\n"
" cefbr %%f2,%0\n"
" j 1f\n"
"0: le %%f2,%1\n"
"1: cefbr %%f0,%2\n"
" debr %%f0,%%f2\n"
" cgebr %0,5,%%f0\n"
: "=&d" (capability)
: "Q" (info->capability), "d" (10000000), "d" (0)
: "cc"
);
kernel_fpu_end(&fpu, KERNEL_FPR);
} else
/*
* Really old machine without stsi block for basic
* cpu information. Report 42.0 bogomips.
*/
capability = 42;
loops_per_jiffy = capability * (500000/HZ);
free_page((unsigned long) info);
}
/*
* calibrate the delay loop
*/
void calibrate_delay(void)
{
s390_adjust_jiffies();
/* Print the good old Bogomips line .. */
printk(KERN_DEBUG "Calibrating delay loop (skipped)... "
"%lu.%02lu BogoMIPS preset\n", loops_per_jiffy/(500000/HZ),
(loops_per_jiffy/(5000/HZ)) % 100);
}
#ifdef CONFIG_DEBUG_FS
#define STSI_FILE(fc, s1, s2) \
static int stsi_open_##fc##_##s1##_##s2(struct inode *inode, struct file *file)\
{ \
file->private_data = (void *) get_zeroed_page(GFP_KERNEL); \
if (!file->private_data) \
return -ENOMEM; \
if (stsi(file->private_data, fc, s1, s2)) { \
free_page((unsigned long)file->private_data); \
file->private_data = NULL; \
return -EACCES; \
} \
return nonseekable_open(inode, file); \
} \
\
static const struct file_operations stsi_##fc##_##s1##_##s2##_fs_ops = { \
.open = stsi_open_##fc##_##s1##_##s2, \
.release = stsi_release, \
.read = stsi_read, \
.llseek = no_llseek, \
};
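/*
 * Each STSI_FILE(fc, s1, s2) instance below expands to an open handler
 * stsi_open_<fc>_<s1>_<s2>() and a matching stsi_<fc>_<s1>_<s2>_fs_ops
 * structure: open snapshots the STSI block into a zeroed page,
 * stsi_read() copies that page to user space and stsi_release() frees
 * it again.
 */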
static int stsi_release(struct inode *inode, struct file *file)
{
free_page((unsigned long)file->private_data);
return 0;
}
static ssize_t stsi_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
{
return simple_read_from_buffer(buf, size, ppos, file->private_data, PAGE_SIZE);
}
STSI_FILE( 1, 1, 1);
STSI_FILE( 1, 2, 1);
STSI_FILE( 1, 2, 2);
STSI_FILE( 2, 2, 1);
STSI_FILE( 2, 2, 2);
STSI_FILE( 3, 2, 2);
STSI_FILE(15, 1, 2);
STSI_FILE(15, 1, 3);
STSI_FILE(15, 1, 4);
STSI_FILE(15, 1, 5);
STSI_FILE(15, 1, 6);
struct stsi_file {
const struct file_operations *fops;
char *name;
};
static struct stsi_file stsi_file[] __initdata = {
{.fops = &stsi_1_1_1_fs_ops, .name = "1_1_1"},
{.fops = &stsi_1_2_1_fs_ops, .name = "1_2_1"},
{.fops = &stsi_1_2_2_fs_ops, .name = "1_2_2"},
{.fops = &stsi_2_2_1_fs_ops, .name = "2_2_1"},
{.fops = &stsi_2_2_2_fs_ops, .name = "2_2_2"},
{.fops = &stsi_3_2_2_fs_ops, .name = "3_2_2"},
{.fops = &stsi_15_1_2_fs_ops, .name = "15_1_2"},
{.fops = &stsi_15_1_3_fs_ops, .name = "15_1_3"},
{.fops = &stsi_15_1_4_fs_ops, .name = "15_1_4"},
{.fops = &stsi_15_1_5_fs_ops, .name = "15_1_5"},
{.fops = &stsi_15_1_6_fs_ops, .name = "15_1_6"},
};
static u8 stsi_0_0_0;
static __init int stsi_init_debugfs(void)
{
struct dentry *stsi_root;
struct stsi_file *sf;
int lvl, i;
stsi_root = debugfs_create_dir("stsi", arch_debugfs_dir);
lvl = stsi(NULL, 0, 0, 0);
if (lvl > 0)
stsi_0_0_0 = lvl;
debugfs_create_u8("0_0_0", 0400, stsi_root, &stsi_0_0_0);
for (i = 0; i < ARRAY_SIZE(stsi_file); i++) {
sf = &stsi_file[i];
debugfs_create_file(sf->name, 0400, stsi_root, NULL, sf->fops);
}
if (IS_ENABLED(CONFIG_SCHED_TOPOLOGY) && MACHINE_HAS_TOPOLOGY) {
char link_to[10];
sprintf(link_to, "15_1_%d", topology_mnest_limit());
debugfs_create_symlink("topology", stsi_root, link_to);
}
return 0;
}
device_initcall(stsi_init_debugfs);
#endif /* CONFIG_DEBUG_FS */
| linux-master | arch/s390/kernel/sysinfo.c |
// SPDX-License-Identifier: GPL-2.0
/*
* DIAG 0x320 support and certificate store handling
*
* Copyright IBM Corp. 2023
* Author(s): Anastasia Eskova <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/key-type.h>
#include <linux/key.h>
#include <linux/keyctl.h>
#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <crypto/sha2.h>
#include <keys/user-type.h>
#include <asm/debug.h>
#include <asm/diag.h>
#include <asm/ebcdic.h>
#include <asm/sclp.h>
#define DIAG_MAX_RETRIES 10
#define VCE_FLAGS_VALID_MASK 0x80
#define ISM_LEN_DWORDS 4
#define VCSSB_LEN_BYTES 128
#define VCSSB_LEN_NO_CERTS 4
#define VCB_LEN_NO_CERTS 64
#define VC_NAME_LEN_BYTES 64
#define CERT_STORE_KEY_TYPE_NAME "cert_store_key"
#define CERT_STORE_KEYRING_NAME "cert_store"
static debug_info_t *cert_store_dbf;
static debug_info_t *cert_store_hexdump;
#define pr_dbf_msg(fmt, ...) \
debug_sprintf_event(cert_store_dbf, 3, fmt "\n", ## __VA_ARGS__)
enum diag320_subcode {
DIAG320_SUBCODES = 0,
DIAG320_STORAGE = 1,
DIAG320_CERT_BLOCK = 2,
};
enum diag320_rc {
DIAG320_RC_OK = 0x0001,
DIAG320_RC_CS_NOMATCH = 0x0306,
};
/* Verification Certificates Store Support Block (VCSSB). */
struct vcssb {
u32 vcssb_length;
u8 pad_0x04[3];
u8 version;
u8 pad_0x08[8];
u32 cs_token;
u8 pad_0x14[12];
u16 total_vc_index_count;
u16 max_vc_index_count;
u8 pad_0x24[28];
u32 max_vce_length;
u32 max_vcxe_length;
u8 pad_0x48[8];
u32 max_single_vcb_length;
u32 total_vcb_length;
u32 max_single_vcxb_length;
u32 total_vcxb_length;
u8 pad_0x60[32];
} __packed __aligned(8);
/* Verification Certificate Entry (VCE) Header. */
struct vce_header {
u32 vce_length;
u8 flags;
u8 key_type;
u16 vc_index;
u8 vc_name[VC_NAME_LEN_BYTES]; /* EBCDIC */
u8 vc_format;
u8 pad_0x49;
u16 key_id_length;
u8 pad_0x4c;
u8 vc_hash_type;
u16 vc_hash_length;
u8 pad_0x50[4];
u32 vc_length;
u8 pad_0x58[8];
u16 vc_hash_offset;
u16 vc_offset;
u8 pad_0x64[28];
} __packed __aligned(4);
/* Verification Certificate Block (VCB) Header. */
struct vcb_header {
u32 vcb_input_length;
u8 pad_0x04[4];
u16 first_vc_index;
u16 last_vc_index;
u32 pad_0x0c;
u32 cs_token;
u8 pad_0x14[12];
u32 vcb_output_length;
u8 pad_0x24[3];
u8 version;
u16 stored_vc_count;
u16 remaining_vc_count;
u8 pad_0x2c[20];
} __packed __aligned(4);
/* Verification Certificate Block (VCB). */
struct vcb {
struct vcb_header vcb_hdr;
u8 vcb_buf[];
} __packed __aligned(4);
/* Verification Certificate Entry (VCE). */
struct vce {
struct vce_header vce_hdr;
u8 cert_data_buf[];
} __packed __aligned(4);
static void cert_store_key_describe(const struct key *key, struct seq_file *m)
{
char ascii[VC_NAME_LEN_BYTES + 1];
/*
* The first 64 bytes of the key description are the key name in EBCDIC CP 500.
* Convert it to ASCII for displaying in /proc/keys.
*/
strscpy(ascii, key->description, sizeof(ascii));
EBCASC_500(ascii, VC_NAME_LEN_BYTES);
seq_puts(m, ascii);
seq_puts(m, &key->description[VC_NAME_LEN_BYTES]);
if (key_is_positive(key))
seq_printf(m, ": %u", key->datalen);
}
/*
* Certificate store key type takes over properties of
* user key but cannot be updated.
*/
static struct key_type key_type_cert_store_key = {
.name = CERT_STORE_KEY_TYPE_NAME,
.preparse = user_preparse,
.free_preparse = user_free_preparse,
.instantiate = generic_key_instantiate,
.revoke = user_revoke,
.destroy = user_destroy,
.describe = cert_store_key_describe,
.read = user_read,
};
/* Logging functions. */
static void pr_dbf_vcb(const struct vcb *b)
{
pr_dbf_msg("VCB Header:");
pr_dbf_msg("vcb_input_length: %d", b->vcb_hdr.vcb_input_length);
pr_dbf_msg("first_vc_index: %d", b->vcb_hdr.first_vc_index);
pr_dbf_msg("last_vc_index: %d", b->vcb_hdr.last_vc_index);
pr_dbf_msg("cs_token: %d", b->vcb_hdr.cs_token);
pr_dbf_msg("vcb_output_length: %d", b->vcb_hdr.vcb_output_length);
pr_dbf_msg("version: %d", b->vcb_hdr.version);
pr_dbf_msg("stored_vc_count: %d", b->vcb_hdr.stored_vc_count);
pr_dbf_msg("remaining_vc_count: %d", b->vcb_hdr.remaining_vc_count);
}
static void pr_dbf_vce(const struct vce *e)
{
unsigned char vc_name[VC_NAME_LEN_BYTES + 1];
char log_string[VC_NAME_LEN_BYTES + 40];
pr_dbf_msg("VCE Header:");
pr_dbf_msg("vce_hdr.vce_length: %d", e->vce_hdr.vce_length);
pr_dbf_msg("vce_hdr.flags: %d", e->vce_hdr.flags);
pr_dbf_msg("vce_hdr.key_type: %d", e->vce_hdr.key_type);
pr_dbf_msg("vce_hdr.vc_index: %d", e->vce_hdr.vc_index);
pr_dbf_msg("vce_hdr.vc_format: %d", e->vce_hdr.vc_format);
pr_dbf_msg("vce_hdr.key_id_length: %d", e->vce_hdr.key_id_length);
pr_dbf_msg("vce_hdr.vc_hash_type: %d", e->vce_hdr.vc_hash_type);
pr_dbf_msg("vce_hdr.vc_hash_length: %d", e->vce_hdr.vc_hash_length);
pr_dbf_msg("vce_hdr.vc_hash_offset: %d", e->vce_hdr.vc_hash_offset);
pr_dbf_msg("vce_hdr.vc_length: %d", e->vce_hdr.vc_length);
pr_dbf_msg("vce_hdr.vc_offset: %d", e->vce_hdr.vc_offset);
/* Certificate name in ASCII. */
memcpy(vc_name, e->vce_hdr.vc_name, VC_NAME_LEN_BYTES);
EBCASC_500(vc_name, VC_NAME_LEN_BYTES);
vc_name[VC_NAME_LEN_BYTES] = '\0';
snprintf(log_string, sizeof(log_string),
"index: %d vce_hdr.vc_name (ASCII): %s",
e->vce_hdr.vc_index, vc_name);
debug_text_event(cert_store_hexdump, 3, log_string);
/* Certificate data. */
debug_text_event(cert_store_hexdump, 3, "VCE: Certificate data start");
debug_event(cert_store_hexdump, 3, (u8 *)e->cert_data_buf, 128);
debug_text_event(cert_store_hexdump, 3, "VCE: Certificate data end");
debug_event(cert_store_hexdump, 3,
(u8 *)e->cert_data_buf + e->vce_hdr.vce_length - 128, 128);
}
static void pr_dbf_vcssb(const struct vcssb *s)
{
debug_text_event(cert_store_hexdump, 3, "DIAG320 Subcode1");
debug_event(cert_store_hexdump, 3, (u8 *)s, VCSSB_LEN_BYTES);
pr_dbf_msg("VCSSB:");
pr_dbf_msg("vcssb_length: %u", s->vcssb_length);
pr_dbf_msg("version: %u", s->version);
pr_dbf_msg("cs_token: %u", s->cs_token);
pr_dbf_msg("total_vc_index_count: %u", s->total_vc_index_count);
pr_dbf_msg("max_vc_index_count: %u", s->max_vc_index_count);
pr_dbf_msg("max_vce_length: %u", s->max_vce_length);
pr_dbf_msg("max_vcxe_length: %u", s->max_vce_length);
pr_dbf_msg("max_single_vcb_length: %u", s->max_single_vcb_length);
pr_dbf_msg("total_vcb_length: %u", s->total_vcb_length);
pr_dbf_msg("max_single_vcxb_length: %u", s->max_single_vcxb_length);
pr_dbf_msg("total_vcxb_length: %u", s->total_vcxb_length);
}
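/*
 * DIAG 0x320 takes the buffer address in the even register of the
 * register pair and returns the response code in the odd register,
 * hence the "+d" constraint on rp.pair and the rp.odd return value.
 */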
static int __diag320(unsigned long subcode, void *addr)
{
union register_pair rp = { .even = (unsigned long)addr, };
asm volatile(
" diag %[rp],%[subcode],0x320\n"
"0: nopr %%r7\n"
EX_TABLE(0b, 0b)
: [rp] "+d" (rp.pair)
: [subcode] "d" (subcode)
: "cc", "memory");
return rp.odd;
}
static int diag320(unsigned long subcode, void *addr)
{
diag_stat_inc(DIAG_STAT_X320);
return __diag320(subcode, addr);
}
/*
* Calculate SHA256 hash of the VCE certificate and compare it to hash stored in
* VCE. Return -EINVAL if hashes don't match.
*/
static int check_certificate_hash(const struct vce *vce)
{
u8 hash[SHA256_DIGEST_SIZE];
u16 vc_hash_length;
u8 *vce_hash;
vce_hash = (u8 *)vce + vce->vce_hdr.vc_hash_offset;
vc_hash_length = vce->vce_hdr.vc_hash_length;
sha256((u8 *)vce + vce->vce_hdr.vc_offset, vce->vce_hdr.vc_length, hash);
if (memcmp(vce_hash, hash, vc_hash_length) == 0)
return 0;
pr_dbf_msg("SHA256 hash of received certificate does not match");
debug_text_event(cert_store_hexdump, 3, "VCE hash:");
debug_event(cert_store_hexdump, 3, vce_hash, SHA256_DIGEST_SIZE);
debug_text_event(cert_store_hexdump, 3, "Calculated hash:");
debug_event(cert_store_hexdump, 3, hash, SHA256_DIGEST_SIZE);
return -EINVAL;
}
static int check_certificate_valid(const struct vce *vce)
{
if (!(vce->vce_hdr.flags & VCE_FLAGS_VALID_MASK)) {
pr_dbf_msg("Certificate entry is invalid");
return -EINVAL;
}
if (vce->vce_hdr.vc_format != 1) {
pr_dbf_msg("Certificate format is not supported");
return -EINVAL;
}
if (vce->vce_hdr.vc_hash_type != 1) {
pr_dbf_msg("Hash type is not supported");
return -EINVAL;
}
return check_certificate_hash(vce);
}
static struct key *get_user_session_keyring(void)
{
key_ref_t us_keyring_ref;
us_keyring_ref = lookup_user_key(KEY_SPEC_USER_SESSION_KEYRING,
KEY_LOOKUP_CREATE, KEY_NEED_LINK);
if (IS_ERR(us_keyring_ref)) {
pr_dbf_msg("Couldn't get user session keyring: %ld",
PTR_ERR(us_keyring_ref));
return ERR_PTR(-ENOKEY);
}
key_ref_put(us_keyring_ref);
return key_ref_to_ptr(us_keyring_ref);
}
/* Invalidate all keys from cert_store keyring. */
static int invalidate_keyring_keys(struct key *keyring)
{
unsigned long num_keys, key_index;
size_t keyring_payload_len;
key_serial_t *key_array;
struct key *current_key;
int rc;
keyring_payload_len = key_type_keyring.read(keyring, NULL, 0);
num_keys = keyring_payload_len / sizeof(key_serial_t);
key_array = kcalloc(num_keys, sizeof(key_serial_t), GFP_KERNEL);
if (!key_array)
return -ENOMEM;
rc = key_type_keyring.read(keyring, (char *)key_array, keyring_payload_len);
if (rc != keyring_payload_len) {
pr_dbf_msg("Couldn't read keyring payload");
goto out;
}
for (key_index = 0; key_index < num_keys; key_index++) {
current_key = key_lookup(key_array[key_index]);
pr_dbf_msg("Invalidating key %08x", current_key->serial);
key_invalidate(current_key);
key_put(current_key);
rc = key_unlink(keyring, current_key);
if (rc) {
pr_dbf_msg("Couldn't unlink key %08x: %d", current_key->serial, rc);
break;
}
}
out:
kfree(key_array);
return rc;
}
static struct key *find_cs_keyring(void)
{
key_ref_t cs_keyring_ref;
struct key *cs_keyring;
cs_keyring_ref = keyring_search(make_key_ref(get_user_session_keyring(), true),
&key_type_keyring, CERT_STORE_KEYRING_NAME,
false);
if (!IS_ERR(cs_keyring_ref)) {
cs_keyring = key_ref_to_ptr(cs_keyring_ref);
key_ref_put(cs_keyring_ref);
goto found;
}
/* Search default locations: thread, process, session keyrings */
cs_keyring = request_key(&key_type_keyring, CERT_STORE_KEYRING_NAME, NULL);
if (IS_ERR(cs_keyring))
return NULL;
key_put(cs_keyring);
found:
return cs_keyring;
}
static void cleanup_cs_keys(void)
{
struct key *cs_keyring;
cs_keyring = find_cs_keyring();
if (!cs_keyring)
return;
pr_dbf_msg("Found cert_store keyring. Purging...");
/*
* Remove cert_store_key_type in case invalidation
* of old cert_store keys failed (= severe error).
*/
if (invalidate_keyring_keys(cs_keyring))
unregister_key_type(&key_type_cert_store_key);
keyring_clear(cs_keyring);
key_invalidate(cs_keyring);
key_put(cs_keyring);
key_unlink(get_user_session_keyring(), cs_keyring);
}
static struct key *create_cs_keyring(void)
{
static struct key *cs_keyring;
/* Cleanup previous cs_keyring and all associated keys if any. */
cleanup_cs_keys();
cs_keyring = keyring_alloc(CERT_STORE_KEYRING_NAME, GLOBAL_ROOT_UID,
GLOBAL_ROOT_GID, current_cred(),
(KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW | KEY_USR_READ,
KEY_ALLOC_NOT_IN_QUOTA | KEY_ALLOC_SET_KEEP,
NULL, get_user_session_keyring());
if (IS_ERR(cs_keyring)) {
pr_dbf_msg("Can't allocate cert_store keyring");
return NULL;
}
pr_dbf_msg("Successfully allocated cert_store keyring: %08x", cs_keyring->serial);
/*
* In case a previous clean-up ran into an
* error and unregistered key type.
*/
register_key_type(&key_type_cert_store_key);
return cs_keyring;
}
/*
* Allocate memory and create key description in format
* [key name in EBCDIC]:[VCE index]:[CS token].
* Return a pointer to key description or NULL if memory
* allocation failed. Memory should be freed by caller.
*/
static char *get_key_description(struct vcssb *vcssb, const struct vce *vce)
{
size_t len, name_len;
u32 cs_token;
char *desc;
cs_token = vcssb->cs_token;
/* Description string contains "%64s:%05u:%010u\0". */
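/* That is 64 name bytes + ':' + 5 digits + ':' + 10 digits + '\0' = 82. */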
name_len = sizeof(vce->vce_hdr.vc_name);
len = name_len + 1 + 5 + 1 + 10 + 1;
desc = kmalloc(len, GFP_KERNEL);
if (!desc)
return NULL;
memcpy(desc, vce->vce_hdr.vc_name, name_len);
snprintf(desc + name_len, len - name_len, ":%05u:%010u",
vce->vce_hdr.vc_index, cs_token);
return desc;
}
/*
* Create a key of type "cert_store_key" using the data from VCE for key
* payload and key description. Link the key to "cert_store" keyring.
*/
static int create_key_from_vce(struct vcssb *vcssb, struct vce *vce,
struct key *keyring)
{
key_ref_t newkey;
char *desc;
int rc;
desc = get_key_description(vcssb, vce);
if (!desc)
return -ENOMEM;
newkey = key_create_or_update(
make_key_ref(keyring, true), CERT_STORE_KEY_TYPE_NAME,
desc, (u8 *)vce + vce->vce_hdr.vc_offset,
vce->vce_hdr.vc_length,
(KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW | KEY_USR_READ,
KEY_ALLOC_NOT_IN_QUOTA);
rc = PTR_ERR_OR_ZERO(newkey);
if (rc) {
pr_dbf_msg("Couldn't create a key from Certificate Entry (%d)", rc);
rc = -ENOKEY;
goto out;
}
key_ref_put(newkey);
out:
kfree(desc);
return rc;
}
/* Get Verification Certificate Storage Size block with DIAG320 subcode 1. */
static int get_vcssb(struct vcssb *vcssb)
{
int diag320_rc;
memset(vcssb, 0, sizeof(*vcssb));
vcssb->vcssb_length = VCSSB_LEN_BYTES;
diag320_rc = diag320(DIAG320_STORAGE, vcssb);
pr_dbf_vcssb(vcssb);
if (diag320_rc != DIAG320_RC_OK) {
pr_dbf_msg("Diag 320 Subcode 1 returned bad RC: %04x", diag320_rc);
return -EIO;
}
if (vcssb->vcssb_length == VCSSB_LEN_NO_CERTS) {
pr_dbf_msg("No certificates available for current configuration");
return -ENOKEY;
}
return 0;
}
static u32 get_4k_mult_vcb_size(struct vcssb *vcssb)
{
return round_up(vcssb->max_single_vcb_length, PAGE_SIZE);
}
/* Fill input fields of single-entry VCB that will be read by LPAR. */
static void fill_vcb_input(struct vcssb *vcssb, struct vcb *vcb, u16 index)
{
memset(vcb, 0, sizeof(*vcb));
vcb->vcb_hdr.vcb_input_length = get_4k_mult_vcb_size(vcssb);
vcb->vcb_hdr.cs_token = vcssb->cs_token;
/* Request single entry. */
vcb->vcb_hdr.first_vc_index = index;
vcb->vcb_hdr.last_vc_index = index;
}
static void extract_vce_from_sevcb(struct vcb *vcb, struct vce *vce)
{
struct vce *extracted_vce;
extracted_vce = (struct vce *)vcb->vcb_buf;
memcpy(vce, vcb->vcb_buf, extracted_vce->vce_hdr.vce_length);
pr_dbf_vce(vce);
}
static int get_sevcb(struct vcssb *vcssb, u16 index, struct vcb *vcb)
{
int rc, diag320_rc;
fill_vcb_input(vcssb, vcb, index);
diag320_rc = diag320(DIAG320_CERT_BLOCK, vcb);
pr_dbf_msg("Diag 320 Subcode2 RC %2x", diag320_rc);
pr_dbf_vcb(vcb);
switch (diag320_rc) {
case DIAG320_RC_OK:
rc = 0;
if (vcb->vcb_hdr.vcb_output_length == VCB_LEN_NO_CERTS) {
pr_dbf_msg("No certificate entry for index %u", index);
rc = -ENOKEY;
} else if (vcb->vcb_hdr.remaining_vc_count != 0) {
/* Retry on insufficient space. */
pr_dbf_msg("Couldn't get all requested certificates");
rc = -EAGAIN;
}
break;
case DIAG320_RC_CS_NOMATCH:
pr_dbf_msg("Certificate Store token mismatch");
rc = -EAGAIN;
break;
default:
pr_dbf_msg("Diag 320 Subcode2 returned bad rc (0x%4x)", diag320_rc);
rc = -EINVAL;
break;
}
return rc;
}
/*
* Allocate memory for single-entry VCB, get VCB via DIAG320 subcode 2 call,
* extract the VCE and create a key from its certificate.
*/
static int create_key_from_sevcb(struct vcssb *vcssb, u16 index,
struct key *keyring)
{
struct vcb *vcb;
struct vce *vce;
int rc;
rc = -ENOMEM;
vcb = vmalloc(get_4k_mult_vcb_size(vcssb));
vce = vmalloc(vcssb->max_single_vcb_length - sizeof(vcb->vcb_hdr));
if (!vcb || !vce)
goto out;
rc = get_sevcb(vcssb, index, vcb);
if (rc)
goto out;
extract_vce_from_sevcb(vcb, vce);
rc = check_certificate_valid(vce);
if (rc)
goto out;
rc = create_key_from_vce(vcssb, vce, keyring);
if (rc)
goto out;
pr_dbf_msg("Successfully created key from Certificate Entry %d", index);
out:
vfree(vce);
vfree(vcb);
return rc;
}
/*
* Request a single-entry VCB for each VCE available for the partition.
* Create a key from it and link it to cert_store keyring. If no keys
* could be created (i.e. VCEs were invalid) return -ENOKEY.
*/
static int add_certificates_to_keyring(struct vcssb *vcssb, struct key *keyring)
{
int rc, index, count, added;
count = 0;
added = 0;
/* Certificate Store entry indices start at 1 and have no gaps. */
for (index = 1; index < vcssb->total_vc_index_count + 1; index++) {
pr_dbf_msg("Creating key from VCE %u", index);
rc = create_key_from_sevcb(vcssb, index, keyring);
count++;
if (rc == -EAGAIN)
return rc;
if (rc)
pr_dbf_msg("Creating key from VCE %u failed (%d)", index, rc);
else
added++;
}
if (added == 0) {
pr_dbf_msg("Processed %d entries. No keys created", count);
return -ENOKEY;
}
pr_info("Added %d of %d keys to cert_store keyring", added, count);
/*
* Do not allow linking more keys to the certificate store keyring after
* all the VCEs have been processed.
*/
rc = keyring_restrict(make_key_ref(keyring, true), NULL, NULL);
if (rc)
pr_dbf_msg("Failed to set restriction to cert_store keyring (%d)", rc);
return 0;
}
/*
* Check which DIAG320 subcodes are installed.
* Return -ENOENT if subcodes 1 or 2 are not available.
*/
static int query_diag320_subcodes(void)
{
unsigned long ism[ISM_LEN_DWORDS];
int rc;
rc = diag320(0, ism);
if (rc != DIAG320_RC_OK) {
pr_dbf_msg("DIAG320 subcode query returned %04x", rc);
return -ENOENT;
}
debug_text_event(cert_store_hexdump, 3, "DIAG320 Subcode 0");
debug_event(cert_store_hexdump, 3, ism, sizeof(ism));
if (!test_bit_inv(1, ism) || !test_bit_inv(2, ism)) {
pr_dbf_msg("Not all required DIAG320 subcodes are installed");
return -ENOENT;
}
return 0;
}
/*
* Check if Certificate Store is supported by the firmware and DIAG320 subcodes
* 1 and 2 are installed. Create cert_store keyring and link all certificates
* available for the current partition to it as "cert_store_key" type
* keys. On refresh or error invalidate cert_store keyring and destroy
* all keys of "cert_store_key" type.
*/
static int fill_cs_keyring(void)
{
struct key *cs_keyring;
struct vcssb *vcssb;
int rc;
rc = -ENOMEM;
vcssb = kmalloc(VCSSB_LEN_BYTES, GFP_KERNEL);
if (!vcssb)
goto cleanup_keys;
rc = -ENOENT;
if (!sclp.has_diag320) {
pr_dbf_msg("Certificate Store is not supported");
goto cleanup_keys;
}
rc = query_diag320_subcodes();
if (rc)
goto cleanup_keys;
rc = get_vcssb(vcssb);
if (rc)
goto cleanup_keys;
rc = -ENOMEM;
cs_keyring = create_cs_keyring();
if (!cs_keyring)
goto cleanup_keys;
rc = add_certificates_to_keyring(vcssb, cs_keyring);
if (rc)
goto cleanup_cs_keyring;
goto out;
cleanup_cs_keyring:
key_put(cs_keyring);
cleanup_keys:
cleanup_cs_keys();
out:
kfree(vcssb);
return rc;
}
static DEFINE_MUTEX(cs_refresh_lock);
static int cs_status_val = -1;
static ssize_t cs_status_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
if (cs_status_val == -1)
return sysfs_emit(buf, "uninitialized\n");
else if (cs_status_val == 0)
return sysfs_emit(buf, "ok\n");
return sysfs_emit(buf, "failed (%d)\n", cs_status_val);
}
static struct kobj_attribute cs_status_attr = __ATTR_RO(cs_status);
static ssize_t refresh_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
int rc, retries;
pr_dbf_msg("Refresh certificate store information requested");
rc = mutex_lock_interruptible(&cs_refresh_lock);
if (rc)
return rc;
for (retries = 0; retries < DIAG_MAX_RETRIES; retries++) {
/* Request certificates from certificate store. */
rc = fill_cs_keyring();
if (rc)
pr_dbf_msg("Failed to refresh certificate store information (%d)", rc);
if (rc != -EAGAIN)
break;
}
cs_status_val = rc;
mutex_unlock(&cs_refresh_lock);
return rc ?: count;
}
static struct kobj_attribute refresh_attr = __ATTR_WO(refresh);
static const struct attribute *cert_store_attrs[] __initconst = {
&cs_status_attr.attr,
&refresh_attr.attr,
NULL,
};
static struct kobject *cert_store_kobj;
static int __init cert_store_init(void)
{
int rc = -ENOMEM;
cert_store_dbf = debug_register("cert_store_msg", 10, 1, 64);
if (!cert_store_dbf)
goto cleanup_dbf;
cert_store_hexdump = debug_register("cert_store_hexdump", 3, 1, 128);
if (!cert_store_hexdump)
goto cleanup_dbf;
debug_register_view(cert_store_hexdump, &debug_hex_ascii_view);
debug_register_view(cert_store_dbf, &debug_sprintf_view);
/* Create directory /sys/firmware/cert_store. */
cert_store_kobj = kobject_create_and_add("cert_store", firmware_kobj);
if (!cert_store_kobj)
goto cleanup_dbf;
rc = sysfs_create_files(cert_store_kobj, cert_store_attrs);
if (rc)
goto cleanup_kobj;
register_key_type(&key_type_cert_store_key);
return rc;
cleanup_kobj:
kobject_put(cert_store_kobj);
cleanup_dbf:
debug_unregister(cert_store_dbf);
debug_unregister(cert_store_hexdump);
return rc;
}
device_initcall(cert_store_init);
| linux-master | arch/s390/kernel/cert_store.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Idle functions for s390.
*
* Copyright IBM Corp. 2014
*
* Author(s): Martin Schwidefsky <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <trace/events/power.h>
#include <asm/cpu_mf.h>
#include <asm/cputime.h>
#include <asm/nmi.h>
#include <asm/smp.h>
#include "entry.h"
static DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
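/*
 * Called when an interrupt ends an enabled wait: fold the waiting time
 * into the per-CPU idle statistics and bring the lowcore clock/timer
 * bookkeeping (and, with SMT, the MT-diagnostic cycle counters) up to
 * date for the interrupted interval.
 */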
void account_idle_time_irq(void)
{
struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
unsigned long idle_time;
u64 cycles_new[8];
int i;
if (smp_cpu_mtid) {
stcctm(MT_DIAG, smp_cpu_mtid, cycles_new);
for (i = 0; i < smp_cpu_mtid; i++)
this_cpu_add(mt_cycles[i], cycles_new[i] - idle->mt_cycles_enter[i]);
}
idle_time = S390_lowcore.int_clock - idle->clock_idle_enter;
S390_lowcore.steal_timer += idle->clock_idle_enter - S390_lowcore.last_update_clock;
S390_lowcore.last_update_clock = S390_lowcore.int_clock;
S390_lowcore.system_timer += S390_lowcore.last_update_timer - idle->timer_idle_enter;
S390_lowcore.last_update_timer = S390_lowcore.sys_enter_timer;
/* Account time spent with enabled wait psw loaded as idle time. */
WRITE_ONCE(idle->idle_time, READ_ONCE(idle->idle_time) + idle_time);
WRITE_ONCE(idle->idle_count, READ_ONCE(idle->idle_count) + 1);
account_idle_time(cputime_to_nsecs(idle_time));
}
void noinstr arch_cpu_idle(void)
{
struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
unsigned long psw_mask;
/* Wait for external, I/O or machine check interrupt. */
psw_mask = PSW_KERNEL_BITS | PSW_MASK_WAIT |
PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
clear_cpu_flag(CIF_NOHZ_DELAY);
/* psw_idle() returns with interrupts disabled. */
psw_idle(idle, psw_mask);
}
static ssize_t show_idle_count(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
return sysfs_emit(buf, "%lu\n", READ_ONCE(idle->idle_count));
}
DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);
static ssize_t show_idle_time(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
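/* idle_time is in TOD clock units; shifting by 12 yields microseconds. */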
return sysfs_emit(buf, "%lu\n", READ_ONCE(idle->idle_time) >> 12);
}
DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);
void arch_cpu_idle_enter(void)
{
}
void arch_cpu_idle_exit(void)
{
}
void __noreturn arch_cpu_idle_dead(void)
{
cpu_die();
}
| linux-master | arch/s390/kernel/idle.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Kernel Probes (KProbes)
*
* Copyright IBM Corp. 2002, 2006
*
* s390 port, used ppc64 as template. Mike Grundy <[email protected]>
*/
#define pr_fmt(fmt) "kprobes: " fmt
#include <linux/moduleloader.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/stop_machine.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <linux/extable.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <asm/set_memory.h>
#include <asm/sections.h>
#include <asm/dis.h>
#include "kprobes.h"
#include "entry.h"
DEFINE_PER_CPU(struct kprobe *, current_kprobe);
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
struct kretprobe_blackpoint kretprobe_blacklist[] = { };
static int insn_page_in_use;
void *alloc_insn_page(void)
{
void *page;
page = module_alloc(PAGE_SIZE);
if (!page)
return NULL;
set_memory_rox((unsigned long)page, 1);
return page;
}
static void *alloc_s390_insn_page(void)
{
if (xchg(&insn_page_in_use, 1) == 1)
return NULL;
return &kprobes_insn_page;
}
static void free_s390_insn_page(void *page)
{
xchg(&insn_page_in_use, 0);
}
struct kprobe_insn_cache kprobe_s390_insn_slots = {
.mutex = __MUTEX_INITIALIZER(kprobe_s390_insn_slots.mutex),
.alloc = alloc_s390_insn_page,
.free = free_s390_insn_page,
.pages = LIST_HEAD_INIT(kprobe_s390_insn_slots.pages),
.insn_size = MAX_INSN_SIZE,
};
static void copy_instruction(struct kprobe *p)
{
kprobe_opcode_t insn[MAX_INSN_SIZE];
s64 disp, new_disp;
u64 addr, new_addr;
unsigned int len;
len = insn_length(*p->addr >> 8);
memcpy(&insn, p->addr, len);
p->opcode = insn[0];
if (probe_is_insn_relative_long(&insn[0])) {
/*
* For pc-relative instructions in RIL-b or RIL-c format patch
* the RI2 displacement field. We have already made sure that
* the insn slot for the patched instruction is within the same
* 2GB area as the original instruction (either kernel image or
* module area). Therefore the new displacement will always fit.
*/
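/*
 * Example: an instruction at addr branching to target = addr + 2 * disp
 * still reaches the same target from the insn slot, because
 * new_addr + 2 * new_disp == addr + 2 * disp by construction below.
 */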
disp = *(s32 *)&insn[1];
addr = (u64)(unsigned long)p->addr;
new_addr = (u64)(unsigned long)p->ainsn.insn;
new_disp = ((addr + (disp * 2)) - new_addr) / 2;
*(s32 *)&insn[1] = new_disp;
}
s390_kernel_write(p->ainsn.insn, &insn, len);
}
NOKPROBE_SYMBOL(copy_instruction);
static int s390_get_insn_slot(struct kprobe *p)
{
/*
* Get an insn slot that is within the same 2GB area like the original
* instruction. That way instructions with a 32bit signed displacement
* field can be patched and executed within the insn slot.
*/
p->ainsn.insn = NULL;
if (is_kernel((unsigned long)p->addr))
p->ainsn.insn = get_s390_insn_slot();
else if (is_module_addr(p->addr))
p->ainsn.insn = get_insn_slot();
return p->ainsn.insn ? 0 : -ENOMEM;
}
NOKPROBE_SYMBOL(s390_get_insn_slot);
static void s390_free_insn_slot(struct kprobe *p)
{
if (!p->ainsn.insn)
return;
if (is_kernel((unsigned long)p->addr))
free_s390_insn_slot(p->ainsn.insn, 0);
else
free_insn_slot(p->ainsn.insn, 0);
p->ainsn.insn = NULL;
}
NOKPROBE_SYMBOL(s390_free_insn_slot);
/* Check if paddr is at an instruction boundary */
static bool can_probe(unsigned long paddr)
{
unsigned long addr, offset = 0;
kprobe_opcode_t insn;
struct kprobe *kp;
if (paddr & 0x01)
return false;
if (!kallsyms_lookup_size_offset(paddr, NULL, &offset))
return false;
/* Decode instructions */
addr = paddr - offset;
while (addr < paddr) {
if (copy_from_kernel_nofault(&insn, (void *)addr, sizeof(insn)))
return false;
if (insn >> 8 == 0) {
if (insn != BREAKPOINT_INSTRUCTION) {
/*
* Note that QEMU inserts opcode 0x0000 to implement
* software breakpoints for guests. Since the size of
* the original instruction is unknown, stop following
* instructions and prevent setting a kprobe.
*/
return false;
}
/*
* Check if the instruction has been modified by another
* kprobe, in which case the original instruction is
* decoded.
*/
kp = get_kprobe((void *)addr);
if (!kp) {
/* not a kprobe */
return false;
}
insn = kp->opcode;
}
addr += insn_length(insn >> 8);
}
return addr == paddr;
}
int arch_prepare_kprobe(struct kprobe *p)
{
if (!can_probe((unsigned long)p->addr))
return -EINVAL;
/* Make sure the probe isn't going on a difficult instruction */
if (probe_is_prohibited_opcode(p->addr))
return -EINVAL;
if (s390_get_insn_slot(p))
return -ENOMEM;
copy_instruction(p);
return 0;
}
NOKPROBE_SYMBOL(arch_prepare_kprobe);
struct swap_insn_args {
struct kprobe *p;
unsigned int arm_kprobe : 1;
};
static int swap_instruction(void *data)
{
struct swap_insn_args *args = data;
struct kprobe *p = args->p;
u16 opc;
opc = args->arm_kprobe ? BREAKPOINT_INSTRUCTION : p->opcode;
s390_kernel_write(p->addr, &opc, sizeof(opc));
return 0;
}
NOKPROBE_SYMBOL(swap_instruction);
void arch_arm_kprobe(struct kprobe *p)
{
struct swap_insn_args args = {.p = p, .arm_kprobe = 1};
stop_machine_cpuslocked(swap_instruction, &args, NULL);
}
NOKPROBE_SYMBOL(arch_arm_kprobe);
void arch_disarm_kprobe(struct kprobe *p)
{
struct swap_insn_args args = {.p = p, .arm_kprobe = 0};
stop_machine_cpuslocked(swap_instruction, &args, NULL);
}
NOKPROBE_SYMBOL(arch_disarm_kprobe);
void arch_remove_kprobe(struct kprobe *p)
{
s390_free_insn_slot(p);
}
NOKPROBE_SYMBOL(arch_remove_kprobe);
static void enable_singlestep(struct kprobe_ctlblk *kcb,
struct pt_regs *regs,
unsigned long ip)
{
struct per_regs per_kprobe;
/* Set up the PER control registers %cr9-%cr11 */
per_kprobe.control = PER_EVENT_IFETCH;
per_kprobe.start = ip;
per_kprobe.end = ip;
/* Save control regs and psw mask */
__ctl_store(kcb->kprobe_saved_ctl, 9, 11);
kcb->kprobe_saved_imask = regs->psw.mask &
(PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT);
/* Set PER control regs, turns on single step for the given address */
__ctl_load(per_kprobe, 9, 11);
regs->psw.mask |= PSW_MASK_PER;
regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
regs->psw.addr = ip;
}
NOKPROBE_SYMBOL(enable_singlestep);
static void disable_singlestep(struct kprobe_ctlblk *kcb,
struct pt_regs *regs,
unsigned long ip)
{
/* Restore control regs and psw mask, set new psw address */
__ctl_load(kcb->kprobe_saved_ctl, 9, 11);
regs->psw.mask &= ~PSW_MASK_PER;
regs->psw.mask |= kcb->kprobe_saved_imask;
regs->psw.addr = ip;
}
NOKPROBE_SYMBOL(disable_singlestep);
/*
* Activate a kprobe by storing its pointer to current_kprobe. The
* previous kprobe is stored in kcb->prev_kprobe. A stack of up to
* two kprobes can be active, see KPROBE_REENTER.
*/
static void push_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *p)
{
kcb->prev_kprobe.kp = __this_cpu_read(current_kprobe);
kcb->prev_kprobe.status = kcb->kprobe_status;
__this_cpu_write(current_kprobe, p);
}
NOKPROBE_SYMBOL(push_kprobe);
/*
* Deactivate a kprobe by backing up to the previous state. If the
* current state is KPROBE_REENTER prev_kprobe.kp will be non-NULL,
* for any other state prev_kprobe.kp will be NULL.
*/
static void pop_kprobe(struct kprobe_ctlblk *kcb)
{
__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
kcb->kprobe_status = kcb->prev_kprobe.status;
kcb->prev_kprobe.kp = NULL;
}
NOKPROBE_SYMBOL(pop_kprobe);
static void kprobe_reenter_check(struct kprobe_ctlblk *kcb, struct kprobe *p)
{
switch (kcb->kprobe_status) {
case KPROBE_HIT_SSDONE:
case KPROBE_HIT_ACTIVE:
kprobes_inc_nmissed_count(p);
break;
case KPROBE_HIT_SS:
case KPROBE_REENTER:
default:
/*
* A kprobe on the code path to single step an instruction
* is a BUG. The code path resides in the .kprobes.text
* section and is executed with interrupts disabled.
*/
pr_err("Failed to recover from reentered kprobes.\n");
dump_kprobe(p);
BUG();
}
}
NOKPROBE_SYMBOL(kprobe_reenter_check);
static int kprobe_handler(struct pt_regs *regs)
{
struct kprobe_ctlblk *kcb;
struct kprobe *p;
/*
* We want to disable preemption for the entire duration of kprobe
* processing. That includes the calls to the pre/post handlers
* and single stepping the kprobe instruction.
*/
preempt_disable();
kcb = get_kprobe_ctlblk();
p = get_kprobe((void *)(regs->psw.addr - 2));
if (p) {
if (kprobe_running()) {
/*
* We have hit a kprobe while another is still
* active. This can happen in the pre and post
* handler. Single step the instruction of the
* new probe but do not call any handler function
* of this secondary kprobe.
* push_kprobe and pop_kprobe saves and restores
* the currently active kprobe.
*/
kprobe_reenter_check(kcb, p);
push_kprobe(kcb, p);
kcb->kprobe_status = KPROBE_REENTER;
} else {
/*
* If we have no pre-handler or it returned 0, we
* continue with single stepping. If we have a
* pre-handler and it returned non-zero, it prepped
* for changing execution path, so get out doing
* nothing more here.
*/
push_kprobe(kcb, p);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
if (p->pre_handler && p->pre_handler(p, regs)) {
pop_kprobe(kcb);
preempt_enable_no_resched();
return 1;
}
kcb->kprobe_status = KPROBE_HIT_SS;
}
enable_singlestep(kcb, regs, (unsigned long) p->ainsn.insn);
return 1;
} /* else:
* No kprobe at this address and no active kprobe. The trap has
* not been caused by a kprobe breakpoint. The race of breakpoint
* vs. kprobe remove does not exist because on s390 as we use
* stop_machine to arm/disarm the breakpoints.
*/
preempt_enable_no_resched();
return 0;
}
NOKPROBE_SYMBOL(kprobe_handler);
/*
* Called after single-stepping. p->addr is the address of the
* instruction whose first two bytes have been replaced by the "breakpoint"
* instruction. To avoid the SMP problems that can occur when we
* temporarily put back the original opcode to single-step, we
* single-stepped a copy of the instruction. The address of this
* copy is p->ainsn.insn.
*/
static void resume_execution(struct kprobe *p, struct pt_regs *regs)
{
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
unsigned long ip = regs->psw.addr;
int fixup = probe_get_fixup_type(p->ainsn.insn);
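/*
 * FIXUP_PSW_NORMAL: rebase the PSW address from the insn slot back into
 * the original code. FIXUP_BRANCH_NOT_TAKEN: if the copied branch fell
 * through, continue after the original instruction instead.
 * FIXUP_RETURN_REGISTER: relocate the return address that a
 * branch-and-save style instruction stored relative to the insn slot.
 */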
if (fixup & FIXUP_PSW_NORMAL)
ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn;
if (fixup & FIXUP_BRANCH_NOT_TAKEN) {
int ilen = insn_length(p->ainsn.insn[0] >> 8);
if (ip - (unsigned long) p->ainsn.insn == ilen)
ip = (unsigned long) p->addr + ilen;
}
if (fixup & FIXUP_RETURN_REGISTER) {
int reg = (p->ainsn.insn[0] & 0xf0) >> 4;
regs->gprs[reg] += (unsigned long) p->addr -
(unsigned long) p->ainsn.insn;
}
disable_singlestep(kcb, regs, ip);
}
NOKPROBE_SYMBOL(resume_execution);
static int post_kprobe_handler(struct pt_regs *regs)
{
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
struct kprobe *p = kprobe_running();
if (!p)
return 0;
resume_execution(p, regs);
if (kcb->kprobe_status != KPROBE_REENTER && p->post_handler) {
kcb->kprobe_status = KPROBE_HIT_SSDONE;
p->post_handler(p, regs, 0);
}
pop_kprobe(kcb);
preempt_enable_no_resched();
/*
* if somebody else is singlestepping across a probe point, psw mask
* will have PER set, in which case, continue the remaining processing
* of do_single_step, as if this is not a probe hit.
*/
if (regs->psw.mask & PSW_MASK_PER)
return 0;
return 1;
}
NOKPROBE_SYMBOL(post_kprobe_handler);
static int kprobe_trap_handler(struct pt_regs *regs, int trapnr)
{
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
struct kprobe *p = kprobe_running();
switch(kcb->kprobe_status) {
case KPROBE_HIT_SS:
case KPROBE_REENTER:
/*
* We are here because the instruction being single
* stepped caused a page fault. We reset the current
* kprobe and the PSW address points back to the probe address
* and allow the page fault handler to continue as a
* normal page fault.
*/
disable_singlestep(kcb, regs, (unsigned long) p->addr);
pop_kprobe(kcb);
preempt_enable_no_resched();
break;
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
/*
* In case the user-specified fault handler returned
* zero, try to fix up.
*/
if (fixup_exception(regs))
return 1;
/*
* fixup_exception() could not handle it,
* Let do_page_fault() fix it.
*/
break;
default:
break;
}
return 0;
}
NOKPROBE_SYMBOL(kprobe_trap_handler);
int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
int ret;
if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
local_irq_disable();
ret = kprobe_trap_handler(regs, trapnr);
if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);
return ret;
}
NOKPROBE_SYMBOL(kprobe_fault_handler);
/*
* Wrapper routine for handling exceptions.
*/
int kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data)
{
struct die_args *args = (struct die_args *) data;
struct pt_regs *regs = args->regs;
int ret = NOTIFY_DONE;
if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
local_irq_disable();
switch (val) {
case DIE_BPT:
if (kprobe_handler(regs))
ret = NOTIFY_STOP;
break;
case DIE_SSTEP:
if (post_kprobe_handler(regs))
ret = NOTIFY_STOP;
break;
case DIE_TRAP:
if (!preemptible() && kprobe_running() &&
kprobe_trap_handler(regs, args->trapnr))
ret = NOTIFY_STOP;
break;
default:
break;
}
if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);
return ret;
}
NOKPROBE_SYMBOL(kprobe_exceptions_notify);
int __init arch_init_kprobes(void)
{
return 0;
}
int arch_trampoline_kprobe(struct kprobe *p)
{
return 0;
}
NOKPROBE_SYMBOL(arch_trampoline_kprobe);
| linux-master | arch/s390/kernel/kprobes.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Virtual cpu timer based timer functions.
*
* Copyright IBM Corp. 2004, 2012
* Author(s): Jan Glauber <[email protected]>
*/
#include <linux/kernel_stat.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/timex.h>
#include <linux/types.h>
#include <linux/time.h>
#include <asm/alternative.h>
#include <asm/cputime.h>
#include <asm/vtimer.h>
#include <asm/vtime.h>
#include <asm/cpu_mf.h>
#include <asm/smp.h>
#include "entry.h"
static void virt_timer_expire(void);
static LIST_HEAD(virt_timer_list);
static DEFINE_SPINLOCK(virt_timer_lock);
static atomic64_t virt_timer_current;
static atomic64_t virt_timer_elapsed;
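/*
 * virt_timer_elapsed accumulates the CPU time consumed since the last
 * expiry run, virt_timer_current caches the expiry value of the first
 * (earliest) timer on the sorted virt_timer_list, so the hot path in
 * virt_timer_forward() gets by with two atomic operations.
 */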
DEFINE_PER_CPU(u64, mt_cycles[8]);
static DEFINE_PER_CPU(u64, mt_scaling_mult) = { 1 };
static DEFINE_PER_CPU(u64, mt_scaling_div) = { 1 };
static DEFINE_PER_CPU(u64, mt_scaling_jiffies);
static inline u64 get_vtimer(void)
{
u64 timer;
asm volatile("stpt %0" : "=Q" (timer));
return timer;
}
static inline void set_vtimer(u64 expires)
{
u64 timer;
asm volatile(
" stpt %0\n" /* Store current cpu timer value */
" spt %1" /* Set new value imm. afterwards */
: "=Q" (timer) : "Q" (expires));
S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
S390_lowcore.last_update_timer = expires;
}
static inline int virt_timer_forward(u64 elapsed)
{
BUG_ON(!irqs_disabled());
if (list_empty(&virt_timer_list))
return 0;
elapsed = atomic64_add_return(elapsed, &virt_timer_elapsed);
return elapsed >= atomic64_read(&virt_timer_current);
}
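/*
 * Recompute the SMT utilization scaling factor from the MT-diagnostic
 * cycle counters. The loop below builds mult and div such that
 * mult / div = (sum_i delta_i / (i + 1)) / (sum_i delta_i), i.e. cycles
 * spent on higher thread IDs are weighted down; scale_vtime() applies
 * this factor to the raw CPU timer deltas.
 */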
static void update_mt_scaling(void)
{
u64 cycles_new[8], *cycles_old;
u64 delta, fac, mult, div;
int i;
stcctm(MT_DIAG, smp_cpu_mtid + 1, cycles_new);
cycles_old = this_cpu_ptr(mt_cycles);
fac = 1;
mult = div = 0;
for (i = 0; i <= smp_cpu_mtid; i++) {
delta = cycles_new[i] - cycles_old[i];
div += delta;
mult *= i + 1;
mult += delta * fac;
fac *= i + 1;
}
div *= fac;
if (div > 0) {
/* Update scaling factor */
__this_cpu_write(mt_scaling_mult, mult);
__this_cpu_write(mt_scaling_div, div);
memcpy(cycles_old, cycles_new,
sizeof(u64) * (smp_cpu_mtid + 1));
}
__this_cpu_write(mt_scaling_jiffies, jiffies_64);
}
static inline u64 update_tsk_timer(unsigned long *tsk_vtime, u64 new)
{
u64 delta;
delta = new - *tsk_vtime;
*tsk_vtime = new;
return delta;
}
static inline u64 scale_vtime(u64 vtime)
{
u64 mult = __this_cpu_read(mt_scaling_mult);
u64 div = __this_cpu_read(mt_scaling_div);
if (smp_cpu_mtid)
return vtime * mult / div;
return vtime;
}
static void account_system_index_scaled(struct task_struct *p, u64 cputime,
enum cpu_usage_stat index)
{
p->stimescaled += cputime_to_nsecs(scale_vtime(cputime));
account_system_index_time(p, cputime_to_nsecs(cputime), index);
}
/*
* Update process times based on virtual cpu times stored by entry.S
* to the lowcore fields user_timer, system_timer & steal_clock.
*/
static int do_account_vtime(struct task_struct *tsk)
{
u64 timer, clock, user, guest, system, hardirq, softirq;
timer = S390_lowcore.last_update_timer;
clock = S390_lowcore.last_update_clock;
asm volatile(
" stpt %0\n" /* Store current cpu timer value */
" stckf %1" /* Store current tod clock value */
: "=Q" (S390_lowcore.last_update_timer),
"=Q" (S390_lowcore.last_update_clock)
: : "cc");
clock = S390_lowcore.last_update_clock - clock;
timer -= S390_lowcore.last_update_timer;
if (hardirq_count())
S390_lowcore.hardirq_timer += timer;
else
S390_lowcore.system_timer += timer;
/* Update MT utilization calculation */
if (smp_cpu_mtid &&
time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies)))
update_mt_scaling();
/* Calculate cputime delta */
user = update_tsk_timer(&tsk->thread.user_timer,
READ_ONCE(S390_lowcore.user_timer));
guest = update_tsk_timer(&tsk->thread.guest_timer,
READ_ONCE(S390_lowcore.guest_timer));
system = update_tsk_timer(&tsk->thread.system_timer,
READ_ONCE(S390_lowcore.system_timer));
hardirq = update_tsk_timer(&tsk->thread.hardirq_timer,
READ_ONCE(S390_lowcore.hardirq_timer));
softirq = update_tsk_timer(&tsk->thread.softirq_timer,
READ_ONCE(S390_lowcore.softirq_timer));
S390_lowcore.steal_timer +=
clock - user - guest - system - hardirq - softirq;
/* Push account value */
if (user) {
account_user_time(tsk, cputime_to_nsecs(user));
tsk->utimescaled += cputime_to_nsecs(scale_vtime(user));
}
if (guest) {
account_guest_time(tsk, cputime_to_nsecs(guest));
tsk->utimescaled += cputime_to_nsecs(scale_vtime(guest));
}
if (system)
account_system_index_scaled(tsk, system, CPUTIME_SYSTEM);
if (hardirq)
account_system_index_scaled(tsk, hardirq, CPUTIME_IRQ);
if (softirq)
account_system_index_scaled(tsk, softirq, CPUTIME_SOFTIRQ);
return virt_timer_forward(user + guest + system + hardirq + softirq);
}
void vtime_task_switch(struct task_struct *prev)
{
do_account_vtime(prev);
prev->thread.user_timer = S390_lowcore.user_timer;
prev->thread.guest_timer = S390_lowcore.guest_timer;
prev->thread.system_timer = S390_lowcore.system_timer;
prev->thread.hardirq_timer = S390_lowcore.hardirq_timer;
prev->thread.softirq_timer = S390_lowcore.softirq_timer;
S390_lowcore.user_timer = current->thread.user_timer;
S390_lowcore.guest_timer = current->thread.guest_timer;
S390_lowcore.system_timer = current->thread.system_timer;
S390_lowcore.hardirq_timer = current->thread.hardirq_timer;
S390_lowcore.softirq_timer = current->thread.softirq_timer;
}
/*
* In s390, accounting pending user time also implies
* accounting system time in order to correctly compute
* the stolen time accounting.
*/
void vtime_flush(struct task_struct *tsk)
{
u64 steal, avg_steal;
if (do_account_vtime(tsk))
virt_timer_expire();
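/* Maintain a simple exponentially decaying average of the steal time. */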
steal = S390_lowcore.steal_timer;
avg_steal = S390_lowcore.avg_steal_timer / 2;
if ((s64) steal > 0) {
S390_lowcore.steal_timer = 0;
account_steal_time(cputime_to_nsecs(steal));
avg_steal += steal;
}
S390_lowcore.avg_steal_timer = avg_steal;
}
static u64 vtime_delta(void)
{
u64 timer = S390_lowcore.last_update_timer;
S390_lowcore.last_update_timer = get_vtimer();
return timer - S390_lowcore.last_update_timer;
}
/*
* Update process times based on virtual cpu times stored by entry.S
* to the lowcore fields user_timer, system_timer & steal_clock.
*/
void vtime_account_kernel(struct task_struct *tsk)
{
u64 delta = vtime_delta();
if (tsk->flags & PF_VCPU)
S390_lowcore.guest_timer += delta;
else
S390_lowcore.system_timer += delta;
virt_timer_forward(delta);
}
EXPORT_SYMBOL_GPL(vtime_account_kernel);
void vtime_account_softirq(struct task_struct *tsk)
{
u64 delta = vtime_delta();
S390_lowcore.softirq_timer += delta;
virt_timer_forward(delta);
}
void vtime_account_hardirq(struct task_struct *tsk)
{
u64 delta = vtime_delta();
S390_lowcore.hardirq_timer += delta;
virt_timer_forward(delta);
}
/*
* Sorted add to a list. The list is searched linearly until the first
* bigger element is found.
*/
static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
{
struct vtimer_list *tmp;
list_for_each_entry(tmp, head, entry) {
if (tmp->expires > timer->expires) {
list_add_tail(&timer->entry, &tmp->entry);
return;
}
}
list_add_tail(&timer->entry, head);
}
/*
* Handler for expired virtual CPU timer.
*/
static void virt_timer_expire(void)
{
struct vtimer_list *timer, *tmp;
unsigned long elapsed;
LIST_HEAD(cb_list);
/* walk timer list, fire all expired timers */
spin_lock(&virt_timer_lock);
elapsed = atomic64_read(&virt_timer_elapsed);
list_for_each_entry_safe(timer, tmp, &virt_timer_list, entry) {
if (timer->expires < elapsed)
/* move expired timer to the callback queue */
list_move_tail(&timer->entry, &cb_list);
else
timer->expires -= elapsed;
}
if (!list_empty(&virt_timer_list)) {
timer = list_first_entry(&virt_timer_list,
struct vtimer_list, entry);
atomic64_set(&virt_timer_current, timer->expires);
}
atomic64_sub(elapsed, &virt_timer_elapsed);
spin_unlock(&virt_timer_lock);
/* Do callbacks and recharge periodic timers */
list_for_each_entry_safe(timer, tmp, &cb_list, entry) {
list_del_init(&timer->entry);
timer->function(timer->data);
if (timer->interval) {
/* Recharge interval timer */
timer->expires = timer->interval +
atomic64_read(&virt_timer_elapsed);
spin_lock(&virt_timer_lock);
list_add_sorted(timer, &virt_timer_list);
spin_unlock(&virt_timer_lock);
}
}
}
void init_virt_timer(struct vtimer_list *timer)
{
timer->function = NULL;
INIT_LIST_HEAD(&timer->entry);
}
EXPORT_SYMBOL(init_virt_timer);
static inline int vtimer_pending(struct vtimer_list *timer)
{
return !list_empty(&timer->entry);
}
static void internal_add_vtimer(struct vtimer_list *timer)
{
if (list_empty(&virt_timer_list)) {
/* First timer, just program it. */
atomic64_set(&virt_timer_current, timer->expires);
atomic64_set(&virt_timer_elapsed, 0);
list_add(&timer->entry, &virt_timer_list);
} else {
/* Update timer against current base. */
timer->expires += atomic64_read(&virt_timer_elapsed);
if (likely((s64) timer->expires <
(s64) atomic64_read(&virt_timer_current)))
/* The new timer expires before the current timer. */
atomic64_set(&virt_timer_current, timer->expires);
/* Insert new timer into the list. */
list_add_sorted(timer, &virt_timer_list);
}
}
static void __add_vtimer(struct vtimer_list *timer, int periodic)
{
unsigned long flags;
timer->interval = periodic ? timer->expires : 0;
spin_lock_irqsave(&virt_timer_lock, flags);
internal_add_vtimer(timer);
spin_unlock_irqrestore(&virt_timer_lock, flags);
}
/*
* add_virt_timer - add a one-shot virtual CPU timer
*/
void add_virt_timer(struct vtimer_list *timer)
{
__add_vtimer(timer, 0);
}
EXPORT_SYMBOL(add_virt_timer);
/*
* add_virt_timer_periodic - add an interval virtual CPU timer
*/
void add_virt_timer_periodic(struct vtimer_list *timer)
{
__add_vtimer(timer, 1);
}
EXPORT_SYMBOL(add_virt_timer_periodic);
static int __mod_vtimer(struct vtimer_list *timer, u64 expires, int periodic)
{
unsigned long flags;
int rc;
BUG_ON(!timer->function);
if (timer->expires == expires && vtimer_pending(timer))
return 1;
spin_lock_irqsave(&virt_timer_lock, flags);
rc = vtimer_pending(timer);
if (rc)
list_del_init(&timer->entry);
timer->interval = periodic ? expires : 0;
timer->expires = expires;
internal_add_vtimer(timer);
spin_unlock_irqrestore(&virt_timer_lock, flags);
return rc;
}
/*
* returns whether it has modified a pending timer (1) or not (0)
*/
int mod_virt_timer(struct vtimer_list *timer, u64 expires)
{
return __mod_vtimer(timer, expires, 0);
}
EXPORT_SYMBOL(mod_virt_timer);
/*
* returns whether it has modified a pending timer (1) or not (0)
*/
int mod_virt_timer_periodic(struct vtimer_list *timer, u64 expires)
{
return __mod_vtimer(timer, expires, 1);
}
EXPORT_SYMBOL(mod_virt_timer_periodic);
/*
* Delete a virtual timer.
*
* returns whether the deleted timer was pending (1) or not (0)
*/
int del_virt_timer(struct vtimer_list *timer)
{
unsigned long flags;
if (!vtimer_pending(timer))
return 0;
spin_lock_irqsave(&virt_timer_lock, flags);
list_del_init(&timer->entry);
spin_unlock_irqrestore(&virt_timer_lock, flags);
return 1;
}
EXPORT_SYMBOL(del_virt_timer);
/*
* Start the virtual CPU timer on the current CPU.
*/
void vtime_init(void)
{
/* set initial cpu timer */
set_vtimer(VTIMER_MAX_SLICE);
/* Setup initial MT scaling values */
if (smp_cpu_mtid) {
__this_cpu_write(mt_scaling_jiffies, jiffies);
__this_cpu_write(mt_scaling_mult, 1);
__this_cpu_write(mt_scaling_div, 1);
stcctm(MT_DIAG, smp_cpu_mtid + 1, this_cpu_ptr(mt_cycles));
}
}
| linux-master | arch/s390/kernel/vtime.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/init.h>
struct dentry *arch_debugfs_dir;
EXPORT_SYMBOL(arch_debugfs_dir);
static int __init arch_kdebugfs_init(void)
{
arch_debugfs_dir = debugfs_create_dir("s390", NULL);
return 0;
}
postcore_initcall(arch_kdebugfs_init);
| linux-master | arch/s390/kernel/kdebugfs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2007, 2009
* Author(s): Hongjie Yang <[email protected]>,
*/
#define KMSG_COMPONENT "setup"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/lockdep.h>
#include <linux/extable.h>
#include <linux/pfn.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <asm/asm-extable.h>
#include <linux/memblock.h>
#include <asm/diag.h>
#include <asm/ebcdic.h>
#include <asm/ipl.h>
#include <asm/lowcore.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sysinfo.h>
#include <asm/cpcmd.h>
#include <asm/sclp.h>
#include <asm/facility.h>
#include <asm/boot_data.h>
#include <asm/switch_to.h>
#include "entry.h"
#define decompressor_handled_param(param) \
static int __init ignore_decompressor_param_##param(char *s) \
{ \
return 0; \
} \
early_param(#param, ignore_decompressor_param_##param)
decompressor_handled_param(mem);
decompressor_handled_param(vmalloc);
decompressor_handled_param(dfltcc);
decompressor_handled_param(facilities);
decompressor_handled_param(nokaslr);
#if IS_ENABLED(CONFIG_KVM)
decompressor_handled_param(prot_virt);
#endif
static void __init kasan_early_init(void)
{
#ifdef CONFIG_KASAN
init_task.kasan_depth = 0;
sclp_early_printk("KernelAddressSanitizer initialized\n");
#endif
}
static void __init reset_tod_clock(void)
{
union tod_clock clk;
if (store_tod_clock_ext_cc(&clk) == 0)
return;
/* TOD clock not running. Set the clock to Unix Epoch. */
if (set_tod_clock(TOD_UNIX_EPOCH) || store_tod_clock_ext_cc(&clk))
disabled_wait();
memset(&tod_clock_base, 0, sizeof(tod_clock_base));
tod_clock_base.tod = TOD_UNIX_EPOCH;
S390_lowcore.last_update_clock = TOD_UNIX_EPOCH;
}
/*
* Initialize storage key for kernel pages
*/
static noinline __init void init_kernel_storage_key(void)
{
#if PAGE_DEFAULT_KEY
unsigned long end_pfn, init_pfn;
end_pfn = PFN_UP(__pa(_end));
for (init_pfn = 0 ; init_pfn < end_pfn; init_pfn++)
page_set_storage_key(init_pfn << PAGE_SHIFT,
PAGE_DEFAULT_KEY, 0);
#endif
}
static __initdata char sysinfo_page[PAGE_SIZE] __aligned(PAGE_SIZE);
static noinline __init void detect_machine_type(void)
{
struct sysinfo_3_2_2 *vmms = (struct sysinfo_3_2_2 *)&sysinfo_page;
/* Check current-configuration-level */
if (stsi(NULL, 0, 0, 0) <= 2) {
S390_lowcore.machine_flags |= MACHINE_FLAG_LPAR;
return;
}
/* Get virtual-machine cpu information. */
if (stsi(vmms, 3, 2, 2) || !vmms->count)
return;
/* Detect known hypervisors */
if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3))
S390_lowcore.machine_flags |= MACHINE_FLAG_KVM;
else if (!memcmp(vmms->vm[0].cpi, "\xa9\x61\xe5\xd4", 4))
S390_lowcore.machine_flags |= MACHINE_FLAG_VM;
}
/* Remove leading, trailing and double whitespace. */
static inline void strim_all(char *str)
{
char *s;
s = strim(str);
if (s != str)
memmove(str, s, strlen(s));
while (*str) {
if (!isspace(*str++))
continue;
if (isspace(*str)) {
s = skip_spaces(str);
memmove(str, s, strlen(s) + 1);
}
}
}
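/*
 * Illustrative example, not part of the original file: strim_all() turns
 * a hypothetical buffer "  IBM   1234    A01 " into "IBM 1234 A01" by
 * stripping leading/trailing blanks and collapsing inner runs of
 * whitespace into a single space.
 */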
static noinline __init void setup_arch_string(void)
{
struct sysinfo_1_1_1 *mach = (struct sysinfo_1_1_1 *)&sysinfo_page;
struct sysinfo_3_2_2 *vm = (struct sysinfo_3_2_2 *)&sysinfo_page;
char mstr[80], hvstr[17];
if (stsi(mach, 1, 1, 1))
return;
EBCASC(mach->manufacturer, sizeof(mach->manufacturer));
EBCASC(mach->type, sizeof(mach->type));
EBCASC(mach->model, sizeof(mach->model));
EBCASC(mach->model_capacity, sizeof(mach->model_capacity));
sprintf(mstr, "%-16.16s %-4.4s %-16.16s %-16.16s",
mach->manufacturer, mach->type,
mach->model, mach->model_capacity);
strim_all(mstr);
if (stsi(vm, 3, 2, 2) == 0 && vm->count) {
EBCASC(vm->vm[0].cpi, sizeof(vm->vm[0].cpi));
sprintf(hvstr, "%-16.16s", vm->vm[0].cpi);
strim_all(hvstr);
} else {
sprintf(hvstr, "%s",
MACHINE_IS_LPAR ? "LPAR" :
MACHINE_IS_VM ? "z/VM" :
MACHINE_IS_KVM ? "KVM" : "unknown");
}
dump_stack_set_arch_desc("%s (%s)", mstr, hvstr);
}
static __init void setup_topology(void)
{
int max_mnest;
if (!test_facility(11))
return;
S390_lowcore.machine_flags |= MACHINE_FLAG_TOPOLOGY;
for (max_mnest = 6; max_mnest > 1; max_mnest--) {
if (stsi(&sysinfo_page, 15, 1, max_mnest) == 0)
break;
}
topology_max_mnest = max_mnest;
}
void __do_early_pgm_check(struct pt_regs *regs)
{
if (!fixup_exception(regs))
disabled_wait();
}
static noinline __init void setup_lowcore_early(void)
{
psw_t psw;
psw.addr = (unsigned long)early_pgm_check_handler;
psw.mask = PSW_KERNEL_BITS;
S390_lowcore.program_new_psw = psw;
S390_lowcore.preempt_count = INIT_PREEMPT_COUNT;
}
static noinline __init void setup_facility_list(void)
{
memcpy(alt_stfle_fac_list, stfle_fac_list, sizeof(alt_stfle_fac_list));
if (!IS_ENABLED(CONFIG_KERNEL_NOBP))
__clear_facility(82, alt_stfle_fac_list);
}
static __init void detect_diag9c(void)
{
unsigned int cpu_address;
int rc;
cpu_address = stap();
diag_stat_inc(DIAG_STAT_X09C);
asm volatile(
" diag %2,0,0x9c\n"
"0: la %0,0\n"
"1:\n"
EX_TABLE(0b,1b)
: "=d" (rc) : "0" (-EOPNOTSUPP), "d" (cpu_address) : "cc");
if (!rc)
S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG9C;
}
static __init void detect_machine_facilities(void)
{
if (test_facility(8)) {
S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT1;
__ctl_set_bit(0, 23);
}
if (test_facility(78))
S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT2;
if (test_facility(3))
S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE;
if (test_facility(50) && test_facility(73)) {
S390_lowcore.machine_flags |= MACHINE_FLAG_TE;
__ctl_set_bit(0, 55);
}
if (test_facility(51))
S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
if (test_facility(129)) {
S390_lowcore.machine_flags |= MACHINE_FLAG_VX;
__ctl_set_bit(0, 17);
}
if (test_facility(130))
S390_lowcore.machine_flags |= MACHINE_FLAG_NX;
if (test_facility(133))
S390_lowcore.machine_flags |= MACHINE_FLAG_GS;
if (test_facility(139) && (tod_clock_base.tod >> 63)) {
/* Enabled signed clock comparator comparisons */
S390_lowcore.machine_flags |= MACHINE_FLAG_SCC;
clock_comparator_max = -1ULL >> 1;
__ctl_set_bit(0, 53);
}
if (IS_ENABLED(CONFIG_PCI) && test_facility(153)) {
S390_lowcore.machine_flags |= MACHINE_FLAG_PCI_MIO;
/* the control bit is set during PCI initialization */
}
if (test_facility(194))
S390_lowcore.machine_flags |= MACHINE_FLAG_RDP;
}
static inline void save_vector_registers(void)
{
#ifdef CONFIG_CRASH_DUMP
if (test_facility(129))
save_vx_regs(boot_cpu_vector_save_area);
#endif
}
static inline void setup_control_registers(void)
{
unsigned long reg;
__ctl_store(reg, 0, 0);
reg |= CR0_LOW_ADDRESS_PROTECTION;
reg |= CR0_EMERGENCY_SIGNAL_SUBMASK;
reg |= CR0_EXTERNAL_CALL_SUBMASK;
__ctl_load(reg, 0, 0);
}
static inline void setup_access_registers(void)
{
unsigned int acrs[NUM_ACRS] = { 0 };
restore_access_regs(acrs);
}
static int __init disable_vector_extension(char *str)
{
S390_lowcore.machine_flags &= ~MACHINE_FLAG_VX;
__ctl_clear_bit(0, 17);
return 0;
}
early_param("novx", disable_vector_extension);
char __bootdata(early_command_line)[COMMAND_LINE_SIZE];
static void __init setup_boot_command_line(void)
{
/* copy arch command line */
strscpy(boot_command_line, early_command_line, COMMAND_LINE_SIZE);
}
static void __init sort_amode31_extable(void)
{
sort_extable(__start_amode31_ex_table, __stop_amode31_ex_table);
}
void __init startup_init(void)
{
kasan_early_init();
reset_tod_clock();
time_early_init();
init_kernel_storage_key();
lockdep_off();
sort_amode31_extable();
setup_lowcore_early();
setup_facility_list();
detect_machine_type();
setup_arch_string();
setup_boot_command_line();
detect_diag9c();
detect_machine_facilities();
save_vector_registers();
setup_topology();
sclp_early_detect();
setup_control_registers();
setup_access_registers();
lockdep_on();
}
| linux-master | arch/s390/kernel/early.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SMP related functions
*
* Copyright IBM Corp. 1999, 2012
* Author(s): Denis Joseph Barrow,
* Martin Schwidefsky <[email protected]>,
*
* based on other smp stuff by
* (c) 1995 Alan Cox, CymruNET Ltd <[email protected]>
* (c) 1998 Ingo Molnar
*
* The code outside of smp.c uses logical cpu numbers, only smp.c does
* the translation of logical to physical cpu ids. All new code that
* operates on physical cpu numbers needs to go into smp.c.
*/
#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/workqueue.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqflags.h>
#include <linux/irq_work.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/crash_dump.h>
#include <linux/kprobes.h>
#include <asm/asm-offsets.h>
#include <asm/pfault.h>
#include <asm/diag.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/tlbflush.h>
#include <asm/vtimer.h>
#include <asm/abs_lowcore.h>
#include <asm/sclp.h>
#include <asm/debug.h>
#include <asm/os_info.h>
#include <asm/sigp.h>
#include <asm/idle.h>
#include <asm/nmi.h>
#include <asm/stacktrace.h>
#include <asm/topology.h>
#include <asm/vdso.h>
#include <asm/maccess.h>
#include "entry.h"
enum {
ec_schedule = 0,
ec_call_function_single,
ec_stop_cpu,
ec_mcck_pending,
ec_irq_work,
};
enum {
CPU_STATE_STANDBY,
CPU_STATE_CONFIGURED,
};
static DEFINE_PER_CPU(struct cpu *, cpu_device);
struct pcpu {
unsigned long ec_mask; /* bit mask for ec_xxx functions */
unsigned long ec_clk; /* sigp timestamp for ec_xxx */
signed char state; /* physical cpu state */
signed char polarization; /* physical polarization */
u16 address; /* physical cpu address */
};
static u8 boot_core_type;
static struct pcpu pcpu_devices[NR_CPUS];
unsigned int smp_cpu_mt_shift;
EXPORT_SYMBOL(smp_cpu_mt_shift);
unsigned int smp_cpu_mtid;
EXPORT_SYMBOL(smp_cpu_mtid);
#ifdef CONFIG_CRASH_DUMP
__vector128 __initdata boot_cpu_vector_save_area[__NUM_VXRS];
#endif
static unsigned int smp_max_threads __initdata = -1U;
cpumask_t cpu_setup_mask;
static int __init early_nosmt(char *s)
{
smp_max_threads = 1;
return 0;
}
early_param("nosmt", early_nosmt);
static int __init early_smt(char *s)
{
get_option(&s, &smp_max_threads);
return 0;
}
early_param("smt", early_smt);
/*
* The smp_cpu_state_mutex must be held when changing the state or polarization
* member of a pcpu data structure within the pcpu_devices array.
*/
DEFINE_MUTEX(smp_cpu_state_mutex);
/*
* Signal processor helper functions.
*/
static inline int __pcpu_sigp_relax(u16 addr, u8 order, unsigned long parm)
{
int cc;
while (1) {
cc = __pcpu_sigp(addr, order, parm, NULL);
if (cc != SIGP_CC_BUSY)
return cc;
cpu_relax();
}
}
static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
{
int cc, retry;
for (retry = 0; ; retry++) {
cc = __pcpu_sigp(pcpu->address, order, parm, NULL);
if (cc != SIGP_CC_BUSY)
break;
if (retry >= 3)
udelay(10);
}
return cc;
}
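/*
 * Note (added for clarity, not in the original file): __pcpu_sigp_relax()
 * spins with cpu_relax() for as long as the order returns a BUSY condition
 * code, while pcpu_sigp_retry() retries the first three attempts back to
 * back and only then inserts a udelay(10) between further attempts.
 */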
static inline int pcpu_stopped(struct pcpu *pcpu)
{
u32 status;
if (__pcpu_sigp(pcpu->address, SIGP_SENSE,
0, &status) != SIGP_CC_STATUS_STORED)
return 0;
return !!(status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));
}
static inline int pcpu_running(struct pcpu *pcpu)
{
if (__pcpu_sigp(pcpu->address, SIGP_SENSE_RUNNING,
0, NULL) != SIGP_CC_STATUS_STORED)
return 1;
/* Status stored condition code is equivalent to cpu not running. */
return 0;
}
/*
* Find struct pcpu by cpu address.
*/
static struct pcpu *pcpu_find_address(const struct cpumask *mask, u16 address)
{
int cpu;
for_each_cpu(cpu, mask)
if (pcpu_devices[cpu].address == address)
return pcpu_devices + cpu;
return NULL;
}
static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
{
int order;
if (test_and_set_bit(ec_bit, &pcpu->ec_mask))
return;
order = pcpu_running(pcpu) ? SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL;
pcpu->ec_clk = get_tod_clock_fast();
pcpu_sigp_retry(pcpu, order, 0);
}
static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
{
unsigned long async_stack, nodat_stack, mcck_stack;
struct lowcore *lc;
lc = (struct lowcore *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
async_stack = stack_alloc();
mcck_stack = stack_alloc();
if (!lc || !nodat_stack || !async_stack || !mcck_stack)
goto out;
memcpy(lc, &S390_lowcore, 512);
memset((char *) lc + 512, 0, sizeof(*lc) - 512);
lc->async_stack = async_stack + STACK_INIT_OFFSET;
lc->nodat_stack = nodat_stack + STACK_INIT_OFFSET;
lc->mcck_stack = mcck_stack + STACK_INIT_OFFSET;
lc->cpu_nr = cpu;
lc->spinlock_lockval = arch_spin_lockval(cpu);
lc->spinlock_index = 0;
lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
lc->preempt_count = PREEMPT_DISABLED;
if (nmi_alloc_mcesa(&lc->mcesad))
goto out;
if (abs_lowcore_map(cpu, lc, true))
goto out_mcesa;
lowcore_ptr[cpu] = lc;
pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, __pa(lc));
return 0;
out_mcesa:
nmi_free_mcesa(&lc->mcesad);
out:
stack_free(mcck_stack);
stack_free(async_stack);
free_pages(nodat_stack, THREAD_SIZE_ORDER);
free_pages((unsigned long) lc, LC_ORDER);
return -ENOMEM;
}
static void pcpu_free_lowcore(struct pcpu *pcpu)
{
unsigned long async_stack, nodat_stack, mcck_stack;
struct lowcore *lc;
int cpu;
cpu = pcpu - pcpu_devices;
lc = lowcore_ptr[cpu];
nodat_stack = lc->nodat_stack - STACK_INIT_OFFSET;
async_stack = lc->async_stack - STACK_INIT_OFFSET;
mcck_stack = lc->mcck_stack - STACK_INIT_OFFSET;
pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
lowcore_ptr[cpu] = NULL;
abs_lowcore_unmap(cpu);
nmi_free_mcesa(&lc->mcesad);
stack_free(async_stack);
stack_free(mcck_stack);
free_pages(nodat_stack, THREAD_SIZE_ORDER);
free_pages((unsigned long) lc, LC_ORDER);
}
static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
{
struct lowcore *lc, *abs_lc;
lc = lowcore_ptr[cpu];
cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
lc->cpu_nr = cpu;
lc->restart_flags = RESTART_FLAG_CTLREGS;
lc->spinlock_lockval = arch_spin_lockval(cpu);
lc->spinlock_index = 0;
lc->percpu_offset = __per_cpu_offset[cpu];
lc->kernel_asce = S390_lowcore.kernel_asce;
lc->user_asce = s390_invalid_asce;
lc->machine_flags = S390_lowcore.machine_flags;
lc->user_timer = lc->system_timer =
lc->steal_timer = lc->avg_steal_timer = 0;
abs_lc = get_abs_lowcore();
memcpy(lc->cregs_save_area, abs_lc->cregs_save_area, sizeof(lc->cregs_save_area));
put_abs_lowcore(abs_lc);
lc->cregs_save_area[1] = lc->kernel_asce;
lc->cregs_save_area[7] = lc->user_asce;
save_access_regs((unsigned int *) lc->access_regs_save_area);
arch_spin_lock_setup(cpu);
}
static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
{
struct lowcore *lc;
int cpu;
cpu = pcpu - pcpu_devices;
lc = lowcore_ptr[cpu];
lc->kernel_stack = (unsigned long)task_stack_page(tsk) + STACK_INIT_OFFSET;
lc->current_task = (unsigned long)tsk;
lc->lpp = LPP_MAGIC;
lc->current_pid = tsk->pid;
lc->user_timer = tsk->thread.user_timer;
lc->guest_timer = tsk->thread.guest_timer;
lc->system_timer = tsk->thread.system_timer;
lc->hardirq_timer = tsk->thread.hardirq_timer;
lc->softirq_timer = tsk->thread.softirq_timer;
lc->steal_timer = 0;
}
static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
{
struct lowcore *lc;
int cpu;
cpu = pcpu - pcpu_devices;
lc = lowcore_ptr[cpu];
lc->restart_stack = lc->kernel_stack;
lc->restart_fn = (unsigned long) func;
lc->restart_data = (unsigned long) data;
lc->restart_source = -1U;
pcpu_sigp_retry(pcpu, SIGP_RESTART, 0);
}
typedef void (pcpu_delegate_fn)(void *);
/*
* Call function via PSW restart on pcpu and stop the current cpu.
*/
static void __pcpu_delegate(pcpu_delegate_fn *func, void *data)
{
func(data); /* should not return */
}
static void pcpu_delegate(struct pcpu *pcpu,
pcpu_delegate_fn *func,
void *data, unsigned long stack)
{
struct lowcore *lc, *abs_lc;
unsigned int source_cpu;
lc = lowcore_ptr[pcpu - pcpu_devices];
source_cpu = stap();
if (pcpu->address == source_cpu) {
call_on_stack(2, stack, void, __pcpu_delegate,
pcpu_delegate_fn *, func, void *, data);
}
/* Stop target cpu (if func returns this stops the current cpu). */
pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
pcpu_sigp_retry(pcpu, SIGP_CPU_RESET, 0);
/* Restart func on the target cpu and stop the current cpu. */
if (lc) {
lc->restart_stack = stack;
lc->restart_fn = (unsigned long)func;
lc->restart_data = (unsigned long)data;
lc->restart_source = source_cpu;
} else {
abs_lc = get_abs_lowcore();
abs_lc->restart_stack = stack;
abs_lc->restart_fn = (unsigned long)func;
abs_lc->restart_data = (unsigned long)data;
abs_lc->restart_source = source_cpu;
put_abs_lowcore(abs_lc);
}
asm volatile(
"0: sigp 0,%0,%2 # sigp restart to target cpu\n"
" brc 2,0b # busy, try again\n"
"1: sigp 0,%1,%3 # sigp stop to current cpu\n"
" brc 2,1b # busy, try again\n"
: : "d" (pcpu->address), "d" (source_cpu),
"K" (SIGP_RESTART), "K" (SIGP_STOP)
: "0", "1", "cc");
for (;;) ;
}
/*
* Enable additional logical cpus for multi-threading.
*/
static int pcpu_set_smt(unsigned int mtid)
{
int cc;
if (smp_cpu_mtid == mtid)
return 0;
cc = __pcpu_sigp(0, SIGP_SET_MULTI_THREADING, mtid, NULL);
if (cc == 0) {
smp_cpu_mtid = mtid;
smp_cpu_mt_shift = 0;
while (smp_cpu_mtid >= (1U << smp_cpu_mt_shift))
smp_cpu_mt_shift++;
pcpu_devices[0].address = stap();
}
return cc;
}
/*
* Call function on an online CPU.
*/
void smp_call_online_cpu(void (*func)(void *), void *data)
{
struct pcpu *pcpu;
/* Use the current cpu if it is online. */
pcpu = pcpu_find_address(cpu_online_mask, stap());
if (!pcpu)
/* Use the first online cpu. */
pcpu = pcpu_devices + cpumask_first(cpu_online_mask);
pcpu_delegate(pcpu, func, data, (unsigned long) restart_stack);
}
/*
* Call function on the ipl CPU.
*/
void smp_call_ipl_cpu(void (*func)(void *), void *data)
{
struct lowcore *lc = lowcore_ptr[0];
if (pcpu_devices[0].address == stap())
lc = &S390_lowcore;
pcpu_delegate(&pcpu_devices[0], func, data,
lc->nodat_stack);
}
int smp_find_processor_id(u16 address)
{
int cpu;
for_each_present_cpu(cpu)
if (pcpu_devices[cpu].address == address)
return cpu;
return -1;
}
void schedule_mcck_handler(void)
{
pcpu_ec_call(pcpu_devices + smp_processor_id(), ec_mcck_pending);
}
bool notrace arch_vcpu_is_preempted(int cpu)
{
if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
return false;
if (pcpu_running(pcpu_devices + cpu))
return false;
return true;
}
EXPORT_SYMBOL(arch_vcpu_is_preempted);
void notrace smp_yield_cpu(int cpu)
{
if (!MACHINE_HAS_DIAG9C)
return;
diag_stat_inc_norecursion(DIAG_STAT_X09C);
asm volatile("diag %0,0,0x9c"
: : "d" (pcpu_devices[cpu].address));
}
EXPORT_SYMBOL_GPL(smp_yield_cpu);
/*
* Send cpus emergency shutdown signal. This gives the cpus the
* opportunity to complete outstanding interrupts.
*/
void notrace smp_emergency_stop(void)
{
static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
static cpumask_t cpumask;
u64 end;
int cpu;
arch_spin_lock(&lock);
cpumask_copy(&cpumask, cpu_online_mask);
cpumask_clear_cpu(smp_processor_id(), &cpumask);
end = get_tod_clock() + (1000000UL << 12);
for_each_cpu(cpu, &cpumask) {
struct pcpu *pcpu = pcpu_devices + cpu;
set_bit(ec_stop_cpu, &pcpu->ec_mask);
while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL,
0, NULL) == SIGP_CC_BUSY &&
get_tod_clock() < end)
cpu_relax();
}
while (get_tod_clock() < end) {
for_each_cpu(cpu, &cpumask)
if (pcpu_stopped(pcpu_devices + cpu))
cpumask_clear_cpu(cpu, &cpumask);
if (cpumask_empty(&cpumask))
break;
cpu_relax();
}
arch_spin_unlock(&lock);
}
NOKPROBE_SYMBOL(smp_emergency_stop);
/*
* Stop all cpus but the current one.
*/
void smp_send_stop(void)
{
int cpu;
/* Disable all interrupts/machine checks */
__load_psw_mask(PSW_KERNEL_BITS);
trace_hardirqs_off();
debug_set_critical();
if (oops_in_progress)
smp_emergency_stop();
/* stop all processors */
for_each_online_cpu(cpu) {
if (cpu == smp_processor_id())
continue;
pcpu_sigp_retry(pcpu_devices + cpu, SIGP_STOP, 0);
while (!pcpu_stopped(pcpu_devices + cpu))
cpu_relax();
}
}
/*
* This is the main routine where commands issued by other
* cpus are handled.
*/
static void smp_handle_ext_call(void)
{
unsigned long bits;
/* handle bit signal external calls */
bits = xchg(&pcpu_devices[smp_processor_id()].ec_mask, 0);
if (test_bit(ec_stop_cpu, &bits))
smp_stop_cpu();
if (test_bit(ec_schedule, &bits))
scheduler_ipi();
if (test_bit(ec_call_function_single, &bits))
generic_smp_call_function_single_interrupt();
if (test_bit(ec_mcck_pending, &bits))
s390_handle_mcck();
if (test_bit(ec_irq_work, &bits))
irq_work_run();
}
static void do_ext_call_interrupt(struct ext_code ext_code,
unsigned int param32, unsigned long param64)
{
inc_irq_stat(ext_code.code == 0x1202 ? IRQEXT_EXC : IRQEXT_EMS);
smp_handle_ext_call();
}
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
int cpu;
for_each_cpu(cpu, mask)
pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}
void arch_send_call_function_single_ipi(int cpu)
{
pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}
/*
* this function sends a 'reschedule' IPI to another CPU.
* it goes straight through and wastes no time serializing
* anything. Worst case is that we lose a reschedule ...
*/
void arch_smp_send_reschedule(int cpu)
{
pcpu_ec_call(pcpu_devices + cpu, ec_schedule);
}
#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
pcpu_ec_call(pcpu_devices + smp_processor_id(), ec_irq_work);
}
#endif
/*
* parameter area for the set/clear control bit callbacks
*/
struct ec_creg_mask_parms {
unsigned long orval;
unsigned long andval;
int cr;
};
/*
* callback for setting/clearing control bits
*/
static void smp_ctl_bit_callback(void *info)
{
struct ec_creg_mask_parms *pp = info;
unsigned long cregs[16];
__ctl_store(cregs, 0, 15);
cregs[pp->cr] = (cregs[pp->cr] & pp->andval) | pp->orval;
__ctl_load(cregs, 0, 15);
}
static DEFINE_SPINLOCK(ctl_lock);
void smp_ctl_set_clear_bit(int cr, int bit, bool set)
{
struct ec_creg_mask_parms parms = { .cr = cr, };
struct lowcore *abs_lc;
u64 ctlreg;
if (set) {
parms.orval = 1UL << bit;
parms.andval = -1UL;
} else {
parms.orval = 0;
parms.andval = ~(1UL << bit);
}
spin_lock(&ctl_lock);
abs_lc = get_abs_lowcore();
ctlreg = abs_lc->cregs_save_area[cr];
ctlreg = (ctlreg & parms.andval) | parms.orval;
abs_lc->cregs_save_area[cr] = ctlreg;
put_abs_lowcore(abs_lc);
on_each_cpu(smp_ctl_bit_callback, &parms, 1);
spin_unlock(&ctl_lock);
}
EXPORT_SYMBOL(smp_ctl_set_clear_bit);
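/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * caller that sets and later clears bit 17 of control register 0 on all
 * CPUs (and in the absolute lowcore copy picked up by newly started
 * CPUs). Deliberately kept out of the build.
 */
#if 0
static void example_toggle_cr0_bit17(void)
{
	smp_ctl_set_clear_bit(0, 17, true);	/* set CR0 bit 17 everywhere */
	smp_ctl_set_clear_bit(0, 17, false);	/* clear it again */
}
#endif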
#ifdef CONFIG_CRASH_DUMP
int smp_store_status(int cpu)
{
struct lowcore *lc;
struct pcpu *pcpu;
unsigned long pa;
pcpu = pcpu_devices + cpu;
lc = lowcore_ptr[cpu];
pa = __pa(&lc->floating_pt_save_area);
if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_STATUS_AT_ADDRESS,
pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
return -EIO;
if (!MACHINE_HAS_VX && !MACHINE_HAS_GS)
return 0;
pa = lc->mcesad & MCESA_ORIGIN_MASK;
if (MACHINE_HAS_GS)
pa |= lc->mcesad & MCESA_LC_MASK;
if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
return -EIO;
return 0;
}
/*
* Collect CPU state of the previous, crashed system.
* There are four cases:
* 1) standard zfcp/nvme dump
* condition: OLDMEM_BASE == NULL && is_ipl_type_dump() == true
* The state for all CPUs except the boot CPU needs to be collected
* with sigp stop-and-store-status. The boot CPU state is located in
* the absolute lowcore of the memory stored in the HSA. The zcore code
* will copy the boot CPU state from the HSA.
* 2) stand-alone kdump for SCSI/NVMe (zfcp/nvme dump with swapped memory)
* condition: OLDMEM_BASE != NULL && is_ipl_type_dump() == true
* The state for all CPUs except the boot CPU needs to be collected
* with sigp stop-and-store-status. The firmware or the boot-loader
* stored the registers of the boot CPU in the absolute lowcore in the
* memory of the old system.
* 3) kdump and the old kernel did not store the CPU state,
* or stand-alone kdump for DASD
* condition: OLDMEM_BASE != NULL && !is_kdump_kernel()
* The state for all CPUs except the boot CPU needs to be collected
* with sigp stop-and-store-status. The kexec code or the boot-loader
* stored the registers of the boot CPU in the memory of the old system.
* 4) kdump and the old kernel stored the CPU state
* condition: OLDMEM_BASE != NULL && is_kdump_kernel()
* This case does not exist for s390 anymore, setup_arch explicitly
* deactivates the elfcorehdr= kernel parameter
*/
static bool dump_available(void)
{
return oldmem_data.start || is_ipl_type_dump();
}
void __init smp_save_dump_ipl_cpu(void)
{
struct save_area *sa;
void *regs;
if (!dump_available())
return;
sa = save_area_alloc(true);
regs = memblock_alloc(512, 8);
if (!sa || !regs)
panic("could not allocate memory for boot CPU save area\n");
copy_oldmem_kernel(regs, __LC_FPREGS_SAVE_AREA, 512);
save_area_add_regs(sa, regs);
memblock_free(regs, 512);
if (MACHINE_HAS_VX)
save_area_add_vxrs(sa, boot_cpu_vector_save_area);
}
void __init smp_save_dump_secondary_cpus(void)
{
int addr, boot_cpu_addr, max_cpu_addr;
struct save_area *sa;
void *page;
if (!dump_available())
return;
/* Allocate a page as dumping area for the store status sigps */
page = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
if (!page)
panic("ERROR: Failed to allocate %lx bytes below %lx\n",
PAGE_SIZE, 1UL << 31);
/* Set multi-threading state to the previous system. */
pcpu_set_smt(sclp.mtid_prev);
boot_cpu_addr = stap();
max_cpu_addr = SCLP_MAX_CORES << sclp.mtid_prev;
for (addr = 0; addr <= max_cpu_addr; addr++) {
if (addr == boot_cpu_addr)
continue;
if (__pcpu_sigp_relax(addr, SIGP_SENSE, 0) ==
SIGP_CC_NOT_OPERATIONAL)
continue;
sa = save_area_alloc(false);
if (!sa)
panic("could not allocate memory for save area\n");
__pcpu_sigp_relax(addr, SIGP_STORE_STATUS_AT_ADDRESS, __pa(page));
save_area_add_regs(sa, page);
if (MACHINE_HAS_VX) {
__pcpu_sigp_relax(addr, SIGP_STORE_ADDITIONAL_STATUS, __pa(page));
save_area_add_vxrs(sa, page);
}
}
memblock_free(page, PAGE_SIZE);
diag_amode31_ops.diag308_reset();
pcpu_set_smt(0);
}
#endif /* CONFIG_CRASH_DUMP */
void smp_cpu_set_polarization(int cpu, int val)
{
pcpu_devices[cpu].polarization = val;
}
int smp_cpu_get_polarization(int cpu)
{
return pcpu_devices[cpu].polarization;
}
int smp_cpu_get_cpu_address(int cpu)
{
return pcpu_devices[cpu].address;
}
static void __ref smp_get_core_info(struct sclp_core_info *info, int early)
{
static int use_sigp_detection;
int address;
if (use_sigp_detection || sclp_get_core_info(info, early)) {
use_sigp_detection = 1;
for (address = 0;
address < (SCLP_MAX_CORES << smp_cpu_mt_shift);
address += (1U << smp_cpu_mt_shift)) {
if (__pcpu_sigp_relax(address, SIGP_SENSE, 0) ==
SIGP_CC_NOT_OPERATIONAL)
continue;
info->core[info->configured].core_id =
address >> smp_cpu_mt_shift;
info->configured++;
}
info->combined = info->configured;
}
}
static int smp_add_present_cpu(int cpu);
static int smp_add_core(struct sclp_core_entry *core, cpumask_t *avail,
bool configured, bool early)
{
struct pcpu *pcpu;
int cpu, nr, i;
u16 address;
nr = 0;
if (sclp.has_core_type && core->type != boot_core_type)
return nr;
cpu = cpumask_first(avail);
address = core->core_id << smp_cpu_mt_shift;
for (i = 0; (i <= smp_cpu_mtid) && (cpu < nr_cpu_ids); i++) {
if (pcpu_find_address(cpu_present_mask, address + i))
continue;
pcpu = pcpu_devices + cpu;
pcpu->address = address + i;
if (configured)
pcpu->state = CPU_STATE_CONFIGURED;
else
pcpu->state = CPU_STATE_STANDBY;
smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
set_cpu_present(cpu, true);
if (!early && smp_add_present_cpu(cpu) != 0)
set_cpu_present(cpu, false);
else
nr++;
cpumask_clear_cpu(cpu, avail);
cpu = cpumask_next(cpu, avail);
}
return nr;
}
static int __smp_rescan_cpus(struct sclp_core_info *info, bool early)
{
struct sclp_core_entry *core;
static cpumask_t avail;
bool configured;
u16 core_id;
int nr, i;
cpus_read_lock();
mutex_lock(&smp_cpu_state_mutex);
nr = 0;
cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
/*
* Add IPL core first (which got logical CPU number 0) to make sure
* that all SMT threads get subsequent logical CPU numbers.
*/
if (early) {
core_id = pcpu_devices[0].address >> smp_cpu_mt_shift;
for (i = 0; i < info->configured; i++) {
core = &info->core[i];
if (core->core_id == core_id) {
nr += smp_add_core(core, &avail, true, early);
break;
}
}
}
for (i = 0; i < info->combined; i++) {
configured = i < info->configured;
nr += smp_add_core(&info->core[i], &avail, configured, early);
}
mutex_unlock(&smp_cpu_state_mutex);
cpus_read_unlock();
return nr;
}
void __init smp_detect_cpus(void)
{
unsigned int cpu, mtid, c_cpus, s_cpus;
struct sclp_core_info *info;
u16 address;
/* Get CPU information */
info = memblock_alloc(sizeof(*info), 8);
if (!info)
panic("%s: Failed to allocate %zu bytes align=0x%x\n",
__func__, sizeof(*info), 8);
smp_get_core_info(info, 1);
/* Find boot CPU type */
if (sclp.has_core_type) {
address = stap();
for (cpu = 0; cpu < info->combined; cpu++)
if (info->core[cpu].core_id == address) {
/* The boot cpu dictates the cpu type. */
boot_core_type = info->core[cpu].type;
break;
}
if (cpu >= info->combined)
panic("Could not find boot CPU type");
}
/* Set multi-threading state for the current system */
mtid = boot_core_type ? sclp.mtid : sclp.mtid_cp;
mtid = (mtid < smp_max_threads) ? mtid : smp_max_threads - 1;
pcpu_set_smt(mtid);
/* Print number of CPUs */
c_cpus = s_cpus = 0;
for (cpu = 0; cpu < info->combined; cpu++) {
if (sclp.has_core_type &&
info->core[cpu].type != boot_core_type)
continue;
if (cpu < info->configured)
c_cpus += smp_cpu_mtid + 1;
else
s_cpus += smp_cpu_mtid + 1;
}
pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);
/* Add CPUs present at boot */
__smp_rescan_cpus(info, true);
memblock_free(info, sizeof(*info));
}
/*
* Activate a secondary processor.
*/
static void smp_start_secondary(void *cpuvoid)
{
int cpu = raw_smp_processor_id();
S390_lowcore.last_update_clock = get_tod_clock();
S390_lowcore.restart_stack = (unsigned long)restart_stack;
S390_lowcore.restart_fn = (unsigned long)do_restart;
S390_lowcore.restart_data = 0;
S390_lowcore.restart_source = -1U;
S390_lowcore.restart_flags = 0;
restore_access_regs(S390_lowcore.access_regs_save_area);
cpu_init();
rcu_cpu_starting(cpu);
init_cpu_timer();
vtime_init();
vdso_getcpu_init();
pfault_init();
cpumask_set_cpu(cpu, &cpu_setup_mask);
update_cpu_masks();
notify_cpu_starting(cpu);
if (topology_cpu_dedicated(cpu))
set_cpu_flag(CIF_DEDICATED_CPU);
else
clear_cpu_flag(CIF_DEDICATED_CPU);
set_cpu_online(cpu, true);
inc_irq_stat(CPU_RST);
local_irq_enable();
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
/* Upping and downing of CPUs */
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
struct pcpu *pcpu = pcpu_devices + cpu;
int rc;
if (pcpu->state != CPU_STATE_CONFIGURED)
return -EIO;
if (pcpu_sigp_retry(pcpu, SIGP_INITIAL_CPU_RESET, 0) !=
SIGP_CC_ORDER_CODE_ACCEPTED)
return -EIO;
rc = pcpu_alloc_lowcore(pcpu, cpu);
if (rc)
return rc;
/*
* Make sure global control register contents do not change
* until new CPU has initialized control registers.
*/
spin_lock(&ctl_lock);
pcpu_prepare_secondary(pcpu, cpu);
pcpu_attach_task(pcpu, tidle);
pcpu_start_fn(pcpu, smp_start_secondary, NULL);
/* Wait until cpu puts itself in the online & active maps */
while (!cpu_online(cpu))
cpu_relax();
spin_unlock(&ctl_lock);
return 0;
}
static unsigned int setup_possible_cpus __initdata;
static int __init _setup_possible_cpus(char *s)
{
get_option(&s, &setup_possible_cpus);
return 0;
}
early_param("possible_cpus", _setup_possible_cpus);
int __cpu_disable(void)
{
unsigned long cregs[16];
int cpu;
/* Handle possible pending IPIs */
smp_handle_ext_call();
cpu = smp_processor_id();
set_cpu_online(cpu, false);
cpumask_clear_cpu(cpu, &cpu_setup_mask);
update_cpu_masks();
/* Disable pseudo page faults on this cpu. */
pfault_fini();
/* Disable interrupt sources via control register. */
__ctl_store(cregs, 0, 15);
cregs[0] &= ~0x0000ee70UL; /* disable all external interrupts */
cregs[6] &= ~0xff000000UL; /* disable all I/O interrupts */
cregs[14] &= ~0x1f000000UL; /* disable most machine checks */
__ctl_load(cregs, 0, 15);
clear_cpu_flag(CIF_NOHZ_DELAY);
return 0;
}
void __cpu_die(unsigned int cpu)
{
struct pcpu *pcpu;
/* Wait until target cpu is down */
pcpu = pcpu_devices + cpu;
while (!pcpu_stopped(pcpu))
cpu_relax();
pcpu_free_lowcore(pcpu);
cpumask_clear_cpu(cpu, mm_cpumask(&init_mm));
cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask);
}
void __noreturn cpu_die(void)
{
idle_task_exit();
pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
for (;;) ;
}
void __init smp_fill_possible_mask(void)
{
unsigned int possible, sclp_max, cpu;
sclp_max = max(sclp.mtid, sclp.mtid_cp) + 1;
sclp_max = min(smp_max_threads, sclp_max);
sclp_max = (sclp.max_cores * sclp_max) ?: nr_cpu_ids;
possible = setup_possible_cpus ?: nr_cpu_ids;
possible = min(possible, sclp_max);
for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
set_cpu_possible(cpu, true);
}
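/*
 * Worked example (added for clarity, values are made up): with
 * sclp.mtid = sclp.mtid_cp = 1, sclp.max_cores = 8 and the default
 * smp_max_threads, sclp_max becomes (1 + 1) * 8 = 16; without a
 * "possible_cpus=" parameter the possible mask is then limited to
 * min(nr_cpu_ids, 16) CPUs.
 */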
void __init smp_prepare_cpus(unsigned int max_cpus)
{
/* request the 0x1201 emergency signal external interrupt */
if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt))
panic("Couldn't request external interrupt 0x1201");
/* request the 0x1202 external call external interrupt */
if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
panic("Couldn't request external interrupt 0x1202");
}
void __init smp_prepare_boot_cpu(void)
{
struct pcpu *pcpu = pcpu_devices;
WARN_ON(!cpu_present(0) || !cpu_online(0));
pcpu->state = CPU_STATE_CONFIGURED;
S390_lowcore.percpu_offset = __per_cpu_offset[0];
smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
}
void __init smp_setup_processor_id(void)
{
pcpu_devices[0].address = stap();
S390_lowcore.cpu_nr = 0;
S390_lowcore.spinlock_lockval = arch_spin_lockval(0);
S390_lowcore.spinlock_index = 0;
}
/*
* the frequency of the profiling timer can be changed
* by writing a multiplier value into /proc/profile.
*
* usually you want to run this on all CPUs ;)
*/
int setup_profiling_timer(unsigned int multiplier)
{
return 0;
}
static ssize_t cpu_configure_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
ssize_t count;
mutex_lock(&smp_cpu_state_mutex);
count = sprintf(buf, "%d\n", pcpu_devices[dev->id].state);
mutex_unlock(&smp_cpu_state_mutex);
return count;
}
static ssize_t cpu_configure_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct pcpu *pcpu;
int cpu, val, rc, i;
char delim;
if (sscanf(buf, "%d %c", &val, &delim) != 1)
return -EINVAL;
if (val != 0 && val != 1)
return -EINVAL;
cpus_read_lock();
mutex_lock(&smp_cpu_state_mutex);
rc = -EBUSY;
/* disallow configuration changes of online cpus and cpu 0 */
cpu = dev->id;
cpu = smp_get_base_cpu(cpu);
if (cpu == 0)
goto out;
for (i = 0; i <= smp_cpu_mtid; i++)
if (cpu_online(cpu + i))
goto out;
pcpu = pcpu_devices + cpu;
rc = 0;
switch (val) {
case 0:
if (pcpu->state != CPU_STATE_CONFIGURED)
break;
rc = sclp_core_deconfigure(pcpu->address >> smp_cpu_mt_shift);
if (rc)
break;
for (i = 0; i <= smp_cpu_mtid; i++) {
if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
continue;
pcpu[i].state = CPU_STATE_STANDBY;
smp_cpu_set_polarization(cpu + i,
POLARIZATION_UNKNOWN);
}
topology_expect_change();
break;
case 1:
if (pcpu->state != CPU_STATE_STANDBY)
break;
rc = sclp_core_configure(pcpu->address >> smp_cpu_mt_shift);
if (rc)
break;
for (i = 0; i <= smp_cpu_mtid; i++) {
if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
continue;
pcpu[i].state = CPU_STATE_CONFIGURED;
smp_cpu_set_polarization(cpu + i,
POLARIZATION_UNKNOWN);
}
topology_expect_change();
break;
default:
break;
}
out:
mutex_unlock(&smp_cpu_state_mutex);
cpus_read_unlock();
return rc ? rc : count;
}
static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
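/*
 * Note (added for clarity, not in the original file): this attribute is
 * expected to show up as /sys/devices/system/cpu/cpuN/configure; writing
 * "0" deconfigures an offline core via sclp_core_deconfigure() and moves
 * it to the standby state, writing "1" configures a standby core again
 * via sclp_core_configure().
 */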
static ssize_t show_cpu_address(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", pcpu_devices[dev->id].address);
}
static DEVICE_ATTR(address, 0444, show_cpu_address, NULL);
static struct attribute *cpu_common_attrs[] = {
&dev_attr_configure.attr,
&dev_attr_address.attr,
NULL,
};
static struct attribute_group cpu_common_attr_group = {
.attrs = cpu_common_attrs,
};
static struct attribute *cpu_online_attrs[] = {
&dev_attr_idle_count.attr,
&dev_attr_idle_time_us.attr,
NULL,
};
static struct attribute_group cpu_online_attr_group = {
.attrs = cpu_online_attrs,
};
static int smp_cpu_online(unsigned int cpu)
{
struct device *s = &per_cpu(cpu_device, cpu)->dev;
return sysfs_create_group(&s->kobj, &cpu_online_attr_group);
}
static int smp_cpu_pre_down(unsigned int cpu)
{
struct device *s = &per_cpu(cpu_device, cpu)->dev;
sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
return 0;
}
static int smp_add_present_cpu(int cpu)
{
struct device *s;
struct cpu *c;
int rc;
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
return -ENOMEM;
per_cpu(cpu_device, cpu) = c;
s = &c->dev;
c->hotpluggable = 1;
rc = register_cpu(c, cpu);
if (rc)
goto out;
rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
if (rc)
goto out_cpu;
rc = topology_cpu_init(c);
if (rc)
goto out_topology;
return 0;
out_topology:
sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
out_cpu:
unregister_cpu(c);
out:
return rc;
}
int __ref smp_rescan_cpus(void)
{
struct sclp_core_info *info;
int nr;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
smp_get_core_info(info, 0);
nr = __smp_rescan_cpus(info, false);
kfree(info);
if (nr)
topology_schedule_update();
return 0;
}
static ssize_t __ref rescan_store(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
int rc;
rc = lock_device_hotplug_sysfs();
if (rc)
return rc;
rc = smp_rescan_cpus();
unlock_device_hotplug();
return rc ? rc : count;
}
static DEVICE_ATTR_WO(rescan);
static int __init s390_smp_init(void)
{
struct device *dev_root;
int cpu, rc = 0;
dev_root = bus_get_dev_root(&cpu_subsys);
if (dev_root) {
rc = device_create_file(dev_root, &dev_attr_rescan);
put_device(dev_root);
if (rc)
return rc;
}
for_each_present_cpu(cpu) {
rc = smp_add_present_cpu(cpu);
if (rc)
goto out;
}
rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "s390/smp:online",
smp_cpu_online, smp_cpu_pre_down);
rc = rc <= 0 ? rc : 0;
out:
return rc;
}
subsys_initcall(s390_smp_init);
static __always_inline void set_new_lowcore(struct lowcore *lc)
{
union register_pair dst, src;
u32 pfx;
src.even = (unsigned long) &S390_lowcore;
src.odd = sizeof(S390_lowcore);
dst.even = (unsigned long) lc;
dst.odd = sizeof(*lc);
pfx = __pa(lc);
asm volatile(
" mvcl %[dst],%[src]\n"
" spx %[pfx]\n"
: [dst] "+&d" (dst.pair), [src] "+&d" (src.pair)
: [pfx] "Q" (pfx)
: "memory", "cc");
}
int __init smp_reinit_ipl_cpu(void)
{
unsigned long async_stack, nodat_stack, mcck_stack;
struct lowcore *lc, *lc_ipl;
unsigned long flags, cr0;
u64 mcesad;
lc_ipl = lowcore_ptr[0];
lc = (struct lowcore *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
async_stack = stack_alloc();
mcck_stack = stack_alloc();
if (!lc || !nodat_stack || !async_stack || !mcck_stack || nmi_alloc_mcesa(&mcesad))
panic("Couldn't allocate memory");
local_irq_save(flags);
local_mcck_disable();
set_new_lowcore(lc);
S390_lowcore.nodat_stack = nodat_stack + STACK_INIT_OFFSET;
S390_lowcore.async_stack = async_stack + STACK_INIT_OFFSET;
S390_lowcore.mcck_stack = mcck_stack + STACK_INIT_OFFSET;
__ctl_store(cr0, 0, 0);
__ctl_clear_bit(0, 28); /* disable lowcore protection */
S390_lowcore.mcesad = mcesad;
__ctl_load(cr0, 0, 0);
if (abs_lowcore_map(0, lc, false))
panic("Couldn't remap absolute lowcore");
lowcore_ptr[0] = lc;
local_mcck_enable();
local_irq_restore(flags);
memblock_free_late(__pa(lc_ipl->mcck_stack - STACK_INIT_OFFSET), THREAD_SIZE);
memblock_free_late(__pa(lc_ipl->async_stack - STACK_INIT_OFFSET), THREAD_SIZE);
memblock_free_late(__pa(lc_ipl->nodat_stack - STACK_INIT_OFFSET), THREAD_SIZE);
memblock_free_late(__pa(lc_ipl), sizeof(*lc_ipl));
return 0;
}
| linux-master | arch/s390/kernel/smp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Linux Guest Relocation (LGR) detection
*
* Copyright IBM Corp. 2012
* Author(s): Michael Holzheu <[email protected]>
*/
#include <linux/init.h>
#include <linux/export.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <asm/facility.h>
#include <asm/sysinfo.h>
#include <asm/ebcdic.h>
#include <asm/debug.h>
#include <asm/ipl.h>
#define LGR_TIMER_INTERVAL_SECS (30 * 60)
#define VM_LEVEL_MAX 2 /* Maximum is 8, but we only record two levels */
/*
* LGR info: Contains stfle and stsi data
*/
struct lgr_info {
/* Bit field with facility information: 4 DWORDs are stored */
u64 stfle_fac_list[4];
	/* Level of system (1 = CEC, 2 = LPAR, 3 = z/VM) */
u32 level;
/* Level 1: CEC info (stsi 1.1.1) */
char manufacturer[16];
char type[4];
char sequence[16];
char plant[4];
char model[16];
/* Level 2: LPAR info (stsi 2.2.2) */
u16 lpar_number;
char name[8];
/* Level 3: VM info (stsi 3.2.2) */
u8 vm_count;
struct {
char name[8];
char cpi[16];
} vm[VM_LEVEL_MAX];
} __packed __aligned(8);
/*
* LGR globals
*/
static char lgr_page[PAGE_SIZE] __aligned(PAGE_SIZE);
static struct lgr_info lgr_info_last;
static struct lgr_info lgr_info_cur;
static struct debug_info *lgr_dbf;
/*
* Copy buffer and then convert it to ASCII
*/
static void cpascii(char *dst, char *src, int size)
{
memcpy(dst, src, size);
EBCASC(dst, size);
}
/*
* Fill LGR info with 1.1.1 stsi data
*/
static void lgr_stsi_1_1_1(struct lgr_info *lgr_info)
{
struct sysinfo_1_1_1 *si = (void *) lgr_page;
if (stsi(si, 1, 1, 1))
return;
cpascii(lgr_info->manufacturer, si->manufacturer,
sizeof(si->manufacturer));
cpascii(lgr_info->type, si->type, sizeof(si->type));
cpascii(lgr_info->model, si->model, sizeof(si->model));
cpascii(lgr_info->sequence, si->sequence, sizeof(si->sequence));
cpascii(lgr_info->plant, si->plant, sizeof(si->plant));
}
/*
* Fill LGR info with 2.2.2 stsi data
*/
static void lgr_stsi_2_2_2(struct lgr_info *lgr_info)
{
struct sysinfo_2_2_2 *si = (void *) lgr_page;
if (stsi(si, 2, 2, 2))
return;
cpascii(lgr_info->name, si->name, sizeof(si->name));
lgr_info->lpar_number = si->lpar_number;
}
/*
* Fill LGR info with 3.2.2 stsi data
*/
static void lgr_stsi_3_2_2(struct lgr_info *lgr_info)
{
struct sysinfo_3_2_2 *si = (void *) lgr_page;
int i;
if (stsi(si, 3, 2, 2))
return;
for (i = 0; i < min_t(u8, si->count, VM_LEVEL_MAX); i++) {
cpascii(lgr_info->vm[i].name, si->vm[i].name,
sizeof(si->vm[i].name));
cpascii(lgr_info->vm[i].cpi, si->vm[i].cpi,
sizeof(si->vm[i].cpi));
}
lgr_info->vm_count = si->count;
}
/*
* Fill LGR info with current data
*/
static void lgr_info_get(struct lgr_info *lgr_info)
{
int level;
memset(lgr_info, 0, sizeof(*lgr_info));
stfle(lgr_info->stfle_fac_list, ARRAY_SIZE(lgr_info->stfle_fac_list));
level = stsi(NULL, 0, 0, 0);
lgr_info->level = level;
if (level >= 1)
lgr_stsi_1_1_1(lgr_info);
if (level >= 2)
lgr_stsi_2_2_2(lgr_info);
if (level >= 3)
lgr_stsi_3_2_2(lgr_info);
}
/*
* Check if LGR info has changed and if yes log new LGR info to s390dbf
*/
void lgr_info_log(void)
{
static DEFINE_SPINLOCK(lgr_info_lock);
unsigned long flags;
if (!spin_trylock_irqsave(&lgr_info_lock, flags))
return;
lgr_info_get(&lgr_info_cur);
if (memcmp(&lgr_info_last, &lgr_info_cur, sizeof(lgr_info_cur)) != 0) {
debug_event(lgr_dbf, 1, &lgr_info_cur, sizeof(lgr_info_cur));
lgr_info_last = lgr_info_cur;
}
spin_unlock_irqrestore(&lgr_info_lock, flags);
}
EXPORT_SYMBOL_GPL(lgr_info_log);
static void lgr_timer_set(void);
/*
* LGR timer callback
*/
static void lgr_timer_fn(struct timer_list *unused)
{
lgr_info_log();
lgr_timer_set();
}
static struct timer_list lgr_timer;
/*
* Setup next LGR timer
*/
static void lgr_timer_set(void)
{
mod_timer(&lgr_timer, jiffies + msecs_to_jiffies(LGR_TIMER_INTERVAL_SECS * MSEC_PER_SEC));
}
/*
* Initialize LGR: Add s390dbf, write initial lgr_info and setup timer
*/
static int __init lgr_init(void)
{
lgr_dbf = debug_register("lgr", 1, 1, sizeof(struct lgr_info));
if (!lgr_dbf)
return -ENOMEM;
debug_register_view(lgr_dbf, &debug_hex_ascii_view);
lgr_info_get(&lgr_info_last);
debug_event(lgr_dbf, 1, &lgr_info_last, sizeof(lgr_info_last));
timer_setup(&lgr_timer, lgr_timer_fn, TIMER_DEFERRABLE);
lgr_timer_set();
return 0;
}
device_initcall(lgr_init);
| linux-master | arch/s390/kernel/lgr.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/pgtable.h>
#include <asm/abs_lowcore.h>
unsigned long __bootdata_preserved(__abs_lowcore);
int abs_lowcore_map(int cpu, struct lowcore *lc, bool alloc)
{
unsigned long addr = __abs_lowcore + (cpu * sizeof(struct lowcore));
unsigned long phys = __pa(lc);
int rc, i;
for (i = 0; i < LC_PAGES; i++) {
rc = __vmem_map_4k_page(addr, phys, PAGE_KERNEL, alloc);
if (rc) {
/*
* Do not unmap allocated page tables in case the
* allocation was not requested. In such a case the
			 * request is expected to come from an atomic context,
* while the unmap attempt might sleep.
*/
if (alloc) {
for (--i; i >= 0; i--) {
addr -= PAGE_SIZE;
vmem_unmap_4k_page(addr);
}
}
return rc;
}
addr += PAGE_SIZE;
phys += PAGE_SIZE;
}
return 0;
}
void abs_lowcore_unmap(int cpu)
{
unsigned long addr = __abs_lowcore + (cpu * sizeof(struct lowcore));
int i;
for (i = 0; i < LC_PAGES; i++) {
vmem_unmap_4k_page(addr);
addr += PAGE_SIZE;
}
}
| linux-master | arch/s390/kernel/abs_lowcore.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Performance event support for s390x - CPU-measurement Counter Facility
*
* Copyright IBM Corp. 2012, 2023
* Author(s): Hendrik Brueckner <[email protected]>
* Thomas Richter <[email protected]>
*/
#define KMSG_COMPONENT "cpum_cf"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/miscdevice.h>
#include <linux/perf_event.h>
#include <asm/cpu_mf.h>
#include <asm/hwctrset.h>
#include <asm/debug.h>
enum cpumf_ctr_set {
CPUMF_CTR_SET_BASIC = 0, /* Basic Counter Set */
CPUMF_CTR_SET_USER = 1, /* Problem-State Counter Set */
CPUMF_CTR_SET_CRYPTO = 2, /* Crypto-Activity Counter Set */
CPUMF_CTR_SET_EXT = 3, /* Extended Counter Set */
CPUMF_CTR_SET_MT_DIAG = 4, /* MT-diagnostic Counter Set */
/* Maximum number of counter sets */
CPUMF_CTR_SET_MAX,
};
#define CPUMF_LCCTL_ENABLE_SHIFT 16
#define CPUMF_LCCTL_ACTCTL_SHIFT 0
static inline void ctr_set_enable(u64 *state, u64 ctrsets)
{
*state |= ctrsets << CPUMF_LCCTL_ENABLE_SHIFT;
}
static inline void ctr_set_disable(u64 *state, u64 ctrsets)
{
*state &= ~(ctrsets << CPUMF_LCCTL_ENABLE_SHIFT);
}
static inline void ctr_set_start(u64 *state, u64 ctrsets)
{
*state |= ctrsets << CPUMF_LCCTL_ACTCTL_SHIFT;
}
static inline void ctr_set_stop(u64 *state, u64 ctrsets)
{
*state &= ~(ctrsets << CPUMF_LCCTL_ACTCTL_SHIFT);
}
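/*
 * Illustrative example, not part of the original file: with the counter
 * set control value 0x02 (the basic set, see cpumf_ctr_ctl[] below),
 * ctr_set_enable(&state, 0x02) ORs 0x02 << 16 = 0x20000 into *state,
 * while ctr_set_start(&state, 0x02) ORs 0x02 into the low part used as
 * activation control.
 */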
static inline int ctr_stcctm(enum cpumf_ctr_set set, u64 range, u64 *dest)
{
switch (set) {
case CPUMF_CTR_SET_BASIC:
return stcctm(BASIC, range, dest);
case CPUMF_CTR_SET_USER:
return stcctm(PROBLEM_STATE, range, dest);
case CPUMF_CTR_SET_CRYPTO:
return stcctm(CRYPTO_ACTIVITY, range, dest);
case CPUMF_CTR_SET_EXT:
return stcctm(EXTENDED, range, dest);
case CPUMF_CTR_SET_MT_DIAG:
return stcctm(MT_DIAG_CLEARING, range, dest);
case CPUMF_CTR_SET_MAX:
return 3;
}
return 3;
}
struct cpu_cf_events {
refcount_t refcnt; /* Reference count */
atomic_t ctr_set[CPUMF_CTR_SET_MAX];
u64 state; /* For perf_event_open SVC */
u64 dev_state; /* For /dev/hwctr */
unsigned int flags;
size_t used; /* Bytes used in data */
size_t usedss; /* Bytes used in start/stop */
unsigned char start[PAGE_SIZE]; /* Counter set at event add */
unsigned char stop[PAGE_SIZE]; /* Counter set at event delete */
unsigned char data[PAGE_SIZE]; /* Counter set at /dev/hwctr */
unsigned int sets; /* # Counter set saved in memory */
};
static unsigned int cfdiag_cpu_speed; /* CPU speed for CF_DIAG trailer */
static debug_info_t *cf_dbg;
/*
* The CPU Measurement query counter information instruction contains
* information which varies per machine generation, but is constant and
* does not change when running on a particular machine, such as counter
* first and second version number. This is needed to determine the size
* of counter sets. Extract this information at device driver initialization.
*/
static struct cpumf_ctr_info cpumf_ctr_info;
struct cpu_cf_ptr {
struct cpu_cf_events *cpucf;
};
static struct cpu_cf_root { /* Anchor to per CPU data */
refcount_t refcnt; /* Overall active events */
struct cpu_cf_ptr __percpu *cfptr;
} cpu_cf_root;
/*
* Serialize event initialization and event removal. Both are called from
* user space in task context with perf_event_open() and close()
* system calls.
*
* This mutex serializes functions cpum_cf_alloc_cpu() called at event
* initialization via cpumf_pmu_event_init() and function cpum_cf_free_cpu()
* called at event removal via call back function hw_perf_event_destroy()
* when the event is deleted. They are serialized to enforce correct
* bookkeeping of pointer and reference counts anchored by
* struct cpu_cf_root and the access to cpu_cf_root::refcnt and the
* per CPU pointers stored in cpu_cf_root::cfptr.
*/
static DEFINE_MUTEX(pmc_reserve_mutex);
/*
* Get pointer to per-cpu structure.
*
* Function get_cpu_cfhw() is called from
* - cfset_copy_all(): This function is protected by cpus_read_lock(), so
* CPU hot plug remove can not happen. Event removal requires a close()
* first.
*
* Function this_cpu_cfhw() is called from perf common code functions:
 * - pmu_{en|dis}able(), pmu_{add|del}() and pmu_{start|stop}():
* All functions execute with interrupts disabled on that particular CPU.
* - cfset_ioctl_{on|off}, cfset_cpu_read(): see comment cfset_copy_all().
*
* Therefore it is safe to access the CPU specific pointer to the event.
*/
static struct cpu_cf_events *get_cpu_cfhw(int cpu)
{
struct cpu_cf_ptr __percpu *p = cpu_cf_root.cfptr;
if (p) {
struct cpu_cf_ptr *q = per_cpu_ptr(p, cpu);
return q->cpucf;
}
return NULL;
}
static struct cpu_cf_events *this_cpu_cfhw(void)
{
return get_cpu_cfhw(smp_processor_id());
}
/* Disable counter sets on dedicated CPU */
static void cpum_cf_reset_cpu(void *flags)
{
lcctl(0);
}
/* Free per CPU data when the last event is removed. */
static void cpum_cf_free_root(void)
{
if (!refcount_dec_and_test(&cpu_cf_root.refcnt))
return;
free_percpu(cpu_cf_root.cfptr);
cpu_cf_root.cfptr = NULL;
irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
on_each_cpu(cpum_cf_reset_cpu, NULL, 1);
debug_sprintf_event(cf_dbg, 4, "%s root.refcnt %u cfptr %d\n",
__func__, refcount_read(&cpu_cf_root.refcnt),
!cpu_cf_root.cfptr);
}
/*
* On initialization of first event also allocate per CPU data dynamically.
* Start with an array of pointers, the array size is the maximum number of
* CPUs possible, which might be larger than the number of CPUs currently
* online.
*/
static int cpum_cf_alloc_root(void)
{
int rc = 0;
if (refcount_inc_not_zero(&cpu_cf_root.refcnt))
return rc;
/* The memory is already zeroed. */
cpu_cf_root.cfptr = alloc_percpu(struct cpu_cf_ptr);
if (cpu_cf_root.cfptr) {
refcount_set(&cpu_cf_root.refcnt, 1);
on_each_cpu(cpum_cf_reset_cpu, NULL, 1);
irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT);
} else {
rc = -ENOMEM;
}
return rc;
}
/* Free CPU counter data structure for a PMU */
static void cpum_cf_free_cpu(int cpu)
{
struct cpu_cf_events *cpuhw;
struct cpu_cf_ptr *p;
mutex_lock(&pmc_reserve_mutex);
/*
* When invoked via CPU hotplug handler, there might be no events
* installed or that particular CPU might not have an
* event installed. This anchor pointer can be NULL!
*/
if (!cpu_cf_root.cfptr)
goto out;
p = per_cpu_ptr(cpu_cf_root.cfptr, cpu);
cpuhw = p->cpucf;
/*
	 * Might be NULL when called from the CPU hotplug handler and no
	 * event is installed on this particular CPU (only on other CPUs).
*/
if (!cpuhw)
goto out;
if (refcount_dec_and_test(&cpuhw->refcnt)) {
kfree(cpuhw);
p->cpucf = NULL;
}
cpum_cf_free_root();
out:
mutex_unlock(&pmc_reserve_mutex);
}
/* Allocate CPU counter data structure for a PMU. Called under mutex lock. */
static int cpum_cf_alloc_cpu(int cpu)
{
struct cpu_cf_events *cpuhw;
struct cpu_cf_ptr *p;
int rc;
mutex_lock(&pmc_reserve_mutex);
rc = cpum_cf_alloc_root();
if (rc)
goto unlock;
p = per_cpu_ptr(cpu_cf_root.cfptr, cpu);
cpuhw = p->cpucf;
if (!cpuhw) {
cpuhw = kzalloc(sizeof(*cpuhw), GFP_KERNEL);
if (cpuhw) {
p->cpucf = cpuhw;
refcount_set(&cpuhw->refcnt, 1);
} else {
rc = -ENOMEM;
}
} else {
refcount_inc(&cpuhw->refcnt);
}
if (rc) {
/*
* Error in allocation of event, decrement anchor. Since
		 * cpu_cf_event is not created, its destroy() function is not
* invoked. Adjust the reference counter for the anchor.
*/
cpum_cf_free_root();
}
unlock:
mutex_unlock(&pmc_reserve_mutex);
return rc;
}
/*
* Create/delete per CPU data structures for /dev/hwctr interface and events
* created by perf_event_open().
* If cpu is -1, track task on all available CPUs. This requires
* allocation of hardware data structures for all CPUs. This setup handles
* perf_event_open() with task context and /dev/hwctr interface.
 * If cpu is not -1, install the event on this CPU only. This setup handles
* perf_event_open() with CPU context.
*/
static int cpum_cf_alloc(int cpu)
{
cpumask_var_t mask;
int rc;
if (cpu == -1) {
if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
return -ENOMEM;
for_each_online_cpu(cpu) {
rc = cpum_cf_alloc_cpu(cpu);
if (rc) {
for_each_cpu(cpu, mask)
cpum_cf_free_cpu(cpu);
break;
}
cpumask_set_cpu(cpu, mask);
}
free_cpumask_var(mask);
} else {
rc = cpum_cf_alloc_cpu(cpu);
}
return rc;
}
static void cpum_cf_free(int cpu)
{
if (cpu == -1) {
for_each_online_cpu(cpu)
cpum_cf_free_cpu(cpu);
} else {
cpum_cf_free_cpu(cpu);
}
}
#define CF_DIAG_CTRSET_DEF 0xfeef /* Counter set header mark */
/* interval in seconds */
/* Counter sets are stored as data stream in a page sized memory buffer and
* exported to user space via raw data attached to the event sample data.
* Each counter set starts with an eight byte header consisting of:
* - a two byte eye catcher (0xfeef)
* - a one byte counter set number
* - a two byte counter set size (indicates the number of counters in this set)
* - a three byte reserved value (must be zero) to make the header the same
* size as a counter value.
* All counter values are eight byte in size.
*
* All counter sets are followed by a 64 byte trailer.
* The trailer consists of a:
* - flag field indicating valid fields when corresponding bit set
* - the counter facility first and second version number
* - the CPU speed if nonzero
* - the time stamp the counter sets have been collected
* - the time of day (TOD) base value
* - the machine type.
*
* The counter sets are saved when the process is prepared to be executed on a
* CPU and saved again when the process is going to be removed from a CPU.
* The difference of both counter sets are calculated and stored in the event
* sample data area.
*/
struct cf_ctrset_entry { /* CPU-M CF counter set entry (8 byte) */
unsigned int def:16; /* 0-15 Data Entry Format */
unsigned int set:16; /* 16-31 Counter set identifier */
unsigned int ctr:16; /* 32-47 Number of stored counters */
unsigned int res1:16; /* 48-63 Reserved */
};
struct cf_trailer_entry { /* CPU-M CF_DIAG trailer (64 byte) */
/* 0 - 7 */
union {
struct {
unsigned int clock_base:1; /* TOD clock base set */
unsigned int speed:1; /* CPU speed set */
/* Measurement alerts */
unsigned int mtda:1; /* Loss of MT ctr. data alert */
unsigned int caca:1; /* Counter auth. change alert */
unsigned int lcda:1; /* Loss of counter data alert */
};
unsigned long flags; /* 0-63 All indicators */
};
/* 8 - 15 */
unsigned int cfvn:16; /* 64-79 Ctr First Version */
unsigned int csvn:16; /* 80-95 Ctr Second Version */
unsigned int cpu_speed:32; /* 96-127 CPU speed */
/* 16 - 23 */
unsigned long timestamp; /* 128-191 Timestamp (TOD) */
/* 24 - 55 */
union {
struct {
unsigned long progusage1;
unsigned long progusage2;
unsigned long progusage3;
unsigned long tod_base;
};
unsigned long progusage[4];
};
/* 56 - 63 */
unsigned int mach_type:16; /* Machine type */
unsigned int res1:16; /* Reserved */
unsigned int res2:32; /* Reserved */
};
/* Create the trailer data at the end of a page. */
static void cfdiag_trailer(struct cf_trailer_entry *te)
{
struct cpuid cpuid;
te->cfvn = cpumf_ctr_info.cfvn; /* Counter version numbers */
te->csvn = cpumf_ctr_info.csvn;
get_cpu_id(&cpuid); /* Machine type */
te->mach_type = cpuid.machine;
te->cpu_speed = cfdiag_cpu_speed;
if (te->cpu_speed)
te->speed = 1;
te->clock_base = 1; /* Save clock base */
te->tod_base = tod_clock_base.tod;
te->timestamp = get_tod_clock_fast();
}
/*
* The number of counters per counter set varies between machine generations,
* but is constant when running on a particular machine generation.
* Determine each counter set size at device driver initialization and
* retrieve it later.
*/
static size_t cpumf_ctr_setsizes[CPUMF_CTR_SET_MAX];
static void cpum_cf_make_setsize(enum cpumf_ctr_set ctrset)
{
size_t ctrset_size = 0;
switch (ctrset) {
case CPUMF_CTR_SET_BASIC:
if (cpumf_ctr_info.cfvn >= 1)
ctrset_size = 6;
break;
case CPUMF_CTR_SET_USER:
if (cpumf_ctr_info.cfvn == 1)
ctrset_size = 6;
else if (cpumf_ctr_info.cfvn >= 3)
ctrset_size = 2;
break;
case CPUMF_CTR_SET_CRYPTO:
if (cpumf_ctr_info.csvn >= 1 && cpumf_ctr_info.csvn <= 5)
ctrset_size = 16;
else if (cpumf_ctr_info.csvn == 6 || cpumf_ctr_info.csvn == 7)
ctrset_size = 20;
break;
case CPUMF_CTR_SET_EXT:
if (cpumf_ctr_info.csvn == 1)
ctrset_size = 32;
else if (cpumf_ctr_info.csvn == 2)
ctrset_size = 48;
else if (cpumf_ctr_info.csvn >= 3 && cpumf_ctr_info.csvn <= 5)
ctrset_size = 128;
else if (cpumf_ctr_info.csvn == 6 || cpumf_ctr_info.csvn == 7)
ctrset_size = 160;
break;
case CPUMF_CTR_SET_MT_DIAG:
if (cpumf_ctr_info.csvn > 3)
ctrset_size = 48;
break;
case CPUMF_CTR_SET_MAX:
break;
}
cpumf_ctr_setsizes[ctrset] = ctrset_size;
}
/*
* Return the maximum possible counter set size (in number of 8 byte counters)
* depending on type and model number.
*/
static size_t cpum_cf_read_setsize(enum cpumf_ctr_set ctrset)
{
return cpumf_ctr_setsizes[ctrset];
}
/* Read a counter set. The counter set number determines the counter set and
* the CPUM-CF first and second version number determine the number of
* available counters in each counter set.
* Each counter set starts with header containing the counter set number and
* the number of eight byte counters.
*
 * The function returns the number of bytes occupied by this counter set
* including the header.
* If there is no counter in the counter set, this counter set is useless and
 * zero is returned in this case.
*
* Note that the counter sets may not be enabled or active and the stcctm
* instruction might return error 3. Depending on error_ok value this is ok,
* for example when called from cpumf_pmu_start() call back function.
*/
static size_t cfdiag_getctrset(struct cf_ctrset_entry *ctrdata, int ctrset,
size_t room, bool error_ok)
{
size_t ctrset_size, need = 0;
int rc = 3; /* Assume write failure */
ctrdata->def = CF_DIAG_CTRSET_DEF;
ctrdata->set = ctrset;
ctrdata->res1 = 0;
ctrset_size = cpum_cf_read_setsize(ctrset);
if (ctrset_size) { /* Save data */
need = ctrset_size * sizeof(u64) + sizeof(*ctrdata);
if (need <= room) {
rc = ctr_stcctm(ctrset, ctrset_size,
(u64 *)(ctrdata + 1));
}
if (rc != 3 || error_ok)
ctrdata->ctr = ctrset_size;
else
need = 0;
}
return need;
}
static const u64 cpumf_ctr_ctl[CPUMF_CTR_SET_MAX] = {
[CPUMF_CTR_SET_BASIC] = 0x02,
[CPUMF_CTR_SET_USER] = 0x04,
[CPUMF_CTR_SET_CRYPTO] = 0x08,
[CPUMF_CTR_SET_EXT] = 0x01,
[CPUMF_CTR_SET_MT_DIAG] = 0x20,
};
/* Read out all counter sets and save them in the provided data buffer.
 * The last 64 bytes hold an artificial trailer entry.
*/
static size_t cfdiag_getctr(void *data, size_t sz, unsigned long auth,
bool error_ok)
{
struct cf_trailer_entry *trailer;
size_t offset = 0, done;
int i;
memset(data, 0, sz);
sz -= sizeof(*trailer); /* Always room for trailer */
for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) {
struct cf_ctrset_entry *ctrdata = data + offset;
if (!(auth & cpumf_ctr_ctl[i]))
continue; /* Counter set not authorized */
done = cfdiag_getctrset(ctrdata, i, sz - offset, error_ok);
offset += done;
}
trailer = data + offset;
cfdiag_trailer(trailer);
return offset + sizeof(*trailer);
}
/* Calculate the difference for each counter in a counter set. */
static void cfdiag_diffctrset(u64 *pstart, u64 *pstop, int counters)
{
for (; --counters >= 0; ++pstart, ++pstop)
if (*pstop >= *pstart)
*pstop -= *pstart;
else
*pstop = *pstart - *pstop + 1;
}
/* Scan the counter sets and calculate the difference of each counter
* in each set. The result is the increment of each counter during the
* period the counter set has been activated.
*
* Return true on success.
*/
static int cfdiag_diffctr(struct cpu_cf_events *cpuhw, unsigned long auth)
{
struct cf_trailer_entry *trailer_start, *trailer_stop;
struct cf_ctrset_entry *ctrstart, *ctrstop;
size_t offset = 0;
auth &= (1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1;
do {
ctrstart = (struct cf_ctrset_entry *)(cpuhw->start + offset);
ctrstop = (struct cf_ctrset_entry *)(cpuhw->stop + offset);
if (memcmp(ctrstop, ctrstart, sizeof(*ctrstop))) {
pr_err_once("cpum_cf_diag counter set compare error "
"in set %i\n", ctrstart->set);
return 0;
}
auth &= ~cpumf_ctr_ctl[ctrstart->set];
if (ctrstart->def == CF_DIAG_CTRSET_DEF) {
cfdiag_diffctrset((u64 *)(ctrstart + 1),
(u64 *)(ctrstop + 1), ctrstart->ctr);
offset += ctrstart->ctr * sizeof(u64) +
sizeof(*ctrstart);
}
} while (ctrstart->def && auth);
/* Save time_stamp from start of event in stop's trailer */
trailer_start = (struct cf_trailer_entry *)(cpuhw->start + offset);
trailer_stop = (struct cf_trailer_entry *)(cpuhw->stop + offset);
trailer_stop->progusage[0] = trailer_start->timestamp;
return 1;
}
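/* Map a counter number to its counter set: counters 0-31 belong to the basic
 * set, 32-63 to the problem-state set, 64-127 to the crypto set, 128-287 to
 * the extended set and 448-495 to the MT-diagnostic set.
 */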
static enum cpumf_ctr_set get_counter_set(u64 event)
{
int set = CPUMF_CTR_SET_MAX;
if (event < 32)
set = CPUMF_CTR_SET_BASIC;
else if (event < 64)
set = CPUMF_CTR_SET_USER;
else if (event < 128)
set = CPUMF_CTR_SET_CRYPTO;
else if (event < 288)
set = CPUMF_CTR_SET_EXT;
else if (event >= 448 && event < 496)
set = CPUMF_CTR_SET_MT_DIAG;
return set;
}
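/* Check that the counter number in @config is provided by the counter
 * facility first/second version number of this machine and that its counter
 * set is usable. Returns 0 on success, -EOPNOTSUPP otherwise.
 */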
static int validate_ctr_version(const u64 config, enum cpumf_ctr_set set)
{
u16 mtdiag_ctl;
int err = 0;
/* check required version for counter sets */
switch (set) {
case CPUMF_CTR_SET_BASIC:
case CPUMF_CTR_SET_USER:
if (cpumf_ctr_info.cfvn < 1)
err = -EOPNOTSUPP;
break;
case CPUMF_CTR_SET_CRYPTO:
if ((cpumf_ctr_info.csvn >= 1 && cpumf_ctr_info.csvn <= 5 &&
config > 79) || (cpumf_ctr_info.csvn >= 6 && config > 83))
err = -EOPNOTSUPP;
break;
case CPUMF_CTR_SET_EXT:
if (cpumf_ctr_info.csvn < 1)
err = -EOPNOTSUPP;
if ((cpumf_ctr_info.csvn == 1 && config > 159) ||
(cpumf_ctr_info.csvn == 2 && config > 175) ||
(cpumf_ctr_info.csvn >= 3 && cpumf_ctr_info.csvn <= 5 &&
config > 255) ||
(cpumf_ctr_info.csvn >= 6 && config > 287))
err = -EOPNOTSUPP;
break;
case CPUMF_CTR_SET_MT_DIAG:
if (cpumf_ctr_info.csvn <= 3)
err = -EOPNOTSUPP;
/*
* MT-diagnostic counters are read-only. The counter set
* is automatically enabled and activated on all CPUs with
* multithreading (SMT). Deactivation of multithreading
* also disables the counter set. State changes are ignored
* by lcctl(). Because Linux controls SMT enablement through
* a kernel parameter only, the counter set is either disabled
* or enabled and active.
*
* Thus, the counters can only be used if SMT is on and the
* counter set is enabled and active.
*/
mtdiag_ctl = cpumf_ctr_ctl[CPUMF_CTR_SET_MT_DIAG];
if (!((cpumf_ctr_info.auth_ctl & mtdiag_ctl) &&
(cpumf_ctr_info.enable_ctl & mtdiag_ctl) &&
(cpumf_ctr_info.act_ctl & mtdiag_ctl)))
err = -EOPNOTSUPP;
break;
case CPUMF_CTR_SET_MAX:
err = -EOPNOTSUPP;
}
return err;
}
/*
* Change the CPUMF state to active.
* Enable and activate the CPU-counter sets according
* to the per-cpu control state.
*/
static void cpumf_pmu_enable(struct pmu *pmu)
{
struct cpu_cf_events *cpuhw = this_cpu_cfhw();
int err;
if (!cpuhw || (cpuhw->flags & PMU_F_ENABLED))
return;
err = lcctl(cpuhw->state | cpuhw->dev_state);
if (err)
pr_err("Enabling the performance measuring unit failed with rc=%x\n", err);
else
cpuhw->flags |= PMU_F_ENABLED;
}
/*
* Change the CPUMF state to inactive.
* Disable and enable (inactive) the CPU-counter sets according
* to the per-cpu control state.
*/
static void cpumf_pmu_disable(struct pmu *pmu)
{
struct cpu_cf_events *cpuhw = this_cpu_cfhw();
u64 inactive;
int err;
if (!cpuhw || !(cpuhw->flags & PMU_F_ENABLED))
return;
inactive = cpuhw->state & ~((1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1);
inactive |= cpuhw->dev_state;
err = lcctl(inactive);
if (err)
pr_err("Disabling the performance measuring unit failed with rc=%x\n", err);
else
cpuhw->flags &= ~PMU_F_ENABLED;
}
/* Release the PMU if event is the last perf event */
static void hw_perf_event_destroy(struct perf_event *event)
{
cpum_cf_free(event->cpu);
}
/* CPUMF <-> perf event mappings for kernel+userspace (basic set) */
static const int cpumf_generic_events_basic[] = {
[PERF_COUNT_HW_CPU_CYCLES] = 0,
[PERF_COUNT_HW_INSTRUCTIONS] = 1,
[PERF_COUNT_HW_CACHE_REFERENCES] = -1,
[PERF_COUNT_HW_CACHE_MISSES] = -1,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = -1,
[PERF_COUNT_HW_BRANCH_MISSES] = -1,
[PERF_COUNT_HW_BUS_CYCLES] = -1,
};
/* CPUMF <-> perf event mappings for userspace (problem-state set) */
static const int cpumf_generic_events_user[] = {
[PERF_COUNT_HW_CPU_CYCLES] = 32,
[PERF_COUNT_HW_INSTRUCTIONS] = 33,
[PERF_COUNT_HW_CACHE_REFERENCES] = -1,
[PERF_COUNT_HW_CACHE_MISSES] = -1,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = -1,
[PERF_COUNT_HW_BRANCH_MISSES] = -1,
[PERF_COUNT_HW_BUS_CYCLES] = -1,
};
static int is_userspace_event(u64 ev)
{
return cpumf_generic_events_user[PERF_COUNT_HW_CPU_CYCLES] == ev ||
cpumf_generic_events_user[PERF_COUNT_HW_INSTRUCTIONS] == ev;
}
static int __hw_perf_event_init(struct perf_event *event, unsigned int type)
{
struct perf_event_attr *attr = &event->attr;
struct hw_perf_event *hwc = &event->hw;
enum cpumf_ctr_set set;
u64 ev;
switch (type) {
case PERF_TYPE_RAW:
/* Raw events are used to access counters directly,
* hence do not permit excludes */
if (attr->exclude_kernel || attr->exclude_user ||
attr->exclude_hv)
return -EOPNOTSUPP;
ev = attr->config;
break;
case PERF_TYPE_HARDWARE:
if (is_sampling_event(event)) /* No sampling support */
return -ENOENT;
ev = attr->config;
if (!attr->exclude_user && attr->exclude_kernel) {
/*
* Count user space (problem-state) only
* Handle events 32 and 33 as 0:u and 1:u
*/
if (!is_userspace_event(ev)) {
if (ev >= ARRAY_SIZE(cpumf_generic_events_user))
return -EOPNOTSUPP;
ev = cpumf_generic_events_user[ev];
}
} else if (!attr->exclude_kernel && attr->exclude_user) {
/* No support for kernel space counters only */
return -EOPNOTSUPP;
} else {
/* Count user and kernel space, incl. events 32 + 33 */
if (!is_userspace_event(ev)) {
if (ev >= ARRAY_SIZE(cpumf_generic_events_basic))
return -EOPNOTSUPP;
ev = cpumf_generic_events_basic[ev];
}
}
break;
default:
return -ENOENT;
}
if (ev == -1)
return -ENOENT;
if (ev > PERF_CPUM_CF_MAX_CTR)
return -ENOENT;
/* Obtain the counter set to which the specified counter belongs */
set = get_counter_set(ev);
switch (set) {
case CPUMF_CTR_SET_BASIC:
case CPUMF_CTR_SET_USER:
case CPUMF_CTR_SET_CRYPTO:
case CPUMF_CTR_SET_EXT:
case CPUMF_CTR_SET_MT_DIAG:
/*
* Use the hardware perf event structure to store the
* counter number in the 'config' member and the counter
* set number in the 'config_base' as bit mask.
* It is later used to enable/disable the counter(s).
*/
hwc->config = ev;
hwc->config_base = cpumf_ctr_ctl[set];
break;
case CPUMF_CTR_SET_MAX:
/* The counter could not be associated to a counter set */
return -EINVAL;
}
/* Initialize for using the CPU-measurement counter facility */
if (cpum_cf_alloc(event->cpu))
return -ENOMEM;
event->destroy = hw_perf_event_destroy;
/*
* Finally, validate version and authorization of the counter set.
* If the particular CPU counter set is not authorized,
* return with -ENOENT in order to fall back to other
* PMUs that might suffice the event request.
*/
if (!(hwc->config_base & cpumf_ctr_info.auth_ctl))
return -ENOENT;
return validate_ctr_version(hwc->config, set);
}
/* Events CPU_CYCLES and INSTRUCTIONS can be submitted with two different
 * attribute::type values:
 * - PERF_TYPE_HARDWARE:
 * - pmu->type:
 * Handle both types of invocation identically. They address the same hardware.
* The result is different when event modifiers exclude_kernel and/or
* exclude_user are also set.
*/
static int cpumf_pmu_event_type(struct perf_event *event)
{
u64 ev = event->attr.config;
if (cpumf_generic_events_basic[PERF_COUNT_HW_CPU_CYCLES] == ev ||
cpumf_generic_events_basic[PERF_COUNT_HW_INSTRUCTIONS] == ev ||
cpumf_generic_events_user[PERF_COUNT_HW_CPU_CYCLES] == ev ||
cpumf_generic_events_user[PERF_COUNT_HW_INSTRUCTIONS] == ev)
return PERF_TYPE_HARDWARE;
return PERF_TYPE_RAW;
}
static int cpumf_pmu_event_init(struct perf_event *event)
{
unsigned int type = event->attr.type;
int err;
if (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_RAW)
err = __hw_perf_event_init(event, type);
else if (event->pmu->type == type)
/* Registered as unknown PMU */
err = __hw_perf_event_init(event, cpumf_pmu_event_type(event));
else
return -ENOENT;
if (unlikely(err) && event->destroy)
event->destroy(event);
return err;
}
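/* Re-read the counter from hardware and store it as the new start value in
 * event->hw.prev_count. An ecctr() return code of 3 means the counter set is
 * not yet active; in that case the start value is set to zero.
 */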
static int hw_perf_event_reset(struct perf_event *event)
{
u64 prev, new;
int err;
do {
prev = local64_read(&event->hw.prev_count);
err = ecctr(event->hw.config, &new);
if (err) {
if (err != 3)
break;
/* The counter is not (yet) available. This
* might happen if the counter set to which
* this counter belongs is in the disabled
* state.
*/
new = 0;
}
} while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev);
return err;
}
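/* Read the current counter value, compute the delta to the previously saved
 * value (handling a 64-bit wrap) and add it to the event count.
 */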
static void hw_perf_event_update(struct perf_event *event)
{
u64 prev, new, delta;
int err;
do {
prev = local64_read(&event->hw.prev_count);
err = ecctr(event->hw.config, &new);
if (err)
return;
} while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev);
delta = (prev <= new) ? new - prev
: (-1ULL - prev) + new + 1; /* overflow */
local64_add(delta, &event->count);
}
static void cpumf_pmu_read(struct perf_event *event)
{
if (event->hw.state & PERF_HES_STOPPED)
return;
hw_perf_event_update(event);
}
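/* Start counting for the event on this CPU: enable and activate its counter
 * set and take a start snapshot (the complete counter sets for CF_DIAG, a
 * single counter value otherwise).
 */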
static void cpumf_pmu_start(struct perf_event *event, int flags)
{
struct cpu_cf_events *cpuhw = this_cpu_cfhw();
struct hw_perf_event *hwc = &event->hw;
int i;
if (!(hwc->state & PERF_HES_STOPPED))
return;
hwc->state = 0;
/* (Re-)enable and activate the counter set */
ctr_set_enable(&cpuhw->state, hwc->config_base);
ctr_set_start(&cpuhw->state, hwc->config_base);
	/* The counter set to which this counter belongs can already be active.
* Because all counters in a set are active, the event->hw.prev_count
* needs to be synchronized. At this point, the counter set can be in
* the inactive or disabled state.
*/
if (hwc->config == PERF_EVENT_CPUM_CF_DIAG) {
cpuhw->usedss = cfdiag_getctr(cpuhw->start,
sizeof(cpuhw->start),
hwc->config_base, true);
} else {
hw_perf_event_reset(event);
}
/* Increment refcount for counter sets */
for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i)
if ((hwc->config_base & cpumf_ctr_ctl[i]))
atomic_inc(&cpuhw->ctr_set[i]);
}
/* Create perf event sample with the counter sets as raw data. The sample
* is then pushed to the event subsystem and the function checks for
* possible event overflows. If an event overflow occurs, the PMU is
* stopped.
*
* Return non-zero if an event overflow occurred.
*/
static int cfdiag_push_sample(struct perf_event *event,
struct cpu_cf_events *cpuhw)
{
struct perf_sample_data data;
struct perf_raw_record raw;
struct pt_regs regs;
int overflow;
/* Setup perf sample */
perf_sample_data_init(&data, 0, event->hw.last_period);
	memset(&regs, 0, sizeof(regs));
memset(&raw, 0, sizeof(raw));
if (event->attr.sample_type & PERF_SAMPLE_CPU)
data.cpu_entry.cpu = event->cpu;
if (event->attr.sample_type & PERF_SAMPLE_RAW) {
raw.frag.size = cpuhw->usedss;
raw.frag.data = cpuhw->stop;
perf_sample_save_raw_data(&data, &raw);
}
	overflow = perf_event_overflow(event, &data, &regs);
if (overflow)
event->pmu->stop(event, 0);
perf_event_update_userpage(event);
return overflow;
}
static void cpumf_pmu_stop(struct perf_event *event, int flags)
{
struct cpu_cf_events *cpuhw = this_cpu_cfhw();
struct hw_perf_event *hwc = &event->hw;
int i;
if (!(hwc->state & PERF_HES_STOPPED)) {
/* Decrement reference count for this counter set and if this
* is the last used counter in the set, clear activation
* control and set the counter set state to inactive.
*/
for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) {
if (!(hwc->config_base & cpumf_ctr_ctl[i]))
continue;
if (!atomic_dec_return(&cpuhw->ctr_set[i]))
ctr_set_stop(&cpuhw->state, cpumf_ctr_ctl[i]);
}
hwc->state |= PERF_HES_STOPPED;
}
if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
if (hwc->config == PERF_EVENT_CPUM_CF_DIAG) {
local64_inc(&event->count);
cpuhw->usedss = cfdiag_getctr(cpuhw->stop,
sizeof(cpuhw->stop),
event->hw.config_base,
false);
if (cfdiag_diffctr(cpuhw, event->hw.config_base))
cfdiag_push_sample(event, cpuhw);
} else {
hw_perf_event_update(event);
}
hwc->state |= PERF_HES_UPTODATE;
}
}
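/* Add the event to the PMU on this CPU. The counter set is enabled here;
 * counting starts in cpumf_pmu_start(), triggered directly when PERF_EF_START
 * is set.
 */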
static int cpumf_pmu_add(struct perf_event *event, int flags)
{
struct cpu_cf_events *cpuhw = this_cpu_cfhw();
ctr_set_enable(&cpuhw->state, event->hw.config_base);
event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
if (flags & PERF_EF_START)
cpumf_pmu_start(event, PERF_EF_RELOAD);
return 0;
}
static void cpumf_pmu_del(struct perf_event *event, int flags)
{
struct cpu_cf_events *cpuhw = this_cpu_cfhw();
int i;
cpumf_pmu_stop(event, PERF_EF_UPDATE);
/* Check if any counter in the counter set is still used. If not used,
* change the counter set to the disabled state. This also clears the
* content of all counters in the set.
*
* When a new perf event has been added but not yet started, this can
	 * clear the enable control and reset all counters in a set. Therefore,
* cpumf_pmu_start() always has to reenable a counter set.
*/
for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i)
if (!atomic_read(&cpuhw->ctr_set[i]))
ctr_set_disable(&cpuhw->state, cpumf_ctr_ctl[i]);
}
/* Performance monitoring unit for s390x */
static struct pmu cpumf_pmu = {
.task_ctx_nr = perf_sw_context,
.capabilities = PERF_PMU_CAP_NO_INTERRUPT,
.pmu_enable = cpumf_pmu_enable,
.pmu_disable = cpumf_pmu_disable,
.event_init = cpumf_pmu_event_init,
.add = cpumf_pmu_add,
.del = cpumf_pmu_del,
.start = cpumf_pmu_start,
.stop = cpumf_pmu_stop,
.read = cpumf_pmu_read,
};
static struct cfset_session { /* CPUs and counter set bit mask */
struct list_head head; /* Head of list of active processes */
} cfset_session = {
.head = LIST_HEAD_INIT(cfset_session.head)
};
static refcount_t cfset_opencnt = REFCOUNT_INIT(0); /* Access count */
/*
 * Synchronize access to device /dev/hwctr. This mutex protects against
* concurrent access to functions cfset_open() and cfset_release().
* Same for CPU hotplug add and remove events triggering
* cpum_cf_online_cpu() and cpum_cf_offline_cpu().
* It also serializes concurrent device ioctl access from multiple
* processes accessing /dev/hwc.
*
* The mutex protects concurrent access to the /dev/hwctr session management
* struct cfset_session and reference counting variable cfset_opencnt.
*/
static DEFINE_MUTEX(cfset_ctrset_mutex);
/*
* CPU hotplug handles only /dev/hwctr device.
 * For perf_event_open() the CPU hotplug handling is done in kernel common
 * code:
 * - CPU add: Nothing is done since a file descriptor cannot be created
* and returned to the user.
* - CPU delete: Handled by common code via pmu_disable(), pmu_stop() and
* pmu_delete(). The event itself is removed when the file descriptor is
* closed.
*/
static int cfset_online_cpu(unsigned int cpu);
static int cpum_cf_online_cpu(unsigned int cpu)
{
int rc = 0;
/*
* Ignore notification for perf_event_open().
* Handle only /dev/hwctr device sessions.
*/
mutex_lock(&cfset_ctrset_mutex);
if (refcount_read(&cfset_opencnt)) {
rc = cpum_cf_alloc_cpu(cpu);
if (!rc)
cfset_online_cpu(cpu);
}
mutex_unlock(&cfset_ctrset_mutex);
return rc;
}
static int cfset_offline_cpu(unsigned int cpu);
static int cpum_cf_offline_cpu(unsigned int cpu)
{
/*
* During task exit processing of grouped perf events triggered by CPU
* hotplug processing, pmu_disable() is called as part of perf context
* removal process. Therefore do not trigger event removal now for
* perf_event_open() created events. Perf common code triggers event
* destruction when the event file descriptor is closed.
*
* Handle only /dev/hwctr device sessions.
*/
mutex_lock(&cfset_ctrset_mutex);
if (refcount_read(&cfset_opencnt)) {
cfset_offline_cpu(cpu);
cpum_cf_free_cpu(cpu);
}
mutex_unlock(&cfset_ctrset_mutex);
return 0;
}
/* Return true if store counter set multiple instruction is available */
static inline int stccm_avail(void)
{
return test_facility(142);
}
/* CPU-measurement alerts for the counter facility */
static void cpumf_measurement_alert(struct ext_code ext_code,
unsigned int alert, unsigned long unused)
{
struct cpu_cf_events *cpuhw;
if (!(alert & CPU_MF_INT_CF_MASK))
return;
inc_irq_stat(IRQEXT_CMC);
/*
* Measurement alerts are shared and might happen when the PMU
* is not reserved. Ignore these alerts in this case.
*/
cpuhw = this_cpu_cfhw();
if (!cpuhw)
return;
/* counter authorization change alert */
if (alert & CPU_MF_INT_CF_CACA)
qctri(&cpumf_ctr_info);
/* loss of counter data alert */
if (alert & CPU_MF_INT_CF_LCDA)
pr_err("CPU[%i] Counter data was lost\n", smp_processor_id());
/* loss of MT counter data alert */
if (alert & CPU_MF_INT_CF_MTDA)
pr_warn("CPU[%i] MT counter data was lost\n",
smp_processor_id());
}
static int cfset_init(void);
static int __init cpumf_pmu_init(void)
{
int rc;
/* Extract counter measurement facility information */
if (!cpum_cf_avail() || qctri(&cpumf_ctr_info))
return -ENODEV;
/* Determine and store counter set sizes for later reference */
for (rc = CPUMF_CTR_SET_BASIC; rc < CPUMF_CTR_SET_MAX; ++rc)
cpum_cf_make_setsize(rc);
/*
* Clear bit 15 of cr0 to unauthorize problem-state to
* extract measurement counters
*/
ctl_clear_bit(0, 48);
/* register handler for measurement-alert interruptions */
rc = register_external_irq(EXT_IRQ_MEASURE_ALERT,
cpumf_measurement_alert);
if (rc) {
pr_err("Registering for CPU-measurement alerts failed with rc=%i\n", rc);
return rc;
}
/* Setup s390dbf facility */
cf_dbg = debug_register(KMSG_COMPONENT, 2, 1, 128);
if (!cf_dbg) {
pr_err("Registration of s390dbf(cpum_cf) failed\n");
rc = -ENOMEM;
goto out1;
}
debug_register_view(cf_dbg, &debug_sprintf_view);
cpumf_pmu.attr_groups = cpumf_cf_event_group();
rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", -1);
if (rc) {
pr_err("Registering the cpum_cf PMU failed with rc=%i\n", rc);
goto out2;
} else if (stccm_avail()) { /* Setup counter set device */
cfset_init();
}
rc = cpuhp_setup_state(CPUHP_AP_PERF_S390_CF_ONLINE,
"perf/s390/cf:online",
cpum_cf_online_cpu, cpum_cf_offline_cpu);
return rc;
out2:
debug_unregister_view(cf_dbg, &debug_sprintf_view);
debug_unregister(cf_dbg);
out1:
unregister_external_irq(EXT_IRQ_MEASURE_ALERT, cpumf_measurement_alert);
return rc;
}
/* Support for the CPU Measurement Facility counter set extraction using
* device /dev/hwctr. This allows user space programs to extract complete
 * counter sets via normal file operations.
*/
struct cfset_call_on_cpu_parm { /* Parm struct for smp_call_on_cpu */
unsigned int sets; /* Counter set bit mask */
atomic_t cpus_ack; /* # CPUs successfully executed func */
};
struct cfset_request { /* CPUs and counter set bit mask */
unsigned long ctrset; /* Bit mask of counter set to read */
cpumask_t mask; /* CPU mask to read from */
struct list_head node; /* Chain to cfset_session.head */
};
static void cfset_session_init(void)
{
INIT_LIST_HEAD(&cfset_session.head);
}
/* Remove current request from global bookkeeping. Maintain a counter set bit
* mask on a per CPU basis.
* Done in process context under mutex protection.
*/
static void cfset_session_del(struct cfset_request *p)
{
list_del(&p->node);
}
/* Add current request to global bookkeeping. Maintain a counter set bit mask
* on a per CPU basis.
* Done in process context under mutex protection.
*/
static void cfset_session_add(struct cfset_request *p)
{
list_add(&p->node, &cfset_session.head);
}
/* The /dev/hwctr device access uses PMU_F_IN_USE to mark that the device
 * access path is currently in use.
* The cpu_cf_events::dev_state is used to denote counter sets in use by this
* interface. It is always or'ed in. If this interface is not active, its
* value is zero and no additional counter sets will be included.
*
* The cpu_cf_events::state is used by the perf_event_open SVC and remains
* unchanged.
*
 * perf_pmu_enable() and perf_pmu_disable() and their callbacks
 * cpumf_pmu_enable() and cpumf_pmu_disable() are called by the
 * performance measurement subsystem to enable the per-process
 * CPU Measurement counter facility.
 * The XXX_enable() and XXX_disable() functions are used to turn off
 * the x86 performance monitoring interrupt (PMI) during scheduling.
 * s390 uses these calls to temporarily stop and resume the active CPU
 * counter sets during scheduling.
*
 * We do allow concurrent access via the perf_event_open() SVC and the
 * /dev/hwctr device. The perf_event_open() SVC interface makes a lot of effort
* to only run the counters while the calling process is actively scheduled
* to run.
* When /dev/hwctr interface is also used at the same time, the counter sets
* will keep running, even when the process is scheduled off a CPU.
* However this is not a problem and does not lead to wrong counter values
* for the perf_event_open() SVC. The current counter value will be recorded
* during schedule-in. At schedule-out time the current counter value is
* extracted again and the delta is calculated and added to the event.
*/
/* Stop all counter sets via ioctl interface */
static void cfset_ioctl_off(void *parm)
{
struct cpu_cf_events *cpuhw = this_cpu_cfhw();
struct cfset_call_on_cpu_parm *p = parm;
int rc;
/* Check if any counter set used by /dev/hwctr */
for (rc = CPUMF_CTR_SET_BASIC; rc < CPUMF_CTR_SET_MAX; ++rc)
if ((p->sets & cpumf_ctr_ctl[rc])) {
if (!atomic_dec_return(&cpuhw->ctr_set[rc])) {
ctr_set_disable(&cpuhw->dev_state,
cpumf_ctr_ctl[rc]);
ctr_set_stop(&cpuhw->dev_state,
cpumf_ctr_ctl[rc]);
}
}
/* Keep perf_event_open counter sets */
rc = lcctl(cpuhw->dev_state | cpuhw->state);
if (rc)
pr_err("Counter set stop %#llx of /dev/%s failed rc=%i\n",
cpuhw->state, S390_HWCTR_DEVICE, rc);
if (!cpuhw->dev_state)
cpuhw->flags &= ~PMU_F_IN_USE;
}
/* Start counter sets on particular CPU */
static void cfset_ioctl_on(void *parm)
{
struct cpu_cf_events *cpuhw = this_cpu_cfhw();
struct cfset_call_on_cpu_parm *p = parm;
int rc;
cpuhw->flags |= PMU_F_IN_USE;
ctr_set_enable(&cpuhw->dev_state, p->sets);
ctr_set_start(&cpuhw->dev_state, p->sets);
for (rc = CPUMF_CTR_SET_BASIC; rc < CPUMF_CTR_SET_MAX; ++rc)
if ((p->sets & cpumf_ctr_ctl[rc]))
atomic_inc(&cpuhw->ctr_set[rc]);
rc = lcctl(cpuhw->dev_state | cpuhw->state); /* Start counter sets */
if (!rc)
atomic_inc(&p->cpus_ack);
else
pr_err("Counter set start %#llx of /dev/%s failed rc=%i\n",
cpuhw->dev_state | cpuhw->state, S390_HWCTR_DEVICE, rc);
}
static void cfset_release_cpu(void *p)
{
struct cpu_cf_events *cpuhw = this_cpu_cfhw();
int rc;
cpuhw->dev_state = 0;
rc = lcctl(cpuhw->state); /* Keep perf_event_open counter sets */
if (rc)
pr_err("Counter set release %#llx of /dev/%s failed rc=%i\n",
cpuhw->state, S390_HWCTR_DEVICE, rc);
}
/* This modifies the process CPU mask to adapt it to the currently online
 * CPUs. Offline CPUs cannot be addressed. This call terminates the access
 * and is usually followed by close() or a new ioctl(..., START, ...) which
 * creates a new request structure.
*/
static void cfset_all_stop(struct cfset_request *req)
{
struct cfset_call_on_cpu_parm p = {
.sets = req->ctrset,
};
cpumask_and(&req->mask, &req->mask, cpu_online_mask);
on_each_cpu_mask(&req->mask, cfset_ioctl_off, &p, 1);
}
/* The release function is also called when the application is terminated
 * without issuing a proper ioctl(..., S390_HWCTR_STOP, ...) command.
*/
static int cfset_release(struct inode *inode, struct file *file)
{
mutex_lock(&cfset_ctrset_mutex);
/* Open followed by close/exit has no private_data */
if (file->private_data) {
cfset_all_stop(file->private_data);
cfset_session_del(file->private_data);
kfree(file->private_data);
file->private_data = NULL;
}
if (refcount_dec_and_test(&cfset_opencnt)) { /* Last close */
on_each_cpu(cfset_release_cpu, NULL, 1);
cpum_cf_free(-1);
}
mutex_unlock(&cfset_ctrset_mutex);
return 0;
}
/*
* Open via /dev/hwctr device. Allocate all per CPU resources on the first
* open of the device. The last close releases all per CPU resources.
* Parallel perf_event_open system calls also use per CPU resources.
* These invocations are handled via reference counting on the per CPU data
* structures.
*/
static int cfset_open(struct inode *inode, struct file *file)
{
int rc = 0;
if (!perfmon_capable())
return -EPERM;
file->private_data = NULL;
mutex_lock(&cfset_ctrset_mutex);
if (!refcount_inc_not_zero(&cfset_opencnt)) { /* First open */
rc = cpum_cf_alloc(-1);
if (!rc) {
cfset_session_init();
refcount_set(&cfset_opencnt, 1);
}
}
mutex_unlock(&cfset_ctrset_mutex);
/* nonseekable_open() never fails */
return rc ?: nonseekable_open(inode, file);
}
static int cfset_all_start(struct cfset_request *req)
{
struct cfset_call_on_cpu_parm p = {
.sets = req->ctrset,
.cpus_ack = ATOMIC_INIT(0),
};
cpumask_var_t mask;
int rc = 0;
if (!alloc_cpumask_var(&mask, GFP_KERNEL))
return -ENOMEM;
cpumask_and(mask, &req->mask, cpu_online_mask);
on_each_cpu_mask(mask, cfset_ioctl_on, &p, 1);
if (atomic_read(&p.cpus_ack) != cpumask_weight(mask)) {
on_each_cpu_mask(mask, cfset_ioctl_off, &p, 1);
rc = -EIO;
}
free_cpumask_var(mask);
return rc;
}
/* Return the maximum required space for all possible CPUs in case one
* CPU will be onlined during the START, READ, STOP cycles.
* To find out the size of the counter sets, any one CPU will do. They
* all have the same counter sets.
*/
static size_t cfset_needspace(unsigned int sets)
{
size_t bytes = 0;
int i;
for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) {
if (!(sets & cpumf_ctr_ctl[i]))
continue;
bytes += cpum_cf_read_setsize(i) * sizeof(u64) +
sizeof(((struct s390_ctrset_setdata *)0)->set) +
sizeof(((struct s390_ctrset_setdata *)0)->no_cnts);
}
bytes = sizeof(((struct s390_ctrset_read *)0)->no_cpus) + nr_cpu_ids *
(bytes + sizeof(((struct s390_ctrset_cpudata *)0)->cpu_nr) +
sizeof(((struct s390_ctrset_cpudata *)0)->no_sets));
return bytes;
}
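/* Copy the counter set data collected on each CPU in @mask to the user
 * buffer described by struct s390_ctrset_read at @arg.
 */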
static int cfset_all_copy(unsigned long arg, cpumask_t *mask)
{
struct s390_ctrset_read __user *ctrset_read;
unsigned int cpu, cpus, rc = 0;
void __user *uptr;
ctrset_read = (struct s390_ctrset_read __user *)arg;
uptr = ctrset_read->data;
for_each_cpu(cpu, mask) {
struct cpu_cf_events *cpuhw = get_cpu_cfhw(cpu);
struct s390_ctrset_cpudata __user *ctrset_cpudata;
ctrset_cpudata = uptr;
rc = put_user(cpu, &ctrset_cpudata->cpu_nr);
rc |= put_user(cpuhw->sets, &ctrset_cpudata->no_sets);
rc |= copy_to_user(ctrset_cpudata->data, cpuhw->data,
cpuhw->used);
if (rc) {
rc = -EFAULT;
goto out;
}
uptr += sizeof(struct s390_ctrset_cpudata) + cpuhw->used;
cond_resched();
}
cpus = cpumask_weight(mask);
if (put_user(cpus, &ctrset_read->no_cpus))
rc = -EFAULT;
out:
return rc;
}
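/* Store one counter set (header plus counter values) at @p if it fits into
 * the remaining @room bytes. Returns the number of bytes used, or zero when
 * nothing could be stored.
 */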
static size_t cfset_cpuset_read(struct s390_ctrset_setdata *p, int ctrset,
int ctrset_size, size_t room)
{
size_t need = 0;
int rc = -1;
need = sizeof(*p) + sizeof(u64) * ctrset_size;
if (need <= room) {
p->set = cpumf_ctr_ctl[ctrset];
p->no_cnts = ctrset_size;
rc = ctr_stcctm(ctrset, ctrset_size, (u64 *)p->cv);
if (rc == 3) /* Nothing stored */
need = 0;
}
return need;
}
/* Read all counter sets. */
static void cfset_cpu_read(void *parm)
{
struct cpu_cf_events *cpuhw = this_cpu_cfhw();
struct cfset_call_on_cpu_parm *p = parm;
int set, set_size;
size_t space;
/* No data saved yet */
cpuhw->used = 0;
cpuhw->sets = 0;
memset(cpuhw->data, 0, sizeof(cpuhw->data));
/* Scan the counter sets */
for (set = CPUMF_CTR_SET_BASIC; set < CPUMF_CTR_SET_MAX; ++set) {
struct s390_ctrset_setdata *sp = (void *)cpuhw->data +
cpuhw->used;
if (!(p->sets & cpumf_ctr_ctl[set]))
continue; /* Counter set not in list */
set_size = cpum_cf_read_setsize(set);
space = sizeof(cpuhw->data) - cpuhw->used;
space = cfset_cpuset_read(sp, set, set_size, space);
if (space) {
cpuhw->used += space;
cpuhw->sets += 1;
}
}
}
static int cfset_all_read(unsigned long arg, struct cfset_request *req)
{
struct cfset_call_on_cpu_parm p;
cpumask_var_t mask;
int rc;
if (!alloc_cpumask_var(&mask, GFP_KERNEL))
return -ENOMEM;
p.sets = req->ctrset;
cpumask_and(mask, &req->mask, cpu_online_mask);
on_each_cpu_mask(mask, cfset_cpu_read, &p, 1);
rc = cfset_all_copy(arg, mask);
free_cpumask_var(mask);
return rc;
}
static long cfset_ioctl_read(unsigned long arg, struct cfset_request *req)
{
int ret = -ENODATA;
if (req && req->ctrset)
ret = cfset_all_read(arg, req);
return ret;
}
static long cfset_ioctl_stop(struct file *file)
{
struct cfset_request *req = file->private_data;
int ret = -ENXIO;
if (req) {
cfset_all_stop(req);
cfset_session_del(req);
kfree(req);
file->private_data = NULL;
ret = 0;
}
return ret;
}
static long cfset_ioctl_start(unsigned long arg, struct file *file)
{
struct s390_ctrset_start __user *ustart;
struct s390_ctrset_start start;
struct cfset_request *preq;
void __user *umask;
unsigned int len;
int ret = 0;
size_t need;
if (file->private_data)
return -EBUSY;
ustart = (struct s390_ctrset_start __user *)arg;
if (copy_from_user(&start, ustart, sizeof(start)))
return -EFAULT;
if (start.version != S390_HWCTR_START_VERSION)
return -EINVAL;
if (start.counter_sets & ~(cpumf_ctr_ctl[CPUMF_CTR_SET_BASIC] |
cpumf_ctr_ctl[CPUMF_CTR_SET_USER] |
cpumf_ctr_ctl[CPUMF_CTR_SET_CRYPTO] |
cpumf_ctr_ctl[CPUMF_CTR_SET_EXT] |
cpumf_ctr_ctl[CPUMF_CTR_SET_MT_DIAG]))
return -EINVAL; /* Invalid counter set */
if (!start.counter_sets)
return -EINVAL; /* No counter set at all? */
preq = kzalloc(sizeof(*preq), GFP_KERNEL);
if (!preq)
return -ENOMEM;
cpumask_clear(&preq->mask);
len = min_t(u64, start.cpumask_len, cpumask_size());
umask = (void __user *)start.cpumask;
if (copy_from_user(&preq->mask, umask, len)) {
kfree(preq);
return -EFAULT;
}
if (cpumask_empty(&preq->mask)) {
kfree(preq);
return -EINVAL;
}
need = cfset_needspace(start.counter_sets);
if (put_user(need, &ustart->data_bytes)) {
kfree(preq);
return -EFAULT;
}
preq->ctrset = start.counter_sets;
ret = cfset_all_start(preq);
if (!ret) {
cfset_session_add(preq);
file->private_data = preq;
} else {
kfree(preq);
}
return ret;
}
/* Entry point to the /dev/hwctr device interface.
* The ioctl system call supports three subcommands:
* S390_HWCTR_START: Start the specified counter sets on a CPU list. The
* counter set keeps running until explicitly stopped. Returns the number
* of bytes needed to store the counter values. If another S390_HWCTR_START
* ioctl subcommand is called without a previous S390_HWCTR_STOP stop
* command on the same file descriptor, -EBUSY is returned.
* S390_HWCTR_READ: Read the counter set values from specified CPU list given
* with the S390_HWCTR_START command.
* S390_HWCTR_STOP: Stops the counter sets on the CPU list given with the
* previous S390_HWCTR_START subcommand.
*/
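/* Illustrative user-space sketch of the START/READ/STOP cycle. This is not
 * part of the driver; it only restates the interface described above. The
 * structure and constant names are taken from the code in this file and are
 * assumed to be available from the exported uapi header; buffer sizing and
 * error handling are simplified.
 *
 *	int fd = open("/dev/hwctr", O_RDWR);
 *	struct s390_ctrset_start start = {
 *		.version = S390_HWCTR_START_VERSION,
 *		.counter_sets = sets,		// bit mask of requested counter sets
 *		.cpumask = (__u64)(unsigned long)cpumask,  // user buffer with CPU mask
 *		.cpumask_len = cpumask_bytes,
 *	};
 *
 *	ioctl(fd, S390_HWCTR_START, &start);	// start.data_bytes now holds the
 *						// buffer size needed for READ
 *	buf = malloc(start.data_bytes);
 *	ioctl(fd, S390_HWCTR_READ, buf);	// buf begins with struct s390_ctrset_read
 *	ioctl(fd, S390_HWCTR_STOP, 0);
 *	close(fd);
 */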
static long cfset_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
int ret;
cpus_read_lock();
mutex_lock(&cfset_ctrset_mutex);
switch (cmd) {
case S390_HWCTR_START:
ret = cfset_ioctl_start(arg, file);
break;
case S390_HWCTR_STOP:
ret = cfset_ioctl_stop(file);
break;
case S390_HWCTR_READ:
ret = cfset_ioctl_read(arg, file->private_data);
break;
default:
ret = -ENOTTY;
break;
}
mutex_unlock(&cfset_ctrset_mutex);
cpus_read_unlock();
return ret;
}
static const struct file_operations cfset_fops = {
.owner = THIS_MODULE,
.open = cfset_open,
.release = cfset_release,
.unlocked_ioctl = cfset_ioctl,
.compat_ioctl = cfset_ioctl,
.llseek = no_llseek
};
static struct miscdevice cfset_dev = {
.name = S390_HWCTR_DEVICE,
.minor = MISC_DYNAMIC_MINOR,
.fops = &cfset_fops,
.mode = 0666,
};
/* Hotplug add of a CPU. Scan through all active processes and add
* that CPU to the list of CPUs supplied with ioctl(..., START, ...).
*/
static int cfset_online_cpu(unsigned int cpu)
{
struct cfset_call_on_cpu_parm p;
struct cfset_request *rp;
if (!list_empty(&cfset_session.head)) {
list_for_each_entry(rp, &cfset_session.head, node) {
p.sets = rp->ctrset;
cfset_ioctl_on(&p);
cpumask_set_cpu(cpu, &rp->mask);
}
}
return 0;
}
/* Hotplug remove of a CPU. Scan through all active processes and clear
* that CPU from the list of CPUs supplied with ioctl(..., START, ...).
* Adjust reference counts.
*/
static int cfset_offline_cpu(unsigned int cpu)
{
struct cfset_call_on_cpu_parm p;
struct cfset_request *rp;
if (!list_empty(&cfset_session.head)) {
list_for_each_entry(rp, &cfset_session.head, node) {
p.sets = rp->ctrset;
cfset_ioctl_off(&p);
cpumask_clear_cpu(cpu, &rp->mask);
}
}
return 0;
}
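/* The read callback of the CF_DIAG PMU is intentionally empty. Counter set
 * data is delivered as a raw sample from cpumf_pmu_stop() via
 * cfdiag_push_sample() rather than through the read interface.
 */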
static void cfdiag_read(struct perf_event *event)
{
}
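/* Return the bit mask of all counter sets that are currently authorized. */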
static int get_authctrsets(void)
{
unsigned long auth = 0;
enum cpumf_ctr_set i;
for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) {
if (cpumf_ctr_info.auth_ctl & cpumf_ctr_ctl[i])
auth |= cpumf_ctr_ctl[i];
}
return auth;
}
/* Set up the event. Test for authorized counter sets and only include counter
 * sets which are authorized at the time of the setup. Including unauthorized
 * counter sets results in a specification exception (and panic).
*/
static int cfdiag_event_init2(struct perf_event *event)
{
struct perf_event_attr *attr = &event->attr;
int err = 0;
/* Set sample_period to indicate sampling */
event->hw.config = attr->config;
event->hw.sample_period = attr->sample_period;
local64_set(&event->hw.period_left, event->hw.sample_period);
local64_set(&event->count, 0);
event->hw.last_period = event->hw.sample_period;
	/* Add all authorized counter sets to config_base. The
	 * hardware init function is either called per CPU or just once
	 * for all CPUs (event->cpu == -1). This depends on whether
	 * counting is started for all CPUs or on a per-workload basis where
	 * the perf event moves from one CPU to another CPU.
* Checking the authorization on any CPU is fine as the hardware
* applies the same authorization settings to all CPUs.
*/
event->hw.config_base = get_authctrsets();
/* No authorized counter sets, nothing to count/sample */
if (!event->hw.config_base)
err = -EINVAL;
return err;
}
static int cfdiag_event_init(struct perf_event *event)
{
struct perf_event_attr *attr = &event->attr;
int err = -ENOENT;
if (event->attr.config != PERF_EVENT_CPUM_CF_DIAG ||
event->attr.type != event->pmu->type)
goto out;
/* Raw events are used to access counters directly,
* hence do not permit excludes.
* This event is useless without PERF_SAMPLE_RAW to return counter set
* values as raw data.
*/
if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv ||
!(attr->sample_type & (PERF_SAMPLE_CPU | PERF_SAMPLE_RAW))) {
err = -EOPNOTSUPP;
goto out;
}
/* Initialize for using the CPU-measurement counter facility */
if (cpum_cf_alloc(event->cpu))
return -ENOMEM;
event->destroy = hw_perf_event_destroy;
err = cfdiag_event_init2(event);
if (unlikely(err))
event->destroy(event);
out:
return err;
}
/* Create the cf_diag/events/CF_DIAG event sysfs file. This counter is used
 * to collect the complete counter sets for a scheduled process. The goal is
 * to attach the complete counter sets as raw data to this artificial event.
 * This results in complete counter sets being available when a process is
 * scheduled. The data contains the delta of every counter while the process
 * was running.
 */
CPUMF_EVENT_ATTR(CF_DIAG, CF_DIAG, PERF_EVENT_CPUM_CF_DIAG);
static struct attribute *cfdiag_events_attr[] = {
CPUMF_EVENT_PTR(CF_DIAG, CF_DIAG),
NULL,
};
PMU_FORMAT_ATTR(event, "config:0-63");
static struct attribute *cfdiag_format_attr[] = {
&format_attr_event.attr,
NULL,
};
static struct attribute_group cfdiag_events_group = {
.name = "events",
.attrs = cfdiag_events_attr,
};
static struct attribute_group cfdiag_format_group = {
.name = "format",
.attrs = cfdiag_format_attr,
};
static const struct attribute_group *cfdiag_attr_groups[] = {
&cfdiag_events_group,
&cfdiag_format_group,
NULL,
};
/* Performance monitoring unit for event CF_DIAG. Since this event
 * is also started and stopped via the perf_event_open() system call, use
 * the same event enable/disable callback functions. They do not
 * have a pointer to the perf_event structure as their first parameter.
 *
 * The functions XXX_add, XXX_del, XXX_start and XXX_stop are also common.
 * Reuse them and distinguish the event (always the first parameter) via
 * the 'config' member.
*/
static struct pmu cf_diag = {
.task_ctx_nr = perf_sw_context,
.event_init = cfdiag_event_init,
.pmu_enable = cpumf_pmu_enable,
.pmu_disable = cpumf_pmu_disable,
.add = cpumf_pmu_add,
.del = cpumf_pmu_del,
.start = cpumf_pmu_start,
.stop = cpumf_pmu_stop,
.read = cfdiag_read,
.attr_groups = cfdiag_attr_groups
};
/* Calculate memory needed to store all counter sets together with header and
* trailer data. This is independent of the counter set authorization which
* can vary depending on the configuration.
*/
static size_t cfdiag_maxsize(struct cpumf_ctr_info *info)
{
size_t max_size = sizeof(struct cf_trailer_entry);
enum cpumf_ctr_set i;
for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) {
size_t size = cpum_cf_read_setsize(i);
if (size)
max_size += size * sizeof(u64) +
sizeof(struct cf_ctrset_entry);
}
return max_size;
}
/* Get the CPU speed, try sampling facility first and CPU attributes second. */
static void cfdiag_get_cpu_speed(void)
{
unsigned long mhz;
if (cpum_sf_avail()) { /* Sampling facility first */
struct hws_qsi_info_block si;
memset(&si, 0, sizeof(si));
if (!qsi(&si)) {
cfdiag_cpu_speed = si.cpu_speed;
return;
}
}
	/* Fallback: extract the static part of the CPU speed from the CPU
	 * attributes. Used in case the CPU Measurement Sampling Facility is
	 * turned off.
	 */
mhz = __ecag(ECAG_CPU_ATTRIBUTE, 0);
if (mhz != -1UL)
cfdiag_cpu_speed = mhz & 0xffffffff;
}
static int cfset_init(void)
{
size_t need;
int rc;
cfdiag_get_cpu_speed();
/* Make sure the counter set data fits into predefined buffer. */
need = cfdiag_maxsize(&cpumf_ctr_info);
if (need > sizeof(((struct cpu_cf_events *)0)->start)) {
pr_err("Insufficient memory for PMU(cpum_cf_diag) need=%zu\n",
need);
return -ENOMEM;
}
rc = misc_register(&cfset_dev);
if (rc) {
pr_err("Registration of /dev/%s failed rc=%i\n",
cfset_dev.name, rc);
goto out;
}
rc = perf_pmu_register(&cf_diag, "cpum_cf_diag", -1);
if (rc) {
misc_deregister(&cfset_dev);
pr_err("Registration of PMU(cpum_cf_diag) failed with rc=%i\n",
rc);
}
out:
return rc;
}
device_initcall(cpumf_pmu_init);
| linux-master | arch/s390/kernel/perf_cpum_cf.c |
// SPDX-License-Identifier: GPL-2.0
#include "../../../../lib/vdso/gettimeofday.c"
#include "vdso.h"
int __s390_vdso_gettimeofday(struct __kernel_old_timeval *tv,
struct timezone *tz)
{
return __cvdso_gettimeofday(tv, tz);
}
int __s390_vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
{
return __cvdso_clock_gettime(clock, ts);
}
int __s390_vdso_clock_getres(clockid_t clock, struct __kernel_timespec *ts)
{
return __cvdso_clock_getres(clock, ts);
}
| linux-master | arch/s390/kernel/vdso64/vdso64_generic.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright IBM Corp. 2020 */
#include <linux/compiler.h>
#include <linux/getcpu.h>
#include <asm/timex.h>
#include "vdso.h"
int __s390_vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused)
{
union tod_clock clk;
/* CPU number is stored in the programmable field of the TOD clock */
store_tod_clock_ext(&clk);
if (cpu)
*cpu = clk.pf;
/* NUMA node is always zero */
if (node)
*node = 0;
return 0;
}
| linux-master | arch/s390/kernel/vdso64/getcpu.c |
// SPDX-License-Identifier: GPL-2.0
#define __HAVE_ARCH_MEMCMP /* arch function */
#include "../lib/string.c"
| linux-master | arch/s390/purgatory/string.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Purgatory code running between two kernels.
*
* Copyright IBM Corp. 2018
*
* Author(s): Philipp Rudo <[email protected]>
*/
#include <linux/kexec.h>
#include <linux/string.h>
#include <crypto/sha2.h>
#include <asm/purgatory.h>
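/*
 * Recompute the SHA-256 digest over the memory regions recorded in
 * purgatory_sha_regions and compare it with the digest stored in
 * purgatory_sha256_digest. Returns 0 if the digests match, 1 otherwise.
 */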
int verify_sha256_digest(void)
{
struct kexec_sha_region *ptr, *end;
u8 digest[SHA256_DIGEST_SIZE];
struct sha256_state sctx;
sha256_init(&sctx);
end = purgatory_sha_regions + ARRAY_SIZE(purgatory_sha_regions);
for (ptr = purgatory_sha_regions; ptr < end; ptr++)
sha256_update(&sctx, (uint8_t *)(ptr->start), ptr->len);
sha256_final(&sctx, digest);
if (memcmp(digest, purgatory_sha256_digest, sizeof(digest)))
return 1;
return 0;
}
| linux-master | arch/s390/purgatory/purgatory.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include "harddog.h"
#if IS_MODULE(CONFIG_UML_WATCHDOG)
EXPORT_SYMBOL(start_watchdog);
EXPORT_SYMBOL(stop_watchdog);
EXPORT_SYMBOL(ping_watchdog);
#endif
| linux-master | arch/um/drivers/harddog_user_exp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{linux.intel,addtoit}.com)
*/
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include "chan.h"
#include <os.h>
#include <irq_kern.h>
#ifdef CONFIG_NOCONFIG_CHAN
static void *not_configged_init(char *str, int device,
const struct chan_opts *opts)
{
printk(KERN_ERR "Using a channel type which is configured out of "
"UML\n");
return NULL;
}
static int not_configged_open(int input, int output, int primary, void *data,
char **dev_out)
{
printk(KERN_ERR "Using a channel type which is configured out of "
"UML\n");
return -ENODEV;
}
static void not_configged_close(int fd, void *data)
{
printk(KERN_ERR "Using a channel type which is configured out of "
"UML\n");
}
static int not_configged_read(int fd, char *c_out, void *data)
{
printk(KERN_ERR "Using a channel type which is configured out of "
"UML\n");
return -EIO;
}
static int not_configged_write(int fd, const char *buf, int len, void *data)
{
printk(KERN_ERR "Using a channel type which is configured out of "
"UML\n");
return -EIO;
}
static int not_configged_console_write(int fd, const char *buf, int len)
{
printk(KERN_ERR "Using a channel type which is configured out of "
"UML\n");
return -EIO;
}
static int not_configged_window_size(int fd, void *data, unsigned short *rows,
unsigned short *cols)
{
printk(KERN_ERR "Using a channel type which is configured out of "
"UML\n");
return -ENODEV;
}
static void not_configged_free(void *data)
{
printk(KERN_ERR "Using a channel type which is configured out of "
"UML\n");
}
static const struct chan_ops not_configged_ops = {
.init = not_configged_init,
.open = not_configged_open,
.close = not_configged_close,
.read = not_configged_read,
.write = not_configged_write,
.console_write = not_configged_console_write,
.window_size = not_configged_window_size,
.free = not_configged_free,
.winch = 0,
};
#endif /* CONFIG_NOCONFIG_CHAN */
static int open_one_chan(struct chan *chan)
{
int fd, err;
if (chan->opened)
return 0;
if (chan->ops->open == NULL)
fd = 0;
else fd = (*chan->ops->open)(chan->input, chan->output, chan->primary,
chan->data, &chan->dev);
if (fd < 0)
return fd;
err = os_set_fd_block(fd, 0);
if (err) {
(*chan->ops->close)(fd, chan->data);
return err;
}
chan->fd = fd;
chan->opened = 1;
return 0;
}
static int open_chan(struct list_head *chans)
{
struct list_head *ele;
struct chan *chan;
int ret, err = 0;
list_for_each(ele, chans) {
chan = list_entry(ele, struct chan, list);
ret = open_one_chan(chan);
if (chan->primary)
err = ret;
}
return err;
}
void chan_enable_winch(struct chan *chan, struct tty_port *port)
{
if (chan && chan->primary && chan->ops->winch)
register_winch(chan->fd, port);
}
static void line_timer_cb(struct work_struct *work)
{
struct line *line = container_of(work, struct line, task.work);
if (!line->throttled)
chan_interrupt(line, line->read_irq);
}
int enable_chan(struct line *line)
{
struct list_head *ele;
struct chan *chan;
int err;
INIT_DELAYED_WORK(&line->task, line_timer_cb);
list_for_each(ele, &line->chan_list) {
chan = list_entry(ele, struct chan, list);
err = open_one_chan(chan);
if (err) {
if (chan->primary)
goto out_close;
continue;
}
if (chan->enabled)
continue;
err = line_setup_irq(chan->fd, chan->input, chan->output, line,
chan);
if (err)
goto out_close;
chan->enabled = 1;
}
return 0;
out_close:
close_chan(line);
return err;
}
/* Items are added in IRQ context, when free_irq can't be called, and
* removed in process context, when it can.
* This handles interrupt sources which disappear, and which need to
* be permanently disabled. This is discovered in IRQ context, but
* the freeing of the IRQ must be done later.
*/
static DEFINE_SPINLOCK(irqs_to_free_lock);
static LIST_HEAD(irqs_to_free);
void free_irqs(void)
{
struct chan *chan;
LIST_HEAD(list);
struct list_head *ele;
unsigned long flags;
spin_lock_irqsave(&irqs_to_free_lock, flags);
list_splice_init(&irqs_to_free, &list);
spin_unlock_irqrestore(&irqs_to_free_lock, flags);
list_for_each(ele, &list) {
chan = list_entry(ele, struct chan, free_list);
if (chan->input && chan->enabled)
um_free_irq(chan->line->read_irq, chan);
if (chan->output && chan->enabled)
um_free_irq(chan->line->write_irq, chan);
chan->enabled = 0;
}
}
static void close_one_chan(struct chan *chan, int delay_free_irq)
{
unsigned long flags;
if (!chan->opened)
return;
if (delay_free_irq) {
spin_lock_irqsave(&irqs_to_free_lock, flags);
list_add(&chan->free_list, &irqs_to_free);
spin_unlock_irqrestore(&irqs_to_free_lock, flags);
} else {
if (chan->input && chan->enabled)
um_free_irq(chan->line->read_irq, chan);
if (chan->output && chan->enabled)
um_free_irq(chan->line->write_irq, chan);
chan->enabled = 0;
}
if (chan->ops->close != NULL)
(*chan->ops->close)(chan->fd, chan->data);
chan->opened = 0;
chan->fd = -1;
}
void close_chan(struct line *line)
{
struct chan *chan;
	/* Close in reverse order of opening in case more than one of them
* refers to the same device and they save and restore that device's
* state. Then, the first one opened will have the original state,
* so it must be the last closed.
*/
list_for_each_entry_reverse(chan, &line->chan_list, list) {
close_one_chan(chan, 0);
}
}
void deactivate_chan(struct chan *chan, int irq)
{
if (chan && chan->enabled)
deactivate_fd(chan->fd, irq);
}
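/* Write "len" bytes from "buf" to the channel. Only the result of writing to
 * the primary channel is propagated to the caller.
 */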
int write_chan(struct chan *chan, const char *buf, int len,
int write_irq)
{
int n, ret = 0;
if (len == 0 || !chan || !chan->ops->write)
return 0;
n = chan->ops->write(chan->fd, buf, len, chan->data);
if (chan->primary) {
ret = n;
}
return ret;
}
int console_write_chan(struct chan *chan, const char *buf, int len)
{
int n, ret = 0;
if (!chan || !chan->ops->console_write)
return 0;
n = chan->ops->console_write(chan->fd, buf, len);
if (chan->primary)
ret = n;
return ret;
}
int console_open_chan(struct line *line, struct console *co)
{
int err;
err = open_chan(&line->chan_list);
if (err)
return err;
printk(KERN_INFO "Console initialized on /dev/%s%d\n", co->name,
co->index);
return 0;
}
int chan_window_size(struct line *line, unsigned short *rows_out,
unsigned short *cols_out)
{
struct chan *chan;
chan = line->chan_in;
if (chan && chan->primary) {
if (chan->ops->window_size == NULL)
return 0;
return chan->ops->window_size(chan->fd, chan->data,
rows_out, cols_out);
}
chan = line->chan_out;
if (chan && chan->primary) {
if (chan->ops->window_size == NULL)
return 0;
return chan->ops->window_size(chan->fd, chan->data,
rows_out, cols_out);
}
return 0;
}
static void free_one_chan(struct chan *chan)
{
list_del(&chan->list);
close_one_chan(chan, 0);
if (chan->ops->free != NULL)
(*chan->ops->free)(chan->data);
if (chan->primary && chan->output)
ignore_sigio_fd(chan->fd);
kfree(chan);
}
static void free_chan(struct list_head *chans)
{
struct list_head *ele, *next;
struct chan *chan;
list_for_each_safe(ele, next, chans) {
chan = list_entry(ele, struct chan, list);
free_one_chan(chan);
}
}
static int one_chan_config_string(struct chan *chan, char *str, int size,
char **error_out)
{
int n = 0;
if (chan == NULL) {
CONFIG_CHUNK(str, size, n, "none", 1);
return n;
}
CONFIG_CHUNK(str, size, n, chan->ops->type, 0);
if (chan->dev == NULL) {
CONFIG_CHUNK(str, size, n, "", 1);
return n;
}
CONFIG_CHUNK(str, size, n, ":", 0);
CONFIG_CHUNK(str, size, n, chan->dev, 0);
return n;
}
static int chan_pair_config_string(struct chan *in, struct chan *out,
char *str, int size, char **error_out)
{
int n;
n = one_chan_config_string(in, str, size, error_out);
str += n;
size -= n;
if (in == out) {
CONFIG_CHUNK(str, size, n, "", 1);
return n;
}
CONFIG_CHUNK(str, size, n, ",", 1);
n = one_chan_config_string(out, str, size, error_out);
str += n;
size -= n;
CONFIG_CHUNK(str, size, n, "", 1);
return n;
}
int chan_config_string(struct line *line, char *str, int size,
char **error_out)
{
struct chan *in = line->chan_in, *out = line->chan_out;
if (in && !in->primary)
in = NULL;
if (out && !out->primary)
out = NULL;
return chan_pair_config_string(in, out, str, size, error_out);
}
struct chan_type {
char *key;
const struct chan_ops *ops;
};
static const struct chan_type chan_table[] = {
{ "fd", &fd_ops },
#ifdef CONFIG_NULL_CHAN
{ "null", &null_ops },
#else
{ "null", ¬_configged_ops },
#endif
#ifdef CONFIG_PORT_CHAN
{ "port", &port_ops },
#else
{ "port", ¬_configged_ops },
#endif
#ifdef CONFIG_PTY_CHAN
{ "pty", &pty_ops },
{ "pts", &pts_ops },
#else
{ "pty", ¬_configged_ops },
{ "pts", ¬_configged_ops },
#endif
#ifdef CONFIG_TTY_CHAN
{ "tty", &tty_ops },
#else
{ "tty", ¬_configged_ops },
#endif
#ifdef CONFIG_XTERM_CHAN
{ "xterm", &xterm_ops },
#else
{ "xterm", ¬_configged_ops },
#endif
};
static struct chan *parse_chan(struct line *line, char *str, int device,
const struct chan_opts *opts, char **error_out)
{
const struct chan_type *entry;
const struct chan_ops *ops;
struct chan *chan;
void *data;
int i;
ops = NULL;
data = NULL;
for(i = 0; i < ARRAY_SIZE(chan_table); i++) {
entry = &chan_table[i];
if (!strncmp(str, entry->key, strlen(entry->key))) {
ops = entry->ops;
str += strlen(entry->key);
break;
}
}
if (ops == NULL) {
*error_out = "No match for configured backends";
return NULL;
}
data = (*ops->init)(str, device, opts);
if (data == NULL) {
*error_out = "Configuration failed";
return NULL;
}
chan = kmalloc(sizeof(*chan), GFP_ATOMIC);
if (chan == NULL) {
*error_out = "Memory allocation failed";
return NULL;
}
*chan = ((struct chan) { .list = LIST_HEAD_INIT(chan->list),
.free_list =
LIST_HEAD_INIT(chan->free_list),
.line = line,
.primary = 1,
.input = 0,
.output = 0,
.opened = 0,
.enabled = 0,
.fd = -1,
.ops = ops,
.data = data });
return chan;
}
int parse_chan_pair(char *str, struct line *line, int device,
const struct chan_opts *opts, char **error_out)
{
struct list_head *chans = &line->chan_list;
struct chan *new;
char *in, *out;
if (!list_empty(chans)) {
line->chan_in = line->chan_out = NULL;
free_chan(chans);
INIT_LIST_HEAD(chans);
}
if (!str)
return 0;
out = strchr(str, ',');
if (out != NULL) {
in = str;
*out = '\0';
out++;
new = parse_chan(line, in, device, opts, error_out);
if (new == NULL)
return -1;
new->input = 1;
list_add(&new->list, chans);
line->chan_in = new;
new = parse_chan(line, out, device, opts, error_out);
if (new == NULL)
return -1;
list_add(&new->list, chans);
new->output = 1;
line->chan_out = new;
}
else {
new = parse_chan(line, str, device, opts, error_out);
if (new == NULL)
return -1;
list_add(&new->list, chans);
new->input = 1;
new->output = 1;
line->chan_in = line->chan_out = new;
}
return 0;
}
void chan_interrupt(struct line *line, int irq)
{
struct tty_port *port = &line->port;
struct chan *chan = line->chan_in;
int err;
char c;
if (!chan || !chan->ops->read)
goto out;
do {
if (!tty_buffer_request_room(port, 1)) {
schedule_delayed_work(&line->task, 1);
goto out;
}
err = chan->ops->read(chan->fd, &c, chan->data);
if (err > 0)
tty_insert_flip_char(port, c, TTY_NORMAL);
} while (err > 0);
if (err == -EIO) {
if (chan->primary) {
tty_port_tty_hangup(&line->port, false);
if (line->chan_out != chan)
close_one_chan(line->chan_out, 1);
}
close_one_chan(chan, 1);
if (chan->primary)
return;
}
out:
tty_flip_buffer_push(port);
}
| linux-master | arch/um/drivers/chan_kern.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020 Intel Corporation
* Author: Johannes Berg <[email protected]>
*/
#include <linux/platform_device.h>
#include <linux/time-internal.h>
#include <linux/suspend.h>
#include <linux/err.h>
#include <linux/rtc.h>
#include <kern_util.h>
#include <irq_kern.h>
#include <os.h>
#include "rtc.h"
static time64_t uml_rtc_alarm_time;
static bool uml_rtc_alarm_enabled;
static struct rtc_device *uml_rtc;
static int uml_rtc_irq_fd, uml_rtc_irq;
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
static void uml_rtc_time_travel_alarm(struct time_travel_event *ev)
{
uml_rtc_send_timetravel_alarm();
}
static struct time_travel_event uml_rtc_alarm_event = {
.fn = uml_rtc_time_travel_alarm,
};
#endif
static int uml_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct timespec64 ts;
/* Use this to get correct time in time-travel mode */
read_persistent_clock64(&ts);
rtc_time64_to_tm(timespec64_to_ktime(ts) / NSEC_PER_SEC, tm);
return 0;
}
static int uml_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
rtc_time64_to_tm(uml_rtc_alarm_time, &alrm->time);
alrm->enabled = uml_rtc_alarm_enabled;
return 0;
}
static int uml_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
{
unsigned long long secs;
if (!enable && !uml_rtc_alarm_enabled)
return 0;
uml_rtc_alarm_enabled = enable;
secs = uml_rtc_alarm_time - ktime_get_real_seconds();
if (time_travel_mode == TT_MODE_OFF) {
if (!enable) {
uml_rtc_disable_alarm();
return 0;
}
/* enable or update */
return uml_rtc_enable_alarm(secs);
} else {
		time_travel_del_event(&uml_rtc_alarm_event);
if (enable)
			time_travel_add_event_rel(&uml_rtc_alarm_event,
secs * NSEC_PER_SEC);
}
return 0;
}
static int uml_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
uml_rtc_alarm_irq_enable(dev, 0);
uml_rtc_alarm_time = rtc_tm_to_time64(&alrm->time);
uml_rtc_alarm_irq_enable(dev, alrm->enabled);
return 0;
}
static const struct rtc_class_ops uml_rtc_ops = {
.read_time = uml_rtc_read_time,
.read_alarm = uml_rtc_read_alarm,
.alarm_irq_enable = uml_rtc_alarm_irq_enable,
.set_alarm = uml_rtc_set_alarm,
};
static irqreturn_t uml_rtc_interrupt(int irq, void *data)
{
unsigned long long c = 0;
/* alarm triggered, it's now off */
uml_rtc_alarm_enabled = false;
os_read_file(uml_rtc_irq_fd, &c, sizeof(c));
WARN_ON(c == 0);
pm_system_wakeup();
rtc_update_irq(uml_rtc, 1, RTC_IRQF | RTC_AF);
return IRQ_HANDLED;
}
static int uml_rtc_setup(void)
{
int err;
err = uml_rtc_start(time_travel_mode != TT_MODE_OFF);
if (WARN(err < 0, "err = %d\n", err))
return err;
uml_rtc_irq_fd = err;
err = um_request_irq(UM_IRQ_ALLOC, uml_rtc_irq_fd, IRQ_READ,
uml_rtc_interrupt, 0, "rtc", NULL);
if (err < 0) {
uml_rtc_stop(time_travel_mode != TT_MODE_OFF);
return err;
}
irq_set_irq_wake(err, 1);
uml_rtc_irq = err;
return 0;
}
static void uml_rtc_cleanup(void)
{
um_free_irq(uml_rtc_irq, NULL);
uml_rtc_stop(time_travel_mode != TT_MODE_OFF);
}
static int uml_rtc_probe(struct platform_device *pdev)
{
int err;
err = uml_rtc_setup();
if (err)
return err;
uml_rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(uml_rtc)) {
err = PTR_ERR(uml_rtc);
goto cleanup;
}
	uml_rtc->ops = &uml_rtc_ops;
device_init_wakeup(&pdev->dev, 1);
err = devm_rtc_register_device(uml_rtc);
if (err)
goto cleanup;
return 0;
cleanup:
uml_rtc_cleanup();
return err;
}
static int uml_rtc_remove(struct platform_device *pdev)
{
device_init_wakeup(&pdev->dev, 0);
uml_rtc_cleanup();
return 0;
}
static struct platform_driver uml_rtc_driver = {
.probe = uml_rtc_probe,
.remove = uml_rtc_remove,
.driver = {
.name = "uml-rtc",
},
};
static int __init uml_rtc_init(void)
{
struct platform_device *pdev;
int err;
err = platform_driver_register(&uml_rtc_driver);
if (err)
return err;
pdev = platform_device_alloc("uml-rtc", 0);
if (!pdev) {
err = -ENOMEM;
goto unregister;
}
err = platform_device_add(pdev);
if (err)
goto unregister;
return 0;
unregister:
platform_device_put(pdev);
platform_driver_unregister(&uml_rtc_driver);
return err;
}
device_initcall(uml_rtc_init);
| linux-master | arch/um/drivers/rtc_kern.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2000, 2002 Jeff Dike ([email protected])
*/
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/major.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/console.h>
#include <asm/termbits.h>
#include <asm/irq.h>
#include "chan.h"
#include <init.h>
#include <irq_user.h>
#include "mconsole_kern.h"
static const int ssl_version = 1;
#define NR_PORTS 64
static void ssl_announce(char *dev_name, int dev)
{
printk(KERN_INFO "Serial line %d assigned device '%s'\n", dev,
dev_name);
}
/* Almost const, except that xterm_title may be changed in an initcall */
static struct chan_opts opts = {
.announce = ssl_announce,
.xterm_title = "Serial Line #%d",
.raw = 1,
};
static int ssl_config(char *str, char **error_out);
static int ssl_get_config(char *dev, char *str, int size, char **error_out);
static int ssl_remove(int n, char **error_out);
/* Const, except for .mc.list */
static struct line_driver driver = {
.name = "UML serial line",
.device_name = "ttyS",
.major = TTY_MAJOR,
.minor_start = 64,
.type = TTY_DRIVER_TYPE_SERIAL,
.subtype = 0,
.read_irq_name = "ssl",
.write_irq_name = "ssl-write",
.mc = {
.list = LIST_HEAD_INIT(driver.mc.list),
.name = "ssl",
.config = ssl_config,
.get_config = ssl_get_config,
.id = line_id,
.remove = ssl_remove,
},
};
/* The array is initialized by line_init, at initcall time. The
* elements are locked individually as needed.
*/
static char *conf[NR_PORTS];
static char *def_conf = CONFIG_SSL_CHAN;
static struct line serial_lines[NR_PORTS];
static int ssl_config(char *str, char **error_out)
{
return line_config(serial_lines, ARRAY_SIZE(serial_lines), str, &opts,
error_out);
}
static int ssl_get_config(char *dev, char *str, int size, char **error_out)
{
return line_get_config(dev, serial_lines, ARRAY_SIZE(serial_lines), str,
size, error_out);
}
static int ssl_remove(int n, char **error_out)
{
return line_remove(serial_lines, ARRAY_SIZE(serial_lines), n,
error_out);
}
static int ssl_install(struct tty_driver *driver, struct tty_struct *tty)
{
return line_install(driver, tty, &serial_lines[tty->index]);
}
static const struct tty_operations ssl_ops = {
.open = line_open,
.close = line_close,
.write = line_write,
.write_room = line_write_room,
.chars_in_buffer = line_chars_in_buffer,
.flush_buffer = line_flush_buffer,
.flush_chars = line_flush_chars,
.throttle = line_throttle,
.unthrottle = line_unthrottle,
.install = ssl_install,
.hangup = line_hangup,
};
/* Changed by ssl_init and referenced by ssl_exit, which are both serialized
* by being an initcall and exitcall, respectively.
*/
static int ssl_init_done;
static void ssl_console_write(struct console *c, const char *string,
unsigned len)
{
struct line *line = &serial_lines[c->index];
unsigned long flags;
spin_lock_irqsave(&line->lock, flags);
console_write_chan(line->chan_out, string, len);
spin_unlock_irqrestore(&line->lock, flags);
}
static struct tty_driver *ssl_console_device(struct console *c, int *index)
{
*index = c->index;
return driver.driver;
}
static int ssl_console_setup(struct console *co, char *options)
{
struct line *line = &serial_lines[co->index];
return console_open_chan(line, co);
}
/* No locking for register_console call - relies on single-threaded initcalls */
static struct console ssl_cons = {
.name = "ttyS",
.write = ssl_console_write,
.device = ssl_console_device,
.setup = ssl_console_setup,
.flags = CON_PRINTBUFFER|CON_ANYTIME,
.index = -1,
};
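/*
 * Register the line driver, apply the per-port "ssl" configuration
 * (falling back to CONFIG_SSL_CHAN) and hook up the console.
 */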
static int ssl_init(void)
{
char *new_title;
int err;
int i;
printk(KERN_INFO "Initializing software serial port version %d\n",
ssl_version);
err = register_lines(&driver, &ssl_ops, serial_lines,
ARRAY_SIZE(serial_lines));
if (err)
return err;
new_title = add_xterm_umid(opts.xterm_title);
if (new_title != NULL)
opts.xterm_title = new_title;
for (i = 0; i < NR_PORTS; i++) {
char *error;
char *s = conf[i];
if (!s)
s = def_conf;
if (setup_one_line(serial_lines, i, s, &opts, &error))
printk(KERN_ERR "setup_one_line failed for "
"device %d : %s\n", i, error);
}
ssl_init_done = 1;
register_console(&ssl_cons);
return 0;
}
late_initcall(ssl_init);
static void ssl_exit(void)
{
if (!ssl_init_done)
return;
close_lines(serial_lines, ARRAY_SIZE(serial_lines));
}
__uml_exitcall(ssl_exit);
static int ssl_chan_setup(char *str)
{
line_setup(conf, NR_PORTS, &def_conf, str, "serial line");
return 1;
}
__setup("ssl", ssl_chan_setup);
__channel_help(ssl_chan_setup, "ssl");
static int ssl_non_raw_setup(char *str)
{
opts.raw = 0;
return 1;
}
__setup("ssl-non-raw", ssl_non_raw_setup);
__channel_help(ssl_non_raw_setup, "set serial lines to non-raw mode");
| linux-master | arch/um/drivers/ssl.c |
/* UML hardware watchdog, shamelessly stolen from:
*
* SoftDog 0.05: A Software Watchdog Device
*
* (c) Copyright 1996 Alan Cox <[email protected]>, All Rights Reserved.
* http://www.redhat.com
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Neither Alan Cox nor CymruNet Ltd. admit liability nor provide
* warranty for any of this software. This material is provided
* "AS-IS" and at no charge.
*
* (c) Copyright 1995 Alan Cox <[email protected]>
*
* Software only watchdog driver. Unlike its big brother the WDT501P
* driver this won't always recover a failed machine.
*
* 03/96: Angelo Haritsis <[email protected]> :
* Modularised.
* Added soft_margin; use upon insmod to change the timer delay.
* NB: uses same minor as wdt (WATCHDOG_MINOR); we could use separate
* minors.
*
* 19980911 Alan Cox
* Made SMP safe for 2.3.x
*
* 20011127 Joel Becker ([email protected])
* Added soft_noboot; Allows testing the softdog trigger without
* requiring a recompile.
* Added WDIOC_GETTIMEOUT and WDIOC_SETTIMEOUT.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/reboot.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include "mconsole.h"
#include "harddog.h"
MODULE_LICENSE("GPL");
static DEFINE_MUTEX(harddog_mutex);
static DEFINE_SPINLOCK(lock);
static int timer_alive;
static int harddog_in_fd = -1;
static int harddog_out_fd = -1;
/*
* Allow only one person to hold it open
*/
static int harddog_open(struct inode *inode, struct file *file)
{
int err = -EBUSY;
char *sock = NULL;
mutex_lock(&harddog_mutex);
spin_lock(&lock);
if (timer_alive)
goto err;
#ifdef CONFIG_WATCHDOG_NOWAYOUT
__module_get(THIS_MODULE);
#endif
#ifdef CONFIG_MCONSOLE
sock = mconsole_notify_socket();
#endif
err = start_watchdog(&harddog_in_fd, &harddog_out_fd, sock);
if (err)
goto err;
timer_alive = 1;
spin_unlock(&lock);
mutex_unlock(&harddog_mutex);
return stream_open(inode, file);
err:
spin_unlock(&lock);
mutex_unlock(&harddog_mutex);
return err;
}
static int harddog_release(struct inode *inode, struct file *file)
{
/*
* Shut off the timer.
*/
spin_lock(&lock);
stop_watchdog(harddog_in_fd, harddog_out_fd);
harddog_in_fd = -1;
harddog_out_fd = -1;
timer_alive = 0;
spin_unlock(&lock);
return 0;
}
static ssize_t harddog_write(struct file *file, const char __user *data, size_t len,
loff_t *ppos)
{
/*
* Refresh the timer.
*/
if (len)
return ping_watchdog(harddog_out_fd);
return 0;
}
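/*
 * Minimal watchdog ioctl set: report capabilities, always return a
 * clean status, and treat WDIOC_KEEPALIVE as a ping of the helper.
 */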
static int harddog_ioctl_unlocked(struct file *file,
unsigned int cmd, unsigned long arg)
{
void __user *argp = (void __user *)arg;
static struct watchdog_info ident = {
WDIOC_SETTIMEOUT,
0,
"UML Hardware Watchdog"
};
switch (cmd) {
default:
return -ENOTTY;
case WDIOC_GETSUPPORT:
if (copy_to_user(argp, &ident, sizeof(ident)))
return -EFAULT;
return 0;
case WDIOC_GETSTATUS:
case WDIOC_GETBOOTSTATUS:
return put_user(0, (int __user *)argp);
case WDIOC_KEEPALIVE:
return ping_watchdog(harddog_out_fd);
}
}
static long harddog_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
long ret;
mutex_lock(&harddog_mutex);
ret = harddog_ioctl_unlocked(file, cmd, arg);
mutex_unlock(&harddog_mutex);
return ret;
}
static const struct file_operations harddog_fops = {
.owner = THIS_MODULE,
.write = harddog_write,
.unlocked_ioctl = harddog_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.open = harddog_open,
.release = harddog_release,
.llseek = no_llseek,
};
static struct miscdevice harddog_miscdev = {
.minor = WATCHDOG_MINOR,
.name = "watchdog",
.fops = &harddog_fops,
};
module_misc_device(harddog_miscdev);
| linux-master | arch/um/drivers/harddog_kern.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020 Intel Corporation
* Author: Johannes Berg <[email protected]>
*/
#include <stdbool.h>
#include <os.h>
#include <errno.h>
#include <sched.h>
#include <unistd.h>
#include <kern_util.h>
#include <sys/select.h>
#include <stdio.h>
#include <sys/timerfd.h>
#include "rtc.h"
static int uml_rtc_irq_fds[2];
void uml_rtc_send_timetravel_alarm(void)
{
unsigned long long c = 1;
CATCH_EINTR(write(uml_rtc_irq_fds[1], &c, sizeof(c)));
}
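/*
 * Open the alarm file descriptor: a pipe in time-travel mode (written
 * by uml_rtc_send_timetravel_alarm()), otherwise a CLOCK_REALTIME
 * timerfd with the SIGIO workaround wired up. Returns the fd to read
 * alarm events from, or a negative error.
 */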
int uml_rtc_start(bool timetravel)
{
int err;
if (timetravel) {
err = os_pipe(uml_rtc_irq_fds, 1, 1);
if (err)
goto fail;
} else {
uml_rtc_irq_fds[0] = timerfd_create(CLOCK_REALTIME, TFD_CLOEXEC);
if (uml_rtc_irq_fds[0] < 0) {
err = -errno;
goto fail;
}
/* apparently timerfd won't send SIGIO, use workaround */
sigio_broken(uml_rtc_irq_fds[0]);
err = add_sigio_fd(uml_rtc_irq_fds[0]);
if (err < 0) {
close(uml_rtc_irq_fds[0]);
goto fail;
}
}
return uml_rtc_irq_fds[0];
fail:
uml_rtc_stop(timetravel);
return err;
}
int uml_rtc_enable_alarm(unsigned long long delta_seconds)
{
struct itimerspec it = {
.it_value = {
.tv_sec = delta_seconds,
},
};
if (timerfd_settime(uml_rtc_irq_fds[0], 0, &it, NULL))
return -errno;
return 0;
}
void uml_rtc_disable_alarm(void)
{
uml_rtc_enable_alarm(0);
}
void uml_rtc_stop(bool timetravel)
{
if (timetravel)
os_close_file(uml_rtc_irq_fds[1]);
else
ignore_sigio_fd(uml_rtc_irq_fds[0]);
os_close_file(uml_rtc_irq_fds[0]);
}
| linux-master | arch/um/drivers/rtc_user.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{linux.intel,addtoit}.com)
*/
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <sched.h>
#include <signal.h>
#include <termios.h>
#include <sys/ioctl.h>
#include "chan_user.h"
#include <os.h>
#include <um_malloc.h>
void generic_close(int fd, void *unused)
{
close(fd);
}
int generic_read(int fd, char *c_out, void *unused)
{
int n;
n = read(fd, c_out, sizeof(*c_out));
if (n > 0)
return n;
else if (n == 0)
return -EIO;
else if (errno == EAGAIN)
return 0;
return -errno;
}
/* XXX Trivial wrapper around write */
int generic_write(int fd, const char *buf, int n, void *unused)
{
int err;
err = write(fd, buf, n);
if (err > 0)
return err;
else if (errno == EAGAIN)
return 0;
else if (err == 0)
return -EIO;
return -errno;
}
int generic_window_size(int fd, void *unused, unsigned short *rows_out,
unsigned short *cols_out)
{
struct winsize size;
int ret;
if (ioctl(fd, TIOCGWINSZ, &size) < 0)
return -errno;
ret = ((*rows_out != size.ws_row) || (*cols_out != size.ws_col));
*rows_out = size.ws_row;
*cols_out = size.ws_col;
return ret;
}
void generic_free(void *data)
{
kfree(data);
}
int generic_console_write(int fd, const char *buf, int n)
{
sigset_t old, no_sigio;
struct termios save, new;
int err;
if (isatty(fd)) {
sigemptyset(&no_sigio);
sigaddset(&no_sigio, SIGIO);
if (sigprocmask(SIG_BLOCK, &no_sigio, &old))
goto error;
CATCH_EINTR(err = tcgetattr(fd, &save));
if (err)
goto error;
new = save;
/*
* The terminal becomes a bit less raw, to handle \n also as
* "Carriage Return", not only as "New Line". Otherwise, the new
* line won't start at the first column.
*/
new.c_oflag |= OPOST;
CATCH_EINTR(err = tcsetattr(fd, TCSAFLUSH, &new));
if (err)
goto error;
}
err = generic_write(fd, buf, n, NULL);
/*
* Restore raw mode, in any case; we *must* ignore any error apart
* EINTR, except for debug.
*/
if (isatty(fd)) {
CATCH_EINTR(tcsetattr(fd, TCSAFLUSH, &save));
sigprocmask(SIG_SETMASK, &old, NULL);
}
return err;
error:
return -errno;
}
/*
* UML SIGWINCH handling
*
* The point of this is to handle SIGWINCH on consoles which have host
* ttys and relay them inside UML to whatever might be running on the
* console and cares about the window size (since SIGWINCH notifies
* about terminal size changes).
*
* So, we have a separate thread for each host tty attached to a UML
* device (side-issue - I'm annoyed that one thread can't have
* multiple controlling ttys for the purpose of handling SIGWINCH, but
* I imagine there are other reasons why that doesn't make sense).
*
* SIGWINCH can't be received synchronously, so you have to set up to
* receive it as a signal. That being the case, if you are going to
* wait for it, it is convenient to sit in sigsuspend() and wait for
* the signal to bounce you out of it (see below for how we make sure
* to exit only on SIGWINCH).
*/
static void winch_handler(int sig)
{
}
struct winch_data {
int pty_fd;
int pipe_fd;
};
static int winch_thread(void *arg)
{
struct winch_data *data = arg;
sigset_t sigs;
int pty_fd, pipe_fd;
int count;
char c = 1;
pty_fd = data->pty_fd;
pipe_fd = data->pipe_fd;
count = write(pipe_fd, &c, sizeof(c));
if (count != sizeof(c))
printk(UM_KERN_ERR "winch_thread : failed to write "
"synchronization byte, err = %d\n", -count);
/*
* We are not using SIG_IGN on purpose, so don't fix it as I thought to
* do! If using SIG_IGN, the sigsuspend() call below would not stop on
* SIGWINCH.
*/
signal(SIGWINCH, winch_handler);
sigfillset(&sigs);
/* Block all signals possible. */
if (sigprocmask(SIG_SETMASK, &sigs, NULL) < 0) {
printk(UM_KERN_ERR "winch_thread : sigprocmask failed, "
"errno = %d\n", errno);
exit(1);
}
/* In sigsuspend(), block anything else than SIGWINCH. */
sigdelset(&sigs, SIGWINCH);
if (setsid() < 0) {
printk(UM_KERN_ERR "winch_thread : setsid failed, errno = %d\n",
errno);
exit(1);
}
if (ioctl(pty_fd, TIOCSCTTY, 0) < 0) {
printk(UM_KERN_ERR "winch_thread : TIOCSCTTY failed on "
"fd %d err = %d\n", pty_fd, errno);
exit(1);
}
if (tcsetpgrp(pty_fd, os_getpid()) < 0) {
printk(UM_KERN_ERR "winch_thread : tcsetpgrp failed on "
"fd %d err = %d\n", pty_fd, errno);
exit(1);
}
/*
* These are synchronization calls between various UML threads on the
* host - since they are not different kernel threads, we cannot use
* kernel semaphores. We don't use SysV semaphores because they are
* persistent.
*/
count = read(pipe_fd, &c, sizeof(c));
if (count != sizeof(c))
printk(UM_KERN_ERR "winch_thread : failed to read "
"synchronization byte, err = %d\n", errno);
while (1) {
/*
* This will be interrupted by SIGWINCH only, since
* other signals are blocked.
*/
sigsuspend(&sigs);
count = write(pipe_fd, &c, sizeof(c));
if (count != sizeof(c))
printk(UM_KERN_ERR "winch_thread : write failed, "
"err = %d\n", errno);
}
}
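/*
 * Set up the SIGWINCH relay for one host tty: create the
 * synchronization pipe, start winch_thread with CLONE_FILES and wait
 * for its startup byte before handing the read side back to the
 * caller as a non-blocking fd.
 */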
static int winch_tramp(int fd, struct tty_port *port, int *fd_out,
unsigned long *stack_out)
{
struct winch_data data;
int fds[2], n, err, pid;
char c;
err = os_pipe(fds, 1, 1);
if (err < 0) {
printk(UM_KERN_ERR "winch_tramp : os_pipe failed, err = %d\n",
-err);
goto out;
}
data = ((struct winch_data) { .pty_fd = fd,
.pipe_fd = fds[1] } );
/*
* CLONE_FILES so this thread doesn't hold open files which are open
* now, but later closed in a different thread. This is a
* problem with /dev/net/tun, which if held open by this
* thread, prevents the TUN/TAP device from being reused.
*/
pid = run_helper_thread(winch_thread, &data, CLONE_FILES, stack_out);
if (pid < 0) {
err = pid;
printk(UM_KERN_ERR "fork of winch_thread failed - errno = %d\n",
-err);
goto out_close;
}
*fd_out = fds[0];
n = read(fds[0], &c, sizeof(c));
if (n != sizeof(c)) {
printk(UM_KERN_ERR "winch_tramp : failed to read "
"synchronization byte\n");
printk(UM_KERN_ERR "read failed, err = %d\n", errno);
printk(UM_KERN_ERR "fd %d will not support SIGWINCH\n", fd);
err = -EINVAL;
goto out_close;
}
err = os_set_fd_block(*fd_out, 0);
if (err) {
printk(UM_KERN_ERR "winch_tramp: failed to set thread_fd "
"non-blocking.\n");
goto out_close;
}
return pid;
out_close:
close(fds[1]);
close(fds[0]);
out:
return err;
}
void register_winch(int fd, struct tty_port *port)
{
unsigned long stack;
int pid, thread, count, thread_fd = -1;
char c = 1;
if (!isatty(fd))
return;
pid = tcgetpgrp(fd);
if (is_skas_winch(pid, fd, port)) {
register_winch_irq(-1, fd, -1, port, 0);
return;
}
if (pid == -1) {
thread = winch_tramp(fd, port, &thread_fd, &stack);
if (thread < 0)
return;
register_winch_irq(thread_fd, fd, thread, port, stack);
count = write(thread_fd, &c, sizeof(c));
if (count != sizeof(c))
printk(UM_KERN_ERR "register_winch : failed to write "
"synchronization byte, err = %d\n", errno);
}
}
| linux-master | arch/um/drivers/chan_user.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <os.h>
#include "harddog.h"
struct dog_data {
int stdin_fd;
int stdout_fd;
int close_me[2];
};
static void pre_exec(void *d)
{
struct dog_data *data = d;
dup2(data->stdin_fd, 0);
dup2(data->stdout_fd, 1);
dup2(data->stdout_fd, 2);
close(data->stdin_fd);
close(data->stdout_fd);
close(data->close_me[0]);
close(data->close_me[1]);
}
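/*
 * Launch the external uml_watchdog helper with its stdin/stdout
 * connected to a pair of pipes, passing either the mconsole socket or
 * our pid, and wait for one byte from it before declaring success.
 */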
int start_watchdog(int *in_fd_ret, int *out_fd_ret, char *sock)
{
struct dog_data data;
int in_fds[2], out_fds[2], pid, n, err;
char pid_buf[sizeof("nnnnnnn\0")], c;
char *pid_args[] = { "/usr/bin/uml_watchdog", "-pid", pid_buf, NULL };
char *mconsole_args[] = { "/usr/bin/uml_watchdog", "-mconsole", NULL,
NULL };
char **args = NULL;
err = os_pipe(in_fds, 1, 0);
if (err < 0) {
printk("harddog_open - os_pipe failed, err = %d\n", -err);
goto out;
}
err = os_pipe(out_fds, 1, 0);
if (err < 0) {
printk("harddog_open - os_pipe failed, err = %d\n", -err);
goto out_close_in;
}
data.stdin_fd = out_fds[0];
data.stdout_fd = in_fds[1];
data.close_me[0] = out_fds[1];
data.close_me[1] = in_fds[0];
if (sock != NULL) {
mconsole_args[2] = sock;
args = mconsole_args;
}
else {
/* XXX The os_getpid() is not SMP correct */
sprintf(pid_buf, "%d", os_getpid());
args = pid_args;
}
pid = run_helper(pre_exec, &data, args);
close(out_fds[0]);
close(in_fds[1]);
if (pid < 0) {
err = -pid;
printk("harddog_open - run_helper failed, errno = %d\n", -err);
goto out_close_out;
}
n = read(in_fds[0], &c, sizeof(c));
if (n == 0) {
printk("harddog_open - EOF on watchdog pipe\n");
helper_wait(pid);
err = -EIO;
goto out_close_out;
}
else if (n < 0) {
printk("harddog_open - read of watchdog pipe failed, "
"err = %d\n", errno);
helper_wait(pid);
err = n;
goto out_close_out;
}
*in_fd_ret = in_fds[0];
*out_fd_ret = out_fds[1];
return 0;
out_close_in:
close(in_fds[0]);
close(in_fds[1]);
out_close_out:
close(out_fds[0]);
close(out_fds[1]);
out:
return err;
}
void stop_watchdog(int in_fd, int out_fd)
{
close(in_fd);
close(out_fd);
}
int ping_watchdog(int fd)
{
int n;
char c = '\n';
n = write(fd, &c, sizeof(c));
if (n != sizeof(c)) {
printk("ping_watchdog - write failed, ret = %d, err = %d\n",
n, errno);
if (n < 0)
return n;
return -EIO;
}
return 1;
}
| linux-master | arch/um/drivers/harddog_user.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <net_kern.h>
#include "slip.h"
struct slip_init {
char *gate_addr;
};
static void slip_init(struct net_device *dev, void *data)
{
struct uml_net_private *private;
struct slip_data *spri;
struct slip_init *init = data;
private = netdev_priv(dev);
spri = (struct slip_data *) private->user;
memset(spri->name, 0, sizeof(spri->name));
spri->addr = NULL;
spri->gate_addr = init->gate_addr;
spri->slave = -1;
spri->dev = dev;
slip_proto_init(&spri->slip);
dev->hard_header_len = 0;
dev->header_ops = NULL;
dev->addr_len = 0;
dev->type = ARPHRD_SLIP;
dev->tx_queue_len = 256;
dev->flags = IFF_NOARP;
printk("SLIP backend - SLIP IP = %s\n", spri->gate_addr);
}
static unsigned short slip_protocol(struct sk_buff *skbuff)
{
return htons(ETH_P_IP);
}
static int slip_read(int fd, struct sk_buff *skb, struct uml_net_private *lp)
{
return slip_user_read(fd, skb_mac_header(skb), skb->dev->mtu,
(struct slip_data *) &lp->user);
}
static int slip_write(int fd, struct sk_buff *skb, struct uml_net_private *lp)
{
return slip_user_write(fd, skb->data, skb->len,
(struct slip_data *) &lp->user);
}
static const struct net_kern_info slip_kern_info = {
.init = slip_init,
.protocol = slip_protocol,
.read = slip_read,
.write = slip_write,
};
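/*
 * Command-line setup: anything following "slip" in the transport
 * specification is taken as the gateway address, otherwise none is
 * set.
 */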
static int slip_setup(char *str, char **mac_out, void *data)
{
struct slip_init *init = data;
*init = ((struct slip_init) { .gate_addr = NULL });
if (str[0] != '\0')
init->gate_addr = str;
return 1;
}
static struct transport slip_transport = {
.list = LIST_HEAD_INIT(slip_transport.list),
.name = "slip",
.setup = slip_setup,
.user = &slip_user_info,
.kern = &slip_kern_info,
.private_size = sizeof(struct slip_data),
.setup_size = sizeof(struct slip_init),
};
static int register_slip(void)
{
register_transport(&slip_transport);
return 0;
}
late_initcall(register_slip);
| linux-master | arch/um/drivers/slip_kern.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Copyright (C) 2001 Lennert Buytenhek ([email protected]) and
* James Leu ([email protected]).
* Copyright (C) 2001 by various other people who didn't put their name here.
*/
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/un.h>
#include "daemon.h"
#include <net_user.h>
#include <os.h>
#include <um_malloc.h>
enum request_type { REQ_NEW_CONTROL };
#define SWITCH_MAGIC 0xfeedface
struct request_v3 {
uint32_t magic;
uint32_t version;
enum request_type type;
struct sockaddr_un sock;
};
static struct sockaddr_un *new_addr(void *name, int len)
{
struct sockaddr_un *sun;
sun = uml_kmalloc(sizeof(struct sockaddr_un), UM_GFP_KERNEL);
if (sun == NULL) {
printk(UM_KERN_ERR "new_addr: allocation of sockaddr_un "
"failed\n");
return NULL;
}
sun->sun_family = AF_UNIX;
memcpy(sun->sun_path, name, len);
return sun;
}
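/*
 * Handshake with the switch daemon: connect a stream socket to its
 * control address, bind a datagram socket for data, send a
 * REQ_NEW_CONTROL request and read back the sockaddr the switch will
 * use for data traffic.
 */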
static int connect_to_switch(struct daemon_data *pri)
{
struct sockaddr_un *ctl_addr = pri->ctl_addr;
struct sockaddr_un *local_addr = pri->local_addr;
struct sockaddr_un *sun;
struct request_v3 req;
int fd, n, err;
pri->control = socket(AF_UNIX, SOCK_STREAM, 0);
if (pri->control < 0) {
err = -errno;
printk(UM_KERN_ERR "daemon_open : control socket failed, "
"errno = %d\n", -err);
return err;
}
if (connect(pri->control, (struct sockaddr *) ctl_addr,
sizeof(*ctl_addr)) < 0) {
err = -errno;
printk(UM_KERN_ERR "daemon_open : control connect failed, "
"errno = %d\n", -err);
goto out;
}
fd = socket(AF_UNIX, SOCK_DGRAM, 0);
if (fd < 0) {
err = -errno;
printk(UM_KERN_ERR "daemon_open : data socket failed, "
"errno = %d\n", -err);
goto out;
}
if (bind(fd, (struct sockaddr *) local_addr, sizeof(*local_addr)) < 0) {
err = -errno;
printk(UM_KERN_ERR "daemon_open : data bind failed, "
"errno = %d\n", -err);
goto out_close;
}
sun = uml_kmalloc(sizeof(struct sockaddr_un), UM_GFP_KERNEL);
if (sun == NULL) {
printk(UM_KERN_ERR "new_addr: allocation of sockaddr_un "
"failed\n");
err = -ENOMEM;
goto out_close;
}
req.magic = SWITCH_MAGIC;
req.version = SWITCH_VERSION;
req.type = REQ_NEW_CONTROL;
req.sock = *local_addr;
n = write(pri->control, &req, sizeof(req));
if (n != sizeof(req)) {
printk(UM_KERN_ERR "daemon_open : control setup request "
"failed, err = %d\n", -errno);
err = -ENOTCONN;
goto out_free;
}
n = read(pri->control, sun, sizeof(*sun));
if (n != sizeof(*sun)) {
printk(UM_KERN_ERR "daemon_open : read of data socket failed, "
"err = %d\n", -errno);
err = -ENOTCONN;
goto out_free;
}
pri->data_addr = sun;
return fd;
out_free:
kfree(sun);
out_close:
close(fd);
out:
close(pri->control);
return err;
}
static int daemon_user_init(void *data, void *dev)
{
struct daemon_data *pri = data;
struct timeval tv;
struct {
char zero;
int pid;
int usecs;
} name;
if (!strcmp(pri->sock_type, "unix"))
pri->ctl_addr = new_addr(pri->ctl_sock,
strlen(pri->ctl_sock) + 1);
name.zero = 0;
name.pid = os_getpid();
gettimeofday(&tv, NULL);
name.usecs = tv.tv_usec;
pri->local_addr = new_addr(&name, sizeof(name));
pri->dev = dev;
pri->fd = connect_to_switch(pri);
if (pri->fd < 0) {
kfree(pri->local_addr);
pri->local_addr = NULL;
return pri->fd;
}
return 0;
}
static int daemon_open(void *data)
{
struct daemon_data *pri = data;
return pri->fd;
}
static void daemon_remove(void *data)
{
struct daemon_data *pri = data;
close(pri->fd);
pri->fd = -1;
close(pri->control);
pri->control = -1;
kfree(pri->data_addr);
pri->data_addr = NULL;
kfree(pri->ctl_addr);
pri->ctl_addr = NULL;
kfree(pri->local_addr);
pri->local_addr = NULL;
}
int daemon_user_write(int fd, void *buf, int len, struct daemon_data *pri)
{
struct sockaddr_un *data_addr = pri->data_addr;
return net_sendto(fd, buf, len, data_addr, sizeof(*data_addr));
}
const struct net_user_info daemon_user_info = {
.init = daemon_user_init,
.open = daemon_open,
.close = NULL,
.remove = daemon_remove,
.add_address = NULL,
.delete_address = NULL,
.mtu = ETH_MAX_PACKET,
.max_packet = ETH_MAX_PACKET + ETH_HEADER_OTHER,
};
| linux-master | arch/um/drivers/daemon_user.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
#include <errno.h>
#include <pcap.h>
#include <string.h>
#include <asm/types.h>
#include <net_user.h>
#include "pcap_user.h"
#include <um_malloc.h>
#define PCAP_FD(p) (*(int *)(p))
static int pcap_user_init(void *data, void *dev)
{
struct pcap_data *pri = data;
pcap_t *p;
char errors[PCAP_ERRBUF_SIZE];
p = pcap_open_live(pri->host_if, ETH_MAX_PACKET + ETH_HEADER_OTHER,
pri->promisc, 0, errors);
if (p == NULL) {
printk(UM_KERN_ERR "pcap_user_init : pcap_open_live failed - "
"'%s'\n", errors);
return -EINVAL;
}
pri->dev = dev;
pri->pcap = p;
return 0;
}
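/*
 * If a filter expression was given, compile and attach it against the
 * device's netmask before exposing the pcap descriptor as the fd to
 * poll.
 */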
static int pcap_user_open(void *data)
{
struct pcap_data *pri = data;
__u32 netmask;
int err;
if (pri->pcap == NULL)
return -ENODEV;
if (pri->filter != NULL) {
err = dev_netmask(pri->dev, &netmask);
if (err < 0) {
printk(UM_KERN_ERR "pcap_user_open : dev_netmask failed\n");
return -EIO;
}
pri->compiled = uml_kmalloc(sizeof(struct bpf_program),
UM_GFP_KERNEL);
if (pri->compiled == NULL) {
printk(UM_KERN_ERR "pcap_user_open : kmalloc failed\n");
return -ENOMEM;
}
err = pcap_compile(pri->pcap,
(struct bpf_program *) pri->compiled,
pri->filter, pri->optimize, netmask);
if (err < 0) {
printk(UM_KERN_ERR "pcap_user_open : pcap_compile failed - "
"'%s'\n", pcap_geterr(pri->pcap));
goto out;
}
err = pcap_setfilter(pri->pcap, pri->compiled);
if (err < 0) {
printk(UM_KERN_ERR "pcap_user_open : pcap_setfilter "
"failed - '%s'\n", pcap_geterr(pri->pcap));
goto out;
}
}
return PCAP_FD(pri->pcap);
out:
kfree(pri->compiled);
return -EIO;
}
static void pcap_remove(void *data)
{
struct pcap_data *pri = data;
if (pri->compiled != NULL)
pcap_freecode(pri->compiled);
if (pri->pcap != NULL)
pcap_close(pri->pcap);
}
struct pcap_handler_data {
char *buffer;
int len;
};
static void handler(u_char *data, const struct pcap_pkthdr *header,
const u_char *packet)
{
int len;
struct pcap_handler_data *hdata = (struct pcap_handler_data *) data;
len = hdata->len < header->caplen ? hdata->len : header->caplen;
memcpy(hdata->buffer, packet, len);
hdata->len = len;
}
int pcap_user_read(int fd, void *buffer, int len, struct pcap_data *pri)
{
struct pcap_handler_data hdata = ((struct pcap_handler_data)
{ .buffer = buffer,
.len = len });
int n;
n = pcap_dispatch(pri->pcap, 1, handler, (u_char *) &hdata);
if (n < 0) {
printk(UM_KERN_ERR "pcap_dispatch failed - %s\n",
pcap_geterr(pri->pcap));
return -EIO;
}
else if (n == 0)
return 0;
return hdata.len;
}
const struct net_user_info pcap_user_info = {
.init = pcap_user_init,
.open = pcap_user_open,
.close = NULL,
.remove = pcap_remove,
.add_address = NULL,
.delete_address = NULL,
.mtu = ETH_MAX_PACKET,
.max_packet = ETH_MAX_PACKET + ETH_HEADER_OTHER,
};
| linux-master | arch/um/drivers/pcap_user.c |
// SPDX-License-Identifier: GPL-2.0
/*
* user-mode-linux networking multicast transport
* Copyright (C) 2001 by Harald Welte <[email protected]>
* Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*
* based on the existing uml-networking code, which is
* Copyright (C) 2001 Lennert Buytenhek ([email protected]) and
* James Leu ([email protected]).
* Copyright (C) 2001 by various other people who didn't put their name here.
*
*/
#include <linux/init.h>
#include <linux/netdevice.h>
#include "umcast.h"
#include <net_kern.h>
struct umcast_init {
char *addr;
int lport;
int rport;
int ttl;
bool unicast;
};
static void umcast_init(struct net_device *dev, void *data)
{
struct uml_net_private *pri;
struct umcast_data *dpri;
struct umcast_init *init = data;
pri = netdev_priv(dev);
dpri = (struct umcast_data *) pri->user;
dpri->addr = init->addr;
dpri->lport = init->lport;
dpri->rport = init->rport;
dpri->unicast = init->unicast;
dpri->ttl = init->ttl;
dpri->dev = dev;
if (dpri->unicast) {
printk(KERN_INFO "ucast backend address: %s:%u listen port: "
"%u\n", dpri->addr, dpri->rport, dpri->lport);
} else {
printk(KERN_INFO "mcast backend multicast address: %s:%u, "
"TTL:%u\n", dpri->addr, dpri->lport, dpri->ttl);
}
}
static int umcast_read(int fd, struct sk_buff *skb, struct uml_net_private *lp)
{
return net_recvfrom(fd, skb_mac_header(skb),
skb->dev->mtu + ETH_HEADER_OTHER);
}
static int umcast_write(int fd, struct sk_buff *skb, struct uml_net_private *lp)
{
return umcast_user_write(fd, skb->data, skb->len,
(struct umcast_data *) &lp->user);
}
static const struct net_kern_info umcast_kern_info = {
.init = umcast_init,
.protocol = eth_protocol,
.read = umcast_read,
.write = umcast_write,
};
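/*
 * Parse the mcast specification (MAC, address, port and TTL, all
 * optional); missing fields fall back to 239.192.168.1, port 1102 and
 * TTL 1.
 */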
static int mcast_setup(char *str, char **mac_out, void *data)
{
struct umcast_init *init = data;
char *port_str = NULL, *ttl_str = NULL, *remain;
char *last;
*init = ((struct umcast_init)
{ .addr = "239.192.168.1",
.lport = 1102,
.ttl = 1 });
remain = split_if_spec(str, mac_out, &init->addr, &port_str, &ttl_str,
NULL);
if (remain != NULL) {
printk(KERN_ERR "mcast_setup - Extra garbage on "
"specification : '%s'\n", remain);
return 0;
}
if (port_str != NULL) {
init->lport = simple_strtoul(port_str, &last, 10);
if ((*last != '\0') || (last == port_str)) {
printk(KERN_ERR "mcast_setup - Bad port : '%s'\n",
port_str);
return 0;
}
}
if (ttl_str != NULL) {
init->ttl = simple_strtoul(ttl_str, &last, 10);
if ((*last != '\0') || (last == ttl_str)) {
printk(KERN_ERR "mcast_setup - Bad ttl : '%s'\n",
ttl_str);
return 0;
}
}
init->unicast = false;
init->rport = init->lport;
printk(KERN_INFO "Configured mcast device: %s:%u-%u\n", init->addr,
init->lport, init->ttl);
return 1;
}
static int ucast_setup(char *str, char **mac_out, void *data)
{
struct umcast_init *init = data;
char *lport_str = NULL, *rport_str = NULL, *remain;
char *last;
*init = ((struct umcast_init)
{ .addr = "",
.lport = 1102,
.rport = 1102 });
remain = split_if_spec(str, mac_out, &init->addr,
&lport_str, &rport_str, NULL);
if (remain != NULL) {
printk(KERN_ERR "ucast_setup - Extra garbage on "
"specification : '%s'\n", remain);
return 0;
}
if (lport_str != NULL) {
init->lport = simple_strtoul(lport_str, &last, 10);
if ((*last != '\0') || (last == lport_str)) {
printk(KERN_ERR "ucast_setup - Bad listen port : "
"'%s'\n", lport_str);
return 0;
}
}
if (rport_str != NULL) {
init->rport = simple_strtoul(rport_str, &last, 10);
if ((*last != '\0') || (last == rport_str)) {
printk(KERN_ERR "ucast_setup - Bad remote port : "
"'%s'\n", rport_str);
return 0;
}
}
init->unicast = true;
printk(KERN_INFO "Configured ucast device: :%u -> %s:%u\n",
init->lport, init->addr, init->rport);
return 1;
}
static struct transport mcast_transport = {
.list = LIST_HEAD_INIT(mcast_transport.list),
.name = "mcast",
.setup = mcast_setup,
.user = &umcast_user_info,
.kern = &umcast_kern_info,
.private_size = sizeof(struct umcast_data),
.setup_size = sizeof(struct umcast_init),
};
static struct transport ucast_transport = {
.list = LIST_HEAD_INIT(ucast_transport.list),
.name = "ucast",
.setup = ucast_setup,
.user = &umcast_user_info,
.kern = &umcast_kern_info,
.private_size = sizeof(struct umcast_data),
.setup_size = sizeof(struct umcast_init),
};
static int register_umcast(void)
{
register_transport(&mcast_transport);
register_transport(&ucast_transport);
return 0;
}
late_initcall(register_umcast);
| linux-master | arch/um/drivers/umcast_kern.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>
#include <stdarg.h>
#include <errno.h>
#include <stddef.h>
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/if_tun.h>
#include <arpa/inet.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <netinet/ip.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <sys/wait.h>
#include <sys/uio.h>
#include <linux/virtio_net.h>
#include <netdb.h>
#include <stdlib.h>
#include <os.h>
#include <limits.h>
#include <um_malloc.h>
#include "vector_user.h"
#define ID_GRE 0
#define ID_L2TPV3 1
#define ID_BESS 2
#define ID_MAX 2
#define TOKEN_IFNAME "ifname"
#define TOKEN_SCRIPT "ifup"
#define TRANS_RAW "raw"
#define TRANS_RAW_LEN strlen(TRANS_RAW)
#define TRANS_FD "fd"
#define TRANS_FD_LEN strlen(TRANS_FD)
#define VNET_HDR_FAIL "could not enable vnet headers on fd %d"
#define TUN_GET_F_FAIL "tapraw: TUNGETFEATURES failed: %s"
#define L2TPV3_BIND_FAIL "l2tpv3_open : could not bind socket err=%i"
#define UNIX_BIND_FAIL "unix_open : could not bind socket err=%i"
#define BPF_ATTACH_FAIL "Failed to attach filter size %d prog %px to %d, err %d\n"
#define BPF_DETACH_FAIL "Failed to detach filter size %d prog %px to %d, err %d\n"
#define MAX_UN_LEN 107
static const char padchar[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
static const char *template = "tapXXXXXX";
/* This is very ugly and brute force lookup, but it is done
* only once at initialization so not worth doing hashes or
* anything more intelligent
*/
char *uml_vector_fetch_arg(struct arglist *ifspec, char *token)
{
int i;
for (i = 0; i < ifspec->numargs; i++) {
if (strcmp(ifspec->tokens[i], token) == 0)
return ifspec->values[i];
}
return NULL;
}
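/*
 * Split a "token=value,token=value" interface specification in place:
 * '=' and ',' are overwritten with NUL bytes and the token/value
 * pointers are recorded in the returned arglist.
 */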
struct arglist *uml_parse_vector_ifspec(char *arg)
{
struct arglist *result;
int pos, len;
bool parsing_token = true, next_starts = true;
if (arg == NULL)
return NULL;
result = uml_kmalloc(sizeof(struct arglist), UM_GFP_KERNEL);
if (result == NULL)
return NULL;
result->numargs = 0;
len = strlen(arg);
for (pos = 0; pos < len; pos++) {
if (next_starts) {
if (parsing_token) {
result->tokens[result->numargs] = arg + pos;
} else {
result->values[result->numargs] = arg + pos;
result->numargs++;
}
next_starts = false;
}
if (*(arg + pos) == '=') {
if (parsing_token)
parsing_token = false;
else
goto cleanup;
next_starts = true;
(*(arg + pos)) = '\0';
}
if (*(arg + pos) == ',') {
parsing_token = true;
next_starts = true;
(*(arg + pos)) = '\0';
}
}
return result;
cleanup:
printk(UM_KERN_ERR "vector_setup - Couldn't parse '%s'\n", arg);
kfree(result);
return NULL;
}
/*
* Socket/FD configuration functions. These return a structure
* of rx and tx descriptors to cover cases where these are not
* the same (e.g. read via a raw socket and write via a tap device).
*/
#define PATH_NET_TUN "/dev/net/tun"
static int create_tap_fd(char *iface)
{
struct ifreq ifr;
int fd = -1;
int err = -ENOMEM, offload;
fd = open(PATH_NET_TUN, O_RDWR);
if (fd < 0) {
printk(UM_KERN_ERR "uml_tap: failed to open tun device\n");
goto tap_fd_cleanup;
}
memset(&ifr, 0, sizeof(ifr));
ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_VNET_HDR;
strscpy(ifr.ifr_name, iface, sizeof(ifr.ifr_name));
err = ioctl(fd, TUNSETIFF, (void *) &ifr);
if (err != 0) {
printk(UM_KERN_ERR "uml_tap: failed to select tap interface\n");
goto tap_fd_cleanup;
}
offload = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6;
ioctl(fd, TUNSETOFFLOAD, offload);
return fd;
tap_fd_cleanup:
if (fd >= 0)
os_close_file(fd);
return err;
}
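/*
 * Open an AF_PACKET SOCK_RAW socket and bind it to the named interface
 * with the given protocol; returns the fd or a negative errno.
 */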
static int create_raw_fd(char *iface, int flags, int proto)
{
struct ifreq ifr;
int fd = -1;
struct sockaddr_ll sock;
int err = -ENOMEM;
fd = socket(AF_PACKET, SOCK_RAW, flags);
if (fd == -1) {
err = -errno;
goto raw_fd_cleanup;
}
memset(&ifr, 0, sizeof(ifr));
strscpy(ifr.ifr_name, iface, sizeof(ifr.ifr_name));
if (ioctl(fd, SIOCGIFINDEX, (void *) &ifr) < 0) {
err = -errno;
goto raw_fd_cleanup;
}
sock.sll_family = AF_PACKET;
sock.sll_protocol = htons(proto);
sock.sll_ifindex = ifr.ifr_ifindex;
if (bind(fd,
(struct sockaddr *) &sock, sizeof(struct sockaddr_ll)) < 0) {
err = -errno;
goto raw_fd_cleanup;
}
return fd;
raw_fd_cleanup:
printk(UM_KERN_ERR "user_init_raw: init failed, error %d", err);
if (fd >= 0)
os_close_file(fd);
return err;
}
static struct vector_fds *user_init_tap_fds(struct arglist *ifspec)
{
int fd = -1, i;
char *iface;
struct vector_fds *result = NULL;
bool dynamic = false;
char dynamic_ifname[IFNAMSIZ];
char *argv[] = {NULL, NULL, NULL, NULL};
iface = uml_vector_fetch_arg(ifspec, TOKEN_IFNAME);
if (iface == NULL) {
dynamic = true;
iface = dynamic_ifname;
srand(getpid());
}
result = uml_kmalloc(sizeof(struct vector_fds), UM_GFP_KERNEL);
if (result == NULL) {
printk(UM_KERN_ERR "uml_tap: failed to allocate file descriptors\n");
goto tap_cleanup;
}
result->rx_fd = -1;
result->tx_fd = -1;
result->remote_addr = NULL;
result->remote_addr_size = 0;
/* TAP */
do {
if (dynamic) {
strcpy(iface, template);
for (i = 0; i < strlen(iface); i++) {
if (iface[i] == 'X') {
iface[i] = padchar[rand() % strlen(padchar)];
}
}
}
fd = create_tap_fd(iface);
if ((fd < 0) && (!dynamic)) {
printk(UM_KERN_ERR "uml_tap: failed to create tun interface\n");
goto tap_cleanup;
}
result->tx_fd = fd;
result->rx_fd = fd;
} while (fd < 0);
argv[0] = uml_vector_fetch_arg(ifspec, TOKEN_SCRIPT);
if (argv[0]) {
argv[1] = iface;
run_helper(NULL, NULL, argv);
}
return result;
tap_cleanup:
printk(UM_KERN_ERR "user_init_tap: init failed, error %d", fd);
kfree(result);
return NULL;
}
static struct vector_fds *user_init_hybrid_fds(struct arglist *ifspec)
{
char *iface;
struct vector_fds *result = NULL;
char *argv[] = {NULL, NULL, NULL, NULL};
iface = uml_vector_fetch_arg(ifspec, TOKEN_IFNAME);
if (iface == NULL) {
printk(UM_KERN_ERR "uml_tap: failed to parse interface spec\n");
goto hybrid_cleanup;
}
result = uml_kmalloc(sizeof(struct vector_fds), UM_GFP_KERNEL);
if (result == NULL) {
printk(UM_KERN_ERR "uml_tap: failed to allocate file descriptors\n");
goto hybrid_cleanup;
}
result->rx_fd = -1;
result->tx_fd = -1;
result->remote_addr = NULL;
result->remote_addr_size = 0;
/* TAP */
result->tx_fd = create_tap_fd(iface);
if (result->tx_fd < 0) {
printk(UM_KERN_ERR "uml_tap: failed to create tun interface: %i\n", result->tx_fd);
goto hybrid_cleanup;
}
/* RAW */
result->rx_fd = create_raw_fd(iface, ETH_P_ALL, ETH_P_ALL);
if (result->rx_fd < 0) {
printk(UM_KERN_ERR
"uml_tap: failed to create paired raw socket: %i\n", result->rx_fd);
goto hybrid_cleanup;
}
argv[0] = uml_vector_fetch_arg(ifspec, TOKEN_SCRIPT);
if (argv[0]) {
argv[1] = iface;
run_helper(NULL, NULL, argv);
}
return result;
hybrid_cleanup:
printk(UM_KERN_ERR "user_init_hybrid: init failed");
kfree(result);
return NULL;
}
static struct vector_fds *user_init_unix_fds(struct arglist *ifspec, int id)
{
int fd = -1;
int socktype;
char *src, *dst;
struct vector_fds *result = NULL;
struct sockaddr_un *local_addr = NULL, *remote_addr = NULL;
src = uml_vector_fetch_arg(ifspec, "src");
dst = uml_vector_fetch_arg(ifspec, "dst");
result = uml_kmalloc(sizeof(struct vector_fds), UM_GFP_KERNEL);
if (result == NULL) {
printk(UM_KERN_ERR "unix open:cannot allocate remote addr");
goto unix_cleanup;
}
remote_addr = uml_kmalloc(sizeof(struct sockaddr_un), UM_GFP_KERNEL);
if (remote_addr == NULL) {
printk(UM_KERN_ERR "unix open:cannot allocate remote addr");
goto unix_cleanup;
}
switch (id) {
case ID_BESS:
socktype = SOCK_SEQPACKET;
if ((src != NULL) && (strlen(src) <= MAX_UN_LEN)) {
local_addr = uml_kmalloc(sizeof(struct sockaddr_un), UM_GFP_KERNEL);
if (local_addr == NULL) {
printk(UM_KERN_ERR "bess open:cannot allocate local addr");
goto unix_cleanup;
}
local_addr->sun_family = AF_UNIX;
memcpy(local_addr->sun_path, src, strlen(src) + 1);
}
if ((dst == NULL) || (strlen(dst) > MAX_UN_LEN))
goto unix_cleanup;
remote_addr->sun_family = AF_UNIX;
memcpy(remote_addr->sun_path, dst, strlen(dst) + 1);
break;
default:
printk(KERN_ERR "Unsupported unix socket type\n");
return NULL;
}
fd = socket(AF_UNIX, socktype, 0);
if (fd == -1) {
printk(UM_KERN_ERR
"unix open: could not open socket, error = %d",
-errno
);
goto unix_cleanup;
}
if (local_addr != NULL) {
if (bind(fd, (struct sockaddr *) local_addr, sizeof(struct sockaddr_un))) {
printk(UM_KERN_ERR UNIX_BIND_FAIL, errno);
goto unix_cleanup;
}
}
switch (id) {
case ID_BESS:
if (connect(fd, (const struct sockaddr *) remote_addr, sizeof(struct sockaddr_un)) < 0) {
printk(UM_KERN_ERR "bess open:cannot connect to %s %i", remote_addr->sun_path, -errno);
goto unix_cleanup;
}
break;
}
result->rx_fd = fd;
result->tx_fd = fd;
result->remote_addr_size = sizeof(struct sockaddr_un);
result->remote_addr = remote_addr;
return result;
unix_cleanup:
if (fd >= 0)
os_close_file(fd);
kfree(remote_addr);
kfree(result);
return NULL;
}
static int strtofd(const char *nptr)
{
long fd;
char *endptr;
if (nptr == NULL)
return -1;
errno = 0;
fd = strtol(nptr, &endptr, 10);
if (nptr == endptr ||
errno != 0 ||
*endptr != '\0' ||
fd < 0 ||
fd > INT_MAX) {
return -1;
}
return fd;
}
static struct vector_fds *user_init_fd_fds(struct arglist *ifspec)
{
int fd = -1;
char *fdarg = NULL;
struct vector_fds *result = NULL;
fdarg = uml_vector_fetch_arg(ifspec, "fd");
fd = strtofd(fdarg);
if (fd == -1) {
printk(UM_KERN_ERR "fd open: bad or missing fd argument");
goto fd_cleanup;
}
result = uml_kmalloc(sizeof(struct vector_fds), UM_GFP_KERNEL);
if (result == NULL) {
printk(UM_KERN_ERR "fd open: allocation failed");
goto fd_cleanup;
}
result->rx_fd = fd;
result->tx_fd = fd;
result->remote_addr_size = 0;
result->remote_addr = NULL;
return result;
fd_cleanup:
if (fd >= 0)
os_close_file(fd);
kfree(result);
return NULL;
}
static struct vector_fds *user_init_raw_fds(struct arglist *ifspec)
{
int rxfd = -1, txfd = -1;
int err = -ENOMEM;
char *iface;
struct vector_fds *result = NULL;
char *argv[] = {NULL, NULL, NULL, NULL};
iface = uml_vector_fetch_arg(ifspec, TOKEN_IFNAME);
if (iface == NULL)
goto raw_cleanup;
rxfd = create_raw_fd(iface, ETH_P_ALL, ETH_P_ALL);
if (rxfd < 0) {
err = rxfd;
goto raw_cleanup;
}
txfd = create_raw_fd(iface, 0, ETH_P_IP); /* Turn off RX on this fd */
if (txfd < 0) {
err = txfd;
goto raw_cleanup;
}
result = uml_kmalloc(sizeof(struct vector_fds), UM_GFP_KERNEL);
if (result != NULL) {
result->rx_fd = rxfd;
result->tx_fd = txfd;
result->remote_addr = NULL;
result->remote_addr_size = 0;
}
argv[0] = uml_vector_fetch_arg(ifspec, TOKEN_SCRIPT);
if (argv[0]) {
argv[1] = iface;
run_helper(NULL, NULL, argv);
}
return result;
raw_cleanup:
printk(UM_KERN_ERR "user_init_raw: init failed, error %d", err);
kfree(result);
return NULL;
}
bool uml_raw_enable_qdisc_bypass(int fd)
{
int optval = 1;
if (setsockopt(fd,
SOL_PACKET, PACKET_QDISC_BYPASS,
&optval, sizeof(optval)) != 0) {
return false;
}
return true;
}
bool uml_raw_enable_vnet_headers(int fd)
{
int optval = 1;
if (setsockopt(fd,
SOL_PACKET, PACKET_VNET_HDR,
&optval, sizeof(optval)) != 0) {
printk(UM_KERN_INFO VNET_HDR_FAIL, fd);
return false;
}
return true;
}
bool uml_tap_enable_vnet_headers(int fd)
{
unsigned int features;
int len = sizeof(struct virtio_net_hdr);
if (ioctl(fd, TUNGETFEATURES, &features) == -1) {
printk(UM_KERN_INFO TUN_GET_F_FAIL, strerror(errno));
return false;
}
if ((features & IFF_VNET_HDR) == 0) {
printk(UM_KERN_INFO "tapraw: No VNET HEADER support");
return false;
}
ioctl(fd, TUNSETVNETHDRSZ, &len);
return true;
}
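/*
 * Build the fd pair for the GRE and L2TPv3 transports: resolve the
 * "src" and "dst" arguments with getaddrinfo(), open a raw (or, for
 * L2TPv3 over UDP, datagram) socket, bind it to the source address and
 * remember the destination address in the returned structure.
 */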
static struct vector_fds *user_init_socket_fds(struct arglist *ifspec, int id)
{
int err = -ENOMEM;
int fd = -1, gairet;
struct addrinfo srchints;
struct addrinfo dsthints;
bool v6, udp;
char *value;
char *src, *dst, *srcport, *dstport;
struct addrinfo *gairesult = NULL;
struct vector_fds *result = NULL;
value = uml_vector_fetch_arg(ifspec, "v6");
v6 = false;
udp = false;
if (value != NULL) {
if (strtol((const char *) value, NULL, 10) > 0)
v6 = true;
}
value = uml_vector_fetch_arg(ifspec, "udp");
if (value != NULL) {
if (strtol((const char *) value, NULL, 10) > 0)
udp = true;
}
src = uml_vector_fetch_arg(ifspec, "src");
dst = uml_vector_fetch_arg(ifspec, "dst");
srcport = uml_vector_fetch_arg(ifspec, "srcport");
dstport = uml_vector_fetch_arg(ifspec, "dstport");
memset(&dsthints, 0, sizeof(dsthints));
if (v6)
dsthints.ai_family = AF_INET6;
else
dsthints.ai_family = AF_INET;
switch (id) {
case ID_GRE:
dsthints.ai_socktype = SOCK_RAW;
dsthints.ai_protocol = IPPROTO_GRE;
break;
case ID_L2TPV3:
if (udp) {
dsthints.ai_socktype = SOCK_DGRAM;
dsthints.ai_protocol = 0;
} else {
dsthints.ai_socktype = SOCK_RAW;
dsthints.ai_protocol = IPPROTO_L2TP;
}
break;
default:
printk(KERN_ERR "Unsupported socket type\n");
return NULL;
}
memcpy(&srchints, &dsthints, sizeof(struct addrinfo));
gairet = getaddrinfo(src, srcport, &dsthints, &gairesult);
if ((gairet != 0) || (gairesult == NULL)) {
printk(UM_KERN_ERR
"socket_open : could not resolve src, error = %s",
gai_strerror(gairet)
);
return NULL;
}
fd = socket(gairesult->ai_family,
gairesult->ai_socktype, gairesult->ai_protocol);
if (fd == -1) {
printk(UM_KERN_ERR
"socket_open : could not open socket, error = %d",
-errno
);
goto cleanup;
}
if (bind(fd,
(struct sockaddr *) gairesult->ai_addr,
gairesult->ai_addrlen)) {
printk(UM_KERN_ERR L2TPV3_BIND_FAIL, errno);
goto cleanup;
}
if (gairesult != NULL)
freeaddrinfo(gairesult);
gairesult = NULL;
gairet = getaddrinfo(dst, dstport, &dsthints, &gairesult);
if ((gairet != 0) || (gairesult == NULL)) {
printk(UM_KERN_ERR
"socket_open : could not resolve dst, error = %s",
gai_strerror(gairet)
);
return NULL;
}
result = uml_kmalloc(sizeof(struct vector_fds), UM_GFP_KERNEL);
if (result != NULL) {
result->rx_fd = fd;
result->tx_fd = fd;
result->remote_addr = uml_kmalloc(
gairesult->ai_addrlen, UM_GFP_KERNEL);
if (result->remote_addr == NULL)
goto cleanup;
result->remote_addr_size = gairesult->ai_addrlen;
memcpy(
result->remote_addr,
gairesult->ai_addr,
gairesult->ai_addrlen
);
}
freeaddrinfo(gairesult);
return result;
cleanup:
if (gairesult != NULL)
freeaddrinfo(gairesult);
printk(UM_KERN_ERR "user_init_socket: init failed, error %d", err);
if (fd >= 0)
os_close_file(fd);
if (result != NULL) {
kfree(result->remote_addr);
kfree(result);
}
return NULL;
}
struct vector_fds *uml_vector_user_open(
int unit,
struct arglist *parsed
)
{
char *transport;
if (parsed == NULL) {
printk(UM_KERN_ERR "no parsed config for unit %d\n", unit);
return NULL;
}
transport = uml_vector_fetch_arg(parsed, "transport");
if (transport == NULL) {
printk(UM_KERN_ERR "missing transport for unit %d\n", unit);
return NULL;
}
if (strncmp(transport, TRANS_RAW, TRANS_RAW_LEN) == 0)
return user_init_raw_fds(parsed);
if (strncmp(transport, TRANS_HYBRID, TRANS_HYBRID_LEN) == 0)
return user_init_hybrid_fds(parsed);
if (strncmp(transport, TRANS_TAP, TRANS_TAP_LEN) == 0)
return user_init_tap_fds(parsed);
if (strncmp(transport, TRANS_GRE, TRANS_GRE_LEN) == 0)
return user_init_socket_fds(parsed, ID_GRE);
if (strncmp(transport, TRANS_L2TPV3, TRANS_L2TPV3_LEN) == 0)
return user_init_socket_fds(parsed, ID_L2TPV3);
if (strncmp(transport, TRANS_BESS, TRANS_BESS_LEN) == 0)
return user_init_unix_fds(parsed, ID_BESS);
if (strncmp(transport, TRANS_FD, TRANS_FD_LEN) == 0)
return user_init_fd_fds(parsed);
return NULL;
}
int uml_vector_sendmsg(int fd, void *hdr, int flags)
{
int n;
CATCH_EINTR(n = sendmsg(fd, (struct msghdr *) hdr, flags));
if ((n < 0) && (errno == EAGAIN))
return 0;
if (n >= 0)
return n;
else
return -errno;
}
int uml_vector_recvmsg(int fd, void *hdr, int flags)
{
int n;
struct msghdr *msg = (struct msghdr *) hdr;
CATCH_EINTR(n = readv(fd, msg->msg_iov, msg->msg_iovlen));
if ((n < 0) && (errno == EAGAIN))
return 0;
if (n >= 0)
return n;
else
return -errno;
}
int uml_vector_writev(int fd, void *hdr, int iovcount)
{
int n;
CATCH_EINTR(n = writev(fd, (struct iovec *) hdr, iovcount));
if ((n < 0) && ((errno == EAGAIN) || (errno == ENOBUFS)))
return 0;
if (n >= 0)
return n;
else
return -errno;
}
int uml_vector_sendmmsg(
int fd,
void *msgvec,
unsigned int vlen,
unsigned int flags)
{
int n;
CATCH_EINTR(n = sendmmsg(fd, (struct mmsghdr *) msgvec, vlen, flags));
if ((n < 0) && ((errno == EAGAIN) || (errno == ENOBUFS)))
return 0;
if (n >= 0)
return n;
else
return -errno;
}
int uml_vector_recvmmsg(
int fd,
void *msgvec,
unsigned int vlen,
unsigned int flags)
{
int n;
CATCH_EINTR(
n = recvmmsg(fd, (struct mmsghdr *) msgvec, vlen, flags, 0));
if ((n < 0) && (errno == EAGAIN))
return 0;
if (n >= 0)
return n;
else
return -errno;
}
int uml_vector_attach_bpf(int fd, void *bpf)
{
struct sock_fprog *prog = bpf;
int err = setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, bpf, sizeof(struct sock_fprog));
if (err < 0)
printk(KERN_ERR BPF_ATTACH_FAIL, prog->len, prog->filter, fd, -errno);
return err;
}
int uml_vector_detach_bpf(int fd, void *bpf)
{
struct sock_fprog *prog = bpf;
int err = setsockopt(fd, SOL_SOCKET, SO_DETACH_FILTER, bpf, sizeof(struct sock_fprog));
if (err < 0)
printk(KERN_ERR BPF_DETACH_FAIL, prog->len, prog->filter, fd, -errno);
return err;
}
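/*
 * Build the default six-instruction socket filter: frames whose source
 * MAC matches the given MAC are dropped (ret #0), everything else is
 * accepted (ret #0x40000) - presumably so the device does not receive
 * its own transmissions back from the raw socket.
 */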
void *uml_vector_default_bpf(const void *mac)
{
struct sock_filter *bpf;
uint32_t *mac1 = (uint32_t *)(mac + 2);
uint16_t *mac2 = (uint16_t *) mac;
struct sock_fprog *bpf_prog;
bpf_prog = uml_kmalloc(sizeof(struct sock_fprog), UM_GFP_KERNEL);
if (bpf_prog) {
bpf_prog->len = DEFAULT_BPF_LEN;
bpf_prog->filter = NULL;
} else {
return NULL;
}
bpf = uml_kmalloc(
sizeof(struct sock_filter) * DEFAULT_BPF_LEN, UM_GFP_KERNEL);
if (bpf) {
bpf_prog->filter = bpf;
/* ld [8] */
bpf[0] = (struct sock_filter){ 0x20, 0, 0, 0x00000008 };
/* jeq #0xMAC[2-6] jt 2 jf 5*/
bpf[1] = (struct sock_filter){ 0x15, 0, 3, ntohl(*mac1)};
/* ldh [6] */
bpf[2] = (struct sock_filter){ 0x28, 0, 0, 0x00000006 };
/* jeq #0xMAC[0-1] jt 4 jf 5 */
bpf[3] = (struct sock_filter){ 0x15, 0, 1, ntohs(*mac2)};
/* ret #0 */
bpf[4] = (struct sock_filter){ 0x6, 0, 0, 0x00000000 };
/* ret #0x40000 */
bpf[5] = (struct sock_filter){ 0x6, 0, 0, 0x00040000 };
} else {
kfree(bpf_prog);
bpf_prog = NULL;
}
return bpf_prog;
}
/* Note - this loads a pre-compiled socket filter from the given file */
void *uml_vector_user_bpf(char *filename)
{
struct sock_filter *bpf;
struct sock_fprog *bpf_prog;
struct stat statbuf;
int res, ffd = -1;
if (filename == NULL)
return NULL;
if (stat(filename, &statbuf) < 0) {
printk(KERN_ERR "Error %d reading bpf file", -errno);
return NULL;
}
bpf_prog = uml_kmalloc(sizeof(struct sock_fprog), UM_GFP_KERNEL);
if (bpf_prog == NULL) {
printk(KERN_ERR "Failed to allocate bpf prog buffer");
return NULL;
}
bpf_prog->len = statbuf.st_size / sizeof(struct sock_filter);
bpf_prog->filter = NULL;
ffd = os_open_file(filename, of_read(OPENFLAGS()), 0);
if (ffd < 0) {
printk(KERN_ERR "Error %d opening bpf file", -errno);
goto bpf_failed;
}
bpf = uml_kmalloc(statbuf.st_size, UM_GFP_KERNEL);
if (bpf == NULL) {
printk(KERN_ERR "Failed to allocate bpf buffer");
goto bpf_failed;
}
bpf_prog->filter = bpf;
res = os_read_file(ffd, bpf, statbuf.st_size);
if (res < statbuf.st_size) {
printk(KERN_ERR "Failed to read bpf program %s, error %d", filename, res);
kfree(bpf);
goto bpf_failed;
}
os_close_file(ffd);
return bpf_prog;
bpf_failed:
if (ffd > 0)
os_close_file(ffd);
kfree(bpf_prog);
return NULL;
}
| linux-master | arch/um/drivers/vector_user.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2007 Jeff Dike (jdike@{linux.intel,addtoit}.com)
*/
/*
* _XOPEN_SOURCE is needed for pread, but we define _GNU_SOURCE, which defines
* that.
*/
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <arpa/inet.h>
#include <endian.h>
#include "cow.h"
#include "cow_sys.h"
#define PATH_LEN_V1 256
/* unsigned time_t works until year 2106 */
typedef __u32 time32_t;
struct cow_header_v1 {
__s32 magic;
__s32 version;
char backing_file[PATH_LEN_V1];
time32_t mtime;
__u64 size;
__s32 sectorsize;
} __attribute__((packed));
/*
* Define PATH_LEN_V3 as the usual value of MAXPATHLEN, just hard-code it in
* case other systems have different values for MAXPATHLEN.
*
* The same must hold for V2 - we want file format compatibility, not anything
* else.
*/
#define PATH_LEN_V3 4096
#define PATH_LEN_V2 PATH_LEN_V3
struct cow_header_v2 {
__u32 magic;
__u32 version;
char backing_file[PATH_LEN_V2];
time32_t mtime;
__u64 size;
__s32 sectorsize;
} __attribute__((packed));
/*
* Changes from V2 -
* PATH_LEN_V3 as described above
* Explicitly specify field bit lengths for systems with different
* lengths for the usual C types. Not sure whether char or
* time_t should be changed, this can be changed later without
* breaking compatibility
* Add alignment field so that different alignments can be used for the
* bitmap and data
* Add cow_format field to allow for the possibility of different ways
* of specifying the COW blocks. For now, the only value is 0,
* for the traditional COW bitmap.
* Move the backing_file field to the end of the header. This allows
* for the possibility of expanding it into the padding required
* by the bitmap alignment.
* The bitmap and data portions of the file will be aligned as specified
* by the alignment field. This is to allow COW files to be
* put on devices with restrictions on access alignments, such as
* /dev/raw, with a 512 byte alignment restriction. This also
* allows the data to be more aligned more strictly than on
* sector boundaries. This is needed for ubd-mmap, which needs
* the data to be page aligned.
* Fixed (finally!) the rounding bug
*/
/*
* Until Dec2005, __attribute__((packed)) was left out from the below
* definition, leading on 64-bit systems to 4 bytes of padding after mtime, to
* align size to 8-byte alignment. This shifted all fields above (no padding
* was present on 32-bit, no other padding was added).
*
* However, this _can be detected_: it means that cow_format (always 0 until
* now) is shifted onto the first 4 bytes of backing_file, where it is otherwise
* impossible to find 4 zeros. -bb */
struct cow_header_v3 {
__u32 magic;
__u32 version;
__u32 mtime;
__u64 size;
__u32 sectorsize;
__u32 alignment;
__u32 cow_format;
char backing_file[PATH_LEN_V3];
} __attribute__((packed));
/* This is the broken layout used by some 64-bit binaries. */
struct cow_header_v3_broken {
__u32 magic;
__u32 version;
__s64 mtime;
__u64 size;
__u32 sectorsize;
__u32 alignment;
__u32 cow_format;
char backing_file[PATH_LEN_V3];
};
/* COW format definitions - for now, we have only the usual COW bitmap */
#define COW_BITMAP 0
union cow_header {
struct cow_header_v1 v1;
struct cow_header_v2 v2;
struct cow_header_v3 v3;
struct cow_header_v3_broken v3_b;
};
#define COW_MAGIC 0x4f4f4f4d /* MOOO */
#define COW_VERSION 3
#define DIV_ROUND(x, len) (((x) + (len) - 1) / (len))
#define ROUND_UP(x, align) (DIV_ROUND(x, align) * (align))
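/*
 * Compute the size of the COW bitmap (one bit per sector) and the file
 * offset of the data area: v1/v2 round the data offset up to the
 * sector size, v3 rounds it up to the header's alignment field.
 */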
void cow_sizes(int version, __u64 size, int sectorsize, int align,
int bitmap_offset, unsigned long *bitmap_len_out,
int *data_offset_out)
{
if (version < 3) {
*bitmap_len_out = (size + sectorsize - 1) / (8 * sectorsize);
*data_offset_out = bitmap_offset + *bitmap_len_out;
*data_offset_out = (*data_offset_out + sectorsize - 1) /
sectorsize;
*data_offset_out *= sectorsize;
}
else {
*bitmap_len_out = DIV_ROUND(size, sectorsize);
*bitmap_len_out = DIV_ROUND(*bitmap_len_out, 8);
*data_offset_out = bitmap_offset + *bitmap_len_out;
*data_offset_out = ROUND_UP(*data_offset_out, align);
}
}
static int absolutize(char *to, int size, char *from)
{
char save_cwd[256], *slash;
int remaining;
if (getcwd(save_cwd, sizeof(save_cwd)) == NULL) {
cow_printf("absolutize : unable to get cwd - errno = %d\n",
errno);
return -1;
}
slash = strrchr(from, '/');
if (slash != NULL) {
*slash = '\0';
if (chdir(from)) {
*slash = '/';
cow_printf("absolutize : Can't cd to '%s' - "
"errno = %d\n", from, errno);
return -1;
}
*slash = '/';
if (getcwd(to, size) == NULL) {
cow_printf("absolutize : unable to get cwd of '%s' - "
"errno = %d\n", from, errno);
return -1;
}
remaining = size - strlen(to);
if (strlen(slash) + 1 > remaining) {
cow_printf("absolutize : unable to fit '%s' into %d "
"chars\n", from, size);
return -1;
}
strcat(to, slash);
}
else {
if (strlen(save_cwd) + 1 + strlen(from) + 1 > size) {
cow_printf("absolutize : unable to fit '%s' into %d "
"chars\n", from, size);
return -1;
}
strcpy(to, save_cwd);
strcat(to, "/");
strcat(to, from);
}
if (chdir(save_cwd)) {
cow_printf("absolutize : Can't cd to '%s' - "
"errno = %d\n", save_cwd, errno);
return -1;
}
return 0;
}
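/*
 * Usage sketch, not compiled: with an assumed cwd of /tmp and an existing
 * ./cow directory, absolutize() canonicalises a relative path by chdir()ing
 * into its directory part and reading the cwd back, so symlinks and ".."
 * are resolved.  The names below are local to this sketch.
 */
#if 0
static void absolutize_example(void)
{
	char abs_path[PATH_LEN_V3];
	char rel[] = "cow/backing.img";	/* must be writable: *slash = '\0' */
	int err;

	err = absolutize(abs_path, sizeof(abs_path), rel);
	/* on success abs_path is e.g. "/tmp/cow/backing.img" */
	(void) err;
}
#endif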
int write_cow_header(char *cow_file, int fd, char *backing_file,
int sectorsize, int alignment, unsigned long long *size)
{
struct cow_header_v3 *header;
long long modtime;
int err;
err = cow_seek_file(fd, 0);
if (err < 0) {
cow_printf("write_cow_header - lseek failed, err = %d\n", -err);
goto out;
}
err = -ENOMEM;
header = cow_malloc(sizeof(*header));
if (header == NULL) {
cow_printf("write_cow_header - failed to allocate COW V3 "
"header\n");
goto out;
}
header->magic = htobe32(COW_MAGIC);
header->version = htobe32(COW_VERSION);
err = -EINVAL;
if (strlen(backing_file) > sizeof(header->backing_file) - 1) {
/* Below, %zd is for a size_t value */
cow_printf("Backing file name \"%s\" is too long - names are "
"limited to %zd characters\n", backing_file,
sizeof(header->backing_file) - 1);
goto out_free;
}
if (absolutize(header->backing_file, sizeof(header->backing_file),
backing_file))
goto out_free;
err = os_file_modtime(header->backing_file, &modtime);
if (err < 0) {
cow_printf("write_cow_header - backing file '%s' mtime "
"request failed, err = %d\n", header->backing_file,
-err);
goto out_free;
}
err = cow_file_size(header->backing_file, size);
if (err < 0) {
cow_printf("write_cow_header - couldn't get size of "
"backing file '%s', err = %d\n",
header->backing_file, -err);
goto out_free;
}
header->mtime = htobe32(modtime);
header->size = htobe64(*size);
header->sectorsize = htobe32(sectorsize);
header->alignment = htobe32(alignment);
header->cow_format = COW_BITMAP;
err = cow_write_file(fd, header, sizeof(*header));
if (err != sizeof(*header)) {
cow_printf("write_cow_header - write of header to "
"new COW file '%s' failed, err = %d\n", cow_file,
-err);
goto out_free;
}
err = 0;
out_free:
cow_free(header);
out:
return err;
}
int file_reader(__u64 offset, char *buf, int len, void *arg)
{
int fd = *((int *) arg);
return pread(fd, buf, len, offset);
}
/* XXX Need to sanity-check the values read from the header */
int read_cow_header(int (*reader)(__u64, char *, int, void *), void *arg,
__u32 *version_out, char **backing_file_out,
long long *mtime_out, unsigned long long *size_out,
int *sectorsize_out, __u32 *align_out,
int *bitmap_offset_out)
{
union cow_header *header;
char *file;
int err, n;
unsigned long version, magic;
header = cow_malloc(sizeof(*header));
if (header == NULL) {
cow_printf("read_cow_header - Failed to allocate header\n");
return -ENOMEM;
}
err = -EINVAL;
n = (*reader)(0, (char *) header, sizeof(*header), arg);
if (n < offsetof(typeof(header->v1), backing_file)) {
cow_printf("read_cow_header - short header\n");
goto out;
}
magic = header->v1.magic;
if (magic == COW_MAGIC)
version = header->v1.version;
else if (magic == be32toh(COW_MAGIC))
version = be32toh(header->v1.version);
/* No error printed because the non-COW case comes through here */
else goto out;
*version_out = version;
if (version == 1) {
if (n < sizeof(header->v1)) {
cow_printf("read_cow_header - failed to read V1 "
"header\n");
goto out;
}
*mtime_out = header->v1.mtime;
*size_out = header->v1.size;
*sectorsize_out = header->v1.sectorsize;
*bitmap_offset_out = sizeof(header->v1);
*align_out = *sectorsize_out;
file = header->v1.backing_file;
}
else if (version == 2) {
if (n < sizeof(header->v2)) {
cow_printf("read_cow_header - failed to read V2 "
"header\n");
goto out;
}
*mtime_out = be32toh(header->v2.mtime);
*size_out = be64toh(header->v2.size);
*sectorsize_out = be32toh(header->v2.sectorsize);
*bitmap_offset_out = sizeof(header->v2);
*align_out = *sectorsize_out;
file = header->v2.backing_file;
}
/* This is very subtle - see above at union cow_header definition */
else if (version == 3 && (*((int*)header->v3.backing_file) != 0)) {
if (n < sizeof(header->v3)) {
cow_printf("read_cow_header - failed to read V3 "
"header\n");
goto out;
}
*mtime_out = be32toh(header->v3.mtime);
*size_out = be64toh(header->v3.size);
*sectorsize_out = be32toh(header->v3.sectorsize);
*align_out = be32toh(header->v3.alignment);
if (*align_out == 0) {
cow_printf("read_cow_header - invalid COW header, "
"align == 0\n");
}
*bitmap_offset_out = ROUND_UP(sizeof(header->v3), *align_out);
file = header->v3.backing_file;
}
else if (version == 3) {
cow_printf("read_cow_header - broken V3 file with"
" 64-bit layout - recovering content.\n");
if (n < sizeof(header->v3_b)) {
cow_printf("read_cow_header - failed to read V3 "
"header\n");
goto out;
}
/*
 * this layout was used until Dec 2005 - 64 bits are only needed to
 * represent dates past 2106, i.e. we can safely do this truncating cast.
*
* Additionally, we must use be32toh() instead of be64toh(), since
* the program used to use the former (tested - I got mtime
* mismatch "0 vs whatever").
*
* Ever heard about bug-to-bug-compatibility ? ;-) */
*mtime_out = (time32_t) be32toh(header->v3_b.mtime);
*size_out = be64toh(header->v3_b.size);
*sectorsize_out = be32toh(header->v3_b.sectorsize);
*align_out = be32toh(header->v3_b.alignment);
if (*align_out == 0) {
cow_printf("read_cow_header - invalid COW header, "
"align == 0\n");
}
*bitmap_offset_out = ROUND_UP(sizeof(header->v3_b), *align_out);
file = header->v3_b.backing_file;
}
else {
cow_printf("read_cow_header - invalid COW version\n");
goto out;
}
err = -ENOMEM;
*backing_file_out = cow_strdup(file);
if (*backing_file_out == NULL) {
cow_printf("read_cow_header - failed to allocate backing "
"file\n");
goto out;
}
err = 0;
out:
cow_free(header);
return err;
}
int init_cow_file(int fd, char *cow_file, char *backing_file, int sectorsize,
int alignment, int *bitmap_offset_out,
unsigned long *bitmap_len_out, int *data_offset_out)
{
unsigned long long size, offset;
char zero = 0;
int err;
err = write_cow_header(cow_file, fd, backing_file, sectorsize,
alignment, &size);
if (err)
goto out;
*bitmap_offset_out = ROUND_UP(sizeof(struct cow_header_v3), alignment);
cow_sizes(COW_VERSION, size, sectorsize, alignment, *bitmap_offset_out,
bitmap_len_out, data_offset_out);
offset = *data_offset_out + size - sizeof(zero);
err = cow_seek_file(fd, offset);
if (err < 0) {
cow_printf("cow bitmap lseek failed : err = %d\n", -err);
goto out;
}
/*
 * It does not really matter how much we write here; it is just to set EOF.
 * This also sets the entire COW bitmap to zero without having to
 * allocate it.
*/
err = cow_write_file(fd, &zero, sizeof(zero));
if (err != sizeof(zero)) {
cow_printf("Write of bitmap to new COW file '%s' failed, "
"err = %d\n", cow_file, -err);
if (err >= 0)
err = -EINVAL;
goto out;
}
return 0;
out:
return err;
}
| linux-master | arch/um/drivers/cow_user.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2007 Luca Bigliardi ([email protected]).
*/
#include <stddef.h>
#include <errno.h>
#include <libvdeplug.h>
#include <net_user.h>
#include <um_malloc.h>
#include "vde.h"
static int vde_user_init(void *data, void *dev)
{
struct vde_data *pri = data;
VDECONN *conn = NULL;
int err = -EINVAL;
pri->dev = dev;
conn = vde_open(pri->vde_switch, pri->descr, pri->args);
if (conn == NULL) {
err = -errno;
printk(UM_KERN_ERR "vde_user_init: vde_open failed, "
"errno = %d\n", errno);
return err;
}
printk(UM_KERN_INFO "vde backend - connection opened\n");
pri->conn = conn;
return 0;
}
static int vde_user_open(void *data)
{
struct vde_data *pri = data;
if (pri->conn != NULL)
return vde_datafd(pri->conn);
printk(UM_KERN_WARNING "vde_open - we have no VDECONN to open");
return -EINVAL;
}
static void vde_remove(void *data)
{
struct vde_data *pri = data;
if (pri->conn != NULL) {
printk(UM_KERN_INFO "vde backend - closing connection\n");
vde_close(pri->conn);
pri->conn = NULL;
kfree(pri->args);
pri->args = NULL;
return;
}
printk(UM_KERN_WARNING "vde_remove - we have no VDECONN to remove");
}
const struct net_user_info vde_user_info = {
.init = vde_user_init,
.open = vde_user_open,
.close = NULL,
.remove = vde_remove,
.add_address = NULL,
.delete_address = NULL,
.mtu = ETH_MAX_PACKET,
.max_packet = ETH_MAX_PACKET + ETH_HEADER_OTHER,
};
void vde_init_libstuff(struct vde_data *vpri, struct vde_init *init)
{
struct vde_open_args *args;
vpri->args = uml_kmalloc(sizeof(struct vde_open_args), UM_GFP_KERNEL);
if (vpri->args == NULL) {
printk(UM_KERN_ERR "vde_init_libstuff - vde_open_args "
"allocation failed");
return;
}
args = vpri->args;
args->port = init->port;
args->group = init->group;
args->mode = init->mode ? init->mode : 0700;
args->port ? printk("port %d", args->port) :
printk("undefined port");
}
int vde_user_read(void *conn, void *buf, int len)
{
VDECONN *vconn = conn;
int rv;
if (vconn == NULL)
return 0;
rv = vde_recv(vconn, buf, len, 0);
if (rv < 0) {
if (errno == EAGAIN)
return 0;
return -errno;
}
else if (rv == 0)
return -ENOTCONN;
return rv;
}
int vde_user_write(void *conn, void *buf, int len)
{
VDECONN *vconn = conn;
if (vconn == NULL)
return 0;
return vde_send(vconn, buf, len, 0);
}
| linux-master | arch/um/drivers/vde_user.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
#include <linux/irqreturn.h>
#include <linux/kd.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include "chan.h"
#include <irq_kern.h>
#include <irq_user.h>
#include <kern_util.h>
#include <os.h>
#define LINE_BUFSIZE 4096
static irqreturn_t line_interrupt(int irq, void *data)
{
struct chan *chan = data;
struct line *line = chan->line;
if (line)
chan_interrupt(line, irq);
return IRQ_HANDLED;
}
/*
* Returns the free space inside the ring buffer of this line.
*
* Should be called while holding line->lock (this does not modify data).
*/
static unsigned int write_room(struct line *line)
{
int n;
if (line->buffer == NULL)
return LINE_BUFSIZE - 1;
/* This is for the case where the buffer is wrapped! */
n = line->head - line->tail;
if (n <= 0)
n += LINE_BUFSIZE; /* The other case */
return n - 1;
}
unsigned int line_write_room(struct tty_struct *tty)
{
struct line *line = tty->driver_data;
unsigned long flags;
unsigned int room;
spin_lock_irqsave(&line->lock, flags);
room = write_room(line);
spin_unlock_irqrestore(&line->lock, flags);
return room;
}
unsigned int line_chars_in_buffer(struct tty_struct *tty)
{
struct line *line = tty->driver_data;
unsigned long flags;
unsigned int ret;
spin_lock_irqsave(&line->lock, flags);
/* write_room subtracts 1 for the needed NULL, so we re-add it. */
ret = LINE_BUFSIZE - (write_room(line) + 1);
spin_unlock_irqrestore(&line->lock, flags);
return ret;
}
/*
* This copies the content of buf into the circular buffer associated with
* this line.
* The return value is the number of characters actually copied, i.e. the ones
* for which there was space: this function is not supposed to ever flush out
* the circular buffer.
*
* Must be called while holding line->lock!
*/
static int buffer_data(struct line *line, const char *buf, int len)
{
int end, room;
if (line->buffer == NULL) {
line->buffer = kmalloc(LINE_BUFSIZE, GFP_ATOMIC);
if (line->buffer == NULL) {
printk(KERN_ERR "buffer_data - atomic allocation "
"failed\n");
return 0;
}
line->head = line->buffer;
line->tail = line->buffer;
}
room = write_room(line);
len = (len > room) ? room : len;
end = line->buffer + LINE_BUFSIZE - line->tail;
if (len < end) {
memcpy(line->tail, buf, len);
line->tail += len;
}
else {
/* The circular buffer is wrapping */
memcpy(line->tail, buf, end);
buf += end;
memcpy(line->buffer, buf, len - end);
line->tail = line->buffer + len - end;
}
return len;
}
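/*
 * Sketch, not compiled: the wraparound arithmetic used by write_room()
 * and buffer_data() on a toy 16-byte ring with made-up head/tail offsets.
 */
#if 0
static void line_ring_sketch(void)
{
	const int bufsize = 16;
	int head = 10, tail = 4;	/* 10 bytes queued: 10..15 and 0..3 */
	int n, room, end;

	/* write_room(): one slot stays free so head == tail means empty */
	n = head - tail;		/* 6 */
	if (n <= 0)
		n += bufsize;
	room = n - 1;			/* 5 more bytes may be buffered */

	/* buffer_data() with a 4 byte write: end = bufsize - tail = 12,
	 * 4 < 12 so no wrap; with tail = 14, end would be 2 and the write
	 * would split into 2 bytes at the end plus 2 at the start. */
	end = bufsize - tail;
	(void) room;
	(void) end;
}
#endif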
/*
* Flushes the ring buffer to the output channels. That is, write_chan is
* called, passing it line->head as buffer, and an appropriate count.
*
* On exit, returns 1 when the buffer is empty,
 * 0 when the buffer is not empty,
* and -errno when an error occurred.
*
* Must be called while holding line->lock!*/
static int flush_buffer(struct line *line)
{
int n, count;
if ((line->buffer == NULL) || (line->head == line->tail))
return 1;
if (line->tail < line->head) {
/* line->buffer + LINE_BUFSIZE is the end of the buffer! */
count = line->buffer + LINE_BUFSIZE - line->head;
n = write_chan(line->chan_out, line->head, count,
line->write_irq);
if (n < 0)
return n;
if (n == count) {
/*
* We have flushed from ->head to buffer end, now we
* must flush only from the beginning to ->tail.
*/
line->head = line->buffer;
} else {
line->head += n;
return 0;
}
}
count = line->tail - line->head;
n = write_chan(line->chan_out, line->head, count,
line->write_irq);
if (n < 0)
return n;
line->head += n;
return line->head == line->tail;
}
void line_flush_buffer(struct tty_struct *tty)
{
struct line *line = tty->driver_data;
unsigned long flags;
spin_lock_irqsave(&line->lock, flags);
flush_buffer(line);
spin_unlock_irqrestore(&line->lock, flags);
}
/*
 * We map both ->flush_chars and ->put_char (which come as a pair) onto
* ->flush_buffer and ->write. Hope it's not that bad.
*/
void line_flush_chars(struct tty_struct *tty)
{
line_flush_buffer(tty);
}
ssize_t line_write(struct tty_struct *tty, const u8 *buf, size_t len)
{
struct line *line = tty->driver_data;
unsigned long flags;
int n, ret = 0;
spin_lock_irqsave(&line->lock, flags);
if (line->head != line->tail)
ret = buffer_data(line, buf, len);
else {
n = write_chan(line->chan_out, buf, len,
line->write_irq);
if (n < 0) {
ret = n;
goto out_up;
}
len -= n;
ret += n;
if (len > 0)
ret += buffer_data(line, buf + n, len);
}
out_up:
spin_unlock_irqrestore(&line->lock, flags);
return ret;
}
void line_throttle(struct tty_struct *tty)
{
struct line *line = tty->driver_data;
deactivate_chan(line->chan_in, line->read_irq);
line->throttled = 1;
}
void line_unthrottle(struct tty_struct *tty)
{
struct line *line = tty->driver_data;
line->throttled = 0;
chan_interrupt(line, line->read_irq);
}
static irqreturn_t line_write_interrupt(int irq, void *data)
{
struct chan *chan = data;
struct line *line = chan->line;
int err;
/*
 * Interrupts are disabled here because genirq keeps irqs disabled when
* calling the action handler.
*/
spin_lock(&line->lock);
err = flush_buffer(line);
if (err == 0) {
spin_unlock(&line->lock);
return IRQ_NONE;
} else if ((err < 0) && (err != -EAGAIN)) {
line->head = line->buffer;
line->tail = line->buffer;
}
spin_unlock(&line->lock);
tty_port_tty_wakeup(&line->port);
return IRQ_HANDLED;
}
int line_setup_irq(int fd, int input, int output, struct line *line, void *data)
{
const struct line_driver *driver = line->driver;
int err;
if (input) {
err = um_request_irq(UM_IRQ_ALLOC, fd, IRQ_READ,
line_interrupt, 0,
driver->read_irq_name, data);
if (err < 0)
return err;
line->read_irq = err;
}
if (output) {
err = um_request_irq(UM_IRQ_ALLOC, fd, IRQ_WRITE,
line_write_interrupt, 0,
driver->write_irq_name, data);
if (err < 0)
return err;
line->write_irq = err;
}
return 0;
}
static int line_activate(struct tty_port *port, struct tty_struct *tty)
{
int ret;
struct line *line = tty->driver_data;
ret = enable_chan(line);
if (ret)
return ret;
if (!line->sigio) {
chan_enable_winch(line->chan_out, port);
line->sigio = 1;
}
chan_window_size(line, &tty->winsize.ws_row,
&tty->winsize.ws_col);
return 0;
}
static void unregister_winch(struct tty_struct *tty);
static void line_destruct(struct tty_port *port)
{
struct tty_struct *tty = tty_port_tty_get(port);
struct line *line = tty->driver_data;
if (line->sigio) {
unregister_winch(tty);
line->sigio = 0;
}
}
static const struct tty_port_operations line_port_ops = {
.activate = line_activate,
.destruct = line_destruct,
};
int line_open(struct tty_struct *tty, struct file *filp)
{
struct line *line = tty->driver_data;
return tty_port_open(&line->port, tty, filp);
}
int line_install(struct tty_driver *driver, struct tty_struct *tty,
struct line *line)
{
int ret;
ret = tty_standard_install(driver, tty);
if (ret)
return ret;
tty->driver_data = line;
return 0;
}
void line_close(struct tty_struct *tty, struct file * filp)
{
struct line *line = tty->driver_data;
tty_port_close(&line->port, tty, filp);
}
void line_hangup(struct tty_struct *tty)
{
struct line *line = tty->driver_data;
tty_port_hangup(&line->port);
}
void close_lines(struct line *lines, int nlines)
{
int i;
for(i = 0; i < nlines; i++)
close_chan(&lines[i]);
}
int setup_one_line(struct line *lines, int n, char *init,
const struct chan_opts *opts, char **error_out)
{
struct line *line = &lines[n];
struct tty_driver *driver = line->driver->driver;
int err = -EINVAL;
if (line->port.count) {
*error_out = "Device is already open";
goto out;
}
if (!strcmp(init, "none")) {
if (line->valid) {
line->valid = 0;
kfree(line->init_str);
tty_unregister_device(driver, n);
parse_chan_pair(NULL, line, n, opts, error_out);
err = 0;
}
} else {
char *new = kstrdup(init, GFP_KERNEL);
if (!new) {
*error_out = "Failed to allocate memory";
return -ENOMEM;
}
if (line->valid) {
tty_unregister_device(driver, n);
kfree(line->init_str);
}
line->init_str = new;
line->valid = 1;
err = parse_chan_pair(new, line, n, opts, error_out);
if (!err) {
struct device *d = tty_port_register_device(&line->port,
driver, n, NULL);
if (IS_ERR(d)) {
*error_out = "Failed to register device";
err = PTR_ERR(d);
parse_chan_pair(NULL, line, n, opts, error_out);
}
}
if (err) {
line->init_str = NULL;
line->valid = 0;
kfree(new);
}
}
out:
return err;
}
/*
* Common setup code for both startup command line and mconsole initialization.
 * @conf contains the per-device configuration strings (of size @num);
 * @def receives the default string when all devices are configured at once;
 * @init is the setup string;
 * @name is the driver name, used in error messages;
*/
int line_setup(char **conf, unsigned int num, char **def,
char *init, char *name)
{
char *error;
if (*init == '=') {
/*
* We said con=/ssl= instead of con#=, so we are configuring all
* consoles at once.
*/
*def = init + 1;
} else {
char *end;
unsigned n = simple_strtoul(init, &end, 0);
if (*end != '=') {
error = "Couldn't parse device number";
goto out;
}
if (n >= num) {
error = "Device number out of range";
goto out;
}
conf[n] = end + 1;
}
return 0;
out:
printk(KERN_ERR "Failed to set up %s with "
"configuration string \"%s\" : %s\n", name, init, error);
return -EINVAL;
}
int line_config(struct line *lines, unsigned int num, char *str,
const struct chan_opts *opts, char **error_out)
{
char *end;
int n;
if (*str == '=') {
*error_out = "Can't configure all devices from mconsole";
return -EINVAL;
}
n = simple_strtoul(str, &end, 0);
if (*end++ != '=') {
*error_out = "Couldn't parse device number";
return -EINVAL;
}
if (n >= num) {
*error_out = "Device number out of range";
return -EINVAL;
}
return setup_one_line(lines, n, end, opts, error_out);
}
int line_get_config(char *name, struct line *lines, unsigned int num, char *str,
int size, char **error_out)
{
struct line *line;
char *end;
int dev, n = 0;
dev = simple_strtoul(name, &end, 0);
if ((*end != '\0') || (end == name)) {
*error_out = "line_get_config failed to parse device number";
return 0;
}
if ((dev < 0) || (dev >= num)) {
*error_out = "device number out of range";
return 0;
}
line = &lines[dev];
if (!line->valid)
CONFIG_CHUNK(str, size, n, "none", 1);
else {
struct tty_struct *tty = tty_port_tty_get(&line->port);
if (tty == NULL) {
CONFIG_CHUNK(str, size, n, line->init_str, 1);
} else {
n = chan_config_string(line, str, size, error_out);
tty_kref_put(tty);
}
}
return n;
}
int line_id(char **str, int *start_out, int *end_out)
{
char *end;
int n;
n = simple_strtoul(*str, &end, 0);
if ((*end != '\0') || (end == *str))
return -1;
*str = end;
*start_out = n;
*end_out = n;
return n;
}
int line_remove(struct line *lines, unsigned int num, int n, char **error_out)
{
if (n >= num) {
*error_out = "Device number out of range";
return -EINVAL;
}
return setup_one_line(lines, n, "none", NULL, error_out);
}
int register_lines(struct line_driver *line_driver,
const struct tty_operations *ops,
struct line *lines, int nlines)
{
struct tty_driver *driver;
int err;
int i;
driver = tty_alloc_driver(nlines, TTY_DRIVER_REAL_RAW |
TTY_DRIVER_DYNAMIC_DEV);
if (IS_ERR(driver))
return PTR_ERR(driver);
driver->driver_name = line_driver->name;
driver->name = line_driver->device_name;
driver->major = line_driver->major;
driver->minor_start = line_driver->minor_start;
driver->type = line_driver->type;
driver->subtype = line_driver->subtype;
driver->init_termios = tty_std_termios;
for (i = 0; i < nlines; i++) {
tty_port_init(&lines[i].port);
lines[i].port.ops = &line_port_ops;
spin_lock_init(&lines[i].lock);
lines[i].driver = line_driver;
INIT_LIST_HEAD(&lines[i].chan_list);
}
tty_set_operations(driver, ops);
err = tty_register_driver(driver);
if (err) {
printk(KERN_ERR "register_lines : can't register %s driver\n",
line_driver->name);
tty_driver_kref_put(driver);
for (i = 0; i < nlines; i++)
tty_port_destroy(&lines[i].port);
return err;
}
line_driver->driver = driver;
mconsole_register_dev(&line_driver->mc);
return 0;
}
static DEFINE_SPINLOCK(winch_handler_lock);
static LIST_HEAD(winch_handlers);
struct winch {
struct list_head list;
int fd;
int tty_fd;
int pid;
struct tty_port *port;
unsigned long stack;
struct work_struct work;
};
static void __free_winch(struct work_struct *work)
{
struct winch *winch = container_of(work, struct winch, work);
um_free_irq(WINCH_IRQ, winch);
if (winch->pid != -1)
os_kill_process(winch->pid, 1);
if (winch->stack != 0)
free_stack(winch->stack, 0);
kfree(winch);
}
static void free_winch(struct winch *winch)
{
int fd = winch->fd;
winch->fd = -1;
if (fd != -1)
os_close_file(fd);
__free_winch(&winch->work);
}
static irqreturn_t winch_interrupt(int irq, void *data)
{
struct winch *winch = data;
struct tty_struct *tty;
struct line *line;
int fd = winch->fd;
int err;
char c;
struct pid *pgrp;
if (fd != -1) {
err = generic_read(fd, &c, NULL);
if (err < 0) {
if (err != -EAGAIN) {
winch->fd = -1;
list_del(&winch->list);
os_close_file(fd);
printk(KERN_ERR "winch_interrupt : "
"read failed, errno = %d\n", -err);
printk(KERN_ERR "fd %d is losing SIGWINCH "
"support\n", winch->tty_fd);
INIT_WORK(&winch->work, __free_winch);
schedule_work(&winch->work);
return IRQ_HANDLED;
}
goto out;
}
}
tty = tty_port_tty_get(winch->port);
if (tty != NULL) {
line = tty->driver_data;
if (line != NULL) {
chan_window_size(line, &tty->winsize.ws_row,
&tty->winsize.ws_col);
pgrp = tty_get_pgrp(tty);
if (pgrp)
kill_pgrp(pgrp, SIGWINCH, 1);
put_pid(pgrp);
}
tty_kref_put(tty);
}
out:
return IRQ_HANDLED;
}
void register_winch_irq(int fd, int tty_fd, int pid, struct tty_port *port,
unsigned long stack)
{
struct winch *winch;
winch = kmalloc(sizeof(*winch), GFP_KERNEL);
if (winch == NULL) {
printk(KERN_ERR "register_winch_irq - kmalloc failed\n");
goto cleanup;
}
*winch = ((struct winch) { .list = LIST_HEAD_INIT(winch->list),
.fd = fd,
.tty_fd = tty_fd,
.pid = pid,
.port = port,
.stack = stack });
if (um_request_irq(WINCH_IRQ, fd, IRQ_READ, winch_interrupt,
IRQF_SHARED, "winch", winch) < 0) {
printk(KERN_ERR "register_winch_irq - failed to register "
"IRQ\n");
goto out_free;
}
spin_lock(&winch_handler_lock);
list_add(&winch->list, &winch_handlers);
spin_unlock(&winch_handler_lock);
return;
out_free:
kfree(winch);
cleanup:
os_kill_process(pid, 1);
os_close_file(fd);
if (stack != 0)
free_stack(stack, 0);
}
static void unregister_winch(struct tty_struct *tty)
{
struct list_head *ele, *next;
struct winch *winch;
struct tty_struct *wtty;
spin_lock(&winch_handler_lock);
list_for_each_safe(ele, next, &winch_handlers) {
winch = list_entry(ele, struct winch, list);
wtty = tty_port_tty_get(winch->port);
if (wtty == tty) {
list_del(&winch->list);
spin_unlock(&winch_handler_lock);
free_winch(winch);
break;
}
tty_kref_put(wtty);
}
spin_unlock(&winch_handler_lock);
}
static void winch_cleanup(void)
{
struct winch *winch;
spin_lock(&winch_handler_lock);
while ((winch = list_first_entry_or_null(&winch_handlers,
struct winch, list))) {
list_del(&winch->list);
spin_unlock(&winch_handler_lock);
free_winch(winch);
spin_lock(&winch_handler_lock);
}
spin_unlock(&winch_handler_lock);
}
__uml_exitcall(winch_cleanup);
char *add_xterm_umid(char *base)
{
char *umid, *title;
int len;
umid = get_umid();
if (*umid == '\0')
return base;
len = strlen(base) + strlen(" ()") + strlen(umid) + 1;
title = kmalloc(len, GFP_KERNEL);
if (title == NULL) {
printk(KERN_ERR "Failed to allocate buffer for xterm title\n");
return base;
}
snprintf(title, len, "%s (%s)", base, umid);
return title;
}
| linux-master | arch/um/drivers/line.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2001 Lennert Buytenhek ([email protected]) and
* James Leu ([email protected]).
* Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Copyright (C) 2001 by various other people who didn't put their name here.
*/
#include <linux/init.h>
#include <linux/netdevice.h>
#include <net_kern.h>
#include "daemon.h"
struct daemon_init {
char *sock_type;
char *ctl_sock;
};
static void daemon_init(struct net_device *dev, void *data)
{
struct uml_net_private *pri;
struct daemon_data *dpri;
struct daemon_init *init = data;
pri = netdev_priv(dev);
dpri = (struct daemon_data *) pri->user;
dpri->sock_type = init->sock_type;
dpri->ctl_sock = init->ctl_sock;
dpri->fd = -1;
dpri->control = -1;
dpri->dev = dev;
/* We will free this pointer. If it contains crap we're burned. */
dpri->ctl_addr = NULL;
dpri->data_addr = NULL;
dpri->local_addr = NULL;
printk("daemon backend (uml_switch version %d) - %s:%s",
SWITCH_VERSION, dpri->sock_type, dpri->ctl_sock);
printk("\n");
}
static int daemon_read(int fd, struct sk_buff *skb, struct uml_net_private *lp)
{
return net_recvfrom(fd, skb_mac_header(skb),
skb->dev->mtu + ETH_HEADER_OTHER);
}
static int daemon_write(int fd, struct sk_buff *skb, struct uml_net_private *lp)
{
return daemon_user_write(fd, skb->data, skb->len,
(struct daemon_data *) &lp->user);
}
static const struct net_kern_info daemon_kern_info = {
.init = daemon_init,
.protocol = eth_protocol,
.read = daemon_read,
.write = daemon_write,
};
static int daemon_setup(char *str, char **mac_out, void *data)
{
struct daemon_init *init = data;
char *remain;
*init = ((struct daemon_init)
{ .sock_type = "unix",
.ctl_sock = CONFIG_UML_NET_DAEMON_DEFAULT_SOCK });
remain = split_if_spec(str, mac_out, &init->sock_type, &init->ctl_sock,
NULL);
if (remain != NULL)
printk(KERN_WARNING "daemon_setup : Ignoring data socket "
"specification\n");
return 1;
}
static struct transport daemon_transport = {
.list = LIST_HEAD_INIT(daemon_transport.list),
.name = "daemon",
.setup = daemon_setup,
.user = &daemon_user_info,
.kern = &daemon_kern_info,
.private_size = sizeof(struct daemon_data),
.setup_size = sizeof(struct daemon_init),
};
static int register_daemon(void)
{
register_transport(&daemon_transport);
return 0;
}
late_initcall(register_daemon);
| linux-master | arch/um/drivers/daemon_kern.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <string.h>
#include <termios.h>
#include <sys/wait.h>
#include <net_user.h>
#include <os.h>
#include "slip.h"
#include <um_malloc.h>
static int slip_user_init(void *data, void *dev)
{
struct slip_data *pri = data;
pri->dev = dev;
return 0;
}
static int set_up_tty(int fd)
{
int i;
struct termios tios;
if (tcgetattr(fd, &tios) < 0) {
printk(UM_KERN_ERR "could not get initial terminal "
"attributes\n");
return -1;
}
tios.c_cflag = CS8 | CREAD | HUPCL | CLOCAL;
tios.c_iflag = IGNBRK | IGNPAR;
tios.c_oflag = 0;
tios.c_lflag = 0;
for (i = 0; i < NCCS; i++)
tios.c_cc[i] = 0;
tios.c_cc[VMIN] = 1;
tios.c_cc[VTIME] = 0;
cfsetospeed(&tios, B38400);
cfsetispeed(&tios, B38400);
if (tcsetattr(fd, TCSAFLUSH, &tios) < 0) {
printk(UM_KERN_ERR "failed to set terminal attributes\n");
return -1;
}
return 0;
}
struct slip_pre_exec_data {
int stdin_fd;
int stdout_fd;
int close_me;
};
static void slip_pre_exec(void *arg)
{
struct slip_pre_exec_data *data = arg;
if (data->stdin_fd >= 0)
dup2(data->stdin_fd, 0);
dup2(data->stdout_fd, 1);
if (data->close_me >= 0)
close(data->close_me);
}
static int slip_tramp(char **argv, int fd)
{
struct slip_pre_exec_data pe_data;
char *output;
int pid, fds[2], err, output_len;
err = os_pipe(fds, 1, 0);
if (err < 0) {
printk(UM_KERN_ERR "slip_tramp : pipe failed, err = %d\n",
-err);
goto out;
}
err = 0;
pe_data.stdin_fd = fd;
pe_data.stdout_fd = fds[1];
pe_data.close_me = fds[0];
err = run_helper(slip_pre_exec, &pe_data, argv);
if (err < 0)
goto out_close;
pid = err;
output_len = UM_KERN_PAGE_SIZE;
output = uml_kmalloc(output_len, UM_GFP_KERNEL);
if (output == NULL) {
printk(UM_KERN_ERR "slip_tramp : failed to allocate output "
"buffer\n");
os_kill_process(pid, 1);
err = -ENOMEM;
goto out_close;
}
close(fds[1]);
read_output(fds[0], output, output_len);
printk("%s", output);
err = helper_wait(pid);
close(fds[0]);
kfree(output);
return err;
out_close:
close(fds[0]);
close(fds[1]);
out:
return err;
}
static int slip_open(void *data)
{
struct slip_data *pri = data;
char version_buf[sizeof("nnnnn\0")];
char gate_buf[sizeof("nnn.nnn.nnn.nnn\0")];
char *argv[] = { "uml_net", version_buf, "slip", "up", gate_buf,
NULL };
int sfd, mfd, err;
err = get_pty();
if (err < 0) {
printk(UM_KERN_ERR "slip-open : Failed to open pty, err = %d\n",
-err);
goto out;
}
mfd = err;
err = open(ptsname(mfd), O_RDWR, 0);
if (err < 0) {
printk(UM_KERN_ERR "Couldn't open tty for slip line, "
"err = %d\n", -err);
goto out_close;
}
sfd = err;
err = set_up_tty(sfd);
if (err)
goto out_close2;
pri->slave = sfd;
pri->slip.pos = 0;
pri->slip.esc = 0;
if (pri->gate_addr != NULL) {
sprintf(version_buf, "%d", UML_NET_VERSION);
strcpy(gate_buf, pri->gate_addr);
err = slip_tramp(argv, sfd);
if (err < 0) {
printk(UM_KERN_ERR "slip_tramp failed - err = %d\n",
-err);
goto out_close2;
}
err = os_get_ifname(pri->slave, pri->name);
if (err < 0) {
printk(UM_KERN_ERR "get_ifname failed, err = %d\n",
-err);
goto out_close2;
}
iter_addresses(pri->dev, open_addr, pri->name);
}
else {
err = os_set_slip(sfd);
if (err < 0) {
printk(UM_KERN_ERR "Failed to set slip discipline "
"encapsulation - err = %d\n", -err);
goto out_close2;
}
}
return mfd;
out_close2:
close(sfd);
out_close:
close(mfd);
out:
return err;
}
static void slip_close(int fd, void *data)
{
struct slip_data *pri = data;
char version_buf[sizeof("nnnnn\0")];
char *argv[] = { "uml_net", version_buf, "slip", "down", pri->name,
NULL };
int err;
if (pri->gate_addr != NULL)
iter_addresses(pri->dev, close_addr, pri->name);
sprintf(version_buf, "%d", UML_NET_VERSION);
err = slip_tramp(argv, pri->slave);
if (err != 0)
printk(UM_KERN_ERR "slip_tramp failed - errno = %d\n", -err);
close(fd);
close(pri->slave);
pri->slave = -1;
}
int slip_user_read(int fd, void *buf, int len, struct slip_data *pri)
{
return slip_proto_read(fd, buf, len, &pri->slip);
}
int slip_user_write(int fd, void *buf, int len, struct slip_data *pri)
{
return slip_proto_write(fd, buf, len, &pri->slip);
}
static void slip_add_addr(unsigned char *addr, unsigned char *netmask,
void *data)
{
struct slip_data *pri = data;
if (pri->slave < 0)
return;
open_addr(addr, netmask, pri->name);
}
static void slip_del_addr(unsigned char *addr, unsigned char *netmask,
void *data)
{
struct slip_data *pri = data;
if (pri->slave < 0)
return;
close_addr(addr, netmask, pri->name);
}
const struct net_user_info slip_user_info = {
.init = slip_user_init,
.open = slip_open,
.close = slip_close,
.remove = NULL,
.add_address = slip_add_addr,
.delete_address = slip_del_addr,
.mtu = BUF_SIZE,
.max_packet = BUF_SIZE,
};
| linux-master | arch/um/drivers/slip_user.c |
/* Copyright (C) 2005 - 2008 Jeff Dike <jdike@{linux.intel,addtoit}.com> */
/* Much of this ripped from drivers/char/hw_random.c, see there for other
* copyright.
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*/
#include <linux/sched/signal.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include <linux/hw_random.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <init.h>
#include <irq_kern.h>
#include <os.h>
/*
* core module information
*/
#define RNG_MODULE_NAME "hw_random"
/* Changed at init time, in the non-modular case, and at module load
* time, in the module case. Presumably, the module subsystem
* protects against a module being loaded twice at the same time.
*/
static int random_fd = -1;
static struct hwrng hwrng;
static DECLARE_COMPLETION(have_data);
static int rng_dev_read(struct hwrng *rng, void *buf, size_t max, bool block)
{
int ret;
for (;;) {
ret = os_read_file(random_fd, buf, max);
if (block && ret == -EAGAIN) {
add_sigio_fd(random_fd);
ret = wait_for_completion_killable(&have_data);
ignore_sigio_fd(random_fd);
deactivate_fd(random_fd, RANDOM_IRQ);
if (ret < 0)
break;
} else {
break;
}
}
return ret != -EAGAIN ? ret : 0;
}
static irqreturn_t random_interrupt(int irq, void *data)
{
complete(&have_data);
return IRQ_HANDLED;
}
/*
* rng_init - initialize RNG module
*/
static int __init rng_init (void)
{
int err;
err = os_open_file("/dev/random", of_read(OPENFLAGS()), 0);
if (err < 0)
goto out;
random_fd = err;
err = um_request_irq(RANDOM_IRQ, random_fd, IRQ_READ, random_interrupt,
0, "random", NULL);
if (err < 0)
goto err_out_cleanup_hw;
sigio_broken(random_fd);
hwrng.name = RNG_MODULE_NAME;
hwrng.read = rng_dev_read;
err = hwrng_register(&hwrng);
if (err) {
pr_err(RNG_MODULE_NAME " registering failed (%d)\n", err);
goto err_out_cleanup_hw;
}
out:
return err;
err_out_cleanup_hw:
os_close_file(random_fd);
random_fd = -1;
goto out;
}
/*
* rng_cleanup - shutdown RNG module
*/
static void cleanup(void)
{
free_irq_by_fd(random_fd);
os_close_file(random_fd);
}
static void __exit rng_cleanup(void)
{
hwrng_unregister(&hwrng);
os_close_file(random_fd);
}
module_init (rng_init);
module_exit (rng_cleanup);
__uml_exitcall(cleanup);
MODULE_DESCRIPTION("UML Host Random Number Generator (RNG) driver");
MODULE_LICENSE("GPL");
| linux-master | arch/um/drivers/random.c |
// SPDX-License-Identifier: GPL-2.0
/*
* user-mode-linux networking multicast transport
* Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Copyright (C) 2001 by Harald Welte <[email protected]>
*
* based on the existing uml-networking code, which is
* Copyright (C) 2001 Lennert Buytenhek ([email protected]) and
* James Leu ([email protected]).
* Copyright (C) 2001 by various other people who didn't put their name here.
*
*
*/
#include <unistd.h>
#include <errno.h>
#include <netinet/in.h>
#include "umcast.h"
#include <net_user.h>
#include <um_malloc.h>
static struct sockaddr_in *new_addr(char *addr, unsigned short port)
{
struct sockaddr_in *sin;
sin = uml_kmalloc(sizeof(struct sockaddr_in), UM_GFP_KERNEL);
if (sin == NULL) {
printk(UM_KERN_ERR "new_addr: allocation of sockaddr_in "
"failed\n");
return NULL;
}
sin->sin_family = AF_INET;
if (addr)
sin->sin_addr.s_addr = in_aton(addr);
else
sin->sin_addr.s_addr = INADDR_ANY;
sin->sin_port = htons(port);
return sin;
}
static int umcast_user_init(void *data, void *dev)
{
struct umcast_data *pri = data;
pri->remote_addr = new_addr(pri->addr, pri->rport);
if (pri->unicast)
pri->listen_addr = new_addr(NULL, pri->lport);
else
pri->listen_addr = pri->remote_addr;
pri->dev = dev;
return 0;
}
static void umcast_remove(void *data)
{
struct umcast_data *pri = data;
kfree(pri->listen_addr);
if (pri->unicast)
kfree(pri->remote_addr);
pri->listen_addr = pri->remote_addr = NULL;
}
static int umcast_open(void *data)
{
struct umcast_data *pri = data;
struct sockaddr_in *lsin = pri->listen_addr;
struct sockaddr_in *rsin = pri->remote_addr;
struct ip_mreq mreq;
int fd, yes = 1, err = -EINVAL;
if ((!pri->unicast && lsin->sin_addr.s_addr == 0) ||
(rsin->sin_addr.s_addr == 0) ||
(lsin->sin_port == 0) || (rsin->sin_port == 0))
goto out;
fd = socket(AF_INET, SOCK_DGRAM, 0);
if (fd < 0) {
err = -errno;
printk(UM_KERN_ERR "umcast_open : data socket failed, "
"errno = %d\n", errno);
goto out;
}
if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(yes)) < 0) {
err = -errno;
printk(UM_KERN_ERR "umcast_open: SO_REUSEADDR failed, "
"errno = %d\n", errno);
goto out_close;
}
if (!pri->unicast) {
/* set ttl according to config */
if (setsockopt(fd, SOL_IP, IP_MULTICAST_TTL, &pri->ttl,
sizeof(pri->ttl)) < 0) {
err = -errno;
printk(UM_KERN_ERR "umcast_open: IP_MULTICAST_TTL "
"failed, error = %d\n", errno);
goto out_close;
}
/* set LOOP, so data does get fed back to local sockets */
if (setsockopt(fd, SOL_IP, IP_MULTICAST_LOOP,
&yes, sizeof(yes)) < 0) {
err = -errno;
printk(UM_KERN_ERR "umcast_open: IP_MULTICAST_LOOP "
"failed, error = %d\n", errno);
goto out_close;
}
}
/* bind socket to the address */
if (bind(fd, (struct sockaddr *) lsin, sizeof(*lsin)) < 0) {
err = -errno;
printk(UM_KERN_ERR "umcast_open : data bind failed, "
"errno = %d\n", errno);
goto out_close;
}
if (!pri->unicast) {
/* subscribe to the multicast group */
mreq.imr_multiaddr.s_addr = lsin->sin_addr.s_addr;
mreq.imr_interface.s_addr = 0;
if (setsockopt(fd, SOL_IP, IP_ADD_MEMBERSHIP,
&mreq, sizeof(mreq)) < 0) {
err = -errno;
printk(UM_KERN_ERR "umcast_open: IP_ADD_MEMBERSHIP "
"failed, error = %d\n", errno);
printk(UM_KERN_ERR "There appears not to be a "
"multicast-capable network interface on the "
"host.\n");
printk(UM_KERN_ERR "eth0 should be configured in order "
"to use the multicast transport.\n");
goto out_close;
}
}
return fd;
out_close:
close(fd);
out:
return err;
}
static void umcast_close(int fd, void *data)
{
struct umcast_data *pri = data;
if (!pri->unicast) {
struct ip_mreq mreq;
struct sockaddr_in *lsin = pri->listen_addr;
mreq.imr_multiaddr.s_addr = lsin->sin_addr.s_addr;
mreq.imr_interface.s_addr = 0;
if (setsockopt(fd, SOL_IP, IP_DROP_MEMBERSHIP,
&mreq, sizeof(mreq)) < 0) {
printk(UM_KERN_ERR "umcast_close: IP_DROP_MEMBERSHIP "
"failed, error = %d\n", errno);
}
}
close(fd);
}
int umcast_user_write(int fd, void *buf, int len, struct umcast_data *pri)
{
struct sockaddr_in *data_addr = pri->remote_addr;
return net_sendto(fd, buf, len, data_addr, sizeof(*data_addr));
}
const struct net_user_info umcast_user_info = {
.init = umcast_user_init,
.open = umcast_open,
.close = umcast_close,
.remove = umcast_remove,
.add_address = NULL,
.delete_address = NULL,
.mtu = ETH_MAX_PACKET,
.max_packet = ETH_MAX_PACKET + ETH_HEADER_OTHER,
};
| linux-master | arch/um/drivers/umcast_user.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
#include <linux/init.h>
#include <linux/netdevice.h>
#include <net_kern.h>
#include "pcap_user.h"
struct pcap_init {
char *host_if;
int promisc;
int optimize;
char *filter;
};
void pcap_init_kern(struct net_device *dev, void *data)
{
struct uml_net_private *pri;
struct pcap_data *ppri;
struct pcap_init *init = data;
pri = netdev_priv(dev);
ppri = (struct pcap_data *) pri->user;
ppri->host_if = init->host_if;
ppri->promisc = init->promisc;
ppri->optimize = init->optimize;
ppri->filter = init->filter;
printk("pcap backend, host interface %s\n", ppri->host_if);
}
static int pcap_read(int fd, struct sk_buff *skb, struct uml_net_private *lp)
{
return pcap_user_read(fd, skb_mac_header(skb),
skb->dev->mtu + ETH_HEADER_OTHER,
(struct pcap_data *) &lp->user);
}
static int pcap_write(int fd, struct sk_buff *skb, struct uml_net_private *lp)
{
return -EPERM;
}
static const struct net_kern_info pcap_kern_info = {
.init = pcap_init_kern,
.protocol = eth_protocol,
.read = pcap_read,
.write = pcap_write,
};
int pcap_setup(char *str, char **mac_out, void *data)
{
struct pcap_init *init = data;
char *remain, *host_if = NULL, *options[2] = { NULL, NULL };
int i;
*init = ((struct pcap_init)
{ .host_if = "eth0",
.promisc = 1,
.optimize = 0,
.filter = NULL });
remain = split_if_spec(str, &host_if, &init->filter,
&options[0], &options[1], mac_out, NULL);
if (remain != NULL) {
printk(KERN_ERR "pcap_setup - Extra garbage on "
"specification : '%s'\n", remain);
return 0;
}
if (host_if != NULL)
init->host_if = host_if;
for (i = 0; i < ARRAY_SIZE(options); i++) {
if (options[i] == NULL)
continue;
if (!strcmp(options[i], "promisc"))
init->promisc = 1;
else if (!strcmp(options[i], "nopromisc"))
init->promisc = 0;
else if (!strcmp(options[i], "optimize"))
init->optimize = 1;
else if (!strcmp(options[i], "nooptimize"))
init->optimize = 0;
else {
printk(KERN_ERR "pcap_setup : bad option - '%s'\n",
options[i]);
return 0;
}
}
return 1;
}
static struct transport pcap_transport = {
.list = LIST_HEAD_INIT(pcap_transport.list),
.name = "pcap",
.setup = pcap_setup,
.user = &pcap_user_info,
.kern = &pcap_kern_info,
.private_size = sizeof(struct pcap_data),
.setup_size = sizeof(struct pcap_init),
};
static int register_pcap(void)
{
register_transport(&pcap_transport);
return 0;
}
late_initcall(register_pcap);
| linux-master | arch/um/drivers/pcap_kern.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 - 2019 Cambridge Greys Limited
* Copyright (C) 2011 - 2014 Cisco Systems Inc
* Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Copyright (C) 2001 Lennert Buytenhek ([email protected]) and
* James Leu ([email protected]).
* Copyright (C) 2001 by various other people who didn't put their name here.
*/
#include <linux/memblock.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/firmware.h>
#include <linux/fs.h>
#include <uapi/linux/filter.h>
#include <init.h>
#include <irq_kern.h>
#include <irq_user.h>
#include <net_kern.h>
#include <os.h>
#include "mconsole_kern.h"
#include "vector_user.h"
#include "vector_kern.h"
/*
* Adapted from network devices with the following major changes:
* All transports are static - simplifies the code significantly
* Multiple FDs/IRQs per device
* Vector IO optionally used for read/write, falling back to legacy
* based on configuration and/or availability
* Configuration is no longer positional - L2TPv3 and GRE require up to
* 10 parameters, passing this as positional is not fit for purpose.
* Only socket transports are supported
*/
#define DRIVER_NAME "uml-vector"
struct vector_cmd_line_arg {
struct list_head list;
int unit;
char *arguments;
};
struct vector_device {
struct list_head list;
struct net_device *dev;
struct platform_device pdev;
int unit;
int opened;
};
static LIST_HEAD(vec_cmd_line);
static DEFINE_SPINLOCK(vector_devices_lock);
static LIST_HEAD(vector_devices);
static int driver_registered;
static void vector_eth_configure(int n, struct arglist *def);
static int vector_mmsg_rx(struct vector_private *vp, int budget);
/* Argument accessors to set variables (and/or set default values)
* mtu, buffer sizing, default headroom, etc
*/
#define DEFAULT_HEADROOM 2
#define SAFETY_MARGIN 32
#define DEFAULT_VECTOR_SIZE 64
#define TX_SMALL_PACKET 128
#define MAX_IOV_SIZE (MAX_SKB_FRAGS + 1)
static const struct {
const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
{ "rx_queue_max" },
{ "rx_queue_running_average" },
{ "tx_queue_max" },
{ "tx_queue_running_average" },
{ "rx_encaps_errors" },
{ "tx_timeout_count" },
{ "tx_restart_queue" },
{ "tx_kicks" },
{ "tx_flow_control_xon" },
{ "tx_flow_control_xoff" },
{ "rx_csum_offload_good" },
{ "rx_csum_offload_errors"},
{ "sg_ok"},
{ "sg_linearized"},
};
#define VECTOR_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
static void vector_reset_stats(struct vector_private *vp)
{
vp->estats.rx_queue_max = 0;
vp->estats.rx_queue_running_average = 0;
vp->estats.tx_queue_max = 0;
vp->estats.tx_queue_running_average = 0;
vp->estats.rx_encaps_errors = 0;
vp->estats.tx_timeout_count = 0;
vp->estats.tx_restart_queue = 0;
vp->estats.tx_kicks = 0;
vp->estats.tx_flow_control_xon = 0;
vp->estats.tx_flow_control_xoff = 0;
vp->estats.sg_ok = 0;
vp->estats.sg_linearized = 0;
}
static int get_mtu(struct arglist *def)
{
char *mtu = uml_vector_fetch_arg(def, "mtu");
long result;
if (mtu != NULL) {
if (kstrtoul(mtu, 10, &result) == 0)
if ((result < (1 << 16) - 1) && (result >= 576))
return result;
}
return ETH_MAX_PACKET;
}
static char *get_bpf_file(struct arglist *def)
{
return uml_vector_fetch_arg(def, "bpffile");
}
static bool get_bpf_flash(struct arglist *def)
{
char *allow = uml_vector_fetch_arg(def, "bpfflash");
long result;
if (allow != NULL) {
if (kstrtoul(allow, 10, &result) == 0)
return result > 0;
}
return false;
}
static int get_depth(struct arglist *def)
{
char *mtu = uml_vector_fetch_arg(def, "depth");
long result;
if (mtu != NULL) {
if (kstrtoul(mtu, 10, &result) == 0)
return result;
}
return DEFAULT_VECTOR_SIZE;
}
static int get_headroom(struct arglist *def)
{
char *mtu = uml_vector_fetch_arg(def, "headroom");
long result;
if (mtu != NULL) {
if (kstrtoul(mtu, 10, &result) == 0)
return result;
}
return DEFAULT_HEADROOM;
}
static int get_req_size(struct arglist *def)
{
char *gro = uml_vector_fetch_arg(def, "gro");
long result;
if (gro != NULL) {
if (kstrtoul(gro, 10, &result) == 0) {
if (result > 0)
return 65536;
}
}
return get_mtu(def) + ETH_HEADER_OTHER +
get_headroom(def) + SAFETY_MARGIN;
}
static int get_transport_options(struct arglist *def)
{
char *transport = uml_vector_fetch_arg(def, "transport");
char *vector = uml_vector_fetch_arg(def, "vec");
int vec_rx = VECTOR_RX;
int vec_tx = VECTOR_TX;
long parsed;
int result = 0;
if (transport == NULL)
return -EINVAL;
if (vector != NULL) {
if (kstrtoul(vector, 10, &parsed) == 0) {
if (parsed == 0) {
vec_rx = 0;
vec_tx = 0;
}
}
}
if (get_bpf_flash(def))
result = VECTOR_BPF_FLASH;
if (strncmp(transport, TRANS_TAP, TRANS_TAP_LEN) == 0)
return result;
if (strncmp(transport, TRANS_HYBRID, TRANS_HYBRID_LEN) == 0)
return (result | vec_rx | VECTOR_BPF);
if (strncmp(transport, TRANS_RAW, TRANS_RAW_LEN) == 0)
return (result | vec_rx | vec_tx | VECTOR_QDISC_BYPASS);
return (result | vec_rx | vec_tx);
}
/* A mini-buffer for packet drop read
* All of our supported transports are datagram oriented and we always
* read using recvmsg or recvmmsg. If we pass a buffer which is smaller
 * than the packet size it still counts as a full packet read and will
* clean the incoming stream to keep sigio/epoll happy
*/
#define DROP_BUFFER_SIZE 32
static char *drop_buffer;
/* Array backed queues optimized for bulk enqueue/dequeue and
* 1:N (small values of N) or 1:1 enqueuer/dequeuer ratios.
* For more details and full design rationale see
* http://foswiki.cambridgegreys.com/Main/EatYourTailAndEnjoyIt
*/
/*
* Advance the mmsg queue head by n = advance. Resets the queue to
* maximum enqueue/dequeue-at-once capacity if possible. Called by
* dequeuers. Caller must hold the head_lock!
*/
static int vector_advancehead(struct vector_queue *qi, int advance)
{
int queue_depth;
qi->head =
(qi->head + advance)
% qi->max_depth;
spin_lock(&qi->tail_lock);
qi->queue_depth -= advance;
/* we are at 0, use this to
* reset head and tail so we can use max size vectors
*/
if (qi->queue_depth == 0) {
qi->head = 0;
qi->tail = 0;
}
queue_depth = qi->queue_depth;
spin_unlock(&qi->tail_lock);
return queue_depth;
}
/* Advance the queue tail by n = advance.
* This is called by enqueuers which should hold the
* head lock already
*/
static int vector_advancetail(struct vector_queue *qi, int advance)
{
int queue_depth;
qi->tail =
(qi->tail + advance)
% qi->max_depth;
spin_lock(&qi->head_lock);
qi->queue_depth += advance;
queue_depth = qi->queue_depth;
spin_unlock(&qi->head_lock);
return queue_depth;
}
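/*
 * Sketch, not compiled: the head/tail bookkeeping above on a toy queue of
 * depth 8 with made-up indices.  Shows the modulo wrap and the "reset to
 * 0/0 when empty" trick that lets the next enqueue use one maximal
 * contiguous vector again.  The function name is local to this sketch.
 */
#if 0
static void vector_queue_index_sketch(void)
{
	int max_depth = 8;
	int head = 6, tail = 2, queue_depth = 4;	/* entries 6,7,0,1 */
	int advance = 3;

	/* dequeue side, as in vector_advancehead() */
	head = (head + advance) % max_depth;		/* 6 -> 1 */
	queue_depth -= advance;				/* 4 -> 1 */
	if (queue_depth == 0) {				/* not yet empty */
		head = 0;
		tail = 0;
	}

	/* enqueue side, as in vector_advancetail() */
	tail = (tail + 1) % max_depth;			/* 2 -> 3 */
	queue_depth += 1;				/* 1 -> 2 */
	(void) head;
	(void) tail;
	(void) queue_depth;
}
#endif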
static int prep_msg(struct vector_private *vp,
struct sk_buff *skb,
struct iovec *iov)
{
int iov_index = 0;
int nr_frags, frag;
skb_frag_t *skb_frag;
nr_frags = skb_shinfo(skb)->nr_frags;
if (nr_frags > MAX_IOV_SIZE) {
if (skb_linearize(skb) != 0)
goto drop;
}
if (vp->header_size > 0) {
iov[iov_index].iov_len = vp->header_size;
vp->form_header(iov[iov_index].iov_base, skb, vp);
iov_index++;
}
iov[iov_index].iov_base = skb->data;
if (nr_frags > 0) {
iov[iov_index].iov_len = skb->len - skb->data_len;
vp->estats.sg_ok++;
} else
iov[iov_index].iov_len = skb->len;
iov_index++;
for (frag = 0; frag < nr_frags; frag++) {
skb_frag = &skb_shinfo(skb)->frags[frag];
iov[iov_index].iov_base = skb_frag_address_safe(skb_frag);
iov[iov_index].iov_len = skb_frag_size(skb_frag);
iov_index++;
}
return iov_index;
drop:
return -1;
}
/*
* Generic vector enqueue with support for forming headers using transport
* specific callback. Allows GRE, L2TPv3, RAW and other transports
* to use a common enqueue procedure in vector mode
*/
static int vector_enqueue(struct vector_queue *qi, struct sk_buff *skb)
{
struct vector_private *vp = netdev_priv(qi->dev);
int queue_depth;
int packet_len;
struct mmsghdr *mmsg_vector = qi->mmsg_vector;
int iov_count;
spin_lock(&qi->tail_lock);
spin_lock(&qi->head_lock);
queue_depth = qi->queue_depth;
spin_unlock(&qi->head_lock);
if (skb)
packet_len = skb->len;
if (queue_depth < qi->max_depth) {
*(qi->skbuff_vector + qi->tail) = skb;
mmsg_vector += qi->tail;
iov_count = prep_msg(
vp,
skb,
mmsg_vector->msg_hdr.msg_iov
);
if (iov_count < 1)
goto drop;
mmsg_vector->msg_hdr.msg_iovlen = iov_count;
mmsg_vector->msg_hdr.msg_name = vp->fds->remote_addr;
mmsg_vector->msg_hdr.msg_namelen = vp->fds->remote_addr_size;
queue_depth = vector_advancetail(qi, 1);
} else
goto drop;
spin_unlock(&qi->tail_lock);
return queue_depth;
drop:
qi->dev->stats.tx_dropped++;
if (skb != NULL) {
packet_len = skb->len;
dev_consume_skb_any(skb);
netdev_completed_queue(qi->dev, 1, packet_len);
}
spin_unlock(&qi->tail_lock);
return queue_depth;
}
static int consume_vector_skbs(struct vector_queue *qi, int count)
{
struct sk_buff *skb;
int skb_index;
int bytes_compl = 0;
for (skb_index = qi->head; skb_index < qi->head + count; skb_index++) {
skb = *(qi->skbuff_vector + skb_index);
/* mark as empty to ensure correct destruction if
* needed
*/
bytes_compl += skb->len;
*(qi->skbuff_vector + skb_index) = NULL;
dev_consume_skb_any(skb);
}
qi->dev->stats.tx_bytes += bytes_compl;
qi->dev->stats.tx_packets += count;
netdev_completed_queue(qi->dev, count, bytes_compl);
return vector_advancehead(qi, count);
}
/*
* Generic vector deque via sendmmsg with support for forming headers
* using transport specific callback. Allows GRE, L2TPv3, RAW and
* other transports to use a common dequeue procedure in vector mode
*/
static int vector_send(struct vector_queue *qi)
{
struct vector_private *vp = netdev_priv(qi->dev);
struct mmsghdr *send_from;
int result = 0, send_len, queue_depth = qi->max_depth;
if (spin_trylock(&qi->head_lock)) {
if (spin_trylock(&qi->tail_lock)) {
/* update queue_depth to current value */
queue_depth = qi->queue_depth;
spin_unlock(&qi->tail_lock);
while (queue_depth > 0) {
/* Calculate the start of the vector */
send_len = queue_depth;
send_from = qi->mmsg_vector;
send_from += qi->head;
/* Adjust vector size if wraparound */
if (send_len + qi->head > qi->max_depth)
send_len = qi->max_depth - qi->head;
/* Try to TX as many packets as possible */
if (send_len > 0) {
result = uml_vector_sendmmsg(
vp->fds->tx_fd,
send_from,
send_len,
0
);
vp->in_write_poll =
(result != send_len);
}
/* For some of the sendmmsg error scenarios
* we may end being unsure in the TX success
* for all packets. It is safer to declare
* them all TX-ed and blame the network.
*/
if (result < 0) {
if (net_ratelimit())
netdev_err(vp->dev, "sendmmsg err=%i\n",
result);
vp->in_error = true;
result = send_len;
}
if (result > 0) {
queue_depth =
consume_vector_skbs(qi, result);
/* This is equivalent to an TX IRQ.
* Restart the upper layers to feed us
* more packets.
*/
if (result > vp->estats.tx_queue_max)
vp->estats.tx_queue_max = result;
vp->estats.tx_queue_running_average =
(vp->estats.tx_queue_running_average + result) >> 1;
}
netif_wake_queue(qi->dev);
/* if TX is busy, break out of the send loop,
* poll write IRQ will reschedule xmit for us
*/
if (result != send_len) {
vp->estats.tx_restart_queue++;
break;
}
}
}
spin_unlock(&qi->head_lock);
}
return queue_depth;
}
/* Queue destructor. Deliberately stateless so we can use
* it in queue cleanup if initialization fails.
*/
static void destroy_queue(struct vector_queue *qi)
{
int i;
struct iovec *iov;
struct vector_private *vp = netdev_priv(qi->dev);
struct mmsghdr *mmsg_vector;
if (qi == NULL)
return;
/* deallocate any skbuffs - we rely on any unused to be
* set to NULL.
*/
if (qi->skbuff_vector != NULL) {
for (i = 0; i < qi->max_depth; i++) {
if (*(qi->skbuff_vector + i) != NULL)
dev_kfree_skb_any(*(qi->skbuff_vector + i));
}
kfree(qi->skbuff_vector);
}
/* deallocate matching IOV structures including header buffs */
if (qi->mmsg_vector != NULL) {
mmsg_vector = qi->mmsg_vector;
for (i = 0; i < qi->max_depth; i++) {
iov = mmsg_vector->msg_hdr.msg_iov;
if (iov != NULL) {
if ((vp->header_size > 0) &&
(iov->iov_base != NULL))
kfree(iov->iov_base);
kfree(iov);
}
mmsg_vector++;
}
kfree(qi->mmsg_vector);
}
kfree(qi);
}
/*
 * Queue constructor. Create a queue with a given size.
*/
static struct vector_queue *create_queue(
struct vector_private *vp,
int max_size,
int header_size,
int num_extra_frags)
{
struct vector_queue *result;
int i;
struct iovec *iov;
struct mmsghdr *mmsg_vector;
result = kmalloc(sizeof(struct vector_queue), GFP_KERNEL);
if (result == NULL)
return NULL;
result->max_depth = max_size;
result->dev = vp->dev;
result->mmsg_vector = kmalloc(
(sizeof(struct mmsghdr) * max_size), GFP_KERNEL);
if (result->mmsg_vector == NULL)
goto out_mmsg_fail;
result->skbuff_vector = kmalloc(
(sizeof(void *) * max_size), GFP_KERNEL);
if (result->skbuff_vector == NULL)
goto out_skb_fail;
/* further failures can be handled safely by destroy_queue*/
mmsg_vector = result->mmsg_vector;
for (i = 0; i < max_size; i++) {
/* Clear all pointers - we use non-NULL as marking on
* what to free on destruction
*/
*(result->skbuff_vector + i) = NULL;
mmsg_vector->msg_hdr.msg_iov = NULL;
mmsg_vector++;
}
mmsg_vector = result->mmsg_vector;
result->max_iov_frags = num_extra_frags;
for (i = 0; i < max_size; i++) {
if (vp->header_size > 0)
iov = kmalloc_array(3 + num_extra_frags,
sizeof(struct iovec),
GFP_KERNEL
);
else
iov = kmalloc_array(2 + num_extra_frags,
sizeof(struct iovec),
GFP_KERNEL
);
if (iov == NULL)
goto out_fail;
mmsg_vector->msg_hdr.msg_iov = iov;
mmsg_vector->msg_hdr.msg_iovlen = 1;
mmsg_vector->msg_hdr.msg_control = NULL;
mmsg_vector->msg_hdr.msg_controllen = 0;
mmsg_vector->msg_hdr.msg_flags = MSG_DONTWAIT;
mmsg_vector->msg_hdr.msg_name = NULL;
mmsg_vector->msg_hdr.msg_namelen = 0;
if (vp->header_size > 0) {
iov->iov_base = kmalloc(header_size, GFP_KERNEL);
if (iov->iov_base == NULL)
goto out_fail;
iov->iov_len = header_size;
mmsg_vector->msg_hdr.msg_iovlen = 2;
iov++;
}
iov->iov_base = NULL;
iov->iov_len = 0;
mmsg_vector++;
}
spin_lock_init(&result->head_lock);
spin_lock_init(&result->tail_lock);
result->queue_depth = 0;
result->head = 0;
result->tail = 0;
return result;
out_skb_fail:
kfree(result->mmsg_vector);
out_mmsg_fail:
kfree(result);
return NULL;
out_fail:
destroy_queue(result);
return NULL;
}
/*
* We do not use the RX queue as a proper wraparound queue for now
* This is not necessary because the consumption via napi_gro_receive()
* happens in-line. While we can try using the return code of
 * netif_rx() for flow control, there are no drivers doing this today.
* For this RX specific use we ignore the tail/head locks and
* just read into a prepared queue filled with skbuffs.
*/
static struct sk_buff *prep_skb(
struct vector_private *vp,
struct user_msghdr *msg)
{
int linear = vp->max_packet + vp->headroom + SAFETY_MARGIN;
struct sk_buff *result;
int iov_index = 0, len;
struct iovec *iov = msg->msg_iov;
int err, nr_frags, frag;
skb_frag_t *skb_frag;
if (vp->req_size <= linear)
len = linear;
else
len = vp->req_size;
result = alloc_skb_with_frags(
linear,
len - vp->max_packet,
3,
&err,
GFP_ATOMIC
);
if (vp->header_size > 0)
iov_index++;
if (result == NULL) {
iov[iov_index].iov_base = NULL;
iov[iov_index].iov_len = 0;
goto done;
}
skb_reserve(result, vp->headroom);
result->dev = vp->dev;
skb_put(result, vp->max_packet);
result->data_len = len - vp->max_packet;
result->len += len - vp->max_packet;
skb_reset_mac_header(result);
result->ip_summed = CHECKSUM_NONE;
iov[iov_index].iov_base = result->data;
iov[iov_index].iov_len = vp->max_packet;
iov_index++;
nr_frags = skb_shinfo(result)->nr_frags;
for (frag = 0; frag < nr_frags; frag++) {
skb_frag = &skb_shinfo(result)->frags[frag];
iov[iov_index].iov_base = skb_frag_address_safe(skb_frag);
if (iov[iov_index].iov_base != NULL)
iov[iov_index].iov_len = skb_frag_size(skb_frag);
else
iov[iov_index].iov_len = 0;
iov_index++;
}
done:
msg->msg_iovlen = iov_index;
return result;
}
/* Prepare queue for recvmmsg one-shot rx - fill with fresh sk_buffs*/
static void prep_queue_for_rx(struct vector_queue *qi)
{
struct vector_private *vp = netdev_priv(qi->dev);
struct mmsghdr *mmsg_vector = qi->mmsg_vector;
void **skbuff_vector = qi->skbuff_vector;
int i;
if (qi->queue_depth == 0)
return;
for (i = 0; i < qi->queue_depth; i++) {
/* It is OK if allocation fails - recvmmsg with NULL data in
 * the iov argument still performs an RX, it just drops the packet.
 * This allows us to stop faffing around with a "drop buffer".
 */
*skbuff_vector = prep_skb(vp, &mmsg_vector->msg_hdr);
skbuff_vector++;
mmsg_vector++;
}
qi->queue_depth = 0;
}
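/* Look up a vector device by unit number under vector_devices_lock. */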
static struct vector_device *find_device(int n)
{
struct vector_device *device;
struct list_head *ele;
spin_lock(&vector_devices_lock);
list_for_each(ele, &vector_devices) {
device = list_entry(ele, struct vector_device, list);
if (device->unit == n)
goto out;
}
device = NULL;
out:
spin_unlock(&vector_devices_lock);
return device;
}
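/* Parse a "<unit>:<options>" specification: extract the device number,
 * check that the unit is not already configured and return a pointer to
 * the option string following the ':'.
 */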
static int vector_parse(char *str, int *index_out, char **str_out,
char **error_out)
{
int n, len, err;
char *start = str;
len = strlen(str);
while ((*str != ':') && (strlen(str) > 1))
str++;
if (*str != ':') {
*error_out = "Expected ':' after device number";
return -EINVAL;
}
*str = '\0';
err = kstrtouint(start, 0, &n);
if (err < 0) {
*error_out = "Bad device number";
return err;
}
str++;
if (find_device(n)) {
*error_out = "Device already configured";
return -EINVAL;
}
*index_out = n;
*str_out = str;
return 0;
}
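/* mconsole "config" handler - duplicate the option string (the parser
 * chops it up in place) and configure the interface.
 */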
static int vector_config(char *str, char **error_out)
{
int err, n;
char *params;
struct arglist *parsed;
err = vector_parse(str, &n, &params, error_out);
if (err != 0)
return err;
/* This string is broken up and the pieces are used by the underlying
 * driver. We should copy it to make sure things do not go wrong
* later.
*/
params = kstrdup(params, GFP_KERNEL);
if (params == NULL) {
*error_out = "vector_config failed to strdup string";
return -ENOMEM;
}
parsed = uml_parse_vector_ifspec(params);
if (parsed == NULL) {
*error_out = "vector_config failed to parse parameters";
kfree(params);
return -EINVAL;
}
vector_eth_configure(n, parsed);
return 0;
}
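/* mconsole "id" handler - parse a single device number. */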
static int vector_id(char **str, int *start_out, int *end_out)
{
char *end;
int n;
n = simple_strtoul(*str, &end, 0);
if ((*end != '\0') || (end == *str))
return -1;
*start_out = n;
*end_out = n;
*str = end;
return n;
}
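/* mconsole "remove" handler - refuse to remove an interface that is
 * still open (vp->fds set).
 */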
static int vector_remove(int n, char **error_out)
{
struct vector_device *vec_d;
struct net_device *dev;
struct vector_private *vp;
vec_d = find_device(n);
if (vec_d == NULL)
return -ENODEV;
dev = vec_d->dev;
vp = netdev_priv(dev);
if (vp->fds != NULL)
return -EBUSY;
unregister_netdev(dev);
platform_device_unregister(&vec_d->pdev);
return 0;
}
/*
* There is no shared per-transport initialization code, so
* we will just initialize each interface one by one and
* add them to a list
*/
static struct platform_driver uml_net_driver = {
.driver = {
.name = DRIVER_NAME,
},
};
static void vector_device_release(struct device *dev)
{
struct vector_device *device = dev_get_drvdata(dev);
struct net_device *netdev = device->dev;
list_del(&device->list);
kfree(device);
free_netdev(netdev);
}
/* Bog standard recv using recvmsg - not used normally unless the user
* explicitly specifies not to use recvmmsg vector RX.
*/
static int vector_legacy_rx(struct vector_private *vp)
{
int pkt_len;
struct user_msghdr hdr;
struct iovec iov[2 + MAX_IOV_SIZE]; /* header + data use case only */
int iovpos = 0;
struct sk_buff *skb;
int header_check;
hdr.msg_name = NULL;
hdr.msg_namelen = 0;
hdr.msg_iov = (struct iovec *) &iov;
hdr.msg_control = NULL;
hdr.msg_controllen = 0;
hdr.msg_flags = 0;
if (vp->header_size > 0) {
iov[0].iov_base = vp->header_rxbuffer;
iov[0].iov_len = vp->header_size;
}
skb = prep_skb(vp, &hdr);
if (skb == NULL) {
/* Read a packet into drop_buffer and don't do
* anything with it.
*/
iov[iovpos].iov_base = drop_buffer;
iov[iovpos].iov_len = DROP_BUFFER_SIZE;
hdr.msg_iovlen = 1;
vp->dev->stats.rx_dropped++;
}
pkt_len = uml_vector_recvmsg(vp->fds->rx_fd, &hdr, 0);
if (pkt_len < 0) {
vp->in_error = true;
return pkt_len;
}
if (skb != NULL) {
if (pkt_len > vp->header_size) {
if (vp->header_size > 0) {
header_check = vp->verify_header(
vp->header_rxbuffer, skb, vp);
if (header_check < 0) {
dev_kfree_skb_irq(skb);
vp->dev->stats.rx_dropped++;
vp->estats.rx_encaps_errors++;
return 0;
}
if (header_check > 0) {
vp->estats.rx_csum_offload_good++;
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
}
pskb_trim(skb, pkt_len - vp->rx_header_size);
skb->protocol = eth_type_trans(skb, skb->dev);
vp->dev->stats.rx_bytes += skb->len;
vp->dev->stats.rx_packets++;
napi_gro_receive(&vp->napi, skb);
} else {
dev_kfree_skb_irq(skb);
}
}
return pkt_len;
}
/*
* Packet at a time TX which falls back to vector TX if the
* underlying transport is busy.
*/
static int writev_tx(struct vector_private *vp, struct sk_buff *skb)
{
struct iovec iov[3 + MAX_IOV_SIZE];
int iov_count, pkt_len = 0;
iov[0].iov_base = vp->header_txbuffer;
iov_count = prep_msg(vp, skb, (struct iovec *) &iov);
if (iov_count < 1)
goto drop;
pkt_len = uml_vector_writev(
vp->fds->tx_fd,
(struct iovec *) &iov,
iov_count
);
if (pkt_len < 0)
goto drop;
netif_trans_update(vp->dev);
netif_wake_queue(vp->dev);
if (pkt_len > 0) {
vp->dev->stats.tx_bytes += skb->len;
vp->dev->stats.tx_packets++;
} else {
vp->dev->stats.tx_dropped++;
}
consume_skb(skb);
return pkt_len;
drop:
vp->dev->stats.tx_dropped++;
consume_skb(skb);
if (pkt_len < 0)
vp->in_error = true;
return pkt_len;
}
/*
* Receive as many messages as we can in one call using the special
* mmsg vector matched to an skb vector which we prepared earlier.
*/
static int vector_mmsg_rx(struct vector_private *vp, int budget)
{
int packet_count, i;
struct vector_queue *qi = vp->rx_queue;
struct sk_buff *skb;
struct mmsghdr *mmsg_vector = qi->mmsg_vector;
void **skbuff_vector = qi->skbuff_vector;
int header_check;
/* Refresh the vector and make sure it is filled with new skbs and the
* iovs are updated to point to them.
*/
prep_queue_for_rx(qi);
/* Fire the Lazy Gun - get as many packets as we can in one go. */
if (budget > qi->max_depth)
budget = qi->max_depth;
packet_count = uml_vector_recvmmsg(
vp->fds->rx_fd, qi->mmsg_vector, qi->max_depth, 0);
if (packet_count < 0)
vp->in_error = true;
if (packet_count <= 0)
return packet_count;
/* We treat packet processing as enqueue and buffer refresh as dequeue.
 * The queue_depth tells us how many buffers have been used and how
 * many we need to prep the next time prep_queue_for_rx() is called.
 */
qi->queue_depth = packet_count;
for (i = 0; i < packet_count; i++) {
skb = (*skbuff_vector);
if (mmsg_vector->msg_len > vp->header_size) {
if (vp->header_size > 0) {
header_check = vp->verify_header(
mmsg_vector->msg_hdr.msg_iov->iov_base,
skb,
vp
);
if (header_check < 0) {
/* Overlay header failed to verify - discard.
* We can actually keep this skb and reuse it,
* but that will make the prep logic too
* complex.
*/
dev_kfree_skb_irq(skb);
vp->estats.rx_encaps_errors++;
continue;
}
if (header_check > 0) {
vp->estats.rx_csum_offload_good++;
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
}
pskb_trim(skb,
mmsg_vector->msg_len - vp->rx_header_size);
skb->protocol = eth_type_trans(skb, skb->dev);
/*
 * We do not need to lock when updating stats here;
 * the interrupt loop is non-reentrant.
*/
vp->dev->stats.rx_bytes += skb->len;
vp->dev->stats.rx_packets++;
napi_gro_receive(&vp->napi, skb);
} else {
/* Overlay header too short to do anything - discard.
* We can actually keep this skb and reuse it,
* but that will make the prep logic too complex.
*/
if (skb != NULL)
dev_kfree_skb_irq(skb);
}
(*skbuff_vector) = NULL;
/* Move to the next buffer element */
mmsg_vector++;
skbuff_vector++;
}
if (packet_count > 0) {
if (vp->estats.rx_queue_max < packet_count)
vp->estats.rx_queue_max = packet_count;
vp->estats.rx_queue_running_average =
(vp->estats.rx_queue_running_average + packet_count) >> 1;
}
return packet_count;
}
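/* ndo_start_xmit: with VECTOR_TX the packet is enqueued and either
 * coalesced (timer armed) or flushed via vector_send(); without it we
 * fall back to packet-at-a-time writev_tx().
 */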
static int vector_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct vector_private *vp = netdev_priv(dev);
int queue_depth = 0;
if (vp->in_error) {
deactivate_fd(vp->fds->rx_fd, vp->rx_irq);
if ((vp->fds->rx_fd != vp->fds->tx_fd) && (vp->tx_irq != 0))
deactivate_fd(vp->fds->tx_fd, vp->tx_irq);
return NETDEV_TX_BUSY;
}
if ((vp->options & VECTOR_TX) == 0) {
writev_tx(vp, skb);
return NETDEV_TX_OK;
}
/* We do BQL only in the vector path - there is no point doing it in
 * packet-at-a-time mode as there is no device queue.
 */
netdev_sent_queue(vp->dev, skb->len);
queue_depth = vector_enqueue(vp->tx_queue, skb);
if (queue_depth < vp->tx_queue->max_depth && netdev_xmit_more()) {
mod_timer(&vp->tl, vp->coalesce);
return NETDEV_TX_OK;
} else {
queue_depth = vector_send(vp->tx_queue);
if (queue_depth > 0)
napi_schedule(&vp->napi);
}
return NETDEV_TX_OK;
}
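/* RX interrupt - just schedule NAPI, the work happens in vector_poll(). */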
static irqreturn_t vector_rx_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct vector_private *vp = netdev_priv(dev);
if (!netif_running(dev))
return IRQ_NONE;
napi_schedule(&vp->napi);
return IRQ_HANDLED;
}
static irqreturn_t vector_tx_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct vector_private *vp = netdev_priv(dev);
if (!netif_running(dev))
return IRQ_NONE;
/* We need to pay attention to it only if we got
 * -EAGAIN or -ENOBUFS from sendmmsg. Otherwise
 * we ignore it. In the future, it may be worth
 * improving the IRQ controller a bit to make
 * tweaking the IRQ mask less costly.
 */
napi_schedule(&vp->napi);
return IRQ_HANDLED;
}
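/* Round-robin index into the vector IRQ space, advanced on each open. */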
static int irq_rr;
static int vector_net_close(struct net_device *dev)
{
struct vector_private *vp = netdev_priv(dev);
unsigned long flags;
netif_stop_queue(dev);
del_timer(&vp->tl);
if (vp->fds == NULL)
return 0;
/* Disable and free all IRQs */
if (vp->rx_irq > 0) {
um_free_irq(vp->rx_irq, dev);
vp->rx_irq = 0;
}
if (vp->tx_irq > 0) {
um_free_irq(vp->tx_irq, dev);
vp->tx_irq = 0;
}
napi_disable(&vp->napi);
netif_napi_del(&vp->napi);
if (vp->fds->rx_fd > 0) {
if (vp->bpf)
uml_vector_detach_bpf(vp->fds->rx_fd, vp->bpf);
os_close_file(vp->fds->rx_fd);
vp->fds->rx_fd = -1;
}
if (vp->fds->tx_fd > 0) {
os_close_file(vp->fds->tx_fd);
vp->fds->tx_fd = -1;
}
if (vp->bpf != NULL)
kfree(vp->bpf->filter);
kfree(vp->bpf);
vp->bpf = NULL;
kfree(vp->fds->remote_addr);
kfree(vp->transport_data);
kfree(vp->header_rxbuffer);
kfree(vp->header_txbuffer);
if (vp->rx_queue != NULL)
destroy_queue(vp->rx_queue);
if (vp->tx_queue != NULL)
destroy_queue(vp->tx_queue);
kfree(vp->fds);
vp->fds = NULL;
spin_lock_irqsave(&vp->lock, flags);
vp->opened = false;
vp->in_error = false;
spin_unlock_irqrestore(&vp->lock, flags);
return 0;
}
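/* NAPI poll: flush pending TX first, then receive up to the budget via
 * either the mmsg vector path or the legacy single-packet path.
 */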
static int vector_poll(struct napi_struct *napi, int budget)
{
struct vector_private *vp = container_of(napi, struct vector_private, napi);
int work_done = 0;
int err;
bool tx_enqueued = false;
if ((vp->options & VECTOR_TX) != 0)
tx_enqueued = (vector_send(vp->tx_queue) > 0);
if ((vp->options & VECTOR_RX) > 0)
err = vector_mmsg_rx(vp, budget);
else {
err = vector_legacy_rx(vp);
if (err > 0)
err = 1;
}
if (err > 0)
work_done += err;
if (tx_enqueued || err > 0)
napi_schedule(napi);
if (work_done < budget)
napi_complete_done(napi, work_done);
return work_done;
}
static void vector_reset_tx(struct work_struct *work)
{
struct vector_private *vp =
container_of(work, struct vector_private, reset_tx);
netdev_reset_queue(vp->dev);
netif_start_queue(vp->dev);
netif_wake_queue(vp->dev);
}
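/* ndo_open: create the host file descriptors, the RX/TX queues (or the
 * fallback header buffers), request the read/write IRQs and optionally
 * attach a BPF filter before starting the queue.
 */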
static int vector_net_open(struct net_device *dev)
{
struct vector_private *vp = netdev_priv(dev);
unsigned long flags;
int err = -EINVAL;
struct vector_device *vdevice;
spin_lock_irqsave(&vp->lock, flags);
if (vp->opened) {
spin_unlock_irqrestore(&vp->lock, flags);
return -ENXIO;
}
vp->opened = true;
spin_unlock_irqrestore(&vp->lock, flags);
vp->bpf = uml_vector_user_bpf(get_bpf_file(vp->parsed));
vp->fds = uml_vector_user_open(vp->unit, vp->parsed);
if (vp->fds == NULL)
goto out_close;
if (build_transport_data(vp) < 0)
goto out_close;
if ((vp->options & VECTOR_RX) > 0) {
vp->rx_queue = create_queue(
vp,
get_depth(vp->parsed),
vp->rx_header_size,
MAX_IOV_SIZE
);
vp->rx_queue->queue_depth = get_depth(vp->parsed);
} else {
vp->header_rxbuffer = kmalloc(
vp->rx_header_size,
GFP_KERNEL
);
if (vp->header_rxbuffer == NULL)
goto out_close;
}
if ((vp->options & VECTOR_TX) > 0) {
vp->tx_queue = create_queue(
vp,
get_depth(vp->parsed),
vp->header_size,
MAX_IOV_SIZE
);
} else {
vp->header_txbuffer = kmalloc(vp->header_size, GFP_KERNEL);
if (vp->header_txbuffer == NULL)
goto out_close;
}
netif_napi_add_weight(vp->dev, &vp->napi, vector_poll,
get_depth(vp->parsed));
napi_enable(&vp->napi);
/* READ IRQ */
err = um_request_irq(
irq_rr + VECTOR_BASE_IRQ, vp->fds->rx_fd,
IRQ_READ, vector_rx_interrupt,
IRQF_SHARED, dev->name, dev);
if (err < 0) {
netdev_err(dev, "vector_open: failed to get rx irq(%d)\n", err);
err = -ENETUNREACH;
goto out_close;
}
vp->rx_irq = irq_rr + VECTOR_BASE_IRQ;
dev->irq = irq_rr + VECTOR_BASE_IRQ;
irq_rr = (irq_rr + 1) % VECTOR_IRQ_SPACE;
/* WRITE IRQ - we need it only if we have vector TX */
if ((vp->options & VECTOR_TX) > 0) {
err = um_request_irq(
irq_rr + VECTOR_BASE_IRQ, vp->fds->tx_fd,
IRQ_WRITE, vector_tx_interrupt,
IRQF_SHARED, dev->name, dev);
if (err < 0) {
netdev_err(dev,
"vector_open: failed to get tx irq(%d)\n", err);
err = -ENETUNREACH;
goto out_close;
}
vp->tx_irq = irq_rr + VECTOR_BASE_IRQ;
irq_rr = (irq_rr + 1) % VECTOR_IRQ_SPACE;
}
if ((vp->options & VECTOR_QDISC_BYPASS) != 0) {
if (!uml_raw_enable_qdisc_bypass(vp->fds->rx_fd))
vp->options |= VECTOR_BPF;
}
if (((vp->options & VECTOR_BPF) != 0) && (vp->bpf == NULL))
vp->bpf = uml_vector_default_bpf(dev->dev_addr);
if (vp->bpf != NULL)
uml_vector_attach_bpf(vp->fds->rx_fd, vp->bpf);
netif_start_queue(dev);
vector_reset_stats(vp);
/* clear buffer - it can happen that the host side of the interface
* is full when we get here. In this case, new data is never queued,
* SIGIOs never arrive, and the net never works.
*/
napi_schedule(&vp->napi);
vdevice = find_device(vp->unit);
vdevice->opened = 1;
if ((vp->options & VECTOR_TX) != 0)
add_timer(&vp->tl);
return 0;
out_close:
vector_net_close(dev);
return err;
}
static void vector_net_set_multicast_list(struct net_device *dev)
{
/* TODO: - we can do some BPF games here */
return;
}
static void vector_net_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct vector_private *vp = netdev_priv(dev);
vp->estats.tx_timeout_count++;
netif_trans_update(dev);
schedule_work(&vp->reset_tx);
}
static netdev_features_t vector_fix_features(struct net_device *dev,
netdev_features_t features)
{
features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
return features;
}
static int vector_set_features(struct net_device *dev,
netdev_features_t features)
{
struct vector_private *vp = netdev_priv(dev);
/* Adjust buffer sizes for GSO/GRO. Unfortunately, there is
* no way to negotiate it on raw sockets, so we can change
* only our side.
*/
if (features & NETIF_F_GRO)
/* All new frame buffers will be GRO-sized */
vp->req_size = 65536;
else
/* All new frame buffers will be normal sized */
vp->req_size = vp->max_packet + vp->headroom + SAFETY_MARGIN;
return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void vector_net_poll_controller(struct net_device *dev)
{
disable_irq(dev->irq);
vector_rx_interrupt(dev->irq, dev);
enable_irq(dev->irq);
}
#endif
static void vector_net_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
}
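/* ethtool flash_device hook - load a classic BPF filter blob through the
 * firmware loader and attach it to the receive socket.
 */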
static int vector_net_load_bpf_flash(struct net_device *dev,
struct ethtool_flash *efl)
{
struct vector_private *vp = netdev_priv(dev);
struct vector_device *vdevice;
const struct firmware *fw;
int result = 0;
if (!(vp->options & VECTOR_BPF_FLASH)) {
netdev_err(dev, "loading firmware not permitted: %s\n", efl->data);
return -1;
}
spin_lock(&vp->lock);
if (vp->bpf != NULL) {
if (vp->opened)
uml_vector_detach_bpf(vp->fds->rx_fd, vp->bpf);
kfree(vp->bpf->filter);
vp->bpf->filter = NULL;
} else {
vp->bpf = kmalloc(sizeof(struct sock_fprog), GFP_ATOMIC);
if (vp->bpf == NULL) {
netdev_err(dev, "failed to allocate memory for firmware\n");
goto flash_fail;
}
}
vdevice = find_device(vp->unit);
if (request_firmware(&fw, efl->data, &vdevice->pdev.dev))
goto flash_fail;
vp->bpf->filter = kmemdup(fw->data, fw->size, GFP_ATOMIC);
if (!vp->bpf->filter)
goto free_buffer;
vp->bpf->len = fw->size / sizeof(struct sock_filter);
release_firmware(fw);
if (vp->opened)
result = uml_vector_attach_bpf(vp->fds->rx_fd, vp->bpf);
spin_unlock(&vp->lock);
return result;
free_buffer:
release_firmware(fw);
flash_fail:
spin_unlock(&vp->lock);
if (vp->bpf != NULL)
kfree(vp->bpf->filter);
kfree(vp->bpf);
vp->bpf = NULL;
return -1;
}
static void vector_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kernel_ring,
struct netlink_ext_ack *extack)
{
struct vector_private *vp = netdev_priv(netdev);
ring->rx_max_pending = vp->rx_queue->max_depth;
ring->tx_max_pending = vp->tx_queue->max_depth;
ring->rx_pending = vp->rx_queue->max_depth;
ring->tx_pending = vp->tx_queue->max_depth;
}
static void vector_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
switch (stringset) {
case ETH_SS_TEST:
*buf = '\0';
break;
case ETH_SS_STATS:
memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
break;
default:
WARN_ON(1);
break;
}
}
static int vector_get_sset_count(struct net_device *dev, int sset)
{
switch (sset) {
case ETH_SS_TEST:
return 0;
case ETH_SS_STATS:
return VECTOR_NUM_STATS;
default:
return -EOPNOTSUPP;
}
}
static void vector_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *estats,
u64 *tmp_stats)
{
struct vector_private *vp = netdev_priv(dev);
memcpy(tmp_stats, &vp->estats, sizeof(struct vector_estats));
}
static int vector_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
struct vector_private *vp = netdev_priv(netdev);
ec->tx_coalesce_usecs = (vp->coalesce * 1000000) / HZ;
return 0;
}
static int vector_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
struct vector_private *vp = netdev_priv(netdev);
vp->coalesce = (ec->tx_coalesce_usecs * HZ) / 1000000;
if (vp->coalesce == 0)
vp->coalesce = 1;
return 0;
}
static const struct ethtool_ops vector_net_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_TX_USECS,
.get_drvinfo = vector_net_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ts_info = ethtool_op_get_ts_info,
.get_ringparam = vector_get_ringparam,
.get_strings = vector_get_strings,
.get_sset_count = vector_get_sset_count,
.get_ethtool_stats = vector_get_ethtool_stats,
.get_coalesce = vector_get_coalesce,
.set_coalesce = vector_set_coalesce,
.flash_device = vector_net_load_bpf_flash,
};
static const struct net_device_ops vector_netdev_ops = {
.ndo_open = vector_net_open,
.ndo_stop = vector_net_close,
.ndo_start_xmit = vector_net_start_xmit,
.ndo_set_rx_mode = vector_net_set_multicast_list,
.ndo_tx_timeout = vector_net_tx_timeout,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_fix_features = vector_fix_features,
.ndo_set_features = vector_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = vector_net_poll_controller,
#endif
};
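/* TX coalescing timer - kick NAPI so queued packets get flushed. */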
static void vector_timer_expire(struct timer_list *t)
{
struct vector_private *vp = from_timer(vp, t, tl);
vp->estats.tx_kicks++;
napi_schedule(&vp->napi);
}
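/* Allocate and register one vector network device for unit n using the
 * parsed command line or mconsole arguments.
 */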
static void vector_eth_configure(
int n,
struct arglist *def
)
{
struct vector_device *device;
struct net_device *dev;
struct vector_private *vp;
int err;
device = kzalloc(sizeof(*device), GFP_KERNEL);
if (device == NULL) {
printk(KERN_ERR "eth_configure failed to allocate struct "
"vector_device\n");
return;
}
dev = alloc_etherdev(sizeof(struct vector_private));
if (dev == NULL) {
printk(KERN_ERR "eth_configure: failed to allocate struct "
"net_device for vec%d\n", n);
goto out_free_device;
}
dev->mtu = get_mtu(def);
INIT_LIST_HEAD(&device->list);
device->unit = n;
/* If this name ends up conflicting with an existing registered
* netdevice, that is OK, register_netdev{,ice}() will notice this
* and fail.
*/
snprintf(dev->name, sizeof(dev->name), "vec%d", n);
uml_net_setup_etheraddr(dev, uml_vector_fetch_arg(def, "mac"));
vp = netdev_priv(dev);
/* sysfs register */
if (!driver_registered) {
platform_driver_register(&uml_net_driver);
driver_registered = 1;
}
device->pdev.id = n;
device->pdev.name = DRIVER_NAME;
device->pdev.dev.release = vector_device_release;
dev_set_drvdata(&device->pdev.dev, device);
if (platform_device_register(&device->pdev))
goto out_free_netdev;
SET_NETDEV_DEV(dev, &device->pdev.dev);
device->dev = dev;
*vp = ((struct vector_private)
{
.list = LIST_HEAD_INIT(vp->list),
.dev = dev,
.unit = n,
.options = get_transport_options(def),
.rx_irq = 0,
.tx_irq = 0,
.parsed = def,
.max_packet = get_mtu(def) + ETH_HEADER_OTHER,
/* TODO - we need to calculate headroom so that ip header
* is 16 byte aligned all the time
*/
.headroom = get_headroom(def),
.form_header = NULL,
.verify_header = NULL,
.header_rxbuffer = NULL,
.header_txbuffer = NULL,
.header_size = 0,
.rx_header_size = 0,
.rexmit_scheduled = false,
.opened = false,
.transport_data = NULL,
.in_write_poll = false,
.coalesce = 2,
.req_size = get_req_size(def),
.in_error = false,
.bpf = NULL
});
dev->features = dev->hw_features = (NETIF_F_SG | NETIF_F_FRAGLIST);
INIT_WORK(&vp->reset_tx, vector_reset_tx);
timer_setup(&vp->tl, vector_timer_expire, 0);
spin_lock_init(&vp->lock);
/* FIXME */
dev->netdev_ops = &vector_netdev_ops;
dev->ethtool_ops = &vector_net_ethtool_ops;
dev->watchdog_timeo = (HZ >> 1);
/* primary IRQ - fixme */
dev->irq = 0; /* we will adjust this once opened */
rtnl_lock();
err = register_netdevice(dev);
rtnl_unlock();
if (err)
goto out_undo_user_init;
spin_lock(&vector_devices_lock);
list_add(&device->list, &vector_devices);
spin_unlock(&vector_devices_lock);
return;
out_undo_user_init:
return;
out_free_netdev:
free_netdev(dev);
out_free_device:
kfree(device);
}
/*
* Invoked late in the init
*/
static int __init vector_init(void)
{
struct list_head *ele;
struct vector_cmd_line_arg *def;
struct arglist *parsed;
list_for_each(ele, &vec_cmd_line) {
def = list_entry(ele, struct vector_cmd_line_arg, list);
parsed = uml_parse_vector_ifspec(def->arguments);
if (parsed != NULL)
vector_eth_configure(def->unit, parsed);
}
return 0;
}
/* Invoked at initial argument parsing, only stores
* arguments until a proper vector_init is called
* later
*/
static int __init vector_setup(char *str)
{
char *error;
int n, err;
struct vector_cmd_line_arg *new;
err = vector_parse(str, &n, &str, &error);
if (err) {
printk(KERN_ERR "vector_setup - Couldn't parse '%s' : %s\n",
str, error);
return 1;
}
new = memblock_alloc(sizeof(*new), SMP_CACHE_BYTES);
if (!new)
panic("%s: Failed to allocate %zu bytes\n", __func__,
sizeof(*new));
INIT_LIST_HEAD(&new->list);
new->unit = n;
new->arguments = str;
list_add_tail(&new->list, &vec_cmd_line);
return 1;
}
__setup("vec", vector_setup);
__uml_help(vector_setup,
"vec[0-9]+:<option>=<value>,<option>=<value>\n"
" Configure a vector io network device.\n\n"
);
late_initcall(vector_init);
static struct mc_device vector_mc = {
.list = LIST_HEAD_INIT(vector_mc.list),
.name = "vec",
.config = vector_config,
.get_config = NULL,
.id = vector_id,
.remove = vector_remove,
};
#ifdef CONFIG_INET
static int vector_inetaddr_event(
struct notifier_block *this,
unsigned long event,
void *ptr)
{
return NOTIFY_DONE;
}
static struct notifier_block vector_inetaddr_notifier = {
.notifier_call = vector_inetaddr_event,
};
static void inet_register(void)
{
register_inetaddr_notifier(&vector_inetaddr_notifier);
}
#else
static inline void inet_register(void)
{
}
#endif
static int vector_net_init(void)
{
mconsole_register_dev(&vector_mc);
inet_register();
return 0;
}
__initcall(vector_net_init);
| linux-master | arch/um/drivers/vector_kern.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* arch/um/drivers/mmapper_kern.c
*
* BRIEF MODULE DESCRIPTION
*
* Copyright (C) 2000 RidgeRun, Inc.
* Author: RidgeRun, Inc.
* Greg Lonnon [email protected] or [email protected]
*
*/
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <mem_user.h>
/* These are set in mmapper_init, which is called at boot time */
static unsigned long mmapper_size;
static unsigned long p_buf;
static char *v_buf;
static ssize_t mmapper_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
return simple_read_from_buffer(buf, count, ppos, v_buf, mmapper_size);
}
static ssize_t mmapper_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
if (*ppos > mmapper_size)
return -EINVAL;
return simple_write_to_buffer(v_buf, mmapper_size, ppos, buf, count);
}
static long mmapper_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
return -ENOIOCTLCMD;
}
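/* Map the host iomem region backing this device; only offset 0 mappings
 * no larger than mmapper_size are allowed.
 */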
static int mmapper_mmap(struct file *file, struct vm_area_struct *vma)
{
int ret = -EINVAL;
int size;
if (vma->vm_pgoff != 0)
goto out;
size = vma->vm_end - vma->vm_start;
if (size > mmapper_size)
return -EFAULT;
/*
* XXX A comment above remap_pfn_range says it should only be
* called when the mm semaphore is held
*/
if (remap_pfn_range(vma, vma->vm_start, p_buf >> PAGE_SHIFT, size,
vma->vm_page_prot))
goto out;
ret = 0;
out:
return ret;
}
static int mmapper_open(struct inode *inode, struct file *file)
{
return 0;
}
static int mmapper_release(struct inode *inode, struct file *file)
{
return 0;
}
static const struct file_operations mmapper_fops = {
.owner = THIS_MODULE,
.read = mmapper_read,
.write = mmapper_write,
.unlocked_ioctl = mmapper_ioctl,
.mmap = mmapper_mmap,
.open = mmapper_open,
.release = mmapper_release,
.llseek = default_llseek,
};
/*
* No locking needed - only used (and modified) by below initcall and exitcall.
*/
static struct miscdevice mmapper_dev = {
.minor = MISC_DYNAMIC_MINOR,
.name = "mmapper",
.fops = &mmapper_fops
};
static int __init mmapper_init(void)
{
int err;
printk(KERN_INFO "Mapper v0.1\n");
v_buf = (char *) find_iomem("mmapper", &mmapper_size);
if (mmapper_size == 0) {
printk(KERN_ERR "mmapper_init - find_iomem failed\n");
return -ENODEV;
}
p_buf = __pa(v_buf);
err = misc_register(&mmapper_dev);
if (err) {
printk(KERN_ERR "mmapper - misc_register failed, err = %d\n",
err);
return err;
}
return 0;
}
static void __exit mmapper_exit(void)
{
misc_deregister(&mmapper_dev);
}
module_init(mmapper_init);
module_exit(mmapper_exit);
MODULE_AUTHOR("Greg Lonnon <[email protected]>");
MODULE_DESCRIPTION("DSPLinux simulator mmapper driver");
MODULE_LICENSE("GPL");
| linux-master | arch/um/drivers/mmapper_kern.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2007 Luca Bigliardi ([email protected]).
*
* Transport usage:
* ethN=vde,<vde_switch>,<mac addr>,<port>,<group>,<mode>,<description>
*
*/
#include <linux/init.h>
#include <linux/netdevice.h>
#include <net_kern.h>
#include <net_user.h>
#include "vde.h"
static void vde_init(struct net_device *dev, void *data)
{
struct vde_init *init = data;
struct uml_net_private *pri;
struct vde_data *vpri;
pri = netdev_priv(dev);
vpri = (struct vde_data *) pri->user;
vpri->vde_switch = init->vde_switch;
vpri->descr = init->descr ? init->descr : "UML vde_transport";
vpri->args = NULL;
vpri->conn = NULL;
vpri->dev = dev;
printk("vde backend - %s, ", vpri->vde_switch ?
vpri->vde_switch : "(default socket)");
vde_init_libstuff(vpri, init);
printk("\n");
}
static int vde_read(int fd, struct sk_buff *skb, struct uml_net_private *lp)
{
struct vde_data *pri = (struct vde_data *) &lp->user;
if (pri->conn != NULL)
return vde_user_read(pri->conn, skb_mac_header(skb),
skb->dev->mtu + ETH_HEADER_OTHER);
printk(KERN_ERR "vde_read - we have no VDECONN to read from");
return -EBADF;
}
static int vde_write(int fd, struct sk_buff *skb, struct uml_net_private *lp)
{
struct vde_data *pri = (struct vde_data *) &lp->user;
if (pri->conn != NULL)
return vde_user_write((void *)pri->conn, skb->data,
skb->len);
printk(KERN_ERR "vde_write - we have no VDECONN to write to");
return -EBADF;
}
static const struct net_kern_info vde_kern_info = {
.init = vde_init,
.protocol = eth_protocol,
.read = vde_read,
.write = vde_write,
};
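/* Parse the "vde,<switch>,<mac>,<port>,<group>,<mode>,<descr>" transport
 * specification into a struct vde_init.
 */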
static int vde_setup(char *str, char **mac_out, void *data)
{
struct vde_init *init = data;
char *remain, *port_str = NULL, *mode_str = NULL, *last;
*init = ((struct vde_init)
{ .vde_switch = NULL,
.descr = NULL,
.port = 0,
.group = NULL,
.mode = 0 });
remain = split_if_spec(str, &init->vde_switch, mac_out, &port_str,
&init->group, &mode_str, &init->descr, NULL);
if (remain != NULL)
printk(KERN_WARNING "vde_setup - Ignoring extra data :"
"'%s'\n", remain);
if (port_str != NULL) {
init->port = simple_strtoul(port_str, &last, 10);
if ((*last != '\0') || (last == port_str)) {
printk(KERN_ERR "vde_setup - Bad port : '%s'\n",
port_str);
return 0;
}
}
if (mode_str != NULL) {
init->mode = simple_strtoul(mode_str, &last, 8);
if ((*last != '\0') || (last == mode_str)) {
printk(KERN_ERR "vde_setup - Bad mode : '%s'\n",
mode_str);
return 0;
}
}
printk(KERN_INFO "Configured vde device: %s\n", init->vde_switch ?
init->vde_switch : "(default socket)");
return 1;
}
static struct transport vde_transport = {
.list = LIST_HEAD_INIT(vde_transport.list),
.name = "vde",
.setup = vde_setup,
.user = &vde_user_info,
.kern = &vde_kern_info,
.private_size = sizeof(struct vde_data),
.setup_size = sizeof(struct vde_init),
};
static int register_vde(void)
{
register_transport(&vde_transport);
return 0;
}
late_initcall(register_vde);
| linux-master | arch/um/drivers/vde_kern.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <string.h>
#include <termios.h>
#include <sys/stat.h>
#include "chan_user.h"
#include <os.h>
#include <um_malloc.h>
struct pty_chan {
void (*announce)(char *dev_name, int dev);
int dev;
int raw;
struct termios tt;
char dev_name[sizeof("/dev/pts/0123456\0")];
};
static void *pty_chan_init(char *str, int device, const struct chan_opts *opts)
{
struct pty_chan *data;
data = uml_kmalloc(sizeof(*data), UM_GFP_KERNEL);
if (data == NULL)
return NULL;
*data = ((struct pty_chan) { .announce = opts->announce,
.dev = device,
.raw = opts->raw });
return data;
}
static int pts_open(int input, int output, int primary, void *d,
char **dev_out)
{
struct pty_chan *data = d;
char *dev;
int fd, err;
fd = get_pty();
if (fd < 0) {
err = -errno;
printk(UM_KERN_ERR "open_pts : Failed to open pts\n");
return err;
}
if (data->raw) {
CATCH_EINTR(err = tcgetattr(fd, &data->tt));
if (err)
goto out_close;
err = raw(fd);
if (err)
goto out_close;
}
dev = ptsname(fd);
sprintf(data->dev_name, "%s", dev);
*dev_out = data->dev_name;
if (data->announce)
(*data->announce)(dev, data->dev);
return fd;
out_close:
close(fd);
return err;
}
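/* Scan the BSD-style /dev/pty[pqrs][0-9a-f] namespace for a free master
 * whose slave side is accessible and return an open fd for it.
 */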
static int getmaster(char *line)
{
struct stat buf;
char *pty, *bank, *cp;
int master, err;
pty = &line[strlen("/dev/ptyp")];
for (bank = "pqrs"; *bank; bank++) {
line[strlen("/dev/pty")] = *bank;
*pty = '0';
/* Did we hit the end ? */
if ((stat(line, &buf) < 0) && (errno == ENOENT))
break;
for (cp = "0123456789abcdef"; *cp; cp++) {
*pty = *cp;
master = open(line, O_RDWR);
if (master >= 0) {
char *tp = &line[strlen("/dev/")];
/* verify slave side is usable */
*tp = 't';
err = access(line, R_OK | W_OK);
*tp = 'p';
if (!err)
return master;
close(master);
}
}
}
printk(UM_KERN_ERR "getmaster - no usable host pty devices\n");
return -ENOENT;
}
static int pty_open(int input, int output, int primary, void *d,
char **dev_out)
{
struct pty_chan *data = d;
int fd, err;
char dev[sizeof("/dev/ptyxx\0")] = "/dev/ptyxx";
fd = getmaster(dev);
if (fd < 0)
return fd;
if (data->raw) {
err = raw(fd);
if (err) {
close(fd);
return err;
}
}
if (data->announce)
(*data->announce)(dev, data->dev);
sprintf(data->dev_name, "%s", dev);
*dev_out = data->dev_name;
return fd;
}
const struct chan_ops pty_ops = {
.type = "pty",
.init = pty_chan_init,
.open = pty_open,
.close = generic_close,
.read = generic_read,
.write = generic_write,
.console_write = generic_console_write,
.window_size = generic_window_size,
.free = generic_free,
.winch = 0,
};
const struct chan_ops pts_ops = {
.type = "pts",
.init = pty_chan_init,
.open = pts_open,
.close = generic_close,
.read = generic_read,
.write = generic_write,
.console_write = generic_console_write,
.window_size = generic_window_size,
.free = generic_free,
.winch = 0,
};
| linux-master | arch/um/drivers/pty.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <sys/wait.h>
#include <net_user.h>
#include <os.h>
#include "slirp.h"
static int slirp_user_init(void *data, void *dev)
{
struct slirp_data *pri = data;
pri->dev = dev;
return 0;
}
struct slirp_pre_exec_data {
int stdin_fd;
int stdout_fd;
};
static void slirp_pre_exec(void *arg)
{
struct slirp_pre_exec_data *data = arg;
if (data->stdin_fd != -1)
dup2(data->stdin_fd, 0);
if (data->stdout_fd != -1)
dup2(data->stdout_fd, 1);
}
static int slirp_tramp(char **argv, int fd)
{
struct slirp_pre_exec_data pe_data;
int pid;
pe_data.stdin_fd = fd;
pe_data.stdout_fd = fd;
pid = run_helper(slirp_pre_exec, &pe_data, argv);
return pid;
}
static int slirp_open(void *data)
{
struct slirp_data *pri = data;
int fds[2], pid, err;
err = os_pipe(fds, 1, 1);
if (err)
return err;
err = slirp_tramp(pri->argw.argv, fds[1]);
if (err < 0) {
printk(UM_KERN_ERR "slirp_tramp failed - errno = %d\n", -err);
goto out;
}
pid = err;
pri->slave = fds[1];
pri->slip.pos = 0;
pri->slip.esc = 0;
pri->pid = err;
return fds[0];
out:
close(fds[0]);
close(fds[1]);
return err;
}
static void slirp_close(int fd, void *data)
{
struct slirp_data *pri = data;
int err;
close(fd);
close(pri->slave);
pri->slave = -1;
if (pri->pid<1) {
printk(UM_KERN_ERR "slirp_close: no child process to shut "
"down\n");
return;
}
#if 0
if (kill(pri->pid, SIGHUP)<0) {
printk(UM_KERN_ERR "slirp_close: sending hangup to %d failed "
"(%d)\n", pri->pid, errno);
}
#endif
err = helper_wait(pri->pid);
if (err < 0)
return;
pri->pid = -1;
}
int slirp_user_read(int fd, void *buf, int len, struct slirp_data *pri)
{
return slip_proto_read(fd, buf, len, &pri->slip);
}
int slirp_user_write(int fd, void *buf, int len, struct slirp_data *pri)
{
return slip_proto_write(fd, buf, len, &pri->slip);
}
const struct net_user_info slirp_user_info = {
.init = slirp_user_init,
.open = slirp_open,
.close = slirp_close,
.remove = NULL,
.add_address = NULL,
.delete_address = NULL,
.mtu = BUF_SIZE,
.max_packet = BUF_SIZE,
};
| linux-master | arch/um/drivers/slirp_user.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2001 - 2007 Jeff Dike (jdike@{linux.intel,addtoit}.com)
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <termios.h>
#include "chan_user.h"
#include <os.h>
#include <um_malloc.h>
struct fd_chan {
int fd;
int raw;
struct termios tt;
char str[sizeof("1234567890\0")];
};
static void *fd_init(char *str, int device, const struct chan_opts *opts)
{
struct fd_chan *data;
char *end;
int n;
if (*str != ':') {
printk(UM_KERN_ERR "fd_init : channel type 'fd' must specify a "
"file descriptor\n");
return NULL;
}
str++;
n = strtoul(str, &end, 0);
if ((*end != '\0') || (end == str)) {
printk(UM_KERN_ERR "fd_init : couldn't parse file descriptor "
"'%s'\n", str);
return NULL;
}
data = uml_kmalloc(sizeof(*data), UM_GFP_KERNEL);
if (data == NULL)
return NULL;
*data = ((struct fd_chan) { .fd = n,
.raw = opts->raw });
return data;
}
static int fd_open(int input, int output, int primary, void *d, char **dev_out)
{
struct fd_chan *data = d;
int err;
if (data->raw && isatty(data->fd)) {
CATCH_EINTR(err = tcgetattr(data->fd, &data->tt));
if (err)
return err;
err = raw(data->fd);
if (err)
return err;
}
sprintf(data->str, "%d", data->fd);
*dev_out = data->str;
return data->fd;
}
static void fd_close(int fd, void *d)
{
struct fd_chan *data = d;
int err;
if (!data->raw || !isatty(fd))
return;
CATCH_EINTR(err = tcsetattr(fd, TCSAFLUSH, &data->tt));
if (err)
printk(UM_KERN_ERR "Failed to restore terminal state - "
"errno = %d\n", -err);
data->raw = 0;
}
const struct chan_ops fd_ops = {
.type = "fd",
.init = fd_init,
.open = fd_open,
.close = fd_close,
.read = generic_read,
.write = generic_write,
.console_write = generic_console_write,
.window_size = generic_window_size,
.free = generic_free,
.winch = 1,
};
| linux-master | arch/um/drivers/fd.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2002 Steve Schmidtke
*/
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sound.h>
#include <linux/soundcard.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <init.h>
#include <os.h>
struct hostaudio_state {
int fd;
};
struct hostmixer_state {
int fd;
};
#define HOSTAUDIO_DEV_DSP "/dev/sound/dsp"
#define HOSTAUDIO_DEV_MIXER "/dev/sound/mixer"
/*
* Changed either at boot time or module load time. At boot, this is
* single-threaded; at module load, multiple modules would each have
* their own copy of these variables.
*/
static char *dsp = HOSTAUDIO_DEV_DSP;
static char *mixer = HOSTAUDIO_DEV_MIXER;
#define DSP_HELP \
" This is used to specify the host dsp device to the hostaudio driver.\n" \
" The default is \"" HOSTAUDIO_DEV_DSP "\".\n\n"
#define MIXER_HELP \
" This is used to specify the host mixer device to the hostaudio driver.\n"\
" The default is \"" HOSTAUDIO_DEV_MIXER "\".\n\n"
module_param(dsp, charp, 0644);
MODULE_PARM_DESC(dsp, DSP_HELP);
module_param(mixer, charp, 0644);
MODULE_PARM_DESC(mixer, MIXER_HELP);
#ifndef MODULE
static int set_dsp(char *name, int *add)
{
dsp = name;
return 0;
}
__uml_setup("dsp=", set_dsp, "dsp=<dsp device>\n" DSP_HELP);
static int set_mixer(char *name, int *add)
{
mixer = name;
return 0;
}
__uml_setup("mixer=", set_mixer, "mixer=<mixer device>\n" MIXER_HELP);
#endif
static DEFINE_MUTEX(hostaudio_mutex);
/* /dev/dsp file operations */
static ssize_t hostaudio_read(struct file *file, char __user *buffer,
size_t count, loff_t *ppos)
{
struct hostaudio_state *state = file->private_data;
void *kbuf;
int err;
#ifdef DEBUG
printk(KERN_DEBUG "hostaudio: read called, count = %d\n", count);
#endif
kbuf = kmalloc(count, GFP_KERNEL);
if (kbuf == NULL)
return -ENOMEM;
err = os_read_file(state->fd, kbuf, count);
if (err < 0)
goto out;
if (copy_to_user(buffer, kbuf, err))
err = -EFAULT;
out:
kfree(kbuf);
return err;
}
static ssize_t hostaudio_write(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos)
{
struct hostaudio_state *state = file->private_data;
void *kbuf;
int err;
#ifdef DEBUG
printk(KERN_DEBUG "hostaudio: write called, count = %d\n", count);
#endif
kbuf = memdup_user(buffer, count);
if (IS_ERR(kbuf))
return PTR_ERR(kbuf);
err = os_write_file(state->fd, kbuf, count);
if (err < 0)
goto out;
*ppos += err;
out:
kfree(kbuf);
return err;
}
static __poll_t hostaudio_poll(struct file *file,
struct poll_table_struct *wait)
{
#ifdef DEBUG
printk(KERN_DEBUG "hostaudio: poll called (unimplemented)\n");
#endif
return 0;
}
static long hostaudio_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
struct hostaudio_state *state = file->private_data;
unsigned long data = 0;
int err;
#ifdef DEBUG
printk(KERN_DEBUG "hostaudio: ioctl called, cmd = %u\n", cmd);
#endif
switch(cmd){
case SNDCTL_DSP_SPEED:
case SNDCTL_DSP_STEREO:
case SNDCTL_DSP_GETBLKSIZE:
case SNDCTL_DSP_CHANNELS:
case SNDCTL_DSP_SUBDIVIDE:
case SNDCTL_DSP_SETFRAGMENT:
if (get_user(data, (int __user *) arg))
return -EFAULT;
break;
default:
break;
}
err = os_ioctl_generic(state->fd, cmd, (unsigned long) &data);
switch(cmd){
case SNDCTL_DSP_SPEED:
case SNDCTL_DSP_STEREO:
case SNDCTL_DSP_GETBLKSIZE:
case SNDCTL_DSP_CHANNELS:
case SNDCTL_DSP_SUBDIVIDE:
case SNDCTL_DSP_SETFRAGMENT:
if (put_user(data, (int __user *) arg))
return -EFAULT;
break;
default:
break;
}
return err;
}
static int hostaudio_open(struct inode *inode, struct file *file)
{
struct hostaudio_state *state;
int r = 0, w = 0;
int ret;
#ifdef DEBUG
kernel_param_lock(THIS_MODULE);
printk(KERN_DEBUG "hostaudio: open called (host: %s)\n", dsp);
kernel_param_unlock(THIS_MODULE);
#endif
state = kmalloc(sizeof(struct hostaudio_state), GFP_KERNEL);
if (state == NULL)
return -ENOMEM;
if (file->f_mode & FMODE_READ)
r = 1;
if (file->f_mode & FMODE_WRITE)
w = 1;
kernel_param_lock(THIS_MODULE);
mutex_lock(&hostaudio_mutex);
ret = os_open_file(dsp, of_set_rw(OPENFLAGS(), r, w), 0);
mutex_unlock(&hostaudio_mutex);
kernel_param_unlock(THIS_MODULE);
if (ret < 0) {
kfree(state);
return ret;
}
state->fd = ret;
file->private_data = state;
return 0;
}
static int hostaudio_release(struct inode *inode, struct file *file)
{
struct hostaudio_state *state = file->private_data;
#ifdef DEBUG
printk(KERN_DEBUG "hostaudio: release called\n");
#endif
os_close_file(state->fd);
kfree(state);
return 0;
}
/* /dev/mixer file operations */
static long hostmixer_ioctl_mixdev(struct file *file,
unsigned int cmd, unsigned long arg)
{
struct hostmixer_state *state = file->private_data;
#ifdef DEBUG
printk(KERN_DEBUG "hostmixer: ioctl called\n");
#endif
return os_ioctl_generic(state->fd, cmd, arg);
}
static int hostmixer_open_mixdev(struct inode *inode, struct file *file)
{
struct hostmixer_state *state;
int r = 0, w = 0;
int ret;
#ifdef DEBUG
printk(KERN_DEBUG "hostmixer: open called (host: %s)\n", mixer);
#endif
state = kmalloc(sizeof(struct hostmixer_state), GFP_KERNEL);
if (state == NULL)
return -ENOMEM;
if (file->f_mode & FMODE_READ)
r = 1;
if (file->f_mode & FMODE_WRITE)
w = 1;
kernel_param_lock(THIS_MODULE);
mutex_lock(&hostaudio_mutex);
ret = os_open_file(mixer, of_set_rw(OPENFLAGS(), r, w), 0);
mutex_unlock(&hostaudio_mutex);
kernel_param_unlock(THIS_MODULE);
if (ret < 0) {
kernel_param_lock(THIS_MODULE);
printk(KERN_ERR "hostaudio_open_mixdev failed to open '%s', "
"err = %d\n", dsp, -ret);
kernel_param_unlock(THIS_MODULE);
kfree(state);
return ret;
}
file->private_data = state;
return 0;
}
static int hostmixer_release(struct inode *inode, struct file *file)
{
struct hostmixer_state *state = file->private_data;
#ifdef DEBUG
printk(KERN_DEBUG "hostmixer: release called\n");
#endif
os_close_file(state->fd);
kfree(state);
return 0;
}
/* kernel module operations */
static const struct file_operations hostaudio_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.read = hostaudio_read,
.write = hostaudio_write,
.poll = hostaudio_poll,
.unlocked_ioctl = hostaudio_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.mmap = NULL,
.open = hostaudio_open,
.release = hostaudio_release,
};
static const struct file_operations hostmixer_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.unlocked_ioctl = hostmixer_ioctl_mixdev,
.open = hostmixer_open_mixdev,
.release = hostmixer_release,
};
static struct {
int dev_audio;
int dev_mixer;
} module_data;
MODULE_AUTHOR("Steve Schmidtke");
MODULE_DESCRIPTION("UML Audio Relay");
MODULE_LICENSE("GPL");
static int __init hostaudio_init_module(void)
{
kernel_param_lock(THIS_MODULE);
printk(KERN_INFO "UML Audio Relay (host dsp = %s, host mixer = %s)\n",
dsp, mixer);
kernel_param_unlock(THIS_MODULE);
module_data.dev_audio = register_sound_dsp(&hostaudio_fops, -1);
if (module_data.dev_audio < 0) {
printk(KERN_ERR "hostaudio: couldn't register DSP device!\n");
return -ENODEV;
}
module_data.dev_mixer = register_sound_mixer(&hostmixer_fops, -1);
if (module_data.dev_mixer < 0) {
printk(KERN_ERR "hostmixer: couldn't register mixer "
"device!\n");
unregister_sound_dsp(module_data.dev_audio);
return -ENODEV;
}
return 0;
}
static void __exit hostaudio_cleanup_module (void)
{
unregister_sound_mixer(module_data.dev_mixer);
unregister_sound_dsp(module_data.dev_audio);
}
module_init(hostaudio_init_module);
module_exit(hostaudio_cleanup_module);
| linux-master | arch/um/drivers/hostaudio_kern.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2000, 2001 Jeff Dike ([email protected])
*/
#include <linux/posix_types.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/types.h>
#include <linux/major.h>
#include <linux/kdev_t.h>
#include <linux/console.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <asm/current.h>
#include <asm/irq.h>
#include "stdio_console.h"
#include "chan.h"
#include <irq_user.h>
#include "mconsole_kern.h"
#include <init.h>
#define MAX_TTYS (16)
static void stdio_announce(char *dev_name, int dev)
{
printk(KERN_INFO "Virtual console %d assigned device '%s'\n", dev,
dev_name);
}
/* Almost const, except that xterm_title may be changed in an initcall */
static struct chan_opts opts = {
.announce = stdio_announce,
.xterm_title = "Virtual Console #%d",
.raw = 1,
};
static int con_config(char *str, char **error_out);
static int con_get_config(char *dev, char *str, int size, char **error_out);
static int con_remove(int n, char **con_remove);
/* Const, except for .mc.list */
static struct line_driver driver = {
.name = "UML console",
.device_name = "tty",
.major = TTY_MAJOR,
.minor_start = 0,
.type = TTY_DRIVER_TYPE_CONSOLE,
.subtype = SYSTEM_TYPE_CONSOLE,
.read_irq_name = "console",
.write_irq_name = "console-write",
.mc = {
.list = LIST_HEAD_INIT(driver.mc.list),
.name = "con",
.config = con_config,
.get_config = con_get_config,
.id = line_id,
.remove = con_remove,
},
};
/* The array is initialized by line_init, at initcall time. The
* elements are locked individually as needed.
*/
static char *vt_conf[MAX_TTYS];
static char *def_conf;
static struct line vts[MAX_TTYS];
static int con_config(char *str, char **error_out)
{
return line_config(vts, ARRAY_SIZE(vts), str, &opts, error_out);
}
static int con_get_config(char *dev, char *str, int size, char **error_out)
{
return line_get_config(dev, vts, ARRAY_SIZE(vts), str, size, error_out);
}
static int con_remove(int n, char **error_out)
{
return line_remove(vts, ARRAY_SIZE(vts), n, error_out);
}
/* Set in an initcall, checked in an exitcall */
static int con_init_done;
static int con_install(struct tty_driver *driver, struct tty_struct *tty)
{
return line_install(driver, tty, &vts[tty->index]);
}
static const struct tty_operations console_ops = {
.open = line_open,
.install = con_install,
.close = line_close,
.write = line_write,
.write_room = line_write_room,
.chars_in_buffer = line_chars_in_buffer,
.flush_buffer = line_flush_buffer,
.flush_chars = line_flush_chars,
.throttle = line_throttle,
.unthrottle = line_unthrottle,
.hangup = line_hangup,
};
static void uml_console_write(struct console *console, const char *string,
unsigned len)
{
struct line *line = &vts[console->index];
unsigned long flags;
spin_lock_irqsave(&line->lock, flags);
console_write_chan(line->chan_out, string, len);
spin_unlock_irqrestore(&line->lock, flags);
}
static struct tty_driver *uml_console_device(struct console *c, int *index)
{
*index = c->index;
return driver.driver;
}
static int uml_console_setup(struct console *co, char *options)
{
struct line *line = &vts[co->index];
return console_open_chan(line, co);
}
/* No locking for register_console call - relies on single-threaded initcalls */
static struct console stdiocons = {
.name = "tty",
.write = uml_console_write,
.device = uml_console_device,
.setup = uml_console_setup,
.flags = CON_PRINTBUFFER|CON_ANYTIME,
.index = -1,
};
static int stdio_init(void)
{
char *new_title;
int err;
int i;
err = register_lines(&driver, &console_ops, vts,
ARRAY_SIZE(vts));
if (err)
return err;
printk(KERN_INFO "Initialized stdio console driver\n");
new_title = add_xterm_umid(opts.xterm_title);
if(new_title != NULL)
opts.xterm_title = new_title;
for (i = 0; i < MAX_TTYS; i++) {
char *error;
char *s = vt_conf[i];
if (!s)
s = def_conf;
if (!s)
s = i ? CONFIG_CON_CHAN : CONFIG_CON_ZERO_CHAN;
if (setup_one_line(vts, i, s, &opts, &error))
printk(KERN_ERR "setup_one_line failed for "
"device %d : %s\n", i, error);
}
con_init_done = 1;
register_console(&stdiocons);
return 0;
}
late_initcall(stdio_init);
static void console_exit(void)
{
if (!con_init_done)
return;
close_lines(vts, ARRAY_SIZE(vts));
}
__uml_exitcall(console_exit);
static int console_chan_setup(char *str)
{
if (!strncmp(str, "sole=", 5)) /* console= option specifies tty */
return 0;
line_setup(vt_conf, MAX_TTYS, &def_conf, str, "console");
return 1;
}
__setup("con", console_chan_setup);
__channel_help(console_chan_setup, "con");
| linux-master | arch/um/drivers/stdio_console.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Virtio vhost-user driver
*
* Copyright(c) 2019 Intel Corporation
*
* This driver allows virtio devices to be used over a vhost-user socket.
*
* Guest devices can be instantiated by kernel module or command line
* parameters. One device will be created for each parameter. Syntax:
*
* virtio_uml.device=<socket>:<virtio_id>[:<platform_id>]
* where:
* <socket> := vhost-user socket path to connect
* <virtio_id> := virtio device id (as in virtio_ids.h)
* <platform_id> := (optional) platform device id
*
* example:
* virtio_uml.device=/var/uml.socket:1
*
* Based on Virtio MMIO driver by Pawel Moll, copyright 2011-2014, ARM Ltd.
*/
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/time-internal.h>
#include <linux/virtio-uml.h>
#include <shared/as-layout.h>
#include <irq_kern.h>
#include <init.h>
#include <os.h>
#include "vhost_user.h"
#define MAX_SUPPORTED_QUEUE_SIZE 256
#define to_virtio_uml_device(_vdev) \
container_of(_vdev, struct virtio_uml_device, vdev)
struct virtio_uml_platform_data {
u32 virtio_device_id;
const char *socket_path;
struct work_struct conn_broken_wk;
struct platform_device *pdev;
};
struct virtio_uml_device {
struct virtio_device vdev;
struct platform_device *pdev;
struct virtio_uml_platform_data *pdata;
spinlock_t sock_lock;
int sock, req_fd, irq;
u64 features;
u64 protocol_features;
u8 status;
u8 registered:1;
u8 suspended:1;
u8 no_vq_suspend:1;
u8 config_changed_irq:1;
uint64_t vq_irq_vq_map;
int recv_rc;
};
struct virtio_uml_vq_info {
int kick_fd, call_fd;
char name[32];
bool suspended;
};
extern unsigned long long physmem_size, highmem;
#define vu_err(vu_dev, ...) dev_err(&(vu_dev)->pdev->dev, ##__VA_ARGS__)
/* Vhost-user protocol */
static int full_sendmsg_fds(int fd, const void *buf, unsigned int len,
const int *fds, unsigned int fds_num)
{
int rc;
do {
rc = os_sendmsg_fds(fd, buf, len, fds, fds_num);
if (rc > 0) {
buf += rc;
len -= rc;
fds = NULL;
fds_num = 0;
}
} while (len && (rc >= 0 || rc == -EINTR));
if (rc < 0)
return rc;
return 0;
}
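/* Read exactly len bytes, retrying on -EINTR (and -EAGAIN unless
 * abortable); returns -ECONNRESET if the peer closed the connection.
 */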
static int full_read(int fd, void *buf, int len, bool abortable)
{
int rc;
if (!len)
return 0;
do {
rc = os_read_file(fd, buf, len);
if (rc > 0) {
buf += rc;
len -= rc;
}
} while (len && (rc > 0 || rc == -EINTR || (!abortable && rc == -EAGAIN)));
if (rc < 0)
return rc;
if (rc == 0)
return -ECONNRESET;
return 0;
}
static int vhost_user_recv_header(int fd, struct vhost_user_msg *msg)
{
return full_read(fd, msg, sizeof(msg->header), true);
}
static int vhost_user_recv(struct virtio_uml_device *vu_dev,
int fd, struct vhost_user_msg *msg,
size_t max_payload_size, bool wait)
{
size_t size;
int rc;
/*
* In virtio time-travel mode, we're handling all the vhost-user
* FDs by polling them whenever appropriate. However, we may get
* into a situation where we're sending out an interrupt message
* to a device (e.g. a net device) and need to handle a simulation
* time message while doing so, e.g. one that tells us to update
* our idea of how long we can run without scheduling.
*
* Thus, we need to not just read() from the given fd, but need
* to also handle messages for the simulation time - this function
* does that for us while waiting for the given fd to be readable.
*/
if (wait)
time_travel_wait_readable(fd);
rc = vhost_user_recv_header(fd, msg);
if (rc)
return rc;
size = msg->header.size;
if (size > max_payload_size)
return -EPROTO;
return full_read(fd, &msg->payload, size, false);
}
static void vhost_user_check_reset(struct virtio_uml_device *vu_dev,
int rc)
{
struct virtio_uml_platform_data *pdata = vu_dev->pdata;
if (rc != -ECONNRESET)
return;
if (!vu_dev->registered)
return;
vu_dev->registered = 0;
schedule_work(&pdata->conn_broken_wk);
}
static int vhost_user_recv_resp(struct virtio_uml_device *vu_dev,
struct vhost_user_msg *msg,
size_t max_payload_size)
{
int rc = vhost_user_recv(vu_dev, vu_dev->sock, msg,
max_payload_size, true);
if (rc) {
vhost_user_check_reset(vu_dev, rc);
return rc;
}
if (msg->header.flags != (VHOST_USER_FLAG_REPLY | VHOST_USER_VERSION))
return -EPROTO;
return 0;
}
static int vhost_user_recv_u64(struct virtio_uml_device *vu_dev,
u64 *value)
{
struct vhost_user_msg msg;
int rc = vhost_user_recv_resp(vu_dev, &msg,
sizeof(msg.payload.integer));
if (rc)
return rc;
if (msg.header.size != sizeof(msg.payload.integer))
return -EPROTO;
*value = msg.payload.integer;
return 0;
}
static int vhost_user_recv_req(struct virtio_uml_device *vu_dev,
struct vhost_user_msg *msg,
size_t max_payload_size)
{
int rc = vhost_user_recv(vu_dev, vu_dev->req_fd, msg,
max_payload_size, false);
if (rc)
return rc;
if ((msg->header.flags & ~VHOST_USER_FLAG_NEED_REPLY) !=
VHOST_USER_VERSION)
return -EPROTO;
return 0;
}
static int vhost_user_send(struct virtio_uml_device *vu_dev,
bool need_response, struct vhost_user_msg *msg,
int *fds, size_t num_fds)
{
size_t size = sizeof(msg->header) + msg->header.size;
unsigned long flags;
bool request_ack;
int rc;
msg->header.flags |= VHOST_USER_VERSION;
/*
* The need_response flag indicates that we already need a response,
* e.g. to read the features. In these cases, don't request an ACK as
* it is meaningless. Also request an ACK only if supported.
*/
request_ack = !need_response;
if (!(vu_dev->protocol_features &
BIT_ULL(VHOST_USER_PROTOCOL_F_REPLY_ACK)))
request_ack = false;
if (request_ack)
msg->header.flags |= VHOST_USER_FLAG_NEED_REPLY;
spin_lock_irqsave(&vu_dev->sock_lock, flags);
rc = full_sendmsg_fds(vu_dev->sock, msg, size, fds, num_fds);
if (rc < 0)
goto out;
if (request_ack) {
uint64_t status;
rc = vhost_user_recv_u64(vu_dev, &status);
if (rc)
goto out;
if (status) {
vu_err(vu_dev, "slave reports error: %llu\n", status);
rc = -EIO;
goto out;
}
}
out:
spin_unlock_irqrestore(&vu_dev->sock_lock, flags);
return rc;
}
static int vhost_user_send_no_payload(struct virtio_uml_device *vu_dev,
bool need_response, u32 request)
{
struct vhost_user_msg msg = {
.header.request = request,
};
return vhost_user_send(vu_dev, need_response, &msg, NULL, 0);
}
static int vhost_user_send_no_payload_fd(struct virtio_uml_device *vu_dev,
u32 request, int fd)
{
struct vhost_user_msg msg = {
.header.request = request,
};
return vhost_user_send(vu_dev, false, &msg, &fd, 1);
}
static int vhost_user_send_u64(struct virtio_uml_device *vu_dev,
u32 request, u64 value)
{
struct vhost_user_msg msg = {
.header.request = request,
.header.size = sizeof(msg.payload.integer),
.payload.integer = value,
};
return vhost_user_send(vu_dev, false, &msg, NULL, 0);
}
static int vhost_user_set_owner(struct virtio_uml_device *vu_dev)
{
return vhost_user_send_no_payload(vu_dev, false, VHOST_USER_SET_OWNER);
}
static int vhost_user_get_features(struct virtio_uml_device *vu_dev,
u64 *features)
{
int rc = vhost_user_send_no_payload(vu_dev, true,
VHOST_USER_GET_FEATURES);
if (rc)
return rc;
return vhost_user_recv_u64(vu_dev, features);
}
static int vhost_user_set_features(struct virtio_uml_device *vu_dev,
u64 features)
{
return vhost_user_send_u64(vu_dev, VHOST_USER_SET_FEATURES, features);
}
static int vhost_user_get_protocol_features(struct virtio_uml_device *vu_dev,
u64 *protocol_features)
{
int rc = vhost_user_send_no_payload(vu_dev, true,
VHOST_USER_GET_PROTOCOL_FEATURES);
if (rc)
return rc;
return vhost_user_recv_u64(vu_dev, protocol_features);
}
static int vhost_user_set_protocol_features(struct virtio_uml_device *vu_dev,
u64 protocol_features)
{
return vhost_user_send_u64(vu_dev, VHOST_USER_SET_PROTOCOL_FEATURES,
protocol_features);
}
static void vhost_user_reply(struct virtio_uml_device *vu_dev,
struct vhost_user_msg *msg, int response)
{
struct vhost_user_msg reply = {
.payload.integer = response,
};
size_t size = sizeof(reply.header) + sizeof(reply.payload.integer);
int rc;
reply.header = msg->header;
reply.header.flags &= ~VHOST_USER_FLAG_NEED_REPLY;
reply.header.flags |= VHOST_USER_FLAG_REPLY;
reply.header.size = sizeof(reply.payload.integer);
rc = full_sendmsg_fds(vu_dev->req_fd, &reply, size, NULL, 0);
if (rc)
vu_err(vu_dev,
"sending reply to slave request failed: %d (size %zu)\n",
rc, size);
}
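/*
 * Slave (back-end) requests arrive on the separate req_fd.  The loop
 * below drains the socket, records the work in flags
 * (config_changed_irq, vq_irq_vq_map) instead of acting immediately,
 * sends a reply when one was requested, and leaves the final read
 * status in recv_rc so that vu_req_interrupt() can deliver the
 * interrupts and handle errors afterwards.
 */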
static irqreturn_t vu_req_read_message(struct virtio_uml_device *vu_dev,
struct time_travel_event *ev)
{
struct virtqueue *vq;
int response = 1;
struct {
struct vhost_user_msg msg;
u8 extra_payload[512];
} msg;
int rc;
irqreturn_t irq_rc = IRQ_NONE;
while (1) {
rc = vhost_user_recv_req(vu_dev, &msg.msg,
sizeof(msg.msg.payload) +
sizeof(msg.extra_payload));
if (rc)
break;
switch (msg.msg.header.request) {
case VHOST_USER_SLAVE_CONFIG_CHANGE_MSG:
vu_dev->config_changed_irq = true;
response = 0;
break;
case VHOST_USER_SLAVE_VRING_CALL:
virtio_device_for_each_vq((&vu_dev->vdev), vq) {
if (vq->index == msg.msg.payload.vring_state.index) {
response = 0;
vu_dev->vq_irq_vq_map |= BIT_ULL(vq->index);
break;
}
}
break;
case VHOST_USER_SLAVE_IOTLB_MSG:
/* not supported - VIRTIO_F_ACCESS_PLATFORM */
case VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG:
/* not supported - VHOST_USER_PROTOCOL_F_HOST_NOTIFIER */
default:
vu_err(vu_dev, "unexpected slave request %d\n",
msg.msg.header.request);
}
if (ev && !vu_dev->suspended)
time_travel_add_irq_event(ev);
if (msg.msg.header.flags & VHOST_USER_FLAG_NEED_REPLY)
vhost_user_reply(vu_dev, &msg.msg, response);
irq_rc = IRQ_HANDLED;
}
/* mask EAGAIN as we try non-blocking read until socket is empty */
vu_dev->recv_rc = (rc == -EAGAIN) ? 0 : rc;
return irq_rc;
}
static irqreturn_t vu_req_interrupt(int irq, void *data)
{
struct virtio_uml_device *vu_dev = data;
irqreturn_t ret = IRQ_HANDLED;
if (!um_irq_timetravel_handler_used())
ret = vu_req_read_message(vu_dev, NULL);
if (vu_dev->recv_rc) {
vhost_user_check_reset(vu_dev, vu_dev->recv_rc);
} else if (vu_dev->vq_irq_vq_map) {
struct virtqueue *vq;
virtio_device_for_each_vq((&vu_dev->vdev), vq) {
if (vu_dev->vq_irq_vq_map & BIT_ULL(vq->index))
vring_interrupt(0 /* ignored */, vq);
}
vu_dev->vq_irq_vq_map = 0;
} else if (vu_dev->config_changed_irq) {
virtio_config_changed(&vu_dev->vdev);
vu_dev->config_changed_irq = false;
}
return ret;
}
static void vu_req_interrupt_comm_handler(int irq, int fd, void *data,
struct time_travel_event *ev)
{
vu_req_read_message(data, ev);
}
static int vhost_user_init_slave_req(struct virtio_uml_device *vu_dev)
{
int rc, req_fds[2];
/* Use a pipe for the slave req fd, since SIGIO is not supported for eventfd */
rc = os_pipe(req_fds, true, true);
if (rc < 0)
return rc;
vu_dev->req_fd = req_fds[0];
rc = um_request_irq_tt(UM_IRQ_ALLOC, vu_dev->req_fd, IRQ_READ,
vu_req_interrupt, IRQF_SHARED,
vu_dev->pdev->name, vu_dev,
vu_req_interrupt_comm_handler);
if (rc < 0)
goto err_close;
vu_dev->irq = rc;
rc = vhost_user_send_no_payload_fd(vu_dev, VHOST_USER_SET_SLAVE_REQ_FD,
req_fds[1]);
if (rc)
goto err_free_irq;
goto out;
err_free_irq:
um_free_irq(vu_dev->irq, vu_dev);
err_close:
os_close_file(req_fds[0]);
out:
/* Close unused write end of request fds */
os_close_file(req_fds[1]);
return rc;
}
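/*
 * Initial handshake, in the order performed below: SET_OWNER, then
 * GET_FEATURES; if VHOST_USER_F_PROTOCOL_FEATURES is offered, the
 * protocol features are fetched, masked with
 * VHOST_USER_SUPPORTED_PROTOCOL_F and written back; finally, if the
 * slave request channel was negotiated, the request fd is set up.
 */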
static int vhost_user_init(struct virtio_uml_device *vu_dev)
{
int rc = vhost_user_set_owner(vu_dev);
if (rc)
return rc;
rc = vhost_user_get_features(vu_dev, &vu_dev->features);
if (rc)
return rc;
if (vu_dev->features & BIT_ULL(VHOST_USER_F_PROTOCOL_FEATURES)) {
rc = vhost_user_get_protocol_features(vu_dev,
&vu_dev->protocol_features);
if (rc)
return rc;
vu_dev->protocol_features &= VHOST_USER_SUPPORTED_PROTOCOL_F;
rc = vhost_user_set_protocol_features(vu_dev,
vu_dev->protocol_features);
if (rc)
return rc;
}
if (vu_dev->protocol_features &
BIT_ULL(VHOST_USER_PROTOCOL_F_SLAVE_REQ)) {
rc = vhost_user_init_slave_req(vu_dev);
if (rc)
return rc;
}
return 0;
}
static void vhost_user_get_config(struct virtio_uml_device *vu_dev,
u32 offset, void *buf, u32 len)
{
u32 cfg_size = offset + len;
struct vhost_user_msg *msg;
size_t payload_size = sizeof(msg->payload.config) + cfg_size;
size_t msg_size = sizeof(msg->header) + payload_size;
int rc;
if (!(vu_dev->protocol_features &
BIT_ULL(VHOST_USER_PROTOCOL_F_CONFIG)))
return;
msg = kzalloc(msg_size, GFP_KERNEL);
if (!msg)
return;
msg->header.request = VHOST_USER_GET_CONFIG;
msg->header.size = payload_size;
msg->payload.config.offset = 0;
msg->payload.config.size = cfg_size;
rc = vhost_user_send(vu_dev, true, msg, NULL, 0);
if (rc) {
vu_err(vu_dev, "sending VHOST_USER_GET_CONFIG failed: %d\n",
rc);
goto free;
}
rc = vhost_user_recv_resp(vu_dev, msg, msg_size);
if (rc) {
vu_err(vu_dev,
"receiving VHOST_USER_GET_CONFIG response failed: %d\n",
rc);
goto free;
}
if (msg->header.size != payload_size ||
msg->payload.config.size != cfg_size) {
rc = -EPROTO;
vu_err(vu_dev,
"Invalid VHOST_USER_GET_CONFIG sizes (payload %d expected %zu, config %u expected %u)\n",
msg->header.size, payload_size,
msg->payload.config.size, cfg_size);
goto free;
}
memcpy(buf, msg->payload.config.payload + offset, len);
free:
kfree(msg);
}
static void vhost_user_set_config(struct virtio_uml_device *vu_dev,
u32 offset, const void *buf, u32 len)
{
struct vhost_user_msg *msg;
size_t payload_size = sizeof(msg->payload.config) + len;
size_t msg_size = sizeof(msg->header) + payload_size;
int rc;
if (!(vu_dev->protocol_features &
BIT_ULL(VHOST_USER_PROTOCOL_F_CONFIG)))
return;
msg = kzalloc(msg_size, GFP_KERNEL);
if (!msg)
return;
msg->header.request = VHOST_USER_SET_CONFIG;
msg->header.size = payload_size;
msg->payload.config.offset = offset;
msg->payload.config.size = len;
memcpy(msg->payload.config.payload, buf, len);
rc = vhost_user_send(vu_dev, false, msg, NULL, 0);
if (rc)
vu_err(vu_dev, "sending VHOST_USER_SET_CONFIG failed: %d\n",
rc);
kfree(msg);
}
static int vhost_user_init_mem_region(u64 addr, u64 size, int *fd_out,
struct vhost_user_mem_region *region_out)
{
unsigned long long mem_offset;
int rc = phys_mapping(addr, &mem_offset);
if (WARN(rc < 0, "phys_mapping of 0x%llx returned %d\n", addr, rc))
return -EFAULT;
*fd_out = rc;
region_out->guest_addr = addr;
region_out->user_addr = addr;
region_out->size = size;
region_out->mmap_offset = mem_offset;
/* Ensure mapping is valid for the entire region */
rc = phys_mapping(addr + size - 1, &mem_offset);
if (WARN(rc != *fd_out, "phys_mapping of 0x%llx failed: %d != %d\n",
addr + size - 1, rc, *fd_out))
return -EFAULT;
return 0;
}
static int vhost_user_set_mem_table(struct virtio_uml_device *vu_dev)
{
struct vhost_user_msg msg = {
.header.request = VHOST_USER_SET_MEM_TABLE,
.header.size = sizeof(msg.payload.mem_regions),
.payload.mem_regions.num = 1,
};
unsigned long reserved = uml_reserved - uml_physmem;
int fds[2];
int rc;
/*
* This is a bit tricky, see also the comment with setup_physmem().
*
* Essentially, setup_physmem() uses a file to mmap() our physmem,
* but the code and data we *already* have is omitted. To us, this
* makes no difference, since they both become part of our address
* space and memory consumption. To somebody looking in from the
* outside, however, it is different because the part of our memory
* consumption that's already part of the binary (code/data) is not
* mapped from the file, so it's not visible to another mmap from
* the file descriptor.
*
* Thus, don't advertise this space to the vhost-user slave. This
* means that the slave will likely abort or similar when we give
* it an address from the hidden range, since it's not marked as
* a valid address, but at least that way we detect the issue and
* don't just have the slave read an all-zeroes buffer from the
* shared memory file, or write something there that we can never
* see (depending on the direction of the virtqueue traffic.)
*
* Since we usually don't want to use .text for virtio buffers,
* this effectively means that you cannot use
* 1) global variables, which are in the .bss and not in the shm
* file-backed memory
* 2) the stack in some processes, depending on where they have
* their stack (or maybe only no interrupt stack?)
*
* The stack is already not typically valid for DMA, so this isn't
* much of a restriction, but global variables might be encountered.
*
* It might be possible to fix it by copying around the data that's
* between bss_start and where we map the file now, but it's not
* something that you typically encounter with virtio drivers, so
* it didn't seem worthwhile.
*/
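/*
 * Illustrative sketch of the result (exact bounds come from the calls
 * below): region 0 spans the physical range [reserved, physmem_size),
 * i.e. everything past the hidden code/data at the start of physmem;
 * if highmem is configured, region 1 is added for it starting at
 * __pa(end_iomem).
 */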
rc = vhost_user_init_mem_region(reserved, physmem_size - reserved,
&fds[0],
&msg.payload.mem_regions.regions[0]);
if (rc < 0)
return rc;
if (highmem) {
msg.payload.mem_regions.num++;
rc = vhost_user_init_mem_region(__pa(end_iomem), highmem,
&fds[1], &msg.payload.mem_regions.regions[1]);
if (rc < 0)
return rc;
}
return vhost_user_send(vu_dev, false, &msg, fds,
msg.payload.mem_regions.num);
}
static int vhost_user_set_vring_state(struct virtio_uml_device *vu_dev,
u32 request, u32 index, u32 num)
{
struct vhost_user_msg msg = {
.header.request = request,
.header.size = sizeof(msg.payload.vring_state),
.payload.vring_state.index = index,
.payload.vring_state.num = num,
};
return vhost_user_send(vu_dev, false, &msg, NULL, 0);
}
static int vhost_user_set_vring_num(struct virtio_uml_device *vu_dev,
u32 index, u32 num)
{
return vhost_user_set_vring_state(vu_dev, VHOST_USER_SET_VRING_NUM,
index, num);
}
static int vhost_user_set_vring_base(struct virtio_uml_device *vu_dev,
u32 index, u32 offset)
{
return vhost_user_set_vring_state(vu_dev, VHOST_USER_SET_VRING_BASE,
index, offset);
}
static int vhost_user_set_vring_addr(struct virtio_uml_device *vu_dev,
u32 index, u64 desc, u64 used, u64 avail,
u64 log)
{
struct vhost_user_msg msg = {
.header.request = VHOST_USER_SET_VRING_ADDR,
.header.size = sizeof(msg.payload.vring_addr),
.payload.vring_addr.index = index,
.payload.vring_addr.desc = desc,
.payload.vring_addr.used = used,
.payload.vring_addr.avail = avail,
.payload.vring_addr.log = log,
};
return vhost_user_send(vu_dev, false, &msg, NULL, 0);
}
static int vhost_user_set_vring_fd(struct virtio_uml_device *vu_dev,
u32 request, int index, int fd)
{
struct vhost_user_msg msg = {
.header.request = request,
.header.size = sizeof(msg.payload.integer),
.payload.integer = index,
};
if (index & ~VHOST_USER_VRING_INDEX_MASK)
return -EINVAL;
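/*
 * No fd supplied: following the vhost-user convention used here, set
 * VHOST_USER_VRING_POLL_MASK in the index payload so the back-end
 * knows there is no eventfd attached and should use polling/in-band
 * signalling for this vring instead.
 */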
if (fd < 0) {
msg.payload.integer |= VHOST_USER_VRING_POLL_MASK;
return vhost_user_send(vu_dev, false, &msg, NULL, 0);
}
return vhost_user_send(vu_dev, false, &msg, &fd, 1);
}
static int vhost_user_set_vring_call(struct virtio_uml_device *vu_dev,
int index, int fd)
{
return vhost_user_set_vring_fd(vu_dev, VHOST_USER_SET_VRING_CALL,
index, fd);
}
static int vhost_user_set_vring_kick(struct virtio_uml_device *vu_dev,
int index, int fd)
{
return vhost_user_set_vring_fd(vu_dev, VHOST_USER_SET_VRING_KICK,
index, fd);
}
static int vhost_user_set_vring_enable(struct virtio_uml_device *vu_dev,
u32 index, bool enable)
{
if (!(vu_dev->features & BIT_ULL(VHOST_USER_F_PROTOCOL_FEATURES)))
return 0;
return vhost_user_set_vring_state(vu_dev, VHOST_USER_SET_VRING_ENABLE,
index, enable);
}
/* Virtio interface */
static bool vu_notify(struct virtqueue *vq)
{
struct virtio_uml_vq_info *info = vq->priv;
const uint64_t n = 1;
int rc;
if (info->suspended)
return true;
time_travel_propagate_time();
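/*
 * Two kick paths: with in-band notifications negotiated there is no
 * kick eventfd (kick_fd < 0) and the kick becomes an in-band
 * VHOST_USER_VRING_KICK message; otherwise the value 1 is written to
 * the eventfd-style kick fd.
 */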
if (info->kick_fd < 0) {
struct virtio_uml_device *vu_dev;
vu_dev = to_virtio_uml_device(vq->vdev);
return vhost_user_set_vring_state(vu_dev, VHOST_USER_VRING_KICK,
vq->index, 0) == 0;
}
do {
rc = os_write_file(info->kick_fd, &n, sizeof(n));
} while (rc == -EINTR);
return !WARN(rc != sizeof(n), "write returned %d\n", rc);
}
static irqreturn_t vu_interrupt(int irq, void *opaque)
{
struct virtqueue *vq = opaque;
struct virtio_uml_vq_info *info = vq->priv;
uint64_t n;
int rc;
irqreturn_t ret = IRQ_NONE;
do {
rc = os_read_file(info->call_fd, &n, sizeof(n));
if (rc == sizeof(n))
ret |= vring_interrupt(irq, vq);
} while (rc == sizeof(n) || rc == -EINTR);
WARN(rc != -EAGAIN, "read returned %d\n", rc);
return ret;
}
static void vu_get(struct virtio_device *vdev, unsigned offset,
void *buf, unsigned len)
{
struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
vhost_user_get_config(vu_dev, offset, buf, len);
}
static void vu_set(struct virtio_device *vdev, unsigned offset,
const void *buf, unsigned len)
{
struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
vhost_user_set_config(vu_dev, offset, buf, len);
}
static u8 vu_get_status(struct virtio_device *vdev)
{
struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
return vu_dev->status;
}
static void vu_set_status(struct virtio_device *vdev, u8 status)
{
struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
vu_dev->status = status;
}
static void vu_reset(struct virtio_device *vdev)
{
struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
vu_dev->status = 0;
}
static void vu_del_vq(struct virtqueue *vq)
{
struct virtio_uml_vq_info *info = vq->priv;
if (info->call_fd >= 0) {
struct virtio_uml_device *vu_dev;
vu_dev = to_virtio_uml_device(vq->vdev);
um_free_irq(vu_dev->irq, vq);
os_close_file(info->call_fd);
}
if (info->kick_fd >= 0)
os_close_file(info->kick_fd);
vring_del_virtqueue(vq);
kfree(info);
}
static void vu_del_vqs(struct virtio_device *vdev)
{
struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
struct virtqueue *vq, *n;
u64 features;
/* Note: reverse order as a workaround to a decoding bug in snabb */
list_for_each_entry_reverse(vq, &vdev->vqs, list)
WARN_ON(vhost_user_set_vring_enable(vu_dev, vq->index, false));
/* Ensure previous messages have been processed */
WARN_ON(vhost_user_get_features(vu_dev, &features));
list_for_each_entry_safe(vq, n, &vdev->vqs, list)
vu_del_vq(vq);
}
static int vu_setup_vq_call_fd(struct virtio_uml_device *vu_dev,
struct virtqueue *vq)
{
struct virtio_uml_vq_info *info = vq->priv;
int call_fds[2];
int rc;
/* no call FD needed/desired in this case */
if (vu_dev->protocol_features &
BIT_ULL(VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS) &&
vu_dev->protocol_features &
BIT_ULL(VHOST_USER_PROTOCOL_F_SLAVE_REQ)) {
info->call_fd = -1;
return 0;
}
/* Use a pipe for call fd, since SIGIO is not supported for eventfd */
rc = os_pipe(call_fds, true, true);
if (rc < 0)
return rc;
info->call_fd = call_fds[0];
rc = um_request_irq(vu_dev->irq, info->call_fd, IRQ_READ,
vu_interrupt, IRQF_SHARED, info->name, vq);
if (rc < 0)
goto close_both;
rc = vhost_user_set_vring_call(vu_dev, vq->index, call_fds[1]);
if (rc)
goto release_irq;
goto out;
release_irq:
um_free_irq(vu_dev->irq, vq);
close_both:
os_close_file(call_fds[0]);
out:
/* Close (unused) write end of call fds */
os_close_file(call_fds[1]);
return rc;
}
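/*
 * Per-virtqueue bring-up, roughly: allocate the ring locally with
 * vring_create_virtqueue(), create a kick eventfd unless in-band
 * notifications were negotiated, wire up the call fd/IRQ, then tell
 * the back-end the ring size, base index and desc/used/avail
 * addresses.  Errors unwind in reverse order of setup.
 */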
static struct virtqueue *vu_setup_vq(struct virtio_device *vdev,
unsigned index, vq_callback_t *callback,
const char *name, bool ctx)
{
struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
struct platform_device *pdev = vu_dev->pdev;
struct virtio_uml_vq_info *info;
struct virtqueue *vq;
int num = MAX_SUPPORTED_QUEUE_SIZE;
int rc;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info) {
rc = -ENOMEM;
goto error_kzalloc;
}
snprintf(info->name, sizeof(info->name), "%s.%d-%s", pdev->name,
pdev->id, name);
vq = vring_create_virtqueue(index, num, PAGE_SIZE, vdev, true, true,
ctx, vu_notify, callback, info->name);
if (!vq) {
rc = -ENOMEM;
goto error_create;
}
vq->priv = info;
vq->num_max = num;
num = virtqueue_get_vring_size(vq);
if (vu_dev->protocol_features &
BIT_ULL(VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS)) {
info->kick_fd = -1;
} else {
rc = os_eventfd(0, 0);
if (rc < 0)
goto error_kick;
info->kick_fd = rc;
}
rc = vu_setup_vq_call_fd(vu_dev, vq);
if (rc)
goto error_call;
rc = vhost_user_set_vring_num(vu_dev, index, num);
if (rc)
goto error_setup;
rc = vhost_user_set_vring_base(vu_dev, index, 0);
if (rc)
goto error_setup;
rc = vhost_user_set_vring_addr(vu_dev, index,
virtqueue_get_desc_addr(vq),
virtqueue_get_used_addr(vq),
virtqueue_get_avail_addr(vq),
(u64) -1);
if (rc)
goto error_setup;
return vq;
error_setup:
if (info->call_fd >= 0) {
um_free_irq(vu_dev->irq, vq);
os_close_file(info->call_fd);
}
error_call:
if (info->kick_fd >= 0)
os_close_file(info->kick_fd);
error_kick:
vring_del_virtqueue(vq);
error_create:
kfree(info);
error_kzalloc:
return ERR_PTR(rc);
}
static int vu_find_vqs(struct virtio_device *vdev, unsigned nvqs,
struct virtqueue *vqs[], vq_callback_t *callbacks[],
const char * const names[], const bool *ctx,
struct irq_affinity *desc)
{
struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
int i, queue_idx = 0, rc;
struct virtqueue *vq;
/* not supported for now */
if (WARN_ON(nvqs > 64))
return -EINVAL;
rc = vhost_user_set_mem_table(vu_dev);
if (rc)
return rc;
for (i = 0; i < nvqs; ++i) {
if (!names[i]) {
vqs[i] = NULL;
continue;
}
vqs[i] = vu_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
ctx ? ctx[i] : false);
if (IS_ERR(vqs[i])) {
rc = PTR_ERR(vqs[i]);
goto error_setup;
}
}
list_for_each_entry(vq, &vdev->vqs, list) {
struct virtio_uml_vq_info *info = vq->priv;
if (info->kick_fd >= 0) {
rc = vhost_user_set_vring_kick(vu_dev, vq->index,
info->kick_fd);
if (rc)
goto error_setup;
}
rc = vhost_user_set_vring_enable(vu_dev, vq->index, true);
if (rc)
goto error_setup;
}
return 0;
error_setup:
vu_del_vqs(vdev);
return rc;
}
static u64 vu_get_features(struct virtio_device *vdev)
{
struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
return vu_dev->features;
}
static int vu_finalize_features(struct virtio_device *vdev)
{
struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
u64 supported = vdev->features & VHOST_USER_SUPPORTED_F;
vring_transport_features(vdev);
vu_dev->features = vdev->features | supported;
return vhost_user_set_features(vu_dev, vu_dev->features);
}
static const char *vu_bus_name(struct virtio_device *vdev)
{
struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
return vu_dev->pdev->name;
}
static const struct virtio_config_ops virtio_uml_config_ops = {
.get = vu_get,
.set = vu_set,
.get_status = vu_get_status,
.set_status = vu_set_status,
.reset = vu_reset,
.find_vqs = vu_find_vqs,
.del_vqs = vu_del_vqs,
.get_features = vu_get_features,
.finalize_features = vu_finalize_features,
.bus_name = vu_bus_name,
};
static void virtio_uml_release_dev(struct device *d)
{
struct virtio_device *vdev =
container_of(d, struct virtio_device, dev);
struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
time_travel_propagate_time();
/* might not have been opened due to not negotiating the feature */
if (vu_dev->req_fd >= 0) {
um_free_irq(vu_dev->irq, vu_dev);
os_close_file(vu_dev->req_fd);
}
os_close_file(vu_dev->sock);
kfree(vu_dev);
}
void virtio_uml_set_no_vq_suspend(struct virtio_device *vdev,
bool no_vq_suspend)
{
struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
if (WARN_ON(vdev->config != &virtio_uml_config_ops))
return;
vu_dev->no_vq_suspend = no_vq_suspend;
dev_info(&vdev->dev, "%sabled VQ suspend\n",
no_vq_suspend ? "dis" : "en");
}
static void vu_of_conn_broken(struct work_struct *wk)
{
struct virtio_uml_platform_data *pdata;
struct virtio_uml_device *vu_dev;
pdata = container_of(wk, struct virtio_uml_platform_data, conn_broken_wk);
vu_dev = platform_get_drvdata(pdata->pdev);
virtio_break_device(&vu_dev->vdev);
/*
* We can't remove the device from the devicetree so the only thing we
* can do is warn.
*/
WARN_ON(1);
}
/* Platform device */
static struct virtio_uml_platform_data *
virtio_uml_create_pdata(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct virtio_uml_platform_data *pdata;
int ret;
if (!np)
return ERR_PTR(-EINVAL);
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return ERR_PTR(-ENOMEM);
INIT_WORK(&pdata->conn_broken_wk, vu_of_conn_broken);
pdata->pdev = pdev;
ret = of_property_read_string(np, "socket-path", &pdata->socket_path);
if (ret)
return ERR_PTR(ret);
ret = of_property_read_u32(np, "virtio-device-id",
&pdata->virtio_device_id);
if (ret)
return ERR_PTR(ret);
return pdata;
}
static int virtio_uml_probe(struct platform_device *pdev)
{
struct virtio_uml_platform_data *pdata = pdev->dev.platform_data;
struct virtio_uml_device *vu_dev;
int rc;
if (!pdata) {
pdata = virtio_uml_create_pdata(pdev);
if (IS_ERR(pdata))
return PTR_ERR(pdata);
}
vu_dev = kzalloc(sizeof(*vu_dev), GFP_KERNEL);
if (!vu_dev)
return -ENOMEM;
vu_dev->pdata = pdata;
vu_dev->vdev.dev.parent = &pdev->dev;
vu_dev->vdev.dev.release = virtio_uml_release_dev;
vu_dev->vdev.config = &virtio_uml_config_ops;
vu_dev->vdev.id.device = pdata->virtio_device_id;
vu_dev->vdev.id.vendor = VIRTIO_DEV_ANY_ID;
vu_dev->pdev = pdev;
vu_dev->req_fd = -1;
time_travel_propagate_time();
do {
rc = os_connect_socket(pdata->socket_path);
} while (rc == -EINTR);
if (rc < 0)
goto error_free;
vu_dev->sock = rc;
spin_lock_init(&vu_dev->sock_lock);
rc = vhost_user_init(vu_dev);
if (rc)
goto error_init;
platform_set_drvdata(pdev, vu_dev);
device_set_wakeup_capable(&vu_dev->vdev.dev, true);
rc = register_virtio_device(&vu_dev->vdev);
if (rc) {
/* the release callback frees vu_dev, so don't touch it afterwards */
put_device(&vu_dev->vdev.dev);
return rc;
}
vu_dev->registered = 1;
return 0;
error_init:
os_close_file(vu_dev->sock);
error_free:
kfree(vu_dev);
return rc;
}
static int virtio_uml_remove(struct platform_device *pdev)
{
struct virtio_uml_device *vu_dev = platform_get_drvdata(pdev);
unregister_virtio_device(&vu_dev->vdev);
return 0;
}
/* Command line device list */
static void vu_cmdline_release_dev(struct device *d)
{
}
static struct device vu_cmdline_parent = {
.init_name = "virtio-uml-cmdline",
.release = vu_cmdline_release_dev,
};
static bool vu_cmdline_parent_registered;
static int vu_cmdline_id;
static int vu_unregister_cmdline_device(struct device *dev, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
struct virtio_uml_platform_data *pdata = pdev->dev.platform_data;
kfree(pdata->socket_path);
platform_device_unregister(pdev);
return 0;
}
static void vu_conn_broken(struct work_struct *wk)
{
struct virtio_uml_platform_data *pdata;
struct virtio_uml_device *vu_dev;
pdata = container_of(wk, struct virtio_uml_platform_data, conn_broken_wk);
vu_dev = platform_get_drvdata(pdata->pdev);
virtio_break_device(&vu_dev->vdev);
vu_unregister_cmdline_device(&pdata->pdev->dev, NULL);
}
static int vu_cmdline_set(const char *device, const struct kernel_param *kp)
{
const char *ids = strchr(device, ':');
unsigned int virtio_device_id;
int processed, consumed, err;
char *socket_path;
struct virtio_uml_platform_data pdata, *ppdata;
struct platform_device *pdev;
if (!ids || ids == device)
return -EINVAL;
processed = sscanf(ids, ":%u%n:%d%n",
&virtio_device_id, &consumed,
&vu_cmdline_id, &consumed);
if (processed < 1 || ids[consumed])
return -EINVAL;
if (!vu_cmdline_parent_registered) {
err = device_register(&vu_cmdline_parent);
if (err) {
pr_err("Failed to register parent device!\n");
put_device(&vu_cmdline_parent);
return err;
}
vu_cmdline_parent_registered = true;
}
socket_path = kmemdup_nul(device, ids - device, GFP_KERNEL);
if (!socket_path)
return -ENOMEM;
pdata.virtio_device_id = (u32) virtio_device_id;
pdata.socket_path = socket_path;
pr_info("Registering device virtio-uml.%d id=%d at %s\n",
vu_cmdline_id, virtio_device_id, socket_path);
pdev = platform_device_register_data(&vu_cmdline_parent, "virtio-uml",
vu_cmdline_id++, &pdata,
sizeof(pdata));
err = PTR_ERR_OR_ZERO(pdev);
if (err)
goto free;
ppdata = pdev->dev.platform_data;
ppdata->pdev = pdev;
INIT_WORK(&ppdata->conn_broken_wk, vu_conn_broken);
return 0;
free:
kfree(socket_path);
return err;
}
static int vu_cmdline_get_device(struct device *dev, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
struct virtio_uml_platform_data *pdata = pdev->dev.platform_data;
char *buffer = data;
unsigned int len = strlen(buffer);
snprintf(buffer + len, PAGE_SIZE - len, "%s:%d:%d\n",
pdata->socket_path, pdata->virtio_device_id, pdev->id);
return 0;
}
static int vu_cmdline_get(char *buffer, const struct kernel_param *kp)
{
buffer[0] = '\0';
if (vu_cmdline_parent_registered)
device_for_each_child(&vu_cmdline_parent, buffer,
vu_cmdline_get_device);
return strlen(buffer) + 1;
}
static const struct kernel_param_ops vu_cmdline_param_ops = {
.set = vu_cmdline_set,
.get = vu_cmdline_get,
};
device_param_cb(device, &vu_cmdline_param_ops, NULL, S_IRUSR);
__uml_help(vu_cmdline_param_ops,
"virtio_uml.device=<socket>:<virtio_id>[:<platform_id>]\n"
" Configure a virtio device over a vhost-user socket.\n"
" See virtio_ids.h for a list of possible virtio device id values.\n"
" Optionally use a specific platform_device id.\n\n"
);
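/*
 * Illustrative example (socket path is a placeholder):
 *   virtio_uml.device=/tmp/vhost-net.sock:1
 * attaches the back-end listening on that socket as virtio device id 1
 * (virtio-net in virtio_ids.h) with an auto-assigned platform id;
 * append ":<n>" to request a specific platform_device id.
 */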
static void vu_unregister_cmdline_devices(void)
{
if (vu_cmdline_parent_registered) {
device_for_each_child(&vu_cmdline_parent, NULL,
vu_unregister_cmdline_device);
device_unregister(&vu_cmdline_parent);
vu_cmdline_parent_registered = false;
}
}
/* Platform driver */
static const struct of_device_id virtio_uml_match[] = {
{ .compatible = "virtio,uml", },
{ }
};
MODULE_DEVICE_TABLE(of, virtio_uml_match);
static int virtio_uml_suspend(struct platform_device *pdev, pm_message_t state)
{
struct virtio_uml_device *vu_dev = platform_get_drvdata(pdev);
if (!vu_dev->no_vq_suspend) {
struct virtqueue *vq;
virtio_device_for_each_vq((&vu_dev->vdev), vq) {
struct virtio_uml_vq_info *info = vq->priv;
info->suspended = true;
vhost_user_set_vring_enable(vu_dev, vq->index, false);
}
}
if (!device_may_wakeup(&vu_dev->vdev.dev)) {
vu_dev->suspended = true;
return 0;
}
return irq_set_irq_wake(vu_dev->irq, 1);
}
static int virtio_uml_resume(struct platform_device *pdev)
{
struct virtio_uml_device *vu_dev = platform_get_drvdata(pdev);
if (!vu_dev->no_vq_suspend) {
struct virtqueue *vq;
virtio_device_for_each_vq((&vu_dev->vdev), vq) {
struct virtio_uml_vq_info *info = vq->priv;
info->suspended = false;
vhost_user_set_vring_enable(vu_dev, vq->index, true);
}
}
vu_dev->suspended = false;
if (!device_may_wakeup(&vu_dev->vdev.dev))
return 0;
return irq_set_irq_wake(vu_dev->irq, 0);
}
static struct platform_driver virtio_uml_driver = {
.probe = virtio_uml_probe,
.remove = virtio_uml_remove,
.driver = {
.name = "virtio-uml",
.of_match_table = virtio_uml_match,
},
.suspend = virtio_uml_suspend,
.resume = virtio_uml_resume,
};
static int __init virtio_uml_init(void)
{
return platform_driver_register(&virtio_uml_driver);
}
static void __exit virtio_uml_exit(void)
{
platform_driver_unregister(&virtio_uml_driver);
vu_unregister_cmdline_devices();
}
module_init(virtio_uml_init);
module_exit(virtio_uml_exit);
__uml_exitcall(virtio_uml_exit);
MODULE_DESCRIPTION("UML driver for vhost-user virtio devices");
MODULE_LICENSE("GPL");
| linux-master | arch/um/drivers/virtio_uml.c |
// SPDX-License-Identifier: GPL-2.0
#include <string.h>
#include "slip_common.h"
#include <net_user.h>
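/*
 * Rough idea of the framing handled below: bytes are accumulated in
 * slip->ibuf and fed through slip_unesc() until it reports a complete,
 * de-escaped SLIP frame (non-zero size), which is copied to the
 * caller's buffer.  Bytes that arrived after the end-of-frame marker
 * are kept at the start of ibuf and counted in slip->more, so the next
 * call consumes them before reading from the fd again.
 */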
int slip_proto_read(int fd, void *buf, int len, struct slip_proto *slip)
{
int i, n, size, start;
if(slip->more > 0){
i = 0;
while(i < slip->more){
size = slip_unesc(slip->ibuf[i++], slip->ibuf,
&slip->pos, &slip->esc);
if(size){
memcpy(buf, slip->ibuf, size);
memmove(slip->ibuf, &slip->ibuf[i],
slip->more - i);
slip->more = slip->more - i;
return size;
}
}
slip->more = 0;
}
n = net_read(fd, &slip->ibuf[slip->pos],
sizeof(slip->ibuf) - slip->pos);
if(n <= 0)
return n;
start = slip->pos;
for(i = 0; i < n; i++){
size = slip_unesc(slip->ibuf[start + i], slip->ibuf,&slip->pos,
&slip->esc);
if(size){
memcpy(buf, slip->ibuf, size);
memmove(slip->ibuf, &slip->ibuf[start+i+1],
n - (i + 1));
slip->more = n - (i + 1);
return size;
}
}
return 0;
}
int slip_proto_write(int fd, void *buf, int len, struct slip_proto *slip)
{
int actual, n;
actual = slip_esc(buf, slip->obuf, len);
n = net_write(fd, slip->obuf, actual);
if(n < 0)
return n;
else return len;
}
| linux-master | arch/um/drivers/slip_common.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2001 - 2007 Jeff Dike (jdike@{linux.intel,addtoit}.com)
*/
#include <errno.h>
#include <fcntl.h>
#include <termios.h>
#include "chan_user.h"
#include <os.h>
#include <um_malloc.h>
struct tty_chan {
char *dev;
int raw;
struct termios tt;
};
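/*
 * The channel string must start with ':' followed by the host device
 * path, e.g. (illustrative) a console configured as "tty:/dev/ttyS0";
 * everything after the ':' is used verbatim as the device to open.
 */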
static void *tty_chan_init(char *str, int device, const struct chan_opts *opts)
{
struct tty_chan *data;
if (*str != ':') {
printk(UM_KERN_ERR "tty_init : channel type 'tty' must specify "
"a device\n");
return NULL;
}
str++;
data = uml_kmalloc(sizeof(*data), UM_GFP_KERNEL);
if (data == NULL)
return NULL;
*data = ((struct tty_chan) { .dev = str,
.raw = opts->raw });
return data;
}
static int tty_open(int input, int output, int primary, void *d,
char **dev_out)
{
struct tty_chan *data = d;
int fd, err, mode = 0;
if (input && output)
mode = O_RDWR;
else if (input)
mode = O_RDONLY;
else if (output)
mode = O_WRONLY;
fd = open(data->dev, mode);
if (fd < 0)
return -errno;
if (data->raw) {
CATCH_EINTR(err = tcgetattr(fd, &data->tt));
if (err)
return err;
err = raw(fd);
if (err)
return err;
}
*dev_out = data->dev;
return fd;
}
const struct chan_ops tty_ops = {
.type = "tty",
.init = tty_chan_init,
.open = tty_open,
.close = generic_close,
.read = generic_read,
.write = generic_write,
.console_write = generic_console_write,
.window_size = generic_window_size,
.free = generic_free,
.winch = 0,
};
| linux-master | arch/um/drivers/tty.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <net_kern.h>
#include <net_user.h>
#include "slirp.h"
struct slirp_init {
struct arg_list_dummy_wrapper argw; /* XXX should be simpler... */
};
static void slirp_init(struct net_device *dev, void *data)
{
struct uml_net_private *private;
struct slirp_data *spri;
struct slirp_init *init = data;
int i;
private = netdev_priv(dev);
spri = (struct slirp_data *) private->user;
spri->argw = init->argw;
spri->pid = -1;
spri->slave = -1;
spri->dev = dev;
slip_proto_init(&spri->slip);
dev->hard_header_len = 0;
dev->header_ops = NULL;
dev->addr_len = 0;
dev->type = ARPHRD_SLIP;
dev->tx_queue_len = 256;
dev->flags = IFF_NOARP;
printk("SLIRP backend - command line:");
for (i = 0; spri->argw.argv[i] != NULL; i++)
printk(" '%s'",spri->argw.argv[i]);
printk("\n");
}
static unsigned short slirp_protocol(struct sk_buff *skbuff)
{
return htons(ETH_P_IP);
}
static int slirp_read(int fd, struct sk_buff *skb, struct uml_net_private *lp)
{
return slirp_user_read(fd, skb_mac_header(skb), skb->dev->mtu,
(struct slirp_data *) &lp->user);
}
static int slirp_write(int fd, struct sk_buff *skb, struct uml_net_private *lp)
{
return slirp_user_write(fd, skb->data, skb->len,
(struct slirp_data *) &lp->user);
}
const struct net_kern_info slirp_kern_info = {
.init = slirp_init,
.protocol = slirp_protocol,
.read = slirp_read,
.write = slirp_write,
};
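/*
 * Command line parsing for the slirp transport.  Illustrative form
 * (interface name and helper path are placeholders):
 *   eth0=slirp,FE:FD:00:00:00:01,/usr/bin/slirp
 * Everything after the MAC address is split on commas into the argv
 * passed to the slirp helper, with '_' in an argument translated to a
 * space.
 */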
static int slirp_setup(char *str, char **mac_out, void *data)
{
struct slirp_init *init = data;
int i=0;
*init = ((struct slirp_init) { .argw = { { "slirp", NULL } } });
str = split_if_spec(str, mac_out, NULL);
if (str == NULL) /* no command line given after MAC addr */
return 1;
do {
if (i >= SLIRP_MAX_ARGS - 1) {
printk(KERN_WARNING "slirp_setup: truncating slirp "
"arguments\n");
break;
}
init->argw.argv[i++] = str;
while(*str && *str!=',') {
if (*str == '_')
*str=' ';
str++;
}
if (*str != ',')
break;
*str++ = '\0';
} while (1);
init->argw.argv[i] = NULL;
return 1;
}
static struct transport slirp_transport = {
.list = LIST_HEAD_INIT(slirp_transport.list),
.name = "slirp",
.setup = slirp_setup,
.user = &slirp_user_info,
.kern = &slirp_kern_info,
.private_size = sizeof(struct slirp_data),
.setup_size = sizeof(struct slirp_init),
};
static int register_slirp(void)
{
register_transport(&slirp_transport);
return 0;
}
late_initcall(register_slirp);
| linux-master | arch/um/drivers/slirp_kern.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <termios.h>
#include "chan_user.h"
#include <os.h>
#include <um_malloc.h>
#include "xterm.h"
struct xterm_chan {
int pid;
int helper_pid;
int chan_fd;
char *title;
int device;
int raw;
struct termios tt;
};
static void *xterm_init(char *str, int device, const struct chan_opts *opts)
{
struct xterm_chan *data;
data = uml_kmalloc(sizeof(*data), UM_GFP_KERNEL);
if (data == NULL)
return NULL;
*data = ((struct xterm_chan) { .pid = -1,
.helper_pid = -1,
.chan_fd = -1,
.device = device,
.title = opts->xterm_title,
.raw = opts->raw } );
return data;
}
/* Only changed by xterm_setup, which is a setup */
static char *terminal_emulator = CONFIG_XTERM_CHAN_DEFAULT_EMULATOR;
static char *title_switch = "-T";
static char *exec_switch = "-e";
static int __init xterm_setup(char *line, int *add)
{
*add = 0;
terminal_emulator = line;
line = strchr(line, ',');
if (line == NULL)
return 0;
*line++ = '\0';
if (*line)
title_switch = line;
line = strchr(line, ',');
if (line == NULL)
return 0;
*line++ = '\0';
if (*line)
exec_switch = line;
return 0;
}
__uml_setup("xterm=", xterm_setup,
"xterm=<terminal emulator>,<title switch>,<exec switch>\n"
" Specifies an alternate terminal emulator to use for the debugger,\n"
" consoles, and serial lines when they are attached to the xterm channel.\n"
" The values are the terminal emulator binary, the switch it uses to set\n"
" its title, and the switch it uses to execute a subprocess,\n"
" respectively. The title switch must have the form '<switch> title',\n"
" not '<switch>=title'. Similarly, the exec switch must have the form\n"
" '<switch> command arg1 arg2 ...'.\n"
" The default values are 'xterm=" CONFIG_XTERM_CHAN_DEFAULT_EMULATOR
",-T,-e'.\n"
" Values for gnome-terminal are 'xterm=gnome-terminal,-t,-x'.\n\n"
);
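/*
 * Opening an xterm channel, roughly: create a uniquely named Unix
 * socket, spawn the terminal emulator running port-helper attached to
 * that socket, then receive the pty file descriptor back over the
 * socket (xterm_fd()/os_rcv_fd) and put it into non-blocking -- and,
 * if requested, raw -- mode.
 */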
static int xterm_open(int input, int output, int primary, void *d,
char **dev_out)
{
struct xterm_chan *data = d;
int pid, fd, new, err;
char title[256], file[] = "/tmp/xterm-pipeXXXXXX";
char *argv[] = { terminal_emulator, title_switch, title, exec_switch,
OS_LIB_PATH "/uml/port-helper", "-uml-socket",
file, NULL };
if (access(argv[4], X_OK) < 0)
argv[4] = "port-helper";
/*
* Check that DISPLAY is set, this doesn't guarantee the xterm
* will work but w/o it we can be pretty sure it won't.
*/
if (getenv("DISPLAY") == NULL) {
printk(UM_KERN_ERR "xterm_open: $DISPLAY not set.\n");
return -ENODEV;
}
/*
* This business of getting a descriptor to a temp file,
* deleting the file and closing the descriptor is just to get
* a known-unused name for the Unix socket that we really
* want.
*/
fd = mkstemp(file);
if (fd < 0) {
err = -errno;
printk(UM_KERN_ERR "xterm_open : mkstemp failed, errno = %d\n",
errno);
return err;
}
if (unlink(file)) {
err = -errno;
printk(UM_KERN_ERR "xterm_open : unlink failed, errno = %d\n",
errno);
close(fd);
return err;
}
close(fd);
fd = os_create_unix_socket(file, sizeof(file), 1);
if (fd < 0) {
printk(UM_KERN_ERR "xterm_open : create_unix_socket failed, "
"errno = %d\n", -fd);
return fd;
}
sprintf(title, data->title, data->device);
pid = run_helper(NULL, NULL, argv);
if (pid < 0) {
err = pid;
printk(UM_KERN_ERR "xterm_open : run_helper failed, "
"errno = %d\n", -err);
goto out_close1;
}
err = os_set_fd_block(fd, 0);
if (err < 0) {
printk(UM_KERN_ERR "xterm_open : failed to set descriptor "
"non-blocking, err = %d\n", -err);
goto out_kill;
}
data->chan_fd = fd;
new = xterm_fd(fd, &data->helper_pid);
if (new < 0) {
err = new;
printk(UM_KERN_ERR "xterm_open : os_rcv_fd failed, err = %d\n",
-err);
goto out_kill;
}
err = os_set_fd_block(new, 0);
if (err) {
printk(UM_KERN_ERR "xterm_open : failed to set xterm "
"descriptor non-blocking, err = %d\n", -err);
goto out_close2;
}
CATCH_EINTR(err = tcgetattr(new, &data->tt));
if (err) {
new = err;
goto out_close2;
}
if (data->raw) {
err = raw(new);
if (err) {
new = err;
goto out_close2;
}
}
unlink(file);
data->pid = pid;
*dev_out = NULL;
return new;
out_close2:
close(new);
out_kill:
os_kill_process(pid, 1);
out_close1:
close(fd);
return err;
}
static void xterm_close(int fd, void *d)
{
struct xterm_chan *data = d;
if (data->pid != -1)
os_kill_process(data->pid, 1);
data->pid = -1;
if (data->helper_pid != -1)
os_kill_process(data->helper_pid, 0);
data->helper_pid = -1;
if (data->chan_fd != -1)
os_close_file(data->chan_fd);
os_close_file(fd);
}
const struct chan_ops xterm_ops = {
.type = "xterm",
.init = xterm_init,
.open = xterm_open,
.close = xterm_close,
.read = generic_read,
.write = generic_write,
.console_write = generic_console_write,
.window_size = generic_window_size,
.free = generic_free,
.winch = 1,
};
| linux-master | arch/um/drivers/xterm.c |