python_code | repo_name | file_path
---|---|---|
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/cpu/sh2a/opcode_helper.c
*
* Helper for the SH-2A 32-bit opcodes.
*
* Copyright (C) 2007 Paul Mundt
*/
#include <linux/kernel.h>
/*
* Instructions on SH are generally fixed at 16-bits, however, SH-2A
* introduces some 32-bit instructions. Since there are no real
* constraints on their use (and they can be mixed and matched), we need
* to check the instruction encoding to work out if it's a true 32-bit
* instruction or not.
*
* Presently, 32-bit opcodes have only slight variations in what the
* actual encoding looks like in the first-half of the instruction, which
* makes it fairly straightforward to differentiate from the 16-bit ones.
*
* First 16-bits of encoding Used by
*
* 0011nnnnmmmm0001 mov.b, mov.w, mov.l, fmov.d,
* fmov.s, movu.b, movu.w
*
* 0011nnnn0iii1001 bclr.b, bld.b, bset.b, bst.b, band.b,
* bandnot.b, bldnot.b, bor.b, bornot.b,
* bxor.b
*
* 0000nnnniiii0000 movi20
* 0000nnnniiii0001 movi20s
*/
unsigned int instruction_size(unsigned int insn)
{
/* Look for the common cases */
switch ((insn & 0xf00f)) {
case 0x0000: /* movi20 */
case 0x0001: /* movi20s */
case 0x3001: /* 32-bit mov/fmov/movu variants */
return 4;
}
/* And the special cases.. */
switch ((insn & 0xf08f)) {
case 0x3009: /* 32-bit b*.b bit operations */
return 4;
}
return 2;
}
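/*
 * Exposition-only sketch (not part of the original file): the masks in
 * instruction_size() keep only the fixed bits of the encodings listed
 * above -- 0xf00f keeps the top and bottom nibbles (so 0x3001, 0x0000
 * and 0x0001 match), while 0xf08f additionally pins bit 7 to 0 for the
 * "0iii" field of the bit operations. A hypothetical caller stepping
 * through a mixed 16/32-bit instruction stream would advance the PC
 * like this:
 */
static inline const u16 *sh2a_next_insn(const u16 *pc)
{
	/* instruction_size() returns 2 or 4 bytes; pc is u16-granular */
	return pc + instruction_size(*pc) / 2;
}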
| linux-master | arch/sh/kernel/cpu/sh2a/opcode_helper.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/cpu/sh4a/clock-shx3.c
*
* SH-X3 support for the clock framework
*
* Copyright (C) 2006-2007 Renesas Technology Corp.
* Copyright (C) 2006-2007 Renesas Solutions Corp.
* Copyright (C) 2006-2010 Paul Mundt
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/clkdev.h>
#include <asm/clock.h>
#include <asm/freq.h>
/*
* Default rate for the root input clock, reset this with clk_set_rate()
* from the platform code.
*/
static struct clk extal_clk = {
.rate = 16666666,
};
static unsigned long pll_recalc(struct clk *clk)
{
/* PLL1 has a fixed x72 multiplier. */
return clk->parent->rate * 72;
}
static struct sh_clk_ops pll_clk_ops = {
.recalc = pll_recalc,
};
static struct clk pll_clk = {
.ops = &pll_clk_ops,
.parent = &extal_clk,
.flags = CLK_ENABLE_ON_INIT,
};
static struct clk *clks[] = {
&extal_clk,
&pll_clk,
};
static unsigned int div2[] = { 1, 2, 4, 6, 8, 12, 16, 18,
24, 32, 36, 48 };
static struct clk_div_mult_table div4_div_mult_table = {
.divisors = div2,
.nr_divisors = ARRAY_SIZE(div2),
};
static struct clk_div4_table div4_table = {
.div_mult_table = &div4_div_mult_table,
};
enum { DIV4_I, DIV4_SH, DIV4_B, DIV4_DDR, DIV4_SHA, DIV4_P, DIV4_NR };
#define DIV4(_bit, _mask, _flags) \
SH_CLK_DIV4(&pll_clk, FRQMR1, _bit, _mask, _flags)
struct clk div4_clks[DIV4_NR] = {
[DIV4_P] = DIV4(0, 0x0f80, 0),
[DIV4_SHA] = DIV4(4, 0x0ff0, 0),
[DIV4_DDR] = DIV4(12, 0x000c, CLK_ENABLE_ON_INIT),
[DIV4_B] = DIV4(16, 0x0fe0, CLK_ENABLE_ON_INIT),
[DIV4_SH] = DIV4(20, 0x000c, CLK_ENABLE_ON_INIT),
[DIV4_I] = DIV4(28, 0x000e, CLK_ENABLE_ON_INIT),
};
#define MSTPCR0 0xffc00030
#define MSTPCR1 0xffc00034
enum { MSTP027, MSTP026, MSTP025, MSTP024,
MSTP009, MSTP008, MSTP003, MSTP002,
MSTP001, MSTP000, MSTP119, MSTP105,
MSTP104, MSTP_NR };
static struct clk mstp_clks[MSTP_NR] = {
/* MSTPCR0 */
[MSTP027] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 27, 0),
[MSTP026] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 26, 0),
[MSTP025] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 25, 0),
[MSTP024] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 24, 0),
[MSTP009] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 9, 0),
[MSTP008] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 8, 0),
[MSTP003] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 3, 0),
[MSTP002] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 2, 0),
[MSTP001] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 1, 0),
[MSTP000] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 0, 0),
/* MSTPCR1 */
[MSTP119] = SH_CLK_MSTP32(NULL, MSTPCR1, 19, 0),
[MSTP105] = SH_CLK_MSTP32(NULL, MSTPCR1, 5, 0),
[MSTP104] = SH_CLK_MSTP32(NULL, MSTPCR1, 4, 0),
};
static struct clk_lookup lookups[] = {
/* main clocks */
CLKDEV_CON_ID("extal", &extal_clk),
CLKDEV_CON_ID("pll_clk", &pll_clk),
/* DIV4 clocks */
CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
CLKDEV_CON_ID("shywaya_clk", &div4_clks[DIV4_SHA]),
CLKDEV_CON_ID("ddr_clk", &div4_clks[DIV4_DDR]),
CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]),
CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]),
CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
/* MSTP32 clocks */
CLKDEV_ICK_ID("fck", "sh-sci.3", &mstp_clks[MSTP027]),
CLKDEV_ICK_ID("fck", "sh-sci.2", &mstp_clks[MSTP026]),
CLKDEV_ICK_ID("fck", "sh-sci.1", &mstp_clks[MSTP025]),
CLKDEV_ICK_ID("fck", "sh-sci.0", &mstp_clks[MSTP024]),
CLKDEV_CON_ID("h8ex_fck", &mstp_clks[MSTP003]),
CLKDEV_CON_ID("csm_fck", &mstp_clks[MSTP002]),
CLKDEV_CON_ID("fe1_fck", &mstp_clks[MSTP001]),
CLKDEV_CON_ID("fe0_fck", &mstp_clks[MSTP000]),
CLKDEV_ICK_ID("fck", "sh-tmu.0", &mstp_clks[MSTP008]),
CLKDEV_ICK_ID("fck", "sh-tmu.1", &mstp_clks[MSTP009]),
CLKDEV_CON_ID("hudi_fck", &mstp_clks[MSTP119]),
CLKDEV_CON_ID("dmac_11_6_fck", &mstp_clks[MSTP105]),
CLKDEV_CON_ID("dmac_5_0_fck", &mstp_clks[MSTP104]),
};
int __init arch_clk_init(void)
{
int i, ret = 0;
for (i = 0; i < ARRAY_SIZE(clks); i++)
ret |= clk_register(clks[i]);
clkdev_add_table(lookups, ARRAY_SIZE(lookups));
if (!ret)
ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),
&div4_table);
if (!ret)
ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
return ret;
}
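/*
 * Exposition-only sketch (not part of the original file): once the
 * lookup table above has been registered, consumers resolve these
 * clocks through the generic clk API, e.g.:
 *
 *	struct clk *p = clk_get(NULL, "peripheral_clk");
 *	if (!IS_ERR(p))
 *		pr_info("Pck = %lu Hz\n", clk_get_rate(p));
 *
 * The connection-id strings come from the CLKDEV_CON_ID() entries
 * above; clk_get(), clk_get_rate() and clk_put() are the standard
 * kernel clk interfaces.
 */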
| linux-master | arch/sh/kernel/cpu/sh4a/clock-shx3.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SH7786 Pinmux
*
* Copyright (C) 2008, 2009 Renesas Solutions Corp.
* Kuninori Morimoto <[email protected]>
*
* Based on SH7785 pinmux
*
* Copyright (C) 2008 Magnus Damm
*/
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <cpu/pfc.h>
static struct resource sh7786_pfc_resources[] = {
[0] = {
.start = 0xffcc0000,
.end = 0xffcc008f,
.flags = IORESOURCE_MEM,
},
};
static int __init plat_pinmux_setup(void)
{
return sh_pfc_register("pfc-sh7786", sh7786_pfc_resources,
ARRAY_SIZE(sh7786_pfc_resources));
}
arch_initcall(plat_pinmux_setup);
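/*
 * Note (added for exposition): sh_pfc_register() hands the register
 * window above to the shared SH pin function controller (sh-pfc)
 * driver; the "pfc-sh7786" name selects the matching per-SoC pinmux
 * tables in drivers/pinctrl.
 */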
| linux-master | arch/sh/kernel/cpu/sh4a/pinmux-sh7786.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SH7763 Setup
*
* Copyright (C) 2006 Paul Mundt
* Copyright (C) 2007 Yoshihiro Shimoda
* Copyright (C) 2008, 2009 Nobuhiro Iwamatsu
*/
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/sh_timer.h>
#include <linux/sh_intc.h>
#include <linux/io.h>
#include <linux/serial_sci.h>
#include <linux/usb/ohci_pdriver.h>
#include <asm/platform_early.h>
static struct plat_sci_port scif0_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
.regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
};
static struct resource scif0_resources[] = {
DEFINE_RES_MEM(0xffe00000, 0x100),
DEFINE_RES_IRQ(evt2irq(0x700)),
};
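/*
 * Note (added for exposition): evt2irq() converts an INTEVT exception
 * vector into a Linux IRQ number; <linux/sh_intc.h> defines it as
 * ((evt) >> 5) - 16, so evt2irq(0x700) above resolves to IRQ 40.
 */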
static struct platform_device scif0_device = {
.name = "sh-sci",
.id = 0,
.resource = scif0_resources,
.num_resources = ARRAY_SIZE(scif0_resources),
.dev = {
.platform_data = &scif0_platform_data,
},
};
static struct plat_sci_port scif1_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
.regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
};
static struct resource scif1_resources[] = {
DEFINE_RES_MEM(0xffe08000, 0x100),
DEFINE_RES_IRQ(evt2irq(0xb80)),
};
static struct platform_device scif1_device = {
.name = "sh-sci",
.id = 1,
.resource = scif1_resources,
.num_resources = ARRAY_SIZE(scif1_resources),
.dev = {
.platform_data = &scif1_platform_data,
},
};
static struct plat_sci_port scif2_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
.regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
};
static struct resource scif2_resources[] = {
DEFINE_RES_MEM(0xffe10000, 0x100),
DEFINE_RES_IRQ(evt2irq(0xf00)),
};
static struct platform_device scif2_device = {
.name = "sh-sci",
.id = 2,
.resource = scif2_resources,
.num_resources = ARRAY_SIZE(scif2_resources),
.dev = {
.platform_data = &scif2_platform_data,
},
};
static struct resource rtc_resources[] = {
[0] = {
.start = 0xffe80000,
.end = 0xffe80000 + 0x58 - 1,
.flags = IORESOURCE_IO,
},
[1] = {
/* Shared Period/Carry/Alarm IRQ */
.start = evt2irq(0x480),
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device rtc_device = {
.name = "sh-rtc",
.id = -1,
.num_resources = ARRAY_SIZE(rtc_resources),
.resource = rtc_resources,
};
static struct resource usb_ohci_resources[] = {
[0] = {
.start = 0xffec8000,
.end = 0xffec80ff,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = evt2irq(0xc60),
.end = evt2irq(0xc60),
.flags = IORESOURCE_IRQ,
},
};
static u64 usb_ohci_dma_mask = 0xffffffffUL;
static struct usb_ohci_pdata usb_ohci_pdata;
static struct platform_device usb_ohci_device = {
.name = "ohci-platform",
.id = -1,
.dev = {
.dma_mask = &usb_ohci_dma_mask,
.coherent_dma_mask = 0xffffffff,
.platform_data = &usb_ohci_pdata,
},
.num_resources = ARRAY_SIZE(usb_ohci_resources),
.resource = usb_ohci_resources,
};
static struct resource usbf_resources[] = {
[0] = {
.start = 0xffec0000,
.end = 0xffec00ff,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = evt2irq(0xc80),
.end = evt2irq(0xc80),
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device usbf_device = {
.name = "sh_udc",
.id = -1,
.dev = {
.dma_mask = NULL,
.coherent_dma_mask = 0xffffffff,
},
.num_resources = ARRAY_SIZE(usbf_resources),
.resource = usbf_resources,
};
static struct sh_timer_config tmu0_platform_data = {
.channels_mask = 7,
};
static struct resource tmu0_resources[] = {
DEFINE_RES_MEM(0xffd80000, 0x30),
DEFINE_RES_IRQ(evt2irq(0x580)),
DEFINE_RES_IRQ(evt2irq(0x5a0)),
DEFINE_RES_IRQ(evt2irq(0x5c0)),
};
static struct platform_device tmu0_device = {
.name = "sh-tmu",
.id = 0,
.dev = {
.platform_data = &tmu0_platform_data,
},
.resource = tmu0_resources,
.num_resources = ARRAY_SIZE(tmu0_resources),
};
static struct sh_timer_config tmu1_platform_data = {
.channels_mask = 7,
};
static struct resource tmu1_resources[] = {
DEFINE_RES_MEM(0xffd88000, 0x2c),
DEFINE_RES_IRQ(evt2irq(0xe00)),
DEFINE_RES_IRQ(evt2irq(0xe20)),
DEFINE_RES_IRQ(evt2irq(0xe40)),
};
static struct platform_device tmu1_device = {
.name = "sh-tmu",
.id = 1,
.dev = {
.platform_data = &tmu1_platform_data,
},
.resource = tmu1_resources,
.num_resources = ARRAY_SIZE(tmu1_resources),
};
static struct platform_device *sh7763_devices[] __initdata = {
&scif0_device,
&scif1_device,
&scif2_device,
&tmu0_device,
&tmu1_device,
&rtc_device,
&usb_ohci_device,
&usbf_device,
};
static int __init sh7763_devices_setup(void)
{
return platform_add_devices(sh7763_devices,
ARRAY_SIZE(sh7763_devices));
}
arch_initcall(sh7763_devices_setup);
static struct platform_device *sh7763_early_devices[] __initdata = {
&scif0_device,
&scif1_device,
&scif2_device,
&tmu0_device,
&tmu1_device,
};
void __init plat_early_device_setup(void)
{
sh_early_platform_add_devices(sh7763_early_devices,
ARRAY_SIZE(sh7763_early_devices));
}
enum {
UNUSED = 0,
/* interrupt sources */
IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH,
IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH,
IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH,
IRL_HHLL, IRL_HHLH, IRL_HHHL,
IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
RTC, WDT, TMU0, TMU1, TMU2, TMU2_TICPI,
HUDI, LCDC, DMAC, SCIF0, IIC0, IIC1, CMT, GETHER, HAC,
PCISERR, PCIINTA, PCIINTB, PCIINTC, PCIINTD, PCIC5,
STIF0, STIF1, SCIF1, SIOF0, SIOF1, SIOF2,
USBH, USBF, TPU, PCC, MMCIF, SIM,
TMU3, TMU4, TMU5, ADC, SSI0, SSI1, SSI2, SSI3,
SCIF2, GPIO,
/* interrupt groups */
TMU012, TMU345,
};
static struct intc_vect vectors[] __initdata = {
INTC_VECT(RTC, 0x480), INTC_VECT(RTC, 0x4a0),
INTC_VECT(RTC, 0x4c0),
INTC_VECT(WDT, 0x560), INTC_VECT(TMU0, 0x580),
INTC_VECT(TMU1, 0x5a0), INTC_VECT(TMU2, 0x5c0),
INTC_VECT(TMU2_TICPI, 0x5e0), INTC_VECT(HUDI, 0x600),
INTC_VECT(LCDC, 0x620),
INTC_VECT(DMAC, 0x640), INTC_VECT(DMAC, 0x660),
INTC_VECT(DMAC, 0x680), INTC_VECT(DMAC, 0x6a0),
INTC_VECT(DMAC, 0x6c0),
INTC_VECT(SCIF0, 0x700), INTC_VECT(SCIF0, 0x720),
INTC_VECT(SCIF0, 0x740), INTC_VECT(SCIF0, 0x760),
INTC_VECT(DMAC, 0x780), INTC_VECT(DMAC, 0x7a0),
INTC_VECT(IIC0, 0x8A0), INTC_VECT(IIC1, 0x8C0),
INTC_VECT(CMT, 0x900), INTC_VECT(GETHER, 0x920),
INTC_VECT(GETHER, 0x940), INTC_VECT(GETHER, 0x960),
INTC_VECT(HAC, 0x980),
INTC_VECT(PCISERR, 0xa00), INTC_VECT(PCIINTA, 0xa20),
INTC_VECT(PCIINTB, 0xa40), INTC_VECT(PCIINTC, 0xa60),
INTC_VECT(PCIINTD, 0xa80), INTC_VECT(PCIC5, 0xaa0),
INTC_VECT(PCIC5, 0xac0), INTC_VECT(PCIC5, 0xae0),
INTC_VECT(PCIC5, 0xb00), INTC_VECT(PCIC5, 0xb20),
INTC_VECT(STIF0, 0xb40), INTC_VECT(STIF1, 0xb60),
INTC_VECT(SCIF1, 0xb80), INTC_VECT(SCIF1, 0xba0),
INTC_VECT(SCIF1, 0xbc0), INTC_VECT(SCIF1, 0xbe0),
INTC_VECT(SIOF0, 0xc00), INTC_VECT(SIOF1, 0xc20),
INTC_VECT(USBH, 0xc60), INTC_VECT(USBF, 0xc80),
INTC_VECT(USBF, 0xca0),
INTC_VECT(TPU, 0xcc0), INTC_VECT(PCC, 0xce0),
INTC_VECT(MMCIF, 0xd00), INTC_VECT(MMCIF, 0xd20),
INTC_VECT(MMCIF, 0xd40), INTC_VECT(MMCIF, 0xd60),
INTC_VECT(SIM, 0xd80), INTC_VECT(SIM, 0xda0),
INTC_VECT(SIM, 0xdc0), INTC_VECT(SIM, 0xde0),
INTC_VECT(TMU3, 0xe00), INTC_VECT(TMU4, 0xe20),
INTC_VECT(TMU5, 0xe40), INTC_VECT(ADC, 0xe60),
INTC_VECT(SSI0, 0xe80), INTC_VECT(SSI1, 0xea0),
INTC_VECT(SSI2, 0xec0), INTC_VECT(SSI3, 0xee0),
INTC_VECT(SCIF2, 0xf00), INTC_VECT(SCIF2, 0xf20),
INTC_VECT(SCIF2, 0xf40), INTC_VECT(SCIF2, 0xf60),
INTC_VECT(GPIO, 0xf80), INTC_VECT(GPIO, 0xfa0),
INTC_VECT(GPIO, 0xfc0), INTC_VECT(GPIO, 0xfe0),
};
static struct intc_group groups[] __initdata = {
INTC_GROUP(TMU012, TMU0, TMU1, TMU2, TMU2_TICPI),
INTC_GROUP(TMU345, TMU3, TMU4, TMU5),
};
static struct intc_mask_reg mask_registers[] __initdata = {
{ 0xffd40038, 0xffd4003c, 32, /* INT2MSKR / INT2MSKCR */
{ 0, 0, 0, 0, 0, 0, GPIO, 0,
SSI0, MMCIF, 0, SIOF0, PCIC5, PCIINTD, PCIINTC, PCIINTB,
PCIINTA, PCISERR, HAC, CMT, 0, 0, 0, DMAC,
HUDI, 0, WDT, SCIF1, SCIF0, RTC, TMU345, TMU012 } },
{ 0xffd400d0, 0xffd400d4, 32, /* INT2MSKR1 / INT2MSKCR1 */
{ 0, 0, 0, 0, 0, 0, SCIF2, USBF,
0, 0, STIF1, STIF0, 0, 0, USBH, GETHER,
PCC, 0, 0, ADC, TPU, SIM, SIOF2, SIOF1,
LCDC, 0, IIC1, IIC0, SSI3, SSI2, SSI1, 0 } },
};
static struct intc_prio_reg prio_registers[] __initdata = {
{ 0xffd40000, 0, 32, 8, /* INT2PRI0 */ { TMU0, TMU1,
TMU2, TMU2_TICPI } },
{ 0xffd40004, 0, 32, 8, /* INT2PRI1 */ { TMU3, TMU4, TMU5, RTC } },
{ 0xffd40008, 0, 32, 8, /* INT2PRI2 */ { SCIF0, SCIF1, WDT } },
{ 0xffd4000c, 0, 32, 8, /* INT2PRI3 */ { HUDI, DMAC, ADC } },
{ 0xffd40010, 0, 32, 8, /* INT2PRI4 */ { CMT, HAC,
PCISERR, PCIINTA } },
{ 0xffd40014, 0, 32, 8, /* INT2PRI5 */ { PCIINTB, PCIINTC,
PCIINTD, PCIC5 } },
{ 0xffd40018, 0, 32, 8, /* INT2PRI6 */ { SIOF0, USBF, MMCIF, SSI0 } },
{ 0xffd4001c, 0, 32, 8, /* INT2PRI7 */ { SCIF2, GPIO } },
{ 0xffd400a0, 0, 32, 8, /* INT2PRI8 */ { SSI3, SSI2, SSI1, 0 } },
{ 0xffd400a4, 0, 32, 8, /* INT2PRI9 */ { LCDC, 0, IIC1, IIC0 } },
{ 0xffd400a8, 0, 32, 8, /* INT2PRI10 */ { TPU, SIM, SIOF2, SIOF1 } },
{ 0xffd400ac, 0, 32, 8, /* INT2PRI11 */ { PCC } },
{ 0xffd400b0, 0, 32, 8, /* INT2PRI12 */ { 0, 0, USBH, GETHER } },
{ 0xffd400b4, 0, 32, 8, /* INT2PRI13 */ { 0, 0, STIF1, STIF0 } },
};
static DECLARE_INTC_DESC(intc_desc, "sh7763", vectors, groups,
mask_registers, prio_registers, NULL);
/* Support for external interrupt pins in IRQ mode */
static struct intc_vect irq_vectors[] __initdata = {
INTC_VECT(IRQ0, 0x240), INTC_VECT(IRQ1, 0x280),
INTC_VECT(IRQ2, 0x2c0), INTC_VECT(IRQ3, 0x300),
INTC_VECT(IRQ4, 0x340), INTC_VECT(IRQ5, 0x380),
INTC_VECT(IRQ6, 0x3c0), INTC_VECT(IRQ7, 0x200),
};
static struct intc_mask_reg irq_mask_registers[] __initdata = {
{ 0xffd00044, 0xffd00064, 32, /* INTMSK0 / INTMSKCLR0 */
{ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
};
static struct intc_prio_reg irq_prio_registers[] __initdata = {
{ 0xffd00010, 0, 32, 4, /* INTPRI */ { IRQ0, IRQ1, IRQ2, IRQ3,
IRQ4, IRQ5, IRQ6, IRQ7 } },
};
static struct intc_sense_reg irq_sense_registers[] __initdata = {
{ 0xffd0001c, 32, 2, /* ICR1 */ { IRQ0, IRQ1, IRQ2, IRQ3,
IRQ4, IRQ5, IRQ6, IRQ7 } },
};
static struct intc_mask_reg irq_ack_registers[] __initdata = {
{ 0xffd00024, 0, 32, /* INTREQ */
{ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
};
static DECLARE_INTC_DESC_ACK(intc_irq_desc, "sh7763-irq", irq_vectors,
NULL, irq_mask_registers, irq_prio_registers,
irq_sense_registers, irq_ack_registers);
/* External interrupt pins in IRL mode */
static struct intc_vect irl_vectors[] __initdata = {
INTC_VECT(IRL_LLLL, 0x200), INTC_VECT(IRL_LLLH, 0x220),
INTC_VECT(IRL_LLHL, 0x240), INTC_VECT(IRL_LLHH, 0x260),
INTC_VECT(IRL_LHLL, 0x280), INTC_VECT(IRL_LHLH, 0x2a0),
INTC_VECT(IRL_LHHL, 0x2c0), INTC_VECT(IRL_LHHH, 0x2e0),
INTC_VECT(IRL_HLLL, 0x300), INTC_VECT(IRL_HLLH, 0x320),
INTC_VECT(IRL_HLHL, 0x340), INTC_VECT(IRL_HLHH, 0x360),
INTC_VECT(IRL_HHLL, 0x380), INTC_VECT(IRL_HHLH, 0x3a0),
INTC_VECT(IRL_HHHL, 0x3c0),
};
static struct intc_mask_reg irl3210_mask_registers[] __initdata = {
{ 0xffd40080, 0xffd40084, 32, /* INTMSK2 / INTMSKCLR2 */
{ IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH,
IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH,
IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH,
IRL_HHLL, IRL_HHLH, IRL_HHHL, } },
};
static struct intc_mask_reg irl7654_mask_registers[] __initdata = {
{ 0xffd40080, 0xffd40084, 32, /* INTMSK2 / INTMSKCLR2 */
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH,
IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH,
IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH,
IRL_HHLL, IRL_HHLH, IRL_HHHL, } },
};
static DECLARE_INTC_DESC(intc_irl7654_desc, "sh7763-irl7654", irl_vectors,
NULL, irl7654_mask_registers, NULL, NULL);
static DECLARE_INTC_DESC(intc_irl3210_desc, "sh7763-irl3210", irl_vectors,
NULL, irl3210_mask_registers, NULL, NULL);
#define INTC_ICR0 0xffd00000
#define INTC_INTMSK0 0xffd00044
#define INTC_INTMSK1 0xffd00048
#define INTC_INTMSK2 0xffd40080
#define INTC_INTMSKCLR1 0xffd00068
#define INTC_INTMSKCLR2 0xffd40084
void __init plat_irq_setup(void)
{
/* disable IRQ7-0 */
__raw_writel(0xff000000, INTC_INTMSK0);
/* disable IRL3-0 + IRL7-4 */
__raw_writel(0xc0000000, INTC_INTMSK1);
__raw_writel(0xfffefffe, INTC_INTMSK2);
register_intc_controller(&intc_desc);
}
void __init plat_irq_setup_pins(int mode)
{
switch (mode) {
case IRQ_MODE_IRQ:
/* select IRQ mode for IRL3-0 + IRL7-4 */
__raw_writel(__raw_readl(INTC_ICR0) | 0x00c00000, INTC_ICR0);
register_intc_controller(&intc_irq_desc);
break;
case IRQ_MODE_IRL7654:
/* enable IRL7-4 but don't provide any masking */
__raw_writel(0x40000000, INTC_INTMSKCLR1);
__raw_writel(0x0000fffe, INTC_INTMSKCLR2);
break;
case IRQ_MODE_IRL3210:
/* enable IRL0-3 but don't provide any masking */
__raw_writel(0x80000000, INTC_INTMSKCLR1);
__raw_writel(0xfffe0000, INTC_INTMSKCLR2);
break;
case IRQ_MODE_IRL7654_MASK:
/* enable IRL7-4 and mask using cpu intc controller */
__raw_writel(0x40000000, INTC_INTMSKCLR1);
register_intc_controller(&intc_irl7654_desc);
break;
case IRQ_MODE_IRL3210_MASK:
/* enable IRL0-3 and mask using cpu intc controller */
__raw_writel(0x80000000, INTC_INTMSKCLR1);
register_intc_controller(&intc_irl3210_desc);
break;
default:
BUG();
}
}
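/*
 * Exposition-only sketch: a board's IRQ init hook picks one of the
 * modes above, e.g. a hypothetical board wiring IRL3-0 as encoded
 * level inputs with INTC-based masking would call:
 *
 *	plat_irq_setup_pins(IRQ_MODE_IRL3210_MASK);
 */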
| linux-master | arch/sh/kernel/cpu/sh4a/setup-sh7763.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/cpu/sh4a/clock-sh7786.c
*
* SH7786 support for the clock framework
*
* Copyright (C) 2010 Paul Mundt
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/clkdev.h>
#include <asm/clock.h>
#include <asm/freq.h>
/*
* Default rate for the root input clock, reset this with clk_set_rate()
* from the platform code.
*/
static struct clk extal_clk = {
.rate = 33333333,
};
static unsigned long pll_recalc(struct clk *clk)
{
int multiplier;
/*
* Clock modes 0, 1, and 2 use an x64 multiplier against PLL1,
* while modes 3, 4, and 5 use an x32.
*/
multiplier = (sh_mv.mv_mode_pins() & 0xf) < 3 ? 64 : 32;
return clk->parent->rate * multiplier;
}
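/*
 * Worked example (added for exposition): in clock mode 0 with the
 * default 33.33 MHz extal, PLL1 comes out at 33333333 * 64, roughly
 * 2133 MHz, which the FRQMR1 dividers below bring back down to the
 * per-domain clock rates.
 */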
static struct sh_clk_ops pll_clk_ops = {
.recalc = pll_recalc,
};
static struct clk pll_clk = {
.ops = &pll_clk_ops,
.parent = &extal_clk,
.flags = CLK_ENABLE_ON_INIT,
};
static struct clk *clks[] = {
&extal_clk,
&pll_clk,
};
static unsigned int div2[] = { 1, 2, 4, 6, 8, 12, 16, 18,
24, 32, 36, 48 };
static struct clk_div_mult_table div4_div_mult_table = {
.divisors = div2,
.nr_divisors = ARRAY_SIZE(div2),
};
static struct clk_div4_table div4_table = {
.div_mult_table = &div4_div_mult_table,
};
enum { DIV4_I, DIV4_SH, DIV4_B, DIV4_DDR, DIV4_DU, DIV4_P, DIV4_NR };
#define DIV4(_bit, _mask, _flags) \
SH_CLK_DIV4(&pll_clk, FRQMR1, _bit, _mask, _flags)
struct clk div4_clks[DIV4_NR] = {
[DIV4_P] = DIV4(0, 0x0b40, 0),
[DIV4_DU] = DIV4(4, 0x0010, 0),
[DIV4_DDR] = DIV4(12, 0x0002, CLK_ENABLE_ON_INIT),
[DIV4_B] = DIV4(16, 0x0360, CLK_ENABLE_ON_INIT),
[DIV4_SH] = DIV4(20, 0x0002, CLK_ENABLE_ON_INIT),
[DIV4_I] = DIV4(28, 0x0006, CLK_ENABLE_ON_INIT),
};
#define MSTPCR0 0xffc40030
#define MSTPCR1 0xffc40034
enum { MSTP029, MSTP028, MSTP027, MSTP026, MSTP025, MSTP024,
MSTP023, MSTP022, MSTP021, MSTP020, MSTP017, MSTP016,
MSTP015, MSTP014, MSTP011, MSTP010, MSTP009, MSTP008,
MSTP005, MSTP004, MSTP002,
MSTP112, MSTP110, MSTP109, MSTP108,
MSTP105, MSTP104, MSTP103, MSTP102,
MSTP_NR };
static struct clk mstp_clks[MSTP_NR] = {
/* MSTPCR0 */
[MSTP029] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 29, 0),
[MSTP028] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 28, 0),
[MSTP027] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 27, 0),
[MSTP026] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 26, 0),
[MSTP025] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 25, 0),
[MSTP024] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 24, 0),
[MSTP023] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 23, 0),
[MSTP022] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 22, 0),
[MSTP021] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 21, 0),
[MSTP020] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 20, 0),
[MSTP017] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 17, 0),
[MSTP016] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 16, 0),
[MSTP015] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 15, 0),
[MSTP014] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 14, 0),
[MSTP011] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 11, 0),
[MSTP010] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 10, 0),
[MSTP009] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 9, 0),
[MSTP008] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 8, 0),
[MSTP005] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 5, 0),
[MSTP004] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 4, 0),
[MSTP002] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 2, 0),
/* MSTPCR1 */
[MSTP112] = SH_CLK_MSTP32(NULL, MSTPCR1, 12, 0),
[MSTP110] = SH_CLK_MSTP32(NULL, MSTPCR1, 10, 0),
[MSTP109] = SH_CLK_MSTP32(NULL, MSTPCR1, 9, 0),
[MSTP108] = SH_CLK_MSTP32(NULL, MSTPCR1, 8, 0),
[MSTP105] = SH_CLK_MSTP32(NULL, MSTPCR1, 5, 0),
[MSTP104] = SH_CLK_MSTP32(NULL, MSTPCR1, 4, 0),
[MSTP103] = SH_CLK_MSTP32(NULL, MSTPCR1, 3, 0),
[MSTP102] = SH_CLK_MSTP32(NULL, MSTPCR1, 2, 0),
};
static struct clk_lookup lookups[] = {
/* main clocks */
CLKDEV_CON_ID("extal", &extal_clk),
CLKDEV_CON_ID("pll_clk", &pll_clk),
/* DIV4 clocks */
CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
CLKDEV_CON_ID("du_clk", &div4_clks[DIV4_DU]),
CLKDEV_CON_ID("ddr_clk", &div4_clks[DIV4_DDR]),
CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]),
CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]),
CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
/* MSTP32 clocks */
CLKDEV_ICK_ID("fck", "sh-sci.5", &mstp_clks[MSTP029]),
CLKDEV_ICK_ID("fck", "sh-sci.4", &mstp_clks[MSTP028]),
CLKDEV_ICK_ID("fck", "sh-sci.3", &mstp_clks[MSTP027]),
CLKDEV_ICK_ID("fck", "sh-sci.2", &mstp_clks[MSTP026]),
CLKDEV_ICK_ID("fck", "sh-sci.1", &mstp_clks[MSTP025]),
CLKDEV_ICK_ID("fck", "sh-sci.0", &mstp_clks[MSTP024]),
CLKDEV_CON_ID("ssi3_fck", &mstp_clks[MSTP023]),
CLKDEV_CON_ID("ssi2_fck", &mstp_clks[MSTP022]),
CLKDEV_CON_ID("ssi1_fck", &mstp_clks[MSTP021]),
CLKDEV_CON_ID("ssi0_fck", &mstp_clks[MSTP020]),
CLKDEV_CON_ID("hac1_fck", &mstp_clks[MSTP017]),
CLKDEV_CON_ID("hac0_fck", &mstp_clks[MSTP016]),
CLKDEV_CON_ID("i2c1_fck", &mstp_clks[MSTP015]),
CLKDEV_CON_ID("i2c0_fck", &mstp_clks[MSTP014]),
CLKDEV_ICK_ID("fck", "sh-tmu.0", &mstp_clks[MSTP008]),
CLKDEV_ICK_ID("fck", "sh-tmu.1", &mstp_clks[MSTP009]),
CLKDEV_ICK_ID("fck", "sh-tmu.2", &mstp_clks[MSTP010]),
CLKDEV_ICK_ID("fck", "sh-tmu.3", &mstp_clks[MSTP011]),
CLKDEV_CON_ID("sdif1_fck", &mstp_clks[MSTP005]),
CLKDEV_CON_ID("sdif0_fck", &mstp_clks[MSTP004]),
CLKDEV_CON_ID("hspi_fck", &mstp_clks[MSTP002]),
CLKDEV_CON_ID("usb_fck", &mstp_clks[MSTP112]),
CLKDEV_CON_ID("pcie2_fck", &mstp_clks[MSTP110]),
CLKDEV_CON_ID("pcie1_fck", &mstp_clks[MSTP109]),
CLKDEV_CON_ID("pcie0_fck", &mstp_clks[MSTP108]),
CLKDEV_CON_ID("dmac_11_6_fck", &mstp_clks[MSTP105]),
CLKDEV_CON_ID("dmac_5_0_fck", &mstp_clks[MSTP104]),
CLKDEV_CON_ID("du_fck", &mstp_clks[MSTP103]),
CLKDEV_CON_ID("ether_fck", &mstp_clks[MSTP102]),
};
int __init arch_clk_init(void)
{
int i, ret = 0;
for (i = 0; i < ARRAY_SIZE(clks); i++)
ret |= clk_register(clks[i]);
clkdev_add_table(lookups, ARRAY_SIZE(lookups));
if (!ret)
ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),
&div4_table);
if (!ret)
ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
return ret;
}
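/*
 * Exposition-only sketch: as the comment on extal_clk notes, board
 * code with a different crystal overrides the root rate early, e.g.:
 *
 *	struct clk *extal = clk_get(NULL, "extal");
 *	if (!IS_ERR(extal))
 *		clk_set_rate(extal, 25000000);	(rate is board-specific)
 *
 * The new rate then propagates through pll_recalc() and the DIV4
 * dividers registered above.
 */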
| linux-master | arch/sh/kernel/cpu/sh4a/clock-sh7786.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SH7780 Setup
*
* Copyright (C) 2006 Paul Mundt
*/
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/io.h>
#include <linux/serial_sci.h>
#include <linux/sh_dma.h>
#include <linux/sh_timer.h>
#include <linux/sh_intc.h>
#include <cpu/dma-register.h>
#include <asm/platform_early.h>
static struct plat_sci_port scif0_platform_data = {
.scscr = SCSCR_REIE | SCSCR_CKE1,
.type = PORT_SCIF,
.regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
};
static struct resource scif0_resources[] = {
DEFINE_RES_MEM(0xffe00000, 0x100),
DEFINE_RES_IRQ(evt2irq(0x700)),
};
static struct platform_device scif0_device = {
.name = "sh-sci",
.id = 0,
.resource = scif0_resources,
.num_resources = ARRAY_SIZE(scif0_resources),
.dev = {
.platform_data = &scif0_platform_data,
},
};
static struct plat_sci_port scif1_platform_data = {
.scscr = SCSCR_REIE | SCSCR_CKE1,
.type = PORT_SCIF,
.regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
};
static struct resource scif1_resources[] = {
DEFINE_RES_MEM(0xffe10000, 0x100),
DEFINE_RES_IRQ(evt2irq(0xb80)),
};
static struct platform_device scif1_device = {
.name = "sh-sci",
.id = 1,
.resource = scif1_resources,
.num_resources = ARRAY_SIZE(scif1_resources),
.dev = {
.platform_data = &scif1_platform_data,
},
};
static struct sh_timer_config tmu0_platform_data = {
.channels_mask = 7,
};
static struct resource tmu0_resources[] = {
DEFINE_RES_MEM(0xffd80000, 0x30),
DEFINE_RES_IRQ(evt2irq(0x580)),
DEFINE_RES_IRQ(evt2irq(0x5a0)),
DEFINE_RES_IRQ(evt2irq(0x5c0)),
};
static struct platform_device tmu0_device = {
.name = "sh-tmu",
.id = 0,
.dev = {
.platform_data = &tmu0_platform_data,
},
.resource = tmu0_resources,
.num_resources = ARRAY_SIZE(tmu0_resources),
};
static struct sh_timer_config tmu1_platform_data = {
.channels_mask = 7,
};
static struct resource tmu1_resources[] = {
DEFINE_RES_MEM(0xffdc0000, 0x2c),
DEFINE_RES_IRQ(evt2irq(0xe00)),
DEFINE_RES_IRQ(evt2irq(0xe20)),
DEFINE_RES_IRQ(evt2irq(0xe40)),
};
static struct platform_device tmu1_device = {
.name = "sh-tmu",
.id = 1,
.dev = {
.platform_data = &tmu1_platform_data,
},
.resource = tmu1_resources,
.num_resources = ARRAY_SIZE(tmu1_resources),
};
static struct resource rtc_resources[] = {
[0] = {
.start = 0xffe80000,
.end = 0xffe80000 + 0x58 - 1,
.flags = IORESOURCE_IO,
},
[1] = {
/* Shared Period/Carry/Alarm IRQ */
.start = evt2irq(0x480),
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device rtc_device = {
.name = "sh-rtc",
.id = -1,
.num_resources = ARRAY_SIZE(rtc_resources),
.resource = rtc_resources,
};
/* DMA */
static const struct sh_dmae_channel sh7780_dmae0_channels[] = {
{
.offset = 0,
.dmars = 0,
.dmars_bit = 0,
}, {
.offset = 0x10,
.dmars = 0,
.dmars_bit = 8,
}, {
.offset = 0x20,
.dmars = 4,
.dmars_bit = 0,
}, {
.offset = 0x30,
.dmars = 4,
.dmars_bit = 8,
}, {
.offset = 0x50,
.dmars = 8,
.dmars_bit = 0,
}, {
.offset = 0x60,
.dmars = 8,
.dmars_bit = 8,
}
};
static const struct sh_dmae_channel sh7780_dmae1_channels[] = {
{
.offset = 0,
}, {
.offset = 0x10,
}, {
.offset = 0x20,
}, {
.offset = 0x30,
}, {
.offset = 0x50,
}, {
.offset = 0x60,
}
};
static const unsigned int ts_shift[] = TS_SHIFT;
static struct sh_dmae_pdata dma0_platform_data = {
.channel = sh7780_dmae0_channels,
.channel_num = ARRAY_SIZE(sh7780_dmae0_channels),
.ts_low_shift = CHCR_TS_LOW_SHIFT,
.ts_low_mask = CHCR_TS_LOW_MASK,
.ts_high_shift = CHCR_TS_HIGH_SHIFT,
.ts_high_mask = CHCR_TS_HIGH_MASK,
.ts_shift = ts_shift,
.ts_shift_num = ARRAY_SIZE(ts_shift),
.dmaor_init = DMAOR_INIT,
};
static struct sh_dmae_pdata dma1_platform_data = {
.channel = sh7780_dmae1_channels,
.channel_num = ARRAY_SIZE(sh7780_dmae1_channels),
.ts_low_shift = CHCR_TS_LOW_SHIFT,
.ts_low_mask = CHCR_TS_LOW_MASK,
.ts_high_shift = CHCR_TS_HIGH_SHIFT,
.ts_high_mask = CHCR_TS_HIGH_MASK,
.ts_shift = ts_shift,
.ts_shift_num = ARRAY_SIZE(ts_shift),
.dmaor_init = DMAOR_INIT,
};
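/*
 * Note (added for exposition): the shdma engine driver reassembles the
 * split CHCR transfer-size field using the ts_low/ts_high shift+mask
 * pairs above; the reassembled value indexes ts_shift[], whose entries
 * give log2 of the transfer width in bytes.
 */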
static struct resource sh7780_dmae0_resources[] = {
[0] = {
/* Channel registers and DMAOR */
.start = 0xfc808020,
.end = 0xfc80808f,
.flags = IORESOURCE_MEM,
},
[1] = {
/* DMARSx */
.start = 0xfc809000,
.end = 0xfc80900b,
.flags = IORESOURCE_MEM,
},
{
/*
* Real DMA error vector is 0x6c0, and channel
* vectors are 0x640-0x6a0, 0x780-0x7a0
*/
.name = "error_irq",
.start = evt2irq(0x640),
.end = evt2irq(0x640),
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
},
};
static struct resource sh7780_dmae1_resources[] = {
[0] = {
/* Channel registers and DMAOR */
.start = 0xfc818020,
.end = 0xfc81808f,
.flags = IORESOURCE_MEM,
},
/* DMAC1 has no DMARS */
{
/*
* Real DMA error vector is 0x6c0, and channel
* vectors are 0x7c0-0x7e0, 0xd80-0xde0
*/
.name = "error_irq",
.start = evt2irq(0x7c0),
.end = evt2irq(0x7c0),
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
},
};
static struct platform_device dma0_device = {
.name = "sh-dma-engine",
.id = 0,
.resource = sh7780_dmae0_resources,
.num_resources = ARRAY_SIZE(sh7780_dmae0_resources),
.dev = {
.platform_data = &dma0_platform_data,
},
};
static struct platform_device dma1_device = {
.name = "sh-dma-engine",
.id = 1,
.resource = sh7780_dmae1_resources,
.num_resources = ARRAY_SIZE(sh7780_dmae1_resources),
.dev = {
.platform_data = &dma1_platform_data,
},
};
static struct platform_device *sh7780_devices[] __initdata = {
&scif0_device,
&scif1_device,
&tmu0_device,
&tmu1_device,
&rtc_device,
&dma0_device,
&dma1_device,
};
static int __init sh7780_devices_setup(void)
{
return platform_add_devices(sh7780_devices,
ARRAY_SIZE(sh7780_devices));
}
arch_initcall(sh7780_devices_setup);
static struct platform_device *sh7780_early_devices[] __initdata = {
&scif0_device,
&scif1_device,
&tmu0_device,
&tmu1_device,
};
void __init plat_early_device_setup(void)
{
if (mach_is_sh2007()) {
scif0_platform_data.scscr &= ~SCSCR_CKE1;
scif1_platform_data.scscr &= ~SCSCR_CKE1;
}
sh_early_platform_add_devices(sh7780_early_devices,
ARRAY_SIZE(sh7780_early_devices));
}
enum {
UNUSED = 0,
/* interrupt sources */
IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH,
IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH,
IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH,
IRL_HHLL, IRL_HHLH, IRL_HHHL,
IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
RTC, WDT, TMU0, TMU1, TMU2, TMU2_TICPI,
HUDI, DMAC0, SCIF0, DMAC1, CMT, HAC,
PCISERR, PCIINTA, PCIINTB, PCIINTC, PCIINTD, PCIC5,
SCIF1, SIOF, HSPI, MMCIF, TMU3, TMU4, TMU5, SSI, FLCTL, GPIO,
/* interrupt groups */
TMU012, TMU345,
};
static struct intc_vect vectors[] __initdata = {
INTC_VECT(RTC, 0x480), INTC_VECT(RTC, 0x4a0),
INTC_VECT(RTC, 0x4c0),
INTC_VECT(WDT, 0x560),
INTC_VECT(TMU0, 0x580), INTC_VECT(TMU1, 0x5a0),
INTC_VECT(TMU2, 0x5c0), INTC_VECT(TMU2_TICPI, 0x5e0),
INTC_VECT(HUDI, 0x600),
INTC_VECT(DMAC0, 0x640), INTC_VECT(DMAC0, 0x660),
INTC_VECT(DMAC0, 0x680), INTC_VECT(DMAC0, 0x6a0),
INTC_VECT(DMAC0, 0x6c0),
INTC_VECT(SCIF0, 0x700), INTC_VECT(SCIF0, 0x720),
INTC_VECT(SCIF0, 0x740), INTC_VECT(SCIF0, 0x760),
INTC_VECT(DMAC0, 0x780), INTC_VECT(DMAC0, 0x7a0),
INTC_VECT(DMAC1, 0x7c0), INTC_VECT(DMAC1, 0x7e0),
INTC_VECT(CMT, 0x900), INTC_VECT(HAC, 0x980),
INTC_VECT(PCISERR, 0xa00), INTC_VECT(PCIINTA, 0xa20),
INTC_VECT(PCIINTB, 0xa40), INTC_VECT(PCIINTC, 0xa60),
INTC_VECT(PCIINTD, 0xa80), INTC_VECT(PCIC5, 0xaa0),
INTC_VECT(PCIC5, 0xac0), INTC_VECT(PCIC5, 0xae0),
INTC_VECT(PCIC5, 0xb00), INTC_VECT(PCIC5, 0xb20),
INTC_VECT(SCIF1, 0xb80), INTC_VECT(SCIF1, 0xba0),
INTC_VECT(SCIF1, 0xbc0), INTC_VECT(SCIF1, 0xbe0),
INTC_VECT(SIOF, 0xc00), INTC_VECT(HSPI, 0xc80),
INTC_VECT(MMCIF, 0xd00), INTC_VECT(MMCIF, 0xd20),
INTC_VECT(MMCIF, 0xd40), INTC_VECT(MMCIF, 0xd60),
INTC_VECT(DMAC1, 0xd80), INTC_VECT(DMAC1, 0xda0),
INTC_VECT(DMAC1, 0xdc0), INTC_VECT(DMAC1, 0xde0),
INTC_VECT(TMU3, 0xe00), INTC_VECT(TMU4, 0xe20),
INTC_VECT(TMU5, 0xe40),
INTC_VECT(SSI, 0xe80),
INTC_VECT(FLCTL, 0xf00), INTC_VECT(FLCTL, 0xf20),
INTC_VECT(FLCTL, 0xf40), INTC_VECT(FLCTL, 0xf60),
INTC_VECT(GPIO, 0xf80), INTC_VECT(GPIO, 0xfa0),
INTC_VECT(GPIO, 0xfc0), INTC_VECT(GPIO, 0xfe0),
};
static struct intc_group groups[] __initdata = {
INTC_GROUP(TMU012, TMU0, TMU1, TMU2, TMU2_TICPI),
INTC_GROUP(TMU345, TMU3, TMU4, TMU5),
};
static struct intc_mask_reg mask_registers[] __initdata = {
{ 0xffd40038, 0xffd4003c, 32, /* INT2MSKR / INT2MSKCR */
{ 0, 0, 0, 0, 0, 0, GPIO, FLCTL,
SSI, MMCIF, HSPI, SIOF, PCIC5, PCIINTD, PCIINTC, PCIINTB,
PCIINTA, PCISERR, HAC, CMT, 0, 0, DMAC1, DMAC0,
HUDI, 0, WDT, SCIF1, SCIF0, RTC, TMU345, TMU012 } },
};
static struct intc_prio_reg prio_registers[] __initdata = {
{ 0xffd40000, 0, 32, 8, /* INT2PRI0 */ { TMU0, TMU1,
TMU2, TMU2_TICPI } },
{ 0xffd40004, 0, 32, 8, /* INT2PRI1 */ { TMU3, TMU4, TMU5, RTC } },
{ 0xffd40008, 0, 32, 8, /* INT2PRI2 */ { SCIF0, SCIF1, WDT } },
{ 0xffd4000c, 0, 32, 8, /* INT2PRI3 */ { HUDI, DMAC0, DMAC1 } },
{ 0xffd40010, 0, 32, 8, /* INT2PRI4 */ { CMT, HAC,
PCISERR, PCIINTA, } },
{ 0xffd40014, 0, 32, 8, /* INT2PRI5 */ { PCIINTB, PCIINTC,
PCIINTD, PCIC5 } },
{ 0xffd40018, 0, 32, 8, /* INT2PRI6 */ { SIOF, HSPI, MMCIF, SSI } },
{ 0xffd4001c, 0, 32, 8, /* INT2PRI7 */ { FLCTL, GPIO } },
};
static DECLARE_INTC_DESC(intc_desc, "sh7780", vectors, groups,
mask_registers, prio_registers, NULL);
/* Support for external interrupt pins in IRQ mode */
static struct intc_vect irq_vectors[] __initdata = {
INTC_VECT(IRQ0, 0x240), INTC_VECT(IRQ1, 0x280),
INTC_VECT(IRQ2, 0x2c0), INTC_VECT(IRQ3, 0x300),
INTC_VECT(IRQ4, 0x340), INTC_VECT(IRQ5, 0x380),
INTC_VECT(IRQ6, 0x3c0), INTC_VECT(IRQ7, 0x200),
};
static struct intc_mask_reg irq_mask_registers[] __initdata = {
{ 0xffd00044, 0xffd00064, 32, /* INTMSK0 / INTMSKCLR0 */
{ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
};
static struct intc_prio_reg irq_prio_registers[] __initdata = {
{ 0xffd00010, 0, 32, 4, /* INTPRI */ { IRQ0, IRQ1, IRQ2, IRQ3,
IRQ4, IRQ5, IRQ6, IRQ7 } },
};
static struct intc_sense_reg irq_sense_registers[] __initdata = {
{ 0xffd0001c, 32, 2, /* ICR1 */ { IRQ0, IRQ1, IRQ2, IRQ3,
IRQ4, IRQ5, IRQ6, IRQ7 } },
};
static struct intc_mask_reg irq_ack_registers[] __initdata = {
{ 0xffd00024, 0, 32, /* INTREQ */
{ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
};
static DECLARE_INTC_DESC_ACK(intc_irq_desc, "sh7780-irq", irq_vectors,
NULL, irq_mask_registers, irq_prio_registers,
irq_sense_registers, irq_ack_registers);
/* External interrupt pins in IRL mode */
static struct intc_vect irl_vectors[] __initdata = {
INTC_VECT(IRL_LLLL, 0x200), INTC_VECT(IRL_LLLH, 0x220),
INTC_VECT(IRL_LLHL, 0x240), INTC_VECT(IRL_LLHH, 0x260),
INTC_VECT(IRL_LHLL, 0x280), INTC_VECT(IRL_LHLH, 0x2a0),
INTC_VECT(IRL_LHHL, 0x2c0), INTC_VECT(IRL_LHHH, 0x2e0),
INTC_VECT(IRL_HLLL, 0x300), INTC_VECT(IRL_HLLH, 0x320),
INTC_VECT(IRL_HLHL, 0x340), INTC_VECT(IRL_HLHH, 0x360),
INTC_VECT(IRL_HHLL, 0x380), INTC_VECT(IRL_HHLH, 0x3a0),
INTC_VECT(IRL_HHHL, 0x3c0),
};
static struct intc_mask_reg irl3210_mask_registers[] __initdata = {
{ 0xffd40080, 0xffd40084, 32, /* INTMSK2 / INTMSKCLR2 */
{ IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH,
IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH,
IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH,
IRL_HHLL, IRL_HHLH, IRL_HHHL, } },
};
static struct intc_mask_reg irl7654_mask_registers[] __initdata = {
{ 0xffd40080, 0xffd40084, 32, /* INTMSK2 / INTMSKCLR2 */
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH,
IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH,
IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH,
IRL_HHLL, IRL_HHLH, IRL_HHHL, } },
};
static DECLARE_INTC_DESC(intc_irl7654_desc, "sh7780-irl7654", irl_vectors,
NULL, irl7654_mask_registers, NULL, NULL);
static DECLARE_INTC_DESC(intc_irl3210_desc, "sh7780-irl3210", irl_vectors,
NULL, irl3210_mask_registers, NULL, NULL);
#define INTC_ICR0 0xffd00000
#define INTC_INTMSK0 0xffd00044
#define INTC_INTMSK1 0xffd00048
#define INTC_INTMSK2 0xffd40080
#define INTC_INTMSKCLR1 0xffd00068
#define INTC_INTMSKCLR2 0xffd40084
void __init plat_irq_setup(void)
{
/* disable IRQ7-0 */
__raw_writel(0xff000000, INTC_INTMSK0);
/* disable IRL3-0 + IRL7-4 */
__raw_writel(0xc0000000, INTC_INTMSK1);
__raw_writel(0xfffefffe, INTC_INTMSK2);
/* select IRL mode for IRL3-0 + IRL7-4 */
__raw_writel(__raw_readl(INTC_ICR0) & ~0x00c00000, INTC_ICR0);
/* disable holding function, i.e. enable "SH-4 Mode" */
__raw_writel(__raw_readl(INTC_ICR0) | 0x00200000, INTC_ICR0);
register_intc_controller(&intc_desc);
}
void __init plat_irq_setup_pins(int mode)
{
switch (mode) {
case IRQ_MODE_IRQ:
/* select IRQ mode for IRL3-0 + IRL7-4 */
__raw_writel(__raw_readl(INTC_ICR0) | 0x00c00000, INTC_ICR0);
register_intc_controller(&intc_irq_desc);
break;
case IRQ_MODE_IRL7654:
/* enable IRL7-4 but don't provide any masking */
__raw_writel(0x40000000, INTC_INTMSKCLR1);
__raw_writel(0x0000fffe, INTC_INTMSKCLR2);
break;
case IRQ_MODE_IRL3210:
/* enable IRL0-3 but don't provide any masking */
__raw_writel(0x80000000, INTC_INTMSKCLR1);
__raw_writel(0xfffe0000, INTC_INTMSKCLR2);
break;
case IRQ_MODE_IRL7654_MASK:
/* enable IRL7-4 and mask using cpu intc controller */
__raw_writel(0x40000000, INTC_INTMSKCLR1);
register_intc_controller(&intc_irl7654_desc);
break;
case IRQ_MODE_IRL3210_MASK:
/* enable IRL0-3 and mask using cpu intc controller */
__raw_writel(0x80000000, INTC_INTMSKCLR1);
register_intc_controller(&intc_irl3210_desc);
break;
default:
BUG();
}
}
| linux-master | arch/sh/kernel/cpu/sh4a/setup-sh7780.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SH7723 Setup
*
* Copyright (C) 2008 Paul Mundt
*/
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/mm.h>
#include <linux/serial_sci.h>
#include <linux/uio_driver.h>
#include <linux/usb/r8a66597.h>
#include <linux/sh_timer.h>
#include <linux/sh_intc.h>
#include <linux/io.h>
#include <asm/clock.h>
#include <asm/mmzone.h>
#include <asm/platform_early.h>
#include <cpu/sh7723.h>
/* Serial */
static struct plat_sci_port scif0_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
.regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE,
};
static struct resource scif0_resources[] = {
DEFINE_RES_MEM(0xffe00000, 0x100),
DEFINE_RES_IRQ(evt2irq(0xc00)),
};
static struct platform_device scif0_device = {
.name = "sh-sci",
.id = 0,
.resource = scif0_resources,
.num_resources = ARRAY_SIZE(scif0_resources),
.dev = {
.platform_data = &scif0_platform_data,
},
};
static struct plat_sci_port scif1_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
.regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE,
};
static struct resource scif1_resources[] = {
DEFINE_RES_MEM(0xffe10000, 0x100),
DEFINE_RES_IRQ(evt2irq(0xc20)),
};
static struct platform_device scif1_device = {
.name = "sh-sci",
.id = 1,
.resource = scif1_resources,
.num_resources = ARRAY_SIZE(scif1_resources),
.dev = {
.platform_data = &scif1_platform_data,
},
};
static struct plat_sci_port scif2_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
.regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE,
};
static struct resource scif2_resources[] = {
DEFINE_RES_MEM(0xffe20000, 0x100),
DEFINE_RES_IRQ(evt2irq(0xc40)),
};
static struct platform_device scif2_device = {
.name = "sh-sci",
.id = 2,
.resource = scif2_resources,
.num_resources = ARRAY_SIZE(scif2_resources),
.dev = {
.platform_data = &scif2_platform_data,
},
};
static struct plat_sci_port scif3_platform_data = {
.sampling_rate = 8,
.type = PORT_SCIFA,
};
static struct resource scif3_resources[] = {
DEFINE_RES_MEM(0xa4e30000, 0x100),
DEFINE_RES_IRQ(evt2irq(0x900)),
};
static struct platform_device scif3_device = {
.name = "sh-sci",
.id = 3,
.resource = scif3_resources,
.num_resources = ARRAY_SIZE(scif3_resources),
.dev = {
.platform_data = &scif3_platform_data,
},
};
static struct plat_sci_port scif4_platform_data = {
.sampling_rate = 8,
.type = PORT_SCIFA,
};
static struct resource scif4_resources[] = {
DEFINE_RES_MEM(0xa4e40000, 0x100),
DEFINE_RES_IRQ(evt2irq(0xd00)),
};
static struct platform_device scif4_device = {
.name = "sh-sci",
.id = 4,
.resource = scif4_resources,
.num_resources = ARRAY_SIZE(scif4_resources),
.dev = {
.platform_data = &scif4_platform_data,
},
};
static struct plat_sci_port scif5_platform_data = {
.sampling_rate = 8,
.type = PORT_SCIFA,
};
static struct resource scif5_resources[] = {
DEFINE_RES_MEM(0xa4e50000, 0x100),
DEFINE_RES_IRQ(evt2irq(0xfa0)),
};
static struct platform_device scif5_device = {
.name = "sh-sci",
.id = 5,
.resource = scif5_resources,
.num_resources = ARRAY_SIZE(scif5_resources),
.dev = {
.platform_data = &scif5_platform_data,
},
};
static struct uio_info vpu_platform_data = {
.name = "VPU5",
.version = "0",
.irq = evt2irq(0x980),
};
static struct resource vpu_resources[] = {
[0] = {
.name = "VPU",
.start = 0xfe900000,
.end = 0xfe902807,
.flags = IORESOURCE_MEM,
},
[1] = {
/* placeholder for contiguous memory */
},
};
static struct platform_device vpu_device = {
.name = "uio_pdrv_genirq",
.id = 0,
.dev = {
.platform_data = &vpu_platform_data,
},
.resource = vpu_resources,
.num_resources = ARRAY_SIZE(vpu_resources),
};
static struct uio_info veu0_platform_data = {
.name = "VEU2H",
.version = "0",
.irq = evt2irq(0x8c0),
};
static struct resource veu0_resources[] = {
[0] = {
.name = "VEU2H0",
.start = 0xfe920000,
.end = 0xfe92027b,
.flags = IORESOURCE_MEM,
},
[1] = {
/* placeholder for contiguous memory */
},
};
static struct platform_device veu0_device = {
.name = "uio_pdrv_genirq",
.id = 1,
.dev = {
.platform_data = &veu0_platform_data,
},
.resource = veu0_resources,
.num_resources = ARRAY_SIZE(veu0_resources),
};
static struct uio_info veu1_platform_data = {
.name = "VEU2H",
.version = "0",
.irq = evt2irq(0x560),
};
static struct resource veu1_resources[] = {
[0] = {
.name = "VEU2H1",
.start = 0xfe924000,
.end = 0xfe92427b,
.flags = IORESOURCE_MEM,
},
[1] = {
/* placeholder for contiguous memory */
},
};
static struct platform_device veu1_device = {
.name = "uio_pdrv_genirq",
.id = 2,
.dev = {
.platform_data = &veu1_platform_data,
},
.resource = veu1_resources,
.num_resources = ARRAY_SIZE(veu1_resources),
};
static struct sh_timer_config cmt_platform_data = {
.channels_mask = 0x20,
};
static struct resource cmt_resources[] = {
DEFINE_RES_MEM(0x044a0000, 0x70),
DEFINE_RES_IRQ(evt2irq(0xf00)),
};
static struct platform_device cmt_device = {
.name = "sh-cmt-32",
.id = 0,
.dev = {
.platform_data = &cmt_platform_data,
},
.resource = cmt_resources,
.num_resources = ARRAY_SIZE(cmt_resources),
};
static struct sh_timer_config tmu0_platform_data = {
.channels_mask = 7,
};
static struct resource tmu0_resources[] = {
DEFINE_RES_MEM(0xffd80000, 0x2c),
DEFINE_RES_IRQ(evt2irq(0x400)),
DEFINE_RES_IRQ(evt2irq(0x420)),
DEFINE_RES_IRQ(evt2irq(0x440)),
};
static struct platform_device tmu0_device = {
.name = "sh-tmu",
.id = 0,
.dev = {
.platform_data = &tmu0_platform_data,
},
.resource = tmu0_resources,
.num_resources = ARRAY_SIZE(tmu0_resources),
};
static struct sh_timer_config tmu1_platform_data = {
.channels_mask = 7,
};
static struct resource tmu1_resources[] = {
DEFINE_RES_MEM(0xffd90000, 0x2c),
DEFINE_RES_IRQ(evt2irq(0x920)),
DEFINE_RES_IRQ(evt2irq(0x940)),
DEFINE_RES_IRQ(evt2irq(0x960)),
};
static struct platform_device tmu1_device = {
.name = "sh-tmu",
.id = 1,
.dev = {
.platform_data = &tmu1_platform_data,
},
.resource = tmu1_resources,
.num_resources = ARRAY_SIZE(tmu1_resources),
};
static struct resource rtc_resources[] = {
[0] = {
.start = 0xa465fec0,
.end = 0xa465fec0 + 0x58 - 1,
.flags = IORESOURCE_IO,
},
[1] = {
/* Period IRQ */
.start = evt2irq(0xaa0),
.flags = IORESOURCE_IRQ,
},
[2] = {
/* Carry IRQ */
.start = evt2irq(0xac0),
.flags = IORESOURCE_IRQ,
},
[3] = {
/* Alarm IRQ */
.start = evt2irq(0xa80),
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device rtc_device = {
.name = "sh-rtc",
.id = -1,
.num_resources = ARRAY_SIZE(rtc_resources),
.resource = rtc_resources,
};
static struct r8a66597_platdata r8a66597_data = {
.on_chip = 1,
};
static struct resource sh7723_usb_host_resources[] = {
[0] = {
.start = 0xa4d80000,
.end = 0xa4d800ff,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = evt2irq(0xa20),
.end = evt2irq(0xa20),
.flags = IORESOURCE_IRQ | IRQF_TRIGGER_LOW,
},
};
static struct platform_device sh7723_usb_host_device = {
.name = "r8a66597_hcd",
.id = 0,
.dev = {
.dma_mask = NULL, /* does not use DMA */
.coherent_dma_mask = 0xffffffff,
.platform_data = &r8a66597_data,
},
.num_resources = ARRAY_SIZE(sh7723_usb_host_resources),
.resource = sh7723_usb_host_resources,
};
static struct resource iic_resources[] = {
[0] = {
.name = "IIC",
.start = 0x04470000,
.end = 0x04470017,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = evt2irq(0xe00),
.end = evt2irq(0xe60),
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device iic_device = {
.name = "i2c-sh_mobile",
.id = 0, /* "i2c0" clock */
.num_resources = ARRAY_SIZE(iic_resources),
.resource = iic_resources,
};
static struct platform_device *sh7723_devices[] __initdata = {
&scif0_device,
&scif1_device,
&scif2_device,
&scif3_device,
&scif4_device,
&scif5_device,
&cmt_device,
&tmu0_device,
&tmu1_device,
&rtc_device,
&iic_device,
&sh7723_usb_host_device,
&vpu_device,
&veu0_device,
&veu1_device,
};
static int __init sh7723_devices_setup(void)
{
platform_resource_setup_memory(&vpu_device, "vpu", 2 << 20);
platform_resource_setup_memory(&veu0_device, "veu0", 2 << 20);
platform_resource_setup_memory(&veu1_device, "veu1", 2 << 20);
return platform_add_devices(sh7723_devices,
ARRAY_SIZE(sh7723_devices));
}
arch_initcall(sh7723_devices_setup);
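/*
 * Note (added for exposition): platform_resource_setup_memory() fills
 * the empty "placeholder for contiguous memory" resource of each UIO
 * device above with a 2 MiB (2 << 20 byte) contiguous buffer that user
 * space can then mmap() through uio_pdrv_genirq.
 */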
static struct platform_device *sh7723_early_devices[] __initdata = {
&scif0_device,
&scif1_device,
&scif2_device,
&scif3_device,
&scif4_device,
&scif5_device,
&cmt_device,
&tmu0_device,
&tmu1_device,
};
void __init plat_early_device_setup(void)
{
sh_early_platform_add_devices(sh7723_early_devices,
ARRAY_SIZE(sh7723_early_devices));
}
#define RAMCR_CACHE_L2FC 0x0002
#define RAMCR_CACHE_L2E 0x0001
#define L2_CACHE_ENABLE (RAMCR_CACHE_L2E|RAMCR_CACHE_L2FC)
void l2_cache_init(void)
{
/* Enable L2 cache */
__raw_writel(L2_CACHE_ENABLE, RAMCR);
}
enum {
UNUSED=0,
ENABLED,
DISABLED,
/* interrupt sources */
IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
HUDI,
DMAC1A_DEI0,DMAC1A_DEI1,DMAC1A_DEI2,DMAC1A_DEI3,
_2DG_TRI,_2DG_INI,_2DG_CEI,
DMAC0A_DEI0,DMAC0A_DEI1,DMAC0A_DEI2,DMAC0A_DEI3,
VIO_CEUI,VIO_BEUI,VIO_VEU2HI,VIO_VOUI,
SCIFA_SCIFA0,
VPU_VPUI,
TPU_TPUI,
ADC_ADI,
USB_USI0,
RTC_ATI,RTC_PRI,RTC_CUI,
DMAC1B_DEI4,DMAC1B_DEI5,DMAC1B_DADERR,
DMAC0B_DEI4,DMAC0B_DEI5,DMAC0B_DADERR,
KEYSC_KEYI,
SCIF_SCIF0,SCIF_SCIF1,SCIF_SCIF2,
MSIOF_MSIOFI0,MSIOF_MSIOFI1,
SCIFA_SCIFA1,
FLCTL_FLSTEI,FLCTL_FLTENDI,FLCTL_FLTREQ0I,FLCTL_FLTREQ1I,
I2C_ALI,I2C_TACKI,I2C_WAITI,I2C_DTEI,
CMT_CMTI,
TSIF_TSIFI,
SIU_SIUI,
SCIFA_SCIFA2,
TMU0_TUNI0, TMU0_TUNI1, TMU0_TUNI2,
IRDA_IRDAI,
ATAPI_ATAPII,
VEU2H1_VEU2HI,
LCDC_LCDCI,
TMU1_TUNI0,TMU1_TUNI1,TMU1_TUNI2,
/* interrupt groups */
DMAC1A, DMAC0A, VIO, DMAC0B, FLCTL, I2C, _2DG,
SDHI1, RTC, DMAC1B, SDHI0,
};
static struct intc_vect vectors[] __initdata = {
INTC_VECT(IRQ0, 0x600), INTC_VECT(IRQ1, 0x620),
INTC_VECT(IRQ2, 0x640), INTC_VECT(IRQ3, 0x660),
INTC_VECT(IRQ4, 0x680), INTC_VECT(IRQ5, 0x6a0),
INTC_VECT(IRQ6, 0x6c0), INTC_VECT(IRQ7, 0x6e0),
INTC_VECT(DMAC1A_DEI0,0x700),
INTC_VECT(DMAC1A_DEI1,0x720),
INTC_VECT(DMAC1A_DEI2,0x740),
INTC_VECT(DMAC1A_DEI3,0x760),
INTC_VECT(_2DG_TRI, 0x780),
INTC_VECT(_2DG_INI, 0x7A0),
INTC_VECT(_2DG_CEI, 0x7C0),
INTC_VECT(DMAC0A_DEI0,0x800),
INTC_VECT(DMAC0A_DEI1,0x820),
INTC_VECT(DMAC0A_DEI2,0x840),
INTC_VECT(DMAC0A_DEI3,0x860),
INTC_VECT(VIO_CEUI,0x880),
INTC_VECT(VIO_BEUI,0x8A0),
INTC_VECT(VIO_VEU2HI,0x8C0),
INTC_VECT(VIO_VOUI,0x8E0),
INTC_VECT(SCIFA_SCIFA0,0x900),
INTC_VECT(VPU_VPUI,0x980),
INTC_VECT(TPU_TPUI,0x9A0),
INTC_VECT(ADC_ADI,0x9E0),
INTC_VECT(USB_USI0,0xA20),
INTC_VECT(RTC_ATI,0xA80),
INTC_VECT(RTC_PRI,0xAA0),
INTC_VECT(RTC_CUI,0xAC0),
INTC_VECT(DMAC1B_DEI4,0xB00),
INTC_VECT(DMAC1B_DEI5,0xB20),
INTC_VECT(DMAC1B_DADERR,0xB40),
INTC_VECT(DMAC0B_DEI4,0xB80),
INTC_VECT(DMAC0B_DEI5,0xBA0),
INTC_VECT(DMAC0B_DADERR,0xBC0),
INTC_VECT(KEYSC_KEYI,0xBE0),
INTC_VECT(SCIF_SCIF0,0xC00),
INTC_VECT(SCIF_SCIF1,0xC20),
INTC_VECT(SCIF_SCIF2,0xC40),
INTC_VECT(MSIOF_MSIOFI0,0xC80),
INTC_VECT(MSIOF_MSIOFI1,0xCA0),
INTC_VECT(SCIFA_SCIFA1,0xD00),
INTC_VECT(FLCTL_FLSTEI,0xD80),
INTC_VECT(FLCTL_FLTENDI,0xDA0),
INTC_VECT(FLCTL_FLTREQ0I,0xDC0),
INTC_VECT(FLCTL_FLTREQ1I,0xDE0),
INTC_VECT(I2C_ALI,0xE00),
INTC_VECT(I2C_TACKI,0xE20),
INTC_VECT(I2C_WAITI,0xE40),
INTC_VECT(I2C_DTEI,0xE60),
INTC_VECT(SDHI0, 0xE80),
INTC_VECT(SDHI0, 0xEA0),
INTC_VECT(SDHI0, 0xEC0),
INTC_VECT(CMT_CMTI,0xF00),
INTC_VECT(TSIF_TSIFI,0xF20),
INTC_VECT(SIU_SIUI,0xF80),
INTC_VECT(SCIFA_SCIFA2,0xFA0),
INTC_VECT(TMU0_TUNI0,0x400),
INTC_VECT(TMU0_TUNI1,0x420),
INTC_VECT(TMU0_TUNI2,0x440),
INTC_VECT(IRDA_IRDAI,0x480),
INTC_VECT(ATAPI_ATAPII,0x4A0),
INTC_VECT(SDHI1, 0x4E0),
INTC_VECT(SDHI1, 0x500),
INTC_VECT(SDHI1, 0x520),
INTC_VECT(VEU2H1_VEU2HI,0x560),
INTC_VECT(LCDC_LCDCI,0x580),
INTC_VECT(TMU1_TUNI0,0x920),
INTC_VECT(TMU1_TUNI1,0x940),
INTC_VECT(TMU1_TUNI2,0x960),
};
static struct intc_group groups[] __initdata = {
INTC_GROUP(DMAC1A,DMAC1A_DEI0,DMAC1A_DEI1,DMAC1A_DEI2,DMAC1A_DEI3),
INTC_GROUP(DMAC0A,DMAC0A_DEI0,DMAC0A_DEI1,DMAC0A_DEI2,DMAC0A_DEI3),
INTC_GROUP(VIO, VIO_CEUI,VIO_BEUI,VIO_VEU2HI,VIO_VOUI),
INTC_GROUP(DMAC0B, DMAC0B_DEI4,DMAC0B_DEI5,DMAC0B_DADERR),
INTC_GROUP(FLCTL,FLCTL_FLSTEI,FLCTL_FLTENDI,FLCTL_FLTREQ0I,FLCTL_FLTREQ1I),
INTC_GROUP(I2C,I2C_ALI,I2C_TACKI,I2C_WAITI,I2C_DTEI),
INTC_GROUP(_2DG, _2DG_TRI,_2DG_INI,_2DG_CEI),
INTC_GROUP(RTC, RTC_ATI,RTC_PRI,RTC_CUI),
INTC_GROUP(DMAC1B, DMAC1B_DEI4,DMAC1B_DEI5,DMAC1B_DADERR),
};
static struct intc_mask_reg mask_registers[] __initdata = {
{ 0xa4080080, 0xa40800c0, 8, /* IMR0 / IMCR0 */
{ 0, TMU1_TUNI2, TMU1_TUNI1, TMU1_TUNI0,
0, ENABLED, ENABLED, ENABLED } },
{ 0xa4080084, 0xa40800c4, 8, /* IMR1 / IMCR1 */
{ VIO_VOUI, VIO_VEU2HI,VIO_BEUI,VIO_CEUI,DMAC0A_DEI3,DMAC0A_DEI2,DMAC0A_DEI1,DMAC0A_DEI0 } },
{ 0xa4080088, 0xa40800c8, 8, /* IMR2 / IMCR2 */
{ 0, 0, 0, VPU_VPUI,0,0,0,SCIFA_SCIFA0 } },
{ 0xa408008c, 0xa40800cc, 8, /* IMR3 / IMCR3 */
{ DMAC1A_DEI3,DMAC1A_DEI2,DMAC1A_DEI1,DMAC1A_DEI0,0,0,0,IRDA_IRDAI } },
{ 0xa4080090, 0xa40800d0, 8, /* IMR4 / IMCR4 */
{ 0,TMU0_TUNI2,TMU0_TUNI1,TMU0_TUNI0,VEU2H1_VEU2HI,0,0,LCDC_LCDCI } },
{ 0xa4080094, 0xa40800d4, 8, /* IMR5 / IMCR5 */
{ KEYSC_KEYI,DMAC0B_DADERR,DMAC0B_DEI5,DMAC0B_DEI4,0,SCIF_SCIF2,SCIF_SCIF1,SCIF_SCIF0 } },
{ 0xa4080098, 0xa40800d8, 8, /* IMR6 / IMCR6 */
{ 0,0,0,SCIFA_SCIFA1,ADC_ADI,0,MSIOF_MSIOFI1,MSIOF_MSIOFI0 } },
{ 0xa408009c, 0xa40800dc, 8, /* IMR7 / IMCR7 */
{ I2C_DTEI, I2C_WAITI, I2C_TACKI, I2C_ALI,
FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLTENDI, FLCTL_FLSTEI } },
{ 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */
{ 0, ENABLED, ENABLED, ENABLED,
0, 0, SCIFA_SCIFA2, SIU_SIUI } },
{ 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */
{ 0, 0, 0, CMT_CMTI, 0, 0, USB_USI0,0 } },
{ 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */
{ 0, DMAC1B_DADERR,DMAC1B_DEI5,DMAC1B_DEI4,0,RTC_ATI,RTC_PRI,RTC_CUI } },
{ 0xa40800ac, 0xa40800ec, 8, /* IMR11 / IMCR11 */
{ 0,_2DG_CEI,_2DG_INI,_2DG_TRI,0,TPU_TPUI,0,TSIF_TSIFI } },
{ 0xa40800b0, 0xa40800f0, 8, /* IMR12 / IMCR12 */
{ 0,0,0,0,0,0,0,ATAPI_ATAPII } },
{ 0xa4140044, 0xa4140064, 8, /* INTMSK00 / INTMSKCLR00 */
{ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
};
static struct intc_prio_reg prio_registers[] __initdata = {
{ 0xa4080000, 0, 16, 4, /* IPRA */ { TMU0_TUNI0, TMU0_TUNI1, TMU0_TUNI2, IRDA_IRDAI } },
{ 0xa4080004, 0, 16, 4, /* IPRB */ { VEU2H1_VEU2HI, LCDC_LCDCI, DMAC1A, 0} },
{ 0xa4080008, 0, 16, 4, /* IPRC */ { TMU1_TUNI0, TMU1_TUNI1, TMU1_TUNI2, 0} },
{ 0xa408000c, 0, 16, 4, /* IPRD */ { } },
{ 0xa4080010, 0, 16, 4, /* IPRE */ { DMAC0A, VIO, SCIFA_SCIFA0, VPU_VPUI } },
{ 0xa4080014, 0, 16, 4, /* IPRF */ { KEYSC_KEYI, DMAC0B, USB_USI0, CMT_CMTI } },
{ 0xa4080018, 0, 16, 4, /* IPRG */ { SCIF_SCIF0, SCIF_SCIF1, SCIF_SCIF2,0 } },
{ 0xa408001c, 0, 16, 4, /* IPRH */ { MSIOF_MSIOFI0,MSIOF_MSIOFI1, FLCTL, I2C } },
{ 0xa4080020, 0, 16, 4, /* IPRI */ { SCIFA_SCIFA1,0,TSIF_TSIFI,_2DG } },
{ 0xa4080024, 0, 16, 4, /* IPRJ */ { ADC_ADI,0,SIU_SIUI,SDHI1 } },
{ 0xa4080028, 0, 16, 4, /* IPRK */ { RTC,DMAC1B,0,SDHI0 } },
{ 0xa408002c, 0, 16, 4, /* IPRL */ { SCIFA_SCIFA2,0,TPU_TPUI,ATAPI_ATAPII } },
{ 0xa4140010, 0, 32, 4, /* INTPRI00 */
{ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
};
static struct intc_sense_reg sense_registers[] __initdata = {
{ 0xa414001c, 16, 2, /* ICR1 */
{ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
};
static struct intc_mask_reg ack_registers[] __initdata = {
{ 0xa4140024, 0, 8, /* INTREQ00 */
{ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
};
static struct intc_desc intc_desc __initdata = {
.name = "sh7723",
.force_enable = ENABLED,
.force_disable = DISABLED,
.hw = INTC_HW_DESC(vectors, groups, mask_registers,
prio_registers, sense_registers, ack_registers),
};
void __init plat_irq_setup(void)
{
register_intc_controller(&intc_desc);
}
| linux-master | arch/sh/kernel/cpu/sh4a/setup-sh7723.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/cpu/sh4a/ubc.c
*
* On-chip UBC support for SH-4A CPUs.
*
* Copyright (C) 2009 - 2010 Paul Mundt
*/
#include <linux/init.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <asm/hw_breakpoint.h>
#define UBC_CBR(idx) (0xff200000 + (0x20 * idx))
#define UBC_CRR(idx) (0xff200004 + (0x20 * idx))
#define UBC_CAR(idx) (0xff200008 + (0x20 * idx))
#define UBC_CAMR(idx) (0xff20000c + (0x20 * idx))
#define UBC_CCMFR 0xff200600
#define UBC_CBCR 0xff200620
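/*
 * Note (added for exposition): each UBC channel occupies a 0x20-byte
 * register window, so the CBR/CRR/CAR/CAMR macros above address
 * channel idx at fixed offsets from 0xff200000 + idx * 0x20.
 */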
/* CRR */
#define UBC_CRR_PCB (1 << 1)
#define UBC_CRR_BIE (1 << 0)
/* CBR */
#define UBC_CBR_CE (1 << 0)
static struct sh_ubc sh4a_ubc;
static void sh4a_ubc_enable(struct arch_hw_breakpoint *info, int idx)
{
__raw_writel(UBC_CBR_CE | info->len | info->type, UBC_CBR(idx));
__raw_writel(info->address, UBC_CAR(idx));
}
static void sh4a_ubc_disable(struct arch_hw_breakpoint *info, int idx)
{
__raw_writel(0, UBC_CBR(idx));
__raw_writel(0, UBC_CAR(idx));
}
static void sh4a_ubc_enable_all(unsigned long mask)
{
int i;
for (i = 0; i < sh4a_ubc.num_events; i++)
if (mask & (1 << i))
__raw_writel(__raw_readl(UBC_CBR(i)) | UBC_CBR_CE,
UBC_CBR(i));
}
static void sh4a_ubc_disable_all(void)
{
int i;
for (i = 0; i < sh4a_ubc.num_events; i++)
__raw_writel(__raw_readl(UBC_CBR(i)) & ~UBC_CBR_CE,
UBC_CBR(i));
}
static unsigned long sh4a_ubc_active_mask(void)
{
unsigned long active = 0;
int i;
for (i = 0; i < sh4a_ubc.num_events; i++)
if (__raw_readl(UBC_CBR(i)) & UBC_CBR_CE)
active |= (1 << i);
return active;
}
static unsigned long sh4a_ubc_triggered_mask(void)
{
return __raw_readl(UBC_CCMFR);
}
static void sh4a_ubc_clear_triggered_mask(unsigned long mask)
{
__raw_writel(__raw_readl(UBC_CCMFR) & ~mask, UBC_CCMFR);
}
static struct sh_ubc sh4a_ubc = {
.name = "SH-4A",
.num_events = 2,
.trap_nr = 0x1e0,
.enable = sh4a_ubc_enable,
.disable = sh4a_ubc_disable,
.enable_all = sh4a_ubc_enable_all,
.disable_all = sh4a_ubc_disable_all,
.active_mask = sh4a_ubc_active_mask,
.triggered_mask = sh4a_ubc_triggered_mask,
.clear_triggered_mask = sh4a_ubc_clear_triggered_mask,
};
static int __init sh4a_ubc_init(void)
{
struct clk *ubc_iclk = clk_get(NULL, "ubc0");
int i;
/*
* The UBC MSTP bit is optional, as not all platforms will have
* it. Just ignore it if we can't find it.
*/
if (IS_ERR(ubc_iclk))
ubc_iclk = NULL;
clk_enable(ubc_iclk);
__raw_writel(0, UBC_CBCR);
for (i = 0; i < sh4a_ubc.num_events; i++) {
__raw_writel(0, UBC_CAMR(i));
__raw_writel(0, UBC_CBR(i));
__raw_writel(UBC_CRR_BIE | UBC_CRR_PCB, UBC_CRR(i));
/* dummy read for write posting */
(void)__raw_readl(UBC_CRR(i));
}
clk_disable(ubc_iclk);
sh4a_ubc.clk = ubc_iclk;
return register_sh_ubc(&sh4a_ubc);
}
arch_initcall(sh4a_ubc_init);
| linux-master | arch/sh/kernel/cpu/sh4a/ubc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/cpu/sh4a/clock-sh7366.c
*
* SH7366 clock framework support
*
* Copyright (C) 2009 Magnus Damm
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/clkdev.h>
#include <asm/clock.h>
/* SH7366 registers */
#define FRQCR 0xa4150000
#define VCLKCR 0xa4150004
#define SCLKACR 0xa4150008
#define SCLKBCR 0xa415000c
#define PLLCR 0xa4150024
#define MSTPCR0 0xa4150030
#define MSTPCR1 0xa4150034
#define MSTPCR2 0xa4150038
#define DLLFRQ 0xa4150050
/* Fixed 32 kHz root clock for RTC and Power Management purposes */
static struct clk r_clk = {
.rate = 32768,
};
/*
* Default rate for the root input clock, reset this with clk_set_rate()
* from the platform code.
*/
struct clk extal_clk = {
.rate = 33333333,
};
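/*
 * Board-code sketch (hypothetical, guarded out of the build): this is
 * how platform code would reset the EXTAL rate mentioned above, here
 * assuming a board with a 25 MHz crystal.
 */
#if 0
static int __init board_extal_setup(void)
{
	struct clk *clk = clk_get(NULL, "extal");

	if (!IS_ERR(clk)) {
		clk_set_rate(clk, 25000000);
		clk_put(clk);
	}
	return 0;
}
#endif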
/* The DLL block multiplies the 32 kHz r_clk and may be used instead of extal */
static unsigned long dll_recalc(struct clk *clk)
{
unsigned long mult;
if (__raw_readl(PLLCR) & 0x1000)
mult = __raw_readl(DLLFRQ);
else
mult = 0;
return clk->parent->rate * mult;
}
static struct sh_clk_ops dll_clk_ops = {
.recalc = dll_recalc,
};
static struct clk dll_clk = {
.ops = &dll_clk_ops,
.parent = &r_clk,
.flags = CLK_ENABLE_ON_INIT,
};
static unsigned long pll_recalc(struct clk *clk)
{
unsigned long mult = 1;
unsigned long div = 1;
if (__raw_readl(PLLCR) & 0x4000)
mult = (((__raw_readl(FRQCR) >> 24) & 0x1f) + 1);
else
div = 2;
return (clk->parent->rate * mult) / div;
}
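/*
 * Worked example: with the 33.33 MHz EXTAL as parent and an FRQCR
 * multiplier field value of 15 (mult = 16), the PLL output is roughly
 * 533 MHz; with the PLL switched off the parent rate is simply halved.
 */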
static struct sh_clk_ops pll_clk_ops = {
.recalc = pll_recalc,
};
static struct clk pll_clk = {
.ops = &pll_clk_ops,
.flags = CLK_ENABLE_ON_INIT,
};
struct clk *main_clks[] = {
&r_clk,
&extal_clk,
&dll_clk,
&pll_clk,
};
static int multipliers[] = { 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
static int divisors[] = { 1, 3, 2, 5, 3, 4, 5, 6, 8, 10, 12, 16, 20 };
static struct clk_div_mult_table div4_div_mult_table = {
.divisors = divisors,
.nr_divisors = ARRAY_SIZE(divisors),
.multipliers = multipliers,
.nr_multipliers = ARRAY_SIZE(multipliers),
};
static struct clk_div4_table div4_table = {
.div_mult_table = &div4_div_mult_table,
};
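/*
 * Worked example of the tables above: a DIV4 field value of N selects
 * multipliers[N] / divisors[N], so field value 1 scales the PLL rate
 * by 2/3 and field value 3 scales it by 2/5.
 */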
enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_B3, DIV4_P,
DIV4_SIUA, DIV4_SIUB, DIV4_NR };
#define DIV4(_reg, _bit, _mask, _flags) \
SH_CLK_DIV4(&pll_clk, _reg, _bit, _mask, _flags)
struct clk div4_clks[DIV4_NR] = {
[DIV4_I] = DIV4(FRQCR, 20, 0x1fef, CLK_ENABLE_ON_INIT),
[DIV4_U] = DIV4(FRQCR, 16, 0x1fff, CLK_ENABLE_ON_INIT),
[DIV4_SH] = DIV4(FRQCR, 12, 0x1fff, CLK_ENABLE_ON_INIT),
[DIV4_B] = DIV4(FRQCR, 8, 0x1fff, CLK_ENABLE_ON_INIT),
[DIV4_B3] = DIV4(FRQCR, 4, 0x1fff, CLK_ENABLE_ON_INIT),
[DIV4_P] = DIV4(FRQCR, 0, 0x1fff, 0),
[DIV4_SIUA] = DIV4(SCLKACR, 0, 0x1fff, 0),
[DIV4_SIUB] = DIV4(SCLKBCR, 0, 0x1fff, 0),
};
enum { DIV6_V, DIV6_NR };
struct clk div6_clks[DIV6_NR] = {
[DIV6_V] = SH_CLK_DIV6(&pll_clk, VCLKCR, 0),
};
#define MSTP(_parent, _reg, _bit, _flags) \
SH_CLK_MSTP32(_parent, _reg, _bit, _flags)
enum { MSTP031, MSTP030, MSTP029, MSTP028, MSTP026,
MSTP023, MSTP022, MSTP021, MSTP020, MSTP019, MSTP018, MSTP017, MSTP016,
MSTP015, MSTP014, MSTP013, MSTP012, MSTP011, MSTP010,
MSTP007, MSTP006, MSTP005, MSTP002, MSTP001,
MSTP109, MSTP100,
MSTP227, MSTP226, MSTP224, MSTP223, MSTP222, MSTP218, MSTP217,
MSTP211, MSTP207, MSTP205, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
MSTP_NR };
static struct clk mstp_clks[MSTP_NR] = {
/* See page 52 of Datasheet V0.40: Overview -> Block Diagram */
[MSTP031] = MSTP(&div4_clks[DIV4_I], MSTPCR0, 31, CLK_ENABLE_ON_INIT),
[MSTP030] = MSTP(&div4_clks[DIV4_I], MSTPCR0, 30, CLK_ENABLE_ON_INIT),
[MSTP029] = MSTP(&div4_clks[DIV4_I], MSTPCR0, 29, CLK_ENABLE_ON_INIT),
[MSTP028] = MSTP(&div4_clks[DIV4_SH], MSTPCR0, 28, CLK_ENABLE_ON_INIT),
[MSTP026] = MSTP(&div4_clks[DIV4_B], MSTPCR0, 26, CLK_ENABLE_ON_INIT),
[MSTP023] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 23, 0),
[MSTP022] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 22, 0),
[MSTP021] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 21, 0),
[MSTP020] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 20, 0),
[MSTP019] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 19, 0),
[MSTP017] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 17, 0),
[MSTP015] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 15, 0),
[MSTP014] = MSTP(&r_clk, MSTPCR0, 14, 0),
[MSTP013] = MSTP(&r_clk, MSTPCR0, 13, 0),
[MSTP011] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 11, 0),
[MSTP010] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 10, 0),
[MSTP007] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 7, 0),
[MSTP006] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 6, 0),
[MSTP005] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 5, 0),
[MSTP002] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 2, 0),
[MSTP001] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 1, 0),
[MSTP109] = MSTP(&div4_clks[DIV4_P], MSTPCR1, 9, 0),
[MSTP227] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 27, 0),
[MSTP226] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 26, 0),
[MSTP224] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 24, 0),
[MSTP223] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 23, 0),
[MSTP222] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 22, 0),
[MSTP218] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 18, 0),
[MSTP217] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 17, 0),
[MSTP211] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 11, 0),
[MSTP207] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 7, CLK_ENABLE_ON_INIT),
[MSTP205] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 5, 0),
[MSTP204] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 4, 0),
[MSTP203] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 3, 0),
[MSTP202] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 2, CLK_ENABLE_ON_INIT),
[MSTP201] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 1, CLK_ENABLE_ON_INIT),
[MSTP200] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 0, 0),
};
static struct clk_lookup lookups[] = {
/* main clocks */
CLKDEV_CON_ID("rclk", &r_clk),
CLKDEV_CON_ID("extal", &extal_clk),
CLKDEV_CON_ID("dll_clk", &dll_clk),
CLKDEV_CON_ID("pll_clk", &pll_clk),
/* DIV4 clocks */
CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
CLKDEV_CON_ID("umem_clk", &div4_clks[DIV4_U]),
CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]),
CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]),
CLKDEV_CON_ID("b3_clk", &div4_clks[DIV4_B3]),
CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
CLKDEV_CON_ID("siua_clk", &div4_clks[DIV4_SIUA]),
CLKDEV_CON_ID("siub_clk", &div4_clks[DIV4_SIUB]),
/* DIV6 clocks */
CLKDEV_CON_ID("video_clk", &div6_clks[DIV6_V]),
/* MSTP32 clocks */
CLKDEV_CON_ID("tlb0", &mstp_clks[MSTP031]),
CLKDEV_CON_ID("ic0", &mstp_clks[MSTP030]),
CLKDEV_CON_ID("oc0", &mstp_clks[MSTP029]),
CLKDEV_CON_ID("rsmem0", &mstp_clks[MSTP028]),
CLKDEV_CON_ID("xymem0", &mstp_clks[MSTP026]),
CLKDEV_CON_ID("intc3", &mstp_clks[MSTP023]),
CLKDEV_CON_ID("intc0", &mstp_clks[MSTP022]),
CLKDEV_CON_ID("dmac0", &mstp_clks[MSTP021]),
CLKDEV_CON_ID("sh0", &mstp_clks[MSTP020]),
CLKDEV_CON_ID("hudi0", &mstp_clks[MSTP019]),
CLKDEV_CON_ID("ubc0", &mstp_clks[MSTP017]),
CLKDEV_CON_ID("tmu_fck", &mstp_clks[MSTP015]),
CLKDEV_ICK_ID("fck", "sh-cmt-32.0", &mstp_clks[MSTP014]),
CLKDEV_CON_ID("rwdt0", &mstp_clks[MSTP013]),
CLKDEV_CON_ID("mfi0", &mstp_clks[MSTP011]),
CLKDEV_CON_ID("flctl0", &mstp_clks[MSTP010]),
CLKDEV_ICK_ID("fck", "sh-sci.0", &mstp_clks[MSTP007]),
CLKDEV_ICK_ID("fck", "sh-sci.1", &mstp_clks[MSTP006]),
CLKDEV_ICK_ID("fck", "sh-sci.2", &mstp_clks[MSTP005]),
CLKDEV_CON_ID("msiof0", &mstp_clks[MSTP002]),
CLKDEV_CON_ID("sbr0", &mstp_clks[MSTP001]),
CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP109]),
CLKDEV_CON_ID("icb0", &mstp_clks[MSTP227]),
CLKDEV_CON_ID("meram0", &mstp_clks[MSTP226]),
CLKDEV_CON_ID("dacy1", &mstp_clks[MSTP224]),
CLKDEV_CON_ID("dacy0", &mstp_clks[MSTP223]),
CLKDEV_CON_ID("tsif0", &mstp_clks[MSTP222]),
CLKDEV_CON_ID("sdhi0", &mstp_clks[MSTP218]),
CLKDEV_CON_ID("mmcif0", &mstp_clks[MSTP217]),
CLKDEV_CON_ID("usbf0", &mstp_clks[MSTP211]),
CLKDEV_CON_ID("veu1", &mstp_clks[MSTP207]),
CLKDEV_CON_ID("vou0", &mstp_clks[MSTP205]),
CLKDEV_CON_ID("beu0", &mstp_clks[MSTP204]),
CLKDEV_CON_ID("ceu0", &mstp_clks[MSTP203]),
CLKDEV_CON_ID("veu0", &mstp_clks[MSTP202]),
CLKDEV_CON_ID("vpu0", &mstp_clks[MSTP201]),
CLKDEV_CON_ID("lcdc0", &mstp_clks[MSTP200]),
};
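/*
 * Consumer-side sketch: the CLKDEV_ICK_ID("fck", "sh-sci.0", ...)
 * entry above is what lets the SCIF driver gate its own module-stop
 * bit. A hypothetical probe fragment (illustration only):
 */
#if 0
static int example_probe(struct platform_device *pdev)
{
	struct clk *fck = clk_get(&pdev->dev, "fck");

	if (IS_ERR(fck))
		return PTR_ERR(fck);
	clk_enable(fck);	/* clears MSTP007, ungating the block */
	return 0;
}
#endif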
int __init arch_clk_init(void)
{
int k, ret = 0;
/* autodetect extal or dll configuration */
if (__raw_readl(PLLCR) & 0x1000)
pll_clk.parent = &dll_clk;
else
pll_clk.parent = &extal_clk;
for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
ret = clk_register(main_clks[k]);
clkdev_add_table(lookups, ARRAY_SIZE(lookups));
if (!ret)
ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);
if (!ret)
ret = sh_clk_div6_register(div6_clks, DIV6_NR);
if (!ret)
ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
return ret;
}
| linux-master | arch/sh/kernel/cpu/sh4a/clock-sh7366.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/cpu/sh4a/clock-sh7343.c
*
* SH7343 clock framework support
*
* Copyright (C) 2009 Magnus Damm
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/clkdev.h>
#include <asm/clock.h>
/* SH7343 registers */
#define FRQCR 0xa4150000
#define VCLKCR 0xa4150004
#define SCLKACR 0xa4150008
#define SCLKBCR 0xa415000c
#define PLLCR 0xa4150024
#define MSTPCR0 0xa4150030
#define MSTPCR1 0xa4150034
#define MSTPCR2 0xa4150038
#define DLLFRQ 0xa4150050
/* Fixed 32 kHz root clock for RTC and Power Management purposes */
static struct clk r_clk = {
.rate = 32768,
};
/*
* Default rate for the root input clock, reset this with clk_set_rate()
* from the platform code.
*/
struct clk extal_clk = {
.rate = 33333333,
};
/* The DLL block multiplies the 32 kHz r_clk and may be used instead of extal */
static unsigned long dll_recalc(struct clk *clk)
{
unsigned long mult;
if (__raw_readl(PLLCR) & 0x1000)
mult = __raw_readl(DLLFRQ);
else
mult = 0;
return clk->parent->rate * mult;
}
static struct sh_clk_ops dll_clk_ops = {
.recalc = dll_recalc,
};
static struct clk dll_clk = {
.ops = &dll_clk_ops,
.parent = &r_clk,
.flags = CLK_ENABLE_ON_INIT,
};
static unsigned long pll_recalc(struct clk *clk)
{
unsigned long mult = 1;
if (__raw_readl(PLLCR) & 0x4000)
mult = (((__raw_readl(FRQCR) >> 24) & 0x1f) + 1);
return clk->parent->rate * mult;
}
static struct sh_clk_ops pll_clk_ops = {
.recalc = pll_recalc,
};
static struct clk pll_clk = {
.ops = &pll_clk_ops,
.flags = CLK_ENABLE_ON_INIT,
};
struct clk *main_clks[] = {
&r_clk,
&extal_clk,
&dll_clk,
&pll_clk,
};
static int multipliers[] = { 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
static int divisors[] = { 1, 3, 2, 5, 3, 4, 5, 6, 8, 10, 12, 16, 20 };
static struct clk_div_mult_table div4_div_mult_table = {
.divisors = divisors,
.nr_divisors = ARRAY_SIZE(divisors),
.multipliers = multipliers,
.nr_multipliers = ARRAY_SIZE(multipliers),
};
static struct clk_div4_table div4_table = {
.div_mult_table = &div4_div_mult_table,
};
enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_B3, DIV4_P,
DIV4_SIUA, DIV4_SIUB, DIV4_NR };
#define DIV4(_reg, _bit, _mask, _flags) \
SH_CLK_DIV4(&pll_clk, _reg, _bit, _mask, _flags)
struct clk div4_clks[DIV4_NR] = {
[DIV4_I] = DIV4(FRQCR, 20, 0x1fff, CLK_ENABLE_ON_INIT),
[DIV4_U] = DIV4(FRQCR, 16, 0x1fff, CLK_ENABLE_ON_INIT),
[DIV4_SH] = DIV4(FRQCR, 12, 0x1fff, CLK_ENABLE_ON_INIT),
[DIV4_B] = DIV4(FRQCR, 8, 0x1fff, CLK_ENABLE_ON_INIT),
[DIV4_B3] = DIV4(FRQCR, 4, 0x1fff, CLK_ENABLE_ON_INIT),
[DIV4_P] = DIV4(FRQCR, 0, 0x1fff, 0),
[DIV4_SIUA] = DIV4(SCLKACR, 0, 0x1fff, 0),
[DIV4_SIUB] = DIV4(SCLKBCR, 0, 0x1fff, 0),
};
enum { DIV6_V, DIV6_NR };
struct clk div6_clks[DIV6_NR] = {
[DIV6_V] = SH_CLK_DIV6(&pll_clk, VCLKCR, 0),
};
#define MSTP(_parent, _reg, _bit, _flags) \
SH_CLK_MSTP32(_parent, _reg, _bit, _flags)
enum { MSTP031, MSTP030, MSTP029, MSTP028, MSTP026,
MSTP023, MSTP022, MSTP021, MSTP020, MSTP019, MSTP018, MSTP017, MSTP016,
MSTP015, MSTP014, MSTP013, MSTP012, MSTP011, MSTP010,
MSTP007, MSTP006, MSTP005, MSTP004, MSTP003, MSTP002, MSTP001,
MSTP109, MSTP108, MSTP100,
MSTP225, MSTP224, MSTP218, MSTP217, MSTP216,
MSTP214, MSTP213, MSTP212, MSTP211, MSTP208,
MSTP206, MSTP205, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
MSTP_NR };
static struct clk mstp_clks[MSTP_NR] = {
[MSTP031] = MSTP(&div4_clks[DIV4_I], MSTPCR0, 31, CLK_ENABLE_ON_INIT),
[MSTP030] = MSTP(&div4_clks[DIV4_I], MSTPCR0, 30, CLK_ENABLE_ON_INIT),
[MSTP029] = MSTP(&div4_clks[DIV4_I], MSTPCR0, 29, CLK_ENABLE_ON_INIT),
[MSTP028] = MSTP(&div4_clks[DIV4_U], MSTPCR0, 28, CLK_ENABLE_ON_INIT),
[MSTP026] = MSTP(&div4_clks[DIV4_B], MSTPCR0, 26, CLK_ENABLE_ON_INIT),
[MSTP023] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 23, 0),
[MSTP022] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 22, 0),
[MSTP021] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 21, 0),
[MSTP020] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 20, 0),
[MSTP019] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 19, 0),
[MSTP017] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 17, 0),
[MSTP015] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 15, 0),
[MSTP014] = MSTP(&r_clk, MSTPCR0, 14, 0),
[MSTP013] = MSTP(&r_clk, MSTPCR0, 13, 0),
[MSTP011] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 11, 0),
[MSTP010] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 10, 0),
[MSTP007] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 7, 0),
[MSTP006] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 6, 0),
[MSTP005] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 5, 0),
[MSTP004] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 4, 0),
[MSTP003] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 3, 0),
[MSTP002] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 2, 0),
[MSTP001] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 1, 0),
[MSTP109] = MSTP(&div4_clks[DIV4_P], MSTPCR1, 9, 0),
[MSTP108] = MSTP(&div4_clks[DIV4_P], MSTPCR1, 8, 0),
[MSTP225] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 25, 0),
[MSTP224] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 24, 0),
[MSTP218] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 18, 0),
[MSTP217] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 17, 0),
[MSTP216] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 16, 0),
[MSTP214] = MSTP(&r_clk, MSTPCR2, 14, 0),
[MSTP213] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 13, 0),
[MSTP212] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 12, 0),
[MSTP211] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 11, 0),
[MSTP208] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 8, 0),
[MSTP206] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 6, CLK_ENABLE_ON_INIT),
[MSTP205] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 5, 0),
[MSTP204] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 4, 0),
[MSTP203] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 3, 0),
[MSTP202] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 2, CLK_ENABLE_ON_INIT),
[MSTP201] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 1, CLK_ENABLE_ON_INIT),
[MSTP200] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 0, 0),
};
static struct clk_lookup lookups[] = {
/* main clocks */
CLKDEV_CON_ID("rclk", &r_clk),
CLKDEV_CON_ID("extal", &extal_clk),
CLKDEV_CON_ID("dll_clk", &dll_clk),
CLKDEV_CON_ID("pll_clk", &pll_clk),
/* DIV4 clocks */
CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
CLKDEV_CON_ID("umem_clk", &div4_clks[DIV4_U]),
CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]),
CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]),
CLKDEV_CON_ID("b3_clk", &div4_clks[DIV4_B3]),
CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
CLKDEV_CON_ID("siua_clk", &div4_clks[DIV4_SIUA]),
CLKDEV_CON_ID("siub_clk", &div4_clks[DIV4_SIUB]),
/* DIV6 clocks */
CLKDEV_CON_ID("video_clk", &div6_clks[DIV6_V]),
/* MSTP32 clocks */
CLKDEV_CON_ID("tlb0", &mstp_clks[MSTP031]),
CLKDEV_CON_ID("ic0", &mstp_clks[MSTP030]),
CLKDEV_CON_ID("oc0", &mstp_clks[MSTP029]),
CLKDEV_CON_ID("uram0", &mstp_clks[MSTP028]),
CLKDEV_CON_ID("xymem0", &mstp_clks[MSTP026]),
CLKDEV_CON_ID("intc3", &mstp_clks[MSTP023]),
CLKDEV_CON_ID("intc0", &mstp_clks[MSTP022]),
CLKDEV_CON_ID("dmac0", &mstp_clks[MSTP021]),
CLKDEV_CON_ID("sh0", &mstp_clks[MSTP020]),
CLKDEV_CON_ID("hudi0", &mstp_clks[MSTP019]),
CLKDEV_CON_ID("ubc0", &mstp_clks[MSTP017]),
CLKDEV_CON_ID("tmu_fck", &mstp_clks[MSTP015]),
CLKDEV_ICK_ID("fck", "sh-cmt-32.0", &mstp_clks[MSTP014]),
CLKDEV_CON_ID("rwdt0", &mstp_clks[MSTP013]),
CLKDEV_CON_ID("mfi0", &mstp_clks[MSTP011]),
CLKDEV_CON_ID("flctl0", &mstp_clks[MSTP010]),
CLKDEV_ICK_ID("fck", "sh-sci.0", &mstp_clks[MSTP007]),
CLKDEV_ICK_ID("fck", "sh-sci.1", &mstp_clks[MSTP006]),
CLKDEV_ICK_ID("fck", "sh-sci.2", &mstp_clks[MSTP005]),
CLKDEV_ICK_ID("fck", "sh-sci.3", &mstp_clks[MSTP004]),
CLKDEV_CON_ID("sio0", &mstp_clks[MSTP003]),
CLKDEV_CON_ID("siof0", &mstp_clks[MSTP002]),
CLKDEV_CON_ID("siof1", &mstp_clks[MSTP001]),
CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP109]),
CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[MSTP108]),
CLKDEV_CON_ID("tpu0", &mstp_clks[MSTP225]),
CLKDEV_CON_ID("irda0", &mstp_clks[MSTP224]),
CLKDEV_CON_ID("sdhi0", &mstp_clks[MSTP218]),
CLKDEV_CON_ID("mmcif0", &mstp_clks[MSTP217]),
CLKDEV_CON_ID("sim0", &mstp_clks[MSTP216]),
CLKDEV_CON_ID("keysc0", &mstp_clks[MSTP214]),
CLKDEV_CON_ID("tsif0", &mstp_clks[MSTP213]),
CLKDEV_CON_ID("s3d40", &mstp_clks[MSTP212]),
CLKDEV_CON_ID("usbf0", &mstp_clks[MSTP211]),
CLKDEV_CON_ID("siu0", &mstp_clks[MSTP208]),
CLKDEV_CON_ID("jpu0", &mstp_clks[MSTP206]),
CLKDEV_CON_ID("vou0", &mstp_clks[MSTP205]),
CLKDEV_CON_ID("beu0", &mstp_clks[MSTP204]),
CLKDEV_CON_ID("ceu0", &mstp_clks[MSTP203]),
CLKDEV_CON_ID("veu0", &mstp_clks[MSTP202]),
CLKDEV_CON_ID("vpu0", &mstp_clks[MSTP201]),
CLKDEV_CON_ID("lcdc0", &mstp_clks[MSTP200]),
};
int __init arch_clk_init(void)
{
int k, ret = 0;
/* autodetect extal or dll configuration */
if (__raw_readl(PLLCR) & 0x1000)
pll_clk.parent = &dll_clk;
else
pll_clk.parent = &extal_clk;
for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
ret = clk_register(main_clks[k]);
clkdev_add_table(lookups, ARRAY_SIZE(lookups));
if (!ret)
ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);
if (!ret)
ret = sh_clk_div6_register(div6_clks, DIV6_NR);
if (!ret)
ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
return ret;
}
| linux-master | arch/sh/kernel/cpu/sh4a/clock-sh7343.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SH7770 Setup
*
* Copyright (C) 2006 - 2008 Paul Mundt
*/
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/serial_sci.h>
#include <linux/sh_timer.h>
#include <linux/sh_intc.h>
#include <linux/io.h>
#include <asm/platform_early.h>
static struct plat_sci_port scif0_platform_data = {
.scscr = SCSCR_REIE | SCSCR_TOIE,
.type = PORT_SCIF,
};
static struct resource scif0_resources[] = {
DEFINE_RES_MEM(0xff923000, 0x100),
DEFINE_RES_IRQ(evt2irq(0x9a0)),
};
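/*
 * evt2irq() maps a hardware INTEVT exception code to a Linux IRQ
 * number (historically (evt >> 5) - 16 on SH-4 family parts), so the
 * 0x9a0 above corresponds to IRQ 61.
 */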
static struct platform_device scif0_device = {
.name = "sh-sci",
.id = 0,
.resource = scif0_resources,
.num_resources = ARRAY_SIZE(scif0_resources),
.dev = {
.platform_data = &scif0_platform_data,
},
};
static struct plat_sci_port scif1_platform_data = {
.scscr = SCSCR_REIE | SCSCR_TOIE,
.type = PORT_SCIF,
};
static struct resource scif1_resources[] = {
DEFINE_RES_MEM(0xff924000, 0x100),
DEFINE_RES_IRQ(evt2irq(0x9c0)),
};
static struct platform_device scif1_device = {
.name = "sh-sci",
.id = 1,
.resource = scif1_resources,
.num_resources = ARRAY_SIZE(scif1_resources),
.dev = {
.platform_data = &scif1_platform_data,
},
};
static struct plat_sci_port scif2_platform_data = {
.scscr = SCSCR_REIE | SCSCR_TOIE,
.type = PORT_SCIF,
};
static struct resource scif2_resources[] = {
DEFINE_RES_MEM(0xff925000, 0x100),
DEFINE_RES_IRQ(evt2irq(0x9e0)),
};
static struct platform_device scif2_device = {
.name = "sh-sci",
.id = 2,
.resource = scif2_resources,
.num_resources = ARRAY_SIZE(scif2_resources),
.dev = {
.platform_data = &scif2_platform_data,
},
};
static struct plat_sci_port scif3_platform_data = {
.scscr = SCSCR_REIE | SCSCR_TOIE,
.type = PORT_SCIF,
};
static struct resource scif3_resources[] = {
DEFINE_RES_MEM(0xff926000, 0x100),
DEFINE_RES_IRQ(evt2irq(0xa00)),
};
static struct platform_device scif3_device = {
.name = "sh-sci",
.id = 3,
.resource = scif3_resources,
.num_resources = ARRAY_SIZE(scif3_resources),
.dev = {
.platform_data = &scif3_platform_data,
},
};
static struct plat_sci_port scif4_platform_data = {
.scscr = SCSCR_REIE | SCSCR_TOIE,
.type = PORT_SCIF,
};
static struct resource scif4_resources[] = {
DEFINE_RES_MEM(0xff927000, 0x100),
DEFINE_RES_IRQ(evt2irq(0xa20)),
};
static struct platform_device scif4_device = {
.name = "sh-sci",
.id = 4,
.resource = scif4_resources,
.num_resources = ARRAY_SIZE(scif4_resources),
.dev = {
.platform_data = &scif4_platform_data,
},
};
static struct plat_sci_port scif5_platform_data = {
.scscr = SCSCR_REIE | SCSCR_TOIE,
.type = PORT_SCIF,
};
static struct resource scif5_resources[] = {
DEFINE_RES_MEM(0xff928000, 0x100),
DEFINE_RES_IRQ(evt2irq(0xa40)),
};
static struct platform_device scif5_device = {
.name = "sh-sci",
.id = 5,
.resource = scif5_resources,
.num_resources = ARRAY_SIZE(scif5_resources),
.dev = {
.platform_data = &scif5_platform_data,
},
};
static struct plat_sci_port scif6_platform_data = {
.scscr = SCSCR_REIE | SCSCR_TOIE,
.type = PORT_SCIF,
};
static struct resource scif6_resources[] = {
DEFINE_RES_MEM(0xff929000, 0x100),
DEFINE_RES_IRQ(evt2irq(0xa60)),
};
static struct platform_device scif6_device = {
.name = "sh-sci",
.id = 6,
.resource = scif6_resources,
.num_resources = ARRAY_SIZE(scif6_resources),
.dev = {
.platform_data = &scif6_platform_data,
},
};
static struct plat_sci_port scif7_platform_data = {
.scscr = SCSCR_REIE | SCSCR_TOIE,
.type = PORT_SCIF,
};
static struct resource scif7_resources[] = {
DEFINE_RES_MEM(0xff92a000, 0x100),
DEFINE_RES_IRQ(evt2irq(0xa80)),
};
static struct platform_device scif7_device = {
.name = "sh-sci",
.id = 7,
.resource = scif7_resources,
.num_resources = ARRAY_SIZE(scif7_resources),
.dev = {
.platform_data = &scif7_platform_data,
},
};
static struct plat_sci_port scif8_platform_data = {
.scscr = SCSCR_REIE | SCSCR_TOIE,
.type = PORT_SCIF,
};
static struct resource scif8_resources[] = {
DEFINE_RES_MEM(0xff92b000, 0x100),
DEFINE_RES_IRQ(evt2irq(0xaa0)),
};
static struct platform_device scif8_device = {
.name = "sh-sci",
.id = 8,
.resource = scif8_resources,
.num_resources = ARRAY_SIZE(scif8_resources),
.dev = {
.platform_data = &scif8_platform_data,
},
};
static struct plat_sci_port scif9_platform_data = {
.scscr = SCSCR_REIE | SCSCR_TOIE,
.type = PORT_SCIF,
};
static struct resource scif9_resources[] = {
DEFINE_RES_MEM(0xff92c000, 0x100),
DEFINE_RES_IRQ(evt2irq(0xac0)),
};
static struct platform_device scif9_device = {
.name = "sh-sci",
.id = 9,
.resource = scif9_resources,
.num_resources = ARRAY_SIZE(scif9_resources),
.dev = {
.platform_data = &scif9_platform_data,
},
};
static struct sh_timer_config tmu0_platform_data = {
.channels_mask = 7,
};
static struct resource tmu0_resources[] = {
DEFINE_RES_MEM(0xffd80000, 0x30),
DEFINE_RES_IRQ(evt2irq(0x400)),
DEFINE_RES_IRQ(evt2irq(0x420)),
DEFINE_RES_IRQ(evt2irq(0x440)),
};
static struct platform_device tmu0_device = {
.name = "sh-tmu",
.id = 0,
.dev = {
.platform_data = &tmu0_platform_data,
},
.resource = tmu0_resources,
.num_resources = ARRAY_SIZE(tmu0_resources),
};
static struct sh_timer_config tmu1_platform_data = {
.channels_mask = 7,
};
static struct resource tmu1_resources[] = {
DEFINE_RES_MEM(0xffd81000, 0x30),
DEFINE_RES_IRQ(evt2irq(0x460)),
DEFINE_RES_IRQ(evt2irq(0x480)),
DEFINE_RES_IRQ(evt2irq(0x4a0)),
};
static struct platform_device tmu1_device = {
.name = "sh-tmu",
.id = 1,
.dev = {
.platform_data = &tmu1_platform_data,
},
.resource = tmu1_resources,
.num_resources = ARRAY_SIZE(tmu1_resources),
};
static struct sh_timer_config tmu2_platform_data = {
.channels_mask = 7,
};
static struct resource tmu2_resources[] = {
DEFINE_RES_MEM(0xffd82000, 0x2c),
DEFINE_RES_IRQ(evt2irq(0x4c0)),
DEFINE_RES_IRQ(evt2irq(0x4e0)),
DEFINE_RES_IRQ(evt2irq(0x500)),
};
static struct platform_device tmu2_device = {
.name = "sh-tmu",
.id = 2,
.dev = {
.platform_data = &tmu2_platform_data,
},
.resource = tmu2_resources,
.num_resources = ARRAY_SIZE(tmu2_resources),
};
static struct platform_device *sh7770_devices[] __initdata = {
&scif0_device,
&scif1_device,
&scif2_device,
&scif3_device,
&scif4_device,
&scif5_device,
&scif6_device,
&scif7_device,
&scif8_device,
&scif9_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
};
static int __init sh7770_devices_setup(void)
{
return platform_add_devices(sh7770_devices,
ARRAY_SIZE(sh7770_devices));
}
arch_initcall(sh7770_devices_setup);
static struct platform_device *sh7770_early_devices[] __initdata = {
&scif0_device,
&scif1_device,
&scif2_device,
&scif3_device,
&scif4_device,
&scif5_device,
&scif6_device,
&scif7_device,
&scif8_device,
&scif9_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
};
void __init plat_early_device_setup(void)
{
sh_early_platform_add_devices(sh7770_early_devices,
ARRAY_SIZE(sh7770_early_devices));
}
enum {
UNUSED = 0,
/* interrupt sources */
IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH,
IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH,
IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH,
IRL_HHLL, IRL_HHLH, IRL_HHHL,
IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5,
GPIO,
TMU0, TMU1, TMU2, TMU2_TICPI,
TMU3, TMU4, TMU5, TMU5_TICPI,
TMU6, TMU7, TMU8,
HAC, IPI, SPDIF, HUDI, I2C,
DMAC0_DMINT0, DMAC0_DMINT1, DMAC0_DMINT2,
I2S0, I2S1, I2S2, I2S3,
SRC_RX, SRC_TX, SRC_SPDIF,
DU, VIDEO_IN, REMOTE, YUV, USB, ATAPI, CAN, GPS, GFX2D,
GFX3D_MBX, GFX3D_DMAC,
EXBUS_ATA,
SPI0, SPI1,
SCIF089, SCIF1234, SCIF567,
ADC,
BBDMAC_0_3, BBDMAC_4_7, BBDMAC_8_10, BBDMAC_11_14,
BBDMAC_15_18, BBDMAC_19_22, BBDMAC_23_26, BBDMAC_27,
BBDMAC_28, BBDMAC_29, BBDMAC_30, BBDMAC_31,
/* interrupt groups */
TMU, DMAC, I2S, SRC, GFX3D, SPI, SCIF, BBDMAC,
};
static struct intc_vect vectors[] __initdata = {
INTC_VECT(GPIO, 0x3e0),
INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
INTC_VECT(TMU2, 0x440), INTC_VECT(TMU2_TICPI, 0x460),
INTC_VECT(TMU3, 0x480), INTC_VECT(TMU4, 0x4a0),
INTC_VECT(TMU5, 0x4c0), INTC_VECT(TMU5_TICPI, 0x4e0),
INTC_VECT(TMU6, 0x500), INTC_VECT(TMU7, 0x520),
INTC_VECT(TMU8, 0x540),
INTC_VECT(HAC, 0x580), INTC_VECT(IPI, 0x5c0),
INTC_VECT(SPDIF, 0x5e0),
INTC_VECT(HUDI, 0x600), INTC_VECT(I2C, 0x620),
INTC_VECT(DMAC0_DMINT0, 0x640), INTC_VECT(DMAC0_DMINT1, 0x660),
INTC_VECT(DMAC0_DMINT2, 0x680),
INTC_VECT(I2S0, 0x6a0), INTC_VECT(I2S1, 0x6c0),
INTC_VECT(I2S2, 0x6e0), INTC_VECT(I2S3, 0x700),
INTC_VECT(SRC_RX, 0x720), INTC_VECT(SRC_TX, 0x740),
INTC_VECT(SRC_SPDIF, 0x760),
INTC_VECT(DU, 0x780), INTC_VECT(VIDEO_IN, 0x7a0),
INTC_VECT(REMOTE, 0x7c0), INTC_VECT(YUV, 0x7e0),
INTC_VECT(USB, 0x840), INTC_VECT(ATAPI, 0x860),
INTC_VECT(CAN, 0x880), INTC_VECT(GPS, 0x8a0),
INTC_VECT(GFX2D, 0x8c0),
INTC_VECT(GFX3D_MBX, 0x900), INTC_VECT(GFX3D_DMAC, 0x920),
INTC_VECT(EXBUS_ATA, 0x940),
INTC_VECT(SPI0, 0x960), INTC_VECT(SPI1, 0x980),
INTC_VECT(SCIF089, 0x9a0), INTC_VECT(SCIF1234, 0x9c0),
INTC_VECT(SCIF1234, 0x9e0), INTC_VECT(SCIF1234, 0xa00),
INTC_VECT(SCIF1234, 0xa20), INTC_VECT(SCIF567, 0xa40),
INTC_VECT(SCIF567, 0xa60), INTC_VECT(SCIF567, 0xa80),
INTC_VECT(SCIF089, 0xaa0), INTC_VECT(SCIF089, 0xac0),
INTC_VECT(ADC, 0xb20),
INTC_VECT(BBDMAC_0_3, 0xba0), INTC_VECT(BBDMAC_0_3, 0xbc0),
INTC_VECT(BBDMAC_0_3, 0xbe0), INTC_VECT(BBDMAC_0_3, 0xc00),
INTC_VECT(BBDMAC_4_7, 0xc20), INTC_VECT(BBDMAC_4_7, 0xc40),
INTC_VECT(BBDMAC_4_7, 0xc60), INTC_VECT(BBDMAC_4_7, 0xc80),
INTC_VECT(BBDMAC_8_10, 0xca0), INTC_VECT(BBDMAC_8_10, 0xcc0),
INTC_VECT(BBDMAC_8_10, 0xce0), INTC_VECT(BBDMAC_11_14, 0xd00),
INTC_VECT(BBDMAC_11_14, 0xd20), INTC_VECT(BBDMAC_11_14, 0xd40),
INTC_VECT(BBDMAC_11_14, 0xd60), INTC_VECT(BBDMAC_15_18, 0xd80),
INTC_VECT(BBDMAC_15_18, 0xda0), INTC_VECT(BBDMAC_15_18, 0xdc0),
INTC_VECT(BBDMAC_15_18, 0xde0), INTC_VECT(BBDMAC_19_22, 0xe00),
INTC_VECT(BBDMAC_19_22, 0xe20), INTC_VECT(BBDMAC_19_22, 0xe40),
INTC_VECT(BBDMAC_19_22, 0xe60), INTC_VECT(BBDMAC_23_26, 0xe80),
INTC_VECT(BBDMAC_23_26, 0xea0), INTC_VECT(BBDMAC_23_26, 0xec0),
INTC_VECT(BBDMAC_23_26, 0xee0), INTC_VECT(BBDMAC_27, 0xf00),
INTC_VECT(BBDMAC_28, 0xf20), INTC_VECT(BBDMAC_29, 0xf40),
INTC_VECT(BBDMAC_30, 0xf60), INTC_VECT(BBDMAC_31, 0xf80),
};
static struct intc_group groups[] __initdata = {
INTC_GROUP(TMU, TMU0, TMU1, TMU2, TMU2_TICPI, TMU3, TMU4, TMU5,
TMU5_TICPI, TMU6, TMU7, TMU8),
INTC_GROUP(DMAC, DMAC0_DMINT0, DMAC0_DMINT1, DMAC0_DMINT2),
INTC_GROUP(I2S, I2S0, I2S1, I2S2, I2S3),
INTC_GROUP(SRC, SRC_RX, SRC_TX, SRC_SPDIF),
INTC_GROUP(GFX3D, GFX3D_MBX, GFX3D_DMAC),
INTC_GROUP(SPI, SPI0, SPI1),
INTC_GROUP(SCIF, SCIF089, SCIF1234, SCIF567),
INTC_GROUP(BBDMAC,
BBDMAC_0_3, BBDMAC_4_7, BBDMAC_8_10, BBDMAC_11_14,
BBDMAC_15_18, BBDMAC_19_22, BBDMAC_23_26, BBDMAC_27,
BBDMAC_28, BBDMAC_29, BBDMAC_30, BBDMAC_31),
};
static struct intc_mask_reg mask_registers[] __initdata = {
{ 0xffe00040, 0xffe00044, 32, /* INT2MSKR / INT2MSKCR */
{ 0, BBDMAC, ADC, SCIF, SPI, EXBUS_ATA, GFX3D, GFX2D,
GPS, CAN, ATAPI, USB, YUV, REMOTE, VIDEO_IN, DU, SRC, I2S,
DMAC, I2C, HUDI, SPDIF, IPI, HAC, TMU, GPIO } },
};
static struct intc_prio_reg prio_registers[] __initdata = {
{ 0xffe00000, 0, 32, 8, /* INT2PRI0 */ { GPIO, TMU0, 0, HAC } },
{ 0xffe00004, 0, 32, 8, /* INT2PRI1 */ { IPI, SPDIF, HUDI, I2C } },
{ 0xffe00008, 0, 32, 8, /* INT2PRI2 */ { DMAC, I2S, SRC, DU } },
{ 0xffe0000c, 0, 32, 8, /* INT2PRI3 */ { VIDEO_IN, REMOTE, YUV, USB } },
{ 0xffe00010, 0, 32, 8, /* INT2PRI4 */ { ATAPI, CAN, GPS, GFX2D } },
{ 0xffe00014, 0, 32, 8, /* INT2PRI5 */ { 0, GFX3D, EXBUS_ATA, SPI } },
{ 0xffe00018, 0, 32, 8, /* INT2PRI6 */ { SCIF1234, SCIF567, SCIF089 } },
{ 0xffe0001c, 0, 32, 8, /* INT2PRI7 */ { ADC, 0, 0, BBDMAC_0_3 } },
{ 0xffe00020, 0, 32, 8, /* INT2PRI8 */
{ BBDMAC_4_7, BBDMAC_8_10, BBDMAC_11_14, BBDMAC_15_18 } },
{ 0xffe00024, 0, 32, 8, /* INT2PRI9 */
{ BBDMAC_19_22, BBDMAC_23_26, BBDMAC_27, BBDMAC_28 } },
{ 0xffe00028, 0, 32, 8, /* INT2PRI10 */
{ BBDMAC_29, BBDMAC_30, BBDMAC_31 } },
{ 0xffe0002c, 0, 32, 8, /* INT2PRI11 */
{ TMU1, TMU2, TMU2_TICPI, TMU3 } },
{ 0xffe00030, 0, 32, 8, /* INT2PRI12 */
{ TMU4, TMU5, TMU5_TICPI, TMU6 } },
{ 0xffe00034, 0, 32, 8, /* INT2PRI13 */
{ TMU7, TMU8 } },
};
static DECLARE_INTC_DESC(intc_desc, "sh7770", vectors, groups,
mask_registers, prio_registers, NULL);
/* Support for external interrupt pins in IRQ mode */
static struct intc_vect irq_vectors[] __initdata = {
INTC_VECT(IRQ0, 0x240), INTC_VECT(IRQ1, 0x280),
INTC_VECT(IRQ2, 0x2c0), INTC_VECT(IRQ3, 0x300),
INTC_VECT(IRQ4, 0x340), INTC_VECT(IRQ5, 0x380),
};
static struct intc_mask_reg irq_mask_registers[] __initdata = {
{ 0xffd00044, 0xffd00064, 32, /* INTMSK0 / INTMSKCLR0 */
{ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, } },
};
static struct intc_prio_reg irq_prio_registers[] __initdata = {
{ 0xffd00010, 0, 32, 4, /* INTPRI */ { IRQ0, IRQ1, IRQ2, IRQ3,
IRQ4, IRQ5, } },
};
static struct intc_sense_reg irq_sense_registers[] __initdata = {
{ 0xffd0001c, 32, 2, /* ICR1 */ { IRQ0, IRQ1, IRQ2, IRQ3,
IRQ4, IRQ5, } },
};
static DECLARE_INTC_DESC(intc_irq_desc, "sh7770-irq", irq_vectors,
NULL, irq_mask_registers, irq_prio_registers,
irq_sense_registers);
/* External interrupt pins in IRL mode */
static struct intc_vect irl_vectors[] __initdata = {
INTC_VECT(IRL_LLLL, 0x200), INTC_VECT(IRL_LLLH, 0x220),
INTC_VECT(IRL_LLHL, 0x240), INTC_VECT(IRL_LLHH, 0x260),
INTC_VECT(IRL_LHLL, 0x280), INTC_VECT(IRL_LHLH, 0x2a0),
INTC_VECT(IRL_LHHL, 0x2c0), INTC_VECT(IRL_LHHH, 0x2e0),
INTC_VECT(IRL_HLLL, 0x300), INTC_VECT(IRL_HLLH, 0x320),
INTC_VECT(IRL_HLHL, 0x340), INTC_VECT(IRL_HLHH, 0x360),
INTC_VECT(IRL_HHLL, 0x380), INTC_VECT(IRL_HHLH, 0x3a0),
INTC_VECT(IRL_HHHL, 0x3c0),
};
static struct intc_mask_reg irl3210_mask_registers[] __initdata = {
{ 0xffd40080, 0xffd40084, 32, /* INTMSK2 / INTMSKCLR2 */
{ IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH,
IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH,
IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH,
IRL_HHLL, IRL_HHLH, IRL_HHHL, } },
};
static struct intc_mask_reg irl7654_mask_registers[] __initdata = {
{ 0xffd40080, 0xffd40084, 32, /* INTMSK2 / INTMSKCLR2 */
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH,
IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH,
IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH,
IRL_HHLL, IRL_HHLH, IRL_HHHL, } },
};
static DECLARE_INTC_DESC(intc_irl7654_desc, "sh7780-irl7654", irl_vectors,
NULL, irl7654_mask_registers, NULL, NULL);
static DECLARE_INTC_DESC(intc_irl3210_desc, "sh7780-irl3210", irl_vectors,
NULL, irl3210_mask_registers, NULL, NULL);
#define INTC_ICR0 0xffd00000
#define INTC_INTMSK0 0xffd00044
#define INTC_INTMSK1 0xffd00048
#define INTC_INTMSK2 0xffd40080
#define INTC_INTMSKCLR1 0xffd00068
#define INTC_INTMSKCLR2 0xffd40084
void __init plat_irq_setup(void)
{
/* disable IRQ7-0 */
__raw_writel(0xff000000, INTC_INTMSK0);
/* disable IRL3-0 + IRL7-4 */
__raw_writel(0xc0000000, INTC_INTMSK1);
__raw_writel(0xfffefffe, INTC_INTMSK2);
/* select IRL mode for IRL3-0 + IRL7-4 */
__raw_writel(__raw_readl(INTC_ICR0) & ~0x00c00000, INTC_ICR0);
	/* disable holding function, i.e. enable "SH-4 Mode" */
__raw_writel(__raw_readl(INTC_ICR0) | 0x00200000, INTC_ICR0);
register_intc_controller(&intc_desc);
}
void __init plat_irq_setup_pins(int mode)
{
switch (mode) {
case IRQ_MODE_IRQ:
/* select IRQ mode for IRL3-0 + IRL7-4 */
__raw_writel(__raw_readl(INTC_ICR0) | 0x00c00000, INTC_ICR0);
register_intc_controller(&intc_irq_desc);
break;
case IRQ_MODE_IRL7654:
/* enable IRL7-4 but don't provide any masking */
__raw_writel(0x40000000, INTC_INTMSKCLR1);
__raw_writel(0x0000fffe, INTC_INTMSKCLR2);
break;
case IRQ_MODE_IRL3210:
/* enable IRL0-3 but don't provide any masking */
__raw_writel(0x80000000, INTC_INTMSKCLR1);
__raw_writel(0xfffe0000, INTC_INTMSKCLR2);
break;
case IRQ_MODE_IRL7654_MASK:
/* enable IRL7-4 and mask using cpu intc controller */
__raw_writel(0x40000000, INTC_INTMSKCLR1);
register_intc_controller(&intc_irl7654_desc);
break;
case IRQ_MODE_IRL3210_MASK:
/* enable IRL0-3 and mask using cpu intc controller */
__raw_writel(0x80000000, INTC_INTMSKCLR1);
register_intc_controller(&intc_irl3210_desc);
break;
default:
BUG();
}
}
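/*
 * Board-code sketch: a board wiring the external pins as individual
 * interrupts would call plat_irq_setup_pins(IRQ_MODE_IRQ) from its
 * setup code; boards using encoded IRL inputs pick one of the IRL
 * modes instead.
 */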
| linux-master | arch/sh/kernel/cpu/sh4a/setup-sh7770.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SH7757 (B0 step) Pinmux
*
* Copyright (C) 2009-2010 Renesas Solutions Corp.
*
* Author : Yoshihiro Shimoda <[email protected]>
*
* Based on SH7723 Pinmux
* Copyright (C) 2008 Magnus Damm
*/
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <cpu/pfc.h>
static struct resource sh7757_pfc_resources[] = {
[0] = {
.start = 0xffec0000,
.end = 0xffec008f,
.flags = IORESOURCE_MEM,
},
};
static int __init plat_pinmux_setup(void)
{
return sh_pfc_register("pfc-sh7757", sh7757_pfc_resources,
ARRAY_SIZE(sh7757_pfc_resources));
}
arch_initcall(plat_pinmux_setup);
| linux-master | arch/sh/kernel/cpu/sh4a/pinmux-sh7757.c |
// SPDX-License-Identifier: GPL-2.0
/*
 * arch/sh/kernel/cpu/sh4a/clock-sh7757.c
*
* SH7757 support for the clock framework
*
* Copyright (C) 2009-2010 Renesas Solutions Corp.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/clkdev.h>
#include <asm/clock.h>
#include <asm/freq.h>
/*
* Default rate for the root input clock, reset this with clk_set_rate()
* from the platform code.
*/
static struct clk extal_clk = {
.rate = 48000000,
};
static unsigned long pll_recalc(struct clk *clk)
{
int multiplier;
multiplier = test_mode_pin(MODE_PIN0) ? 24 : 16;
return clk->parent->rate * multiplier;
}
static struct sh_clk_ops pll_clk_ops = {
.recalc = pll_recalc,
};
static struct clk pll_clk = {
.ops = &pll_clk_ops,
.parent = &extal_clk,
.flags = CLK_ENABLE_ON_INIT,
};
static struct clk *clks[] = {
&extal_clk,
&pll_clk,
};
static unsigned int div2[] = { 1, 1, 2, 1, 1, 4, 1, 6,
1, 1, 1, 16, 1, 24, 1, 1 };
static struct clk_div_mult_table div4_div_mult_table = {
.divisors = div2,
.nr_divisors = ARRAY_SIZE(div2),
};
static struct clk_div4_table div4_table = {
.div_mult_table = &div4_div_mult_table,
};
enum { DIV4_I, DIV4_SH, DIV4_P, DIV4_NR };
#define DIV4(_bit, _mask, _flags) \
SH_CLK_DIV4(&pll_clk, FRQCR, _bit, _mask, _flags)
struct clk div4_clks[DIV4_NR] = {
/*
	 * The P clock is always enabled, because some P clock modules are
	 * used by the host PC.
*/
[DIV4_P] = DIV4(0, 0x2800, CLK_ENABLE_ON_INIT),
[DIV4_SH] = DIV4(12, 0x00a0, CLK_ENABLE_ON_INIT),
[DIV4_I] = DIV4(20, 0x0004, CLK_ENABLE_ON_INIT),
};
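/*
 * Decoding the masks above against the sparse div2[] table (assuming
 * each mask is a bitmap of permitted FRQCR field values): DIV4_P's
 * 0x2800 permits field values 11 and 13 (/16 and /24), DIV4_SH's
 * 0x00a0 permits 5 and 7 (/4 and /6), and DIV4_I's 0x0004 permits
 * only 2 (/2). The interleaved 1s in div2[] are placeholders for
 * reserved encodings.
 */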
#define MSTPCR0 0xffc80030
#define MSTPCR1 0xffc80034
#define MSTPCR2 0xffc10028
enum { MSTP004, MSTP000, MSTP127, MSTP114, MSTP113, MSTP112,
MSTP111, MSTP110, MSTP103, MSTP102, MSTP220,
MSTP_NR };
static struct clk mstp_clks[MSTP_NR] = {
/* MSTPCR0 */
[MSTP004] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 4, 0),
[MSTP000] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 0, 0),
/* MSTPCR1 */
[MSTP127] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 27, 0),
[MSTP114] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 14, 0),
[MSTP113] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 13, 0),
[MSTP112] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 12, 0),
[MSTP111] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 11, 0),
[MSTP110] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 10, 0),
[MSTP103] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 3, 0),
[MSTP102] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 2, 0),
/* MSTPCR2 */
[MSTP220] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR2, 20, 0),
};
static struct clk_lookup lookups[] = {
/* main clocks */
CLKDEV_CON_ID("extal", &extal_clk),
CLKDEV_CON_ID("pll_clk", &pll_clk),
/* DIV4 clocks */
CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]),
CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
/* MSTP32 clocks */
CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[MSTP004]),
CLKDEV_CON_ID("riic0", &mstp_clks[MSTP000]),
CLKDEV_CON_ID("riic1", &mstp_clks[MSTP000]),
CLKDEV_CON_ID("riic2", &mstp_clks[MSTP000]),
CLKDEV_CON_ID("riic3", &mstp_clks[MSTP000]),
CLKDEV_CON_ID("riic4", &mstp_clks[MSTP000]),
CLKDEV_CON_ID("riic5", &mstp_clks[MSTP000]),
CLKDEV_CON_ID("riic6", &mstp_clks[MSTP000]),
CLKDEV_CON_ID("riic7", &mstp_clks[MSTP000]),
CLKDEV_ICK_ID("fck", "sh-tmu.0", &mstp_clks[MSTP113]),
CLKDEV_ICK_ID("fck", "sh-tmu.1", &mstp_clks[MSTP114]),
CLKDEV_ICK_ID("fck", "sh-sci.2", &mstp_clks[MSTP112]),
CLKDEV_ICK_ID("fck", "sh-sci.1", &mstp_clks[MSTP111]),
CLKDEV_ICK_ID("fck", "sh-sci.0", &mstp_clks[MSTP110]),
CLKDEV_CON_ID("usb_fck", &mstp_clks[MSTP103]),
CLKDEV_DEV_ID("renesas_usbhs.0", &mstp_clks[MSTP102]),
CLKDEV_CON_ID("mmc0", &mstp_clks[MSTP220]),
CLKDEV_DEV_ID("rspi.2", &mstp_clks[MSTP127]),
};
int __init arch_clk_init(void)
{
int i, ret = 0;
for (i = 0; i < ARRAY_SIZE(clks); i++)
ret |= clk_register(clks[i]);
clkdev_add_table(lookups, ARRAY_SIZE(lookups));
if (!ret)
ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),
&div4_table);
if (!ret)
ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
return ret;
}
| linux-master | arch/sh/kernel/cpu/sh4a/clock-sh7757.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SH-X3 Prototype Setup
*
* Copyright (C) 2007 - 2010 Paul Mundt
*/
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/serial_sci.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/sh_timer.h>
#include <linux/sh_intc.h>
#include <cpu/shx3.h>
#include <asm/mmzone.h>
#include <asm/platform_early.h>
/*
* This intentionally only registers SCIF ports 0, 1, and 3. SCIF 2
* INTEVT values overlap with the FPU EXPEVT ones, requiring special
* demuxing in the exception dispatch path.
*
 * As this overlap is something that never should have made it into
* silicon in the first place, we just refuse to deal with the port at
* all rather than adding infrastructure to hack around it.
*/
static struct plat_sci_port scif0_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
};
static struct resource scif0_resources[] = {
DEFINE_RES_MEM(0xffc30000, 0x100),
DEFINE_RES_IRQ(evt2irq(0x700)),
DEFINE_RES_IRQ(evt2irq(0x720)),
DEFINE_RES_IRQ(evt2irq(0x760)),
DEFINE_RES_IRQ(evt2irq(0x740)),
};
static struct platform_device scif0_device = {
.name = "sh-sci",
.id = 0,
.resource = scif0_resources,
.num_resources = ARRAY_SIZE(scif0_resources),
.dev = {
.platform_data = &scif0_platform_data,
},
};
static struct plat_sci_port scif1_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
};
static struct resource scif1_resources[] = {
DEFINE_RES_MEM(0xffc40000, 0x100),
DEFINE_RES_IRQ(evt2irq(0x780)),
DEFINE_RES_IRQ(evt2irq(0x7a0)),
DEFINE_RES_IRQ(evt2irq(0x7e0)),
DEFINE_RES_IRQ(evt2irq(0x7c0)),
};
static struct platform_device scif1_device = {
.name = "sh-sci",
.id = 1,
.resource = scif1_resources,
.num_resources = ARRAY_SIZE(scif1_resources),
.dev = {
.platform_data = &scif1_platform_data,
},
};
static struct plat_sci_port scif2_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
};
static struct resource scif2_resources[] = {
DEFINE_RES_MEM(0xffc60000, 0x100),
DEFINE_RES_IRQ(evt2irq(0x880)),
DEFINE_RES_IRQ(evt2irq(0x8a0)),
DEFINE_RES_IRQ(evt2irq(0x8e0)),
DEFINE_RES_IRQ(evt2irq(0x8c0)),
};
static struct platform_device scif2_device = {
.name = "sh-sci",
.id = 2,
.resource = scif2_resources,
.num_resources = ARRAY_SIZE(scif2_resources),
.dev = {
.platform_data = &scif2_platform_data,
},
};
static struct sh_timer_config tmu0_platform_data = {
.channels_mask = 7,
};
static struct resource tmu0_resources[] = {
DEFINE_RES_MEM(0xffc10000, 0x30),
DEFINE_RES_IRQ(evt2irq(0x400)),
DEFINE_RES_IRQ(evt2irq(0x420)),
DEFINE_RES_IRQ(evt2irq(0x440)),
};
static struct platform_device tmu0_device = {
.name = "sh-tmu",
.id = 0,
.dev = {
.platform_data = &tmu0_platform_data,
},
.resource = tmu0_resources,
.num_resources = ARRAY_SIZE(tmu0_resources),
};
static struct sh_timer_config tmu1_platform_data = {
.channels_mask = 7,
};
static struct resource tmu1_resources[] = {
DEFINE_RES_MEM(0xffc20000, 0x2c),
DEFINE_RES_IRQ(evt2irq(0x460)),
DEFINE_RES_IRQ(evt2irq(0x480)),
DEFINE_RES_IRQ(evt2irq(0x4a0)),
};
static struct platform_device tmu1_device = {
.name = "sh-tmu",
.id = 1,
.dev = {
.platform_data = &tmu1_platform_data,
},
.resource = tmu1_resources,
.num_resources = ARRAY_SIZE(tmu1_resources),
};
static struct platform_device *shx3_early_devices[] __initdata = {
&scif0_device,
&scif1_device,
&scif2_device,
&tmu0_device,
&tmu1_device,
};
static int __init shx3_devices_setup(void)
{
return platform_add_devices(shx3_early_devices,
ARRAY_SIZE(shx3_early_devices));
}
arch_initcall(shx3_devices_setup);
void __init plat_early_device_setup(void)
{
sh_early_platform_add_devices(shx3_early_devices,
ARRAY_SIZE(shx3_early_devices));
}
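/*
 * Note that the same device list is registered twice by design: the
 * early pass makes the SCIFs and TMUs usable before the driver core
 * comes up (early serial console, clockevents), while the
 * arch_initcall pass registers them as regular platform devices.
 */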
enum {
UNUSED = 0,
/* interrupt sources */
IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH,
IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH,
IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH,
IRL_HHLL, IRL_HHLH, IRL_HHHL,
IRQ0, IRQ1, IRQ2, IRQ3,
HUDII,
TMU0, TMU1, TMU2, TMU3, TMU4, TMU5,
PCII0, PCII1, PCII2, PCII3, PCII4,
PCII5, PCII6, PCII7, PCII8, PCII9,
SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI,
SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI,
SCIF2_ERI, SCIF2_RXI, SCIF2_BRI, SCIF2_TXI,
SCIF3_ERI, SCIF3_RXI, SCIF3_BRI, SCIF3_TXI,
DMAC0_DMINT0, DMAC0_DMINT1, DMAC0_DMINT2, DMAC0_DMINT3,
DMAC0_DMINT4, DMAC0_DMINT5, DMAC0_DMAE,
DU,
DMAC1_DMINT6, DMAC1_DMINT7, DMAC1_DMINT8, DMAC1_DMINT9,
DMAC1_DMINT10, DMAC1_DMINT11, DMAC1_DMAE,
IIC, VIN0, VIN1, VCORE0, ATAPI,
DTU0, DTU1, DTU2, DTU3,
FE0, FE1,
GPIO0, GPIO1, GPIO2, GPIO3,
PAM, IRM,
INTICI0, INTICI1, INTICI2, INTICI3,
INTICI4, INTICI5, INTICI6, INTICI7,
/* interrupt groups */
IRL, PCII56789, SCIF0, SCIF1, SCIF2, SCIF3,
DMAC0, DMAC1,
};
static struct intc_vect vectors[] __initdata = {
INTC_VECT(HUDII, 0x3e0),
INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
INTC_VECT(TMU2, 0x440), INTC_VECT(TMU3, 0x460),
INTC_VECT(TMU4, 0x480), INTC_VECT(TMU5, 0x4a0),
INTC_VECT(PCII0, 0x500), INTC_VECT(PCII1, 0x520),
INTC_VECT(PCII2, 0x540), INTC_VECT(PCII3, 0x560),
INTC_VECT(PCII4, 0x580), INTC_VECT(PCII5, 0x5a0),
INTC_VECT(PCII6, 0x5c0), INTC_VECT(PCII7, 0x5e0),
INTC_VECT(PCII8, 0x600), INTC_VECT(PCII9, 0x620),
INTC_VECT(SCIF0_ERI, 0x700), INTC_VECT(SCIF0_RXI, 0x720),
INTC_VECT(SCIF0_BRI, 0x740), INTC_VECT(SCIF0_TXI, 0x760),
INTC_VECT(SCIF1_ERI, 0x780), INTC_VECT(SCIF1_RXI, 0x7a0),
INTC_VECT(SCIF1_BRI, 0x7c0), INTC_VECT(SCIF1_TXI, 0x7e0),
INTC_VECT(SCIF3_ERI, 0x880), INTC_VECT(SCIF3_RXI, 0x8a0),
INTC_VECT(SCIF3_BRI, 0x8c0), INTC_VECT(SCIF3_TXI, 0x8e0),
INTC_VECT(DMAC0_DMINT0, 0x900), INTC_VECT(DMAC0_DMINT1, 0x920),
INTC_VECT(DMAC0_DMINT2, 0x940), INTC_VECT(DMAC0_DMINT3, 0x960),
INTC_VECT(DMAC0_DMINT4, 0x980), INTC_VECT(DMAC0_DMINT5, 0x9a0),
INTC_VECT(DMAC0_DMAE, 0x9c0),
INTC_VECT(DU, 0x9e0),
INTC_VECT(DMAC1_DMINT6, 0xa00), INTC_VECT(DMAC1_DMINT7, 0xa20),
INTC_VECT(DMAC1_DMINT8, 0xa40), INTC_VECT(DMAC1_DMINT9, 0xa60),
INTC_VECT(DMAC1_DMINT10, 0xa80), INTC_VECT(DMAC1_DMINT11, 0xaa0),
INTC_VECT(DMAC1_DMAE, 0xac0),
INTC_VECT(IIC, 0xae0),
INTC_VECT(VIN0, 0xb00), INTC_VECT(VIN1, 0xb20),
INTC_VECT(VCORE0, 0xb00), INTC_VECT(ATAPI, 0xb60),
INTC_VECT(DTU0, 0xc00), INTC_VECT(DTU0, 0xc20),
INTC_VECT(DTU0, 0xc40),
INTC_VECT(DTU1, 0xc60), INTC_VECT(DTU1, 0xc80),
INTC_VECT(DTU1, 0xca0),
INTC_VECT(DTU2, 0xcc0), INTC_VECT(DTU2, 0xce0),
INTC_VECT(DTU2, 0xd00),
INTC_VECT(DTU3, 0xd20), INTC_VECT(DTU3, 0xd40),
INTC_VECT(DTU3, 0xd60),
INTC_VECT(FE0, 0xe00), INTC_VECT(FE1, 0xe20),
INTC_VECT(GPIO0, 0xe40), INTC_VECT(GPIO1, 0xe60),
INTC_VECT(GPIO2, 0xe80), INTC_VECT(GPIO3, 0xea0),
INTC_VECT(PAM, 0xec0), INTC_VECT(IRM, 0xee0),
INTC_VECT(INTICI0, 0xf00), INTC_VECT(INTICI1, 0xf20),
INTC_VECT(INTICI2, 0xf40), INTC_VECT(INTICI3, 0xf60),
INTC_VECT(INTICI4, 0xf80), INTC_VECT(INTICI5, 0xfa0),
INTC_VECT(INTICI6, 0xfc0), INTC_VECT(INTICI7, 0xfe0),
};
static struct intc_group groups[] __initdata = {
INTC_GROUP(IRL, IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH,
IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH,
IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH,
IRL_HHLL, IRL_HHLH, IRL_HHHL),
INTC_GROUP(PCII56789, PCII5, PCII6, PCII7, PCII8, PCII9),
INTC_GROUP(SCIF0, SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI),
INTC_GROUP(SCIF1, SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI),
INTC_GROUP(SCIF3, SCIF3_ERI, SCIF3_RXI, SCIF3_BRI, SCIF3_TXI),
INTC_GROUP(DMAC0, DMAC0_DMINT0, DMAC0_DMINT1, DMAC0_DMINT2,
DMAC0_DMINT3, DMAC0_DMINT4, DMAC0_DMINT5, DMAC0_DMAE),
INTC_GROUP(DMAC1, DMAC1_DMINT6, DMAC1_DMINT7, DMAC1_DMINT8,
DMAC1_DMINT9, DMAC1_DMINT10, DMAC1_DMINT11),
};
#define INT2DISTCR0 0xfe4108a0
#define INT2DISTCR1 0xfe4108a4
#define INT2DISTCR2 0xfe4108a8
static struct intc_mask_reg mask_registers[] __initdata = {
{ 0xfe410030, 0xfe410050, 32, /* CnINTMSK0 / CnINTMSKCLR0 */
{ IRQ0, IRQ1, IRQ2, IRQ3 } },
{ 0xfe410040, 0xfe410060, 32, /* CnINTMSK1 / CnINTMSKCLR1 */
{ IRL } },
{ 0xfe410820, 0xfe410850, 32, /* CnINT2MSK0 / CnINT2MSKCLR0 */
{ FE1, FE0, 0, ATAPI, VCORE0, VIN1, VIN0, IIC,
DU, GPIO3, GPIO2, GPIO1, GPIO0, PAM, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, /* HUDI bits ignored */
0, TMU5, TMU4, TMU3, TMU2, TMU1, TMU0, 0, },
INTC_SMP_BALANCING(INT2DISTCR0) },
{ 0xfe410830, 0xfe410860, 32, /* CnINT2MSK1 / CnINT2MSKCLR1 */
{ 0, 0, 0, 0, DTU3, DTU2, DTU1, DTU0, /* IRM bits ignored */
PCII9, PCII8, PCII7, PCII6, PCII5, PCII4, PCII3, PCII2,
PCII1, PCII0, DMAC1_DMAE, DMAC1_DMINT11,
DMAC1_DMINT10, DMAC1_DMINT9, DMAC1_DMINT8, DMAC1_DMINT7,
DMAC1_DMINT6, DMAC0_DMAE, DMAC0_DMINT5, DMAC0_DMINT4,
DMAC0_DMINT3, DMAC0_DMINT2, DMAC0_DMINT1, DMAC0_DMINT0 },
INTC_SMP_BALANCING(INT2DISTCR1) },
{ 0xfe410840, 0xfe410870, 32, /* CnINT2MSK2 / CnINT2MSKCLR2 */
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
SCIF3_TXI, SCIF3_BRI, SCIF3_RXI, SCIF3_ERI,
SCIF2_TXI, SCIF2_BRI, SCIF2_RXI, SCIF2_ERI,
SCIF1_TXI, SCIF1_BRI, SCIF1_RXI, SCIF1_ERI,
SCIF0_TXI, SCIF0_BRI, SCIF0_RXI, SCIF0_ERI },
INTC_SMP_BALANCING(INT2DISTCR2) },
};
static struct intc_prio_reg prio_registers[] __initdata = {
{ 0xfe410010, 0, 32, 4, /* INTPRI */ { IRQ0, IRQ1, IRQ2, IRQ3 } },
{ 0xfe410800, 0, 32, 4, /* INT2PRI0 */ { 0, HUDII, TMU5, TMU4,
TMU3, TMU2, TMU1, TMU0 } },
{ 0xfe410804, 0, 32, 4, /* INT2PRI1 */ { DTU3, DTU2, DTU1, DTU0,
SCIF3, SCIF2,
SCIF1, SCIF0 } },
{ 0xfe410808, 0, 32, 4, /* INT2PRI2 */ { DMAC1, DMAC0,
PCII56789, PCII4,
PCII3, PCII2,
PCII1, PCII0 } },
{ 0xfe41080c, 0, 32, 4, /* INT2PRI3 */ { FE1, FE0, ATAPI, VCORE0,
VIN1, VIN0, IIC, DU} },
{ 0xfe410810, 0, 32, 4, /* INT2PRI4 */ { 0, 0, PAM, GPIO3,
GPIO2, GPIO1, GPIO0, IRM } },
{ 0xfe410090, 0xfe4100a0, 32, 4, /* CnICIPRI / CnICIPRICLR */
{ INTICI7, INTICI6, INTICI5, INTICI4,
INTICI3, INTICI2, INTICI1, INTICI0 }, INTC_SMP(4, 4) },
};
static DECLARE_INTC_DESC(intc_desc, "shx3", vectors, groups,
mask_registers, prio_registers, NULL);
/* Support for external interrupt pins in IRQ mode */
static struct intc_vect vectors_irq[] __initdata = {
INTC_VECT(IRQ0, 0x240), INTC_VECT(IRQ1, 0x280),
INTC_VECT(IRQ2, 0x2c0), INTC_VECT(IRQ3, 0x300),
};
static struct intc_sense_reg sense_registers[] __initdata = {
{ 0xfe41001c, 32, 2, /* ICR1 */ { IRQ0, IRQ1, IRQ2, IRQ3 } },
};
static DECLARE_INTC_DESC(intc_desc_irq, "shx3-irq", vectors_irq, groups,
mask_registers, prio_registers, sense_registers);
/* External interrupt pins in IRL mode */
static struct intc_vect vectors_irl[] __initdata = {
INTC_VECT(IRL_LLLL, 0x200), INTC_VECT(IRL_LLLH, 0x220),
INTC_VECT(IRL_LLHL, 0x240), INTC_VECT(IRL_LLHH, 0x260),
INTC_VECT(IRL_LHLL, 0x280), INTC_VECT(IRL_LHLH, 0x2a0),
INTC_VECT(IRL_LHHL, 0x2c0), INTC_VECT(IRL_LHHH, 0x2e0),
INTC_VECT(IRL_HLLL, 0x300), INTC_VECT(IRL_HLLH, 0x320),
INTC_VECT(IRL_HLHL, 0x340), INTC_VECT(IRL_HLHH, 0x360),
INTC_VECT(IRL_HHLL, 0x380), INTC_VECT(IRL_HHLH, 0x3a0),
INTC_VECT(IRL_HHHL, 0x3c0),
};
static DECLARE_INTC_DESC(intc_desc_irl, "shx3-irl", vectors_irl, groups,
mask_registers, prio_registers, NULL);
void __init plat_irq_setup_pins(int mode)
{
int ret = 0;
switch (mode) {
case IRQ_MODE_IRQ:
ret |= gpio_request(GPIO_FN_IRQ3, intc_desc_irq.name);
ret |= gpio_request(GPIO_FN_IRQ2, intc_desc_irq.name);
ret |= gpio_request(GPIO_FN_IRQ1, intc_desc_irq.name);
ret |= gpio_request(GPIO_FN_IRQ0, intc_desc_irq.name);
if (unlikely(ret)) {
pr_err("Failed to set IRQ mode\n");
return;
}
register_intc_controller(&intc_desc_irq);
break;
case IRQ_MODE_IRL3210:
ret |= gpio_request(GPIO_FN_IRL3, intc_desc_irl.name);
ret |= gpio_request(GPIO_FN_IRL2, intc_desc_irl.name);
ret |= gpio_request(GPIO_FN_IRL1, intc_desc_irl.name);
ret |= gpio_request(GPIO_FN_IRL0, intc_desc_irl.name);
if (unlikely(ret)) {
pr_err("Failed to set IRL mode\n");
return;
}
register_intc_controller(&intc_desc_irl);
break;
default:
BUG();
}
}
void __init plat_irq_setup(void)
{
register_intc_controller(&intc_desc);
}
void __init plat_mem_setup(void)
{
unsigned int nid = 1;
/* Register CPU#0 URAM space as Node 1 */
setup_bootmem_node(nid++, 0x145f0000, 0x14610000); /* CPU0 */
#if 0
/* XXX: Not yet.. */
setup_bootmem_node(nid++, 0x14df0000, 0x14e10000); /* CPU1 */
setup_bootmem_node(nid++, 0x155f0000, 0x15610000); /* CPU2 */
setup_bootmem_node(nid++, 0x15df0000, 0x15e10000); /* CPU3 */
#endif
setup_bootmem_node(nid++, 0x16000000, 0x16020000); /* CSM */
}
| linux-master | arch/sh/kernel/cpu/sh4a/setup-shx3.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/cpu/sh4a/setup-sh7734.c
*
* SH7734 Setup
*
* Copyright (C) 2011,2012 Nobuhiro Iwamatsu <[email protected]>
* Copyright (C) 2011,2012 Renesas Solutions Corp.
*/
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/serial_sci.h>
#include <linux/sh_timer.h>
#include <linux/io.h>
#include <asm/clock.h>
#include <asm/irq.h>
#include <asm/platform_early.h>
#include <cpu/sh7734.h>
/* SCIF */
static struct plat_sci_port scif0_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
.regtype = SCIx_SH4_SCIF_BRG_REGTYPE,
};
static struct resource scif0_resources[] = {
DEFINE_RES_MEM(0xffe40000, 0x100),
DEFINE_RES_IRQ(evt2irq(0x8c0)),
};
static struct platform_device scif0_device = {
.name = "sh-sci",
.id = 0,
.resource = scif0_resources,
.num_resources = ARRAY_SIZE(scif0_resources),
.dev = {
.platform_data = &scif0_platform_data,
},
};
static struct plat_sci_port scif1_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
.regtype = SCIx_SH4_SCIF_BRG_REGTYPE,
};
static struct resource scif1_resources[] = {
DEFINE_RES_MEM(0xffe41000, 0x100),
DEFINE_RES_IRQ(evt2irq(0x8e0)),
};
static struct platform_device scif1_device = {
.name = "sh-sci",
.id = 1,
.resource = scif1_resources,
.num_resources = ARRAY_SIZE(scif1_resources),
.dev = {
.platform_data = &scif1_platform_data,
},
};
static struct plat_sci_port scif2_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
.regtype = SCIx_SH4_SCIF_BRG_REGTYPE,
};
static struct resource scif2_resources[] = {
DEFINE_RES_MEM(0xffe42000, 0x100),
DEFINE_RES_IRQ(evt2irq(0x900)),
};
static struct platform_device scif2_device = {
.name = "sh-sci",
.id = 2,
.resource = scif2_resources,
.num_resources = ARRAY_SIZE(scif2_resources),
.dev = {
.platform_data = &scif2_platform_data,
},
};
static struct plat_sci_port scif3_platform_data = {
.scscr = SCSCR_REIE | SCSCR_TOIE,
.type = PORT_SCIF,
.regtype = SCIx_SH4_SCIF_BRG_REGTYPE,
};
static struct resource scif3_resources[] = {
DEFINE_RES_MEM(0xffe43000, 0x100),
DEFINE_RES_IRQ(evt2irq(0x920)),
};
static struct platform_device scif3_device = {
.name = "sh-sci",
.id = 3,
.resource = scif3_resources,
.num_resources = ARRAY_SIZE(scif3_resources),
.dev = {
.platform_data = &scif3_platform_data,
},
};
static struct plat_sci_port scif4_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
.regtype = SCIx_SH4_SCIF_BRG_REGTYPE,
};
static struct resource scif4_resources[] = {
DEFINE_RES_MEM(0xffe44000, 0x100),
DEFINE_RES_IRQ(evt2irq(0x940)),
};
static struct platform_device scif4_device = {
.name = "sh-sci",
.id = 4,
.resource = scif4_resources,
.num_resources = ARRAY_SIZE(scif4_resources),
.dev = {
.platform_data = &scif4_platform_data,
},
};
static struct plat_sci_port scif5_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
.regtype = SCIx_SH4_SCIF_BRG_REGTYPE,
};
static struct resource scif5_resources[] = {
	DEFINE_RES_MEM(0xffe45000, 0x100),
DEFINE_RES_IRQ(evt2irq(0x960)),
};
static struct platform_device scif5_device = {
.name = "sh-sci",
.id = 5,
.resource = scif5_resources,
.num_resources = ARRAY_SIZE(scif5_resources),
.dev = {
.platform_data = &scif5_platform_data,
},
};
/* RTC */
static struct resource rtc_resources[] = {
[0] = {
.name = "rtc",
.start = 0xFFFC5000,
.end = 0xFFFC5000 + 0x26 - 1,
.flags = IORESOURCE_IO,
},
[1] = {
.start = evt2irq(0xC00),
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device rtc_device = {
.name = "sh-rtc",
.id = -1,
.num_resources = ARRAY_SIZE(rtc_resources),
.resource = rtc_resources,
};
/* I2C 0 */
static struct resource i2c0_resources[] = {
[0] = {
.name = "IIC0",
.start = 0xFFC70000,
.end = 0xFFC7000A - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = evt2irq(0x860),
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device i2c0_device = {
.name = "i2c-sh7734",
.id = 0,
.num_resources = ARRAY_SIZE(i2c0_resources),
.resource = i2c0_resources,
};
/* TMU */
static struct sh_timer_config tmu0_platform_data = {
.channels_mask = 7,
};
static struct resource tmu0_resources[] = {
DEFINE_RES_MEM(0xffd80000, 0x30),
DEFINE_RES_IRQ(evt2irq(0x400)),
DEFINE_RES_IRQ(evt2irq(0x420)),
DEFINE_RES_IRQ(evt2irq(0x440)),
};
static struct platform_device tmu0_device = {
.name = "sh-tmu",
.id = 0,
.dev = {
.platform_data = &tmu0_platform_data,
},
.resource = tmu0_resources,
.num_resources = ARRAY_SIZE(tmu0_resources),
};
static struct sh_timer_config tmu1_platform_data = {
.channels_mask = 7,
};
static struct resource tmu1_resources[] = {
DEFINE_RES_MEM(0xffd81000, 0x30),
DEFINE_RES_IRQ(evt2irq(0x480)),
DEFINE_RES_IRQ(evt2irq(0x4a0)),
DEFINE_RES_IRQ(evt2irq(0x4c0)),
};
static struct platform_device tmu1_device = {
.name = "sh-tmu",
.id = 1,
.dev = {
.platform_data = &tmu1_platform_data,
},
.resource = tmu1_resources,
.num_resources = ARRAY_SIZE(tmu1_resources),
};
static struct sh_timer_config tmu2_platform_data = {
.channels_mask = 7,
};
static struct resource tmu2_resources[] = {
DEFINE_RES_MEM(0xffd82000, 0x30),
DEFINE_RES_IRQ(evt2irq(0x500)),
DEFINE_RES_IRQ(evt2irq(0x520)),
DEFINE_RES_IRQ(evt2irq(0x540)),
};
static struct platform_device tmu2_device = {
.name = "sh-tmu",
.id = 2,
.dev = {
.platform_data = &tmu2_platform_data,
},
.resource = tmu2_resources,
.num_resources = ARRAY_SIZE(tmu2_resources),
};
static struct platform_device *sh7734_devices[] __initdata = {
&scif0_device,
&scif1_device,
&scif2_device,
&scif3_device,
&scif4_device,
&scif5_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
&rtc_device,
};
static struct platform_device *sh7734_early_devices[] __initdata = {
&scif0_device,
&scif1_device,
&scif2_device,
&scif3_device,
&scif4_device,
&scif5_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
};
void __init plat_early_device_setup(void)
{
sh_early_platform_add_devices(sh7734_early_devices,
ARRAY_SIZE(sh7734_early_devices));
}
#define GROUP 0
enum {
UNUSED = 0,
/* interrupt sources */
IRL0_LLLL, IRL0_LLLH, IRL0_LLHL, IRL0_LLHH,
IRL0_LHLL, IRL0_LHLH, IRL0_LHHL, IRL0_LHHH,
IRL0_HLLL, IRL0_HLLH, IRL0_HLHL, IRL0_HLHH,
IRL0_HHLL, IRL0_HHLH, IRL0_HHHL,
IRQ0, IRQ1, IRQ2, IRQ3,
DU,
TMU00, TMU10, TMU20, TMU21,
TMU30, TMU40, TMU50, TMU51,
TMU60, TMU70, TMU80,
RESET_WDT,
USB,
HUDI,
SHDMAC,
SSI0, SSI1, SSI2, SSI3,
VIN0,
RGPVG,
_2DG,
MMC,
HSPI,
LBSCATA,
I2C0,
RCAN0,
MIMLB,
SCIF0, SCIF1, SCIF2, SCIF3, SCIF4, SCIF5,
LBSCDMAC0, LBSCDMAC1, LBSCDMAC2,
RCAN1,
SDHI0, SDHI1,
IEBUS,
HPBDMAC0_3, HPBDMAC4_10, HPBDMAC11_18, HPBDMAC19_22, HPBDMAC23_25_27_28,
RTC,
VIN1,
LCDC,
SRC0, SRC1,
GETHER,
SDHI2,
GPIO0_3, GPIO4_5,
STIF0, STIF1,
ADMAC,
HIF,
FLCTL,
ADC,
MTU2,
RSPI,
QSPI,
HSCIF,
VEU3F_VE3,
/* Group */
/* Mask */
STIF_M,
GPIO_M,
HPBDMAC_M,
LBSCDMAC_M,
RCAN_M,
SRC_M,
SCIF_M,
LCDC_M,
_2DG_M,
VIN_M,
TMU_3_M,
TMU_0_M,
/* Priority */
RCAN_P,
LBSCDMAC_P,
/* Common */
SDHI,
SSI,
SPI,
};
static struct intc_vect vectors[] __initdata = {
INTC_VECT(DU, 0x3E0),
INTC_VECT(TMU00, 0x400),
INTC_VECT(TMU10, 0x420),
INTC_VECT(TMU20, 0x440),
INTC_VECT(TMU30, 0x480),
INTC_VECT(TMU40, 0x4A0),
INTC_VECT(TMU50, 0x4C0),
INTC_VECT(TMU51, 0x4E0),
INTC_VECT(TMU60, 0x500),
INTC_VECT(TMU70, 0x520),
INTC_VECT(TMU80, 0x540),
INTC_VECT(RESET_WDT, 0x560),
INTC_VECT(USB, 0x580),
INTC_VECT(HUDI, 0x600),
INTC_VECT(SHDMAC, 0x620),
INTC_VECT(SSI0, 0x6C0),
INTC_VECT(SSI1, 0x6E0),
INTC_VECT(SSI2, 0x700),
INTC_VECT(SSI3, 0x720),
INTC_VECT(VIN0, 0x740),
INTC_VECT(RGPVG, 0x760),
INTC_VECT(_2DG, 0x780),
INTC_VECT(MMC, 0x7A0),
INTC_VECT(HSPI, 0x7E0),
INTC_VECT(LBSCATA, 0x840),
INTC_VECT(I2C0, 0x860),
INTC_VECT(RCAN0, 0x880),
INTC_VECT(SCIF0, 0x8A0),
INTC_VECT(SCIF1, 0x8C0),
INTC_VECT(SCIF2, 0x900),
INTC_VECT(SCIF3, 0x920),
INTC_VECT(SCIF4, 0x940),
INTC_VECT(SCIF5, 0x960),
INTC_VECT(LBSCDMAC0, 0x9E0),
INTC_VECT(LBSCDMAC1, 0xA00),
INTC_VECT(LBSCDMAC2, 0xA20),
INTC_VECT(RCAN1, 0xA60),
INTC_VECT(SDHI0, 0xAE0),
INTC_VECT(SDHI1, 0xB00),
INTC_VECT(IEBUS, 0xB20),
INTC_VECT(HPBDMAC0_3, 0xB60),
INTC_VECT(HPBDMAC4_10, 0xB80),
INTC_VECT(HPBDMAC11_18, 0xBA0),
INTC_VECT(HPBDMAC19_22, 0xBC0),
INTC_VECT(HPBDMAC23_25_27_28, 0xBE0),
INTC_VECT(RTC, 0xC00),
INTC_VECT(VIN1, 0xC20),
INTC_VECT(LCDC, 0xC40),
INTC_VECT(SRC0, 0xC60),
INTC_VECT(SRC1, 0xC80),
INTC_VECT(GETHER, 0xCA0),
INTC_VECT(SDHI2, 0xCC0),
INTC_VECT(GPIO0_3, 0xCE0),
INTC_VECT(GPIO4_5, 0xD00),
INTC_VECT(STIF0, 0xD20),
INTC_VECT(STIF1, 0xD40),
INTC_VECT(ADMAC, 0xDA0),
INTC_VECT(HIF, 0xDC0),
INTC_VECT(FLCTL, 0xDE0),
INTC_VECT(ADC, 0xE00),
INTC_VECT(MTU2, 0xE20),
INTC_VECT(RSPI, 0xE40),
INTC_VECT(QSPI, 0xE60),
INTC_VECT(HSCIF, 0xFC0),
INTC_VECT(VEU3F_VE3, 0xF40),
};
static struct intc_group groups[] __initdata = {
/* Common */
INTC_GROUP(SDHI, SDHI0, SDHI1, SDHI2),
INTC_GROUP(SPI, HSPI, RSPI, QSPI),
INTC_GROUP(SSI, SSI0, SSI1, SSI2, SSI3),
/* Mask group */
INTC_GROUP(STIF_M, STIF0, STIF1), /* 22 */
INTC_GROUP(GPIO_M, GPIO0_3, GPIO4_5), /* 21 */
INTC_GROUP(HPBDMAC_M, HPBDMAC0_3, HPBDMAC4_10, HPBDMAC11_18,
HPBDMAC19_22, HPBDMAC23_25_27_28), /* 19 */
INTC_GROUP(LBSCDMAC_M, LBSCDMAC0, LBSCDMAC1, LBSCDMAC2), /* 18 */
INTC_GROUP(RCAN_M, RCAN0, RCAN1, IEBUS), /* 17 */
INTC_GROUP(SRC_M, SRC0, SRC1), /* 16 */
INTC_GROUP(SCIF_M, SCIF0, SCIF1, SCIF2, SCIF3, SCIF4, SCIF5,
HSCIF), /* 14 */
INTC_GROUP(LCDC_M, LCDC, MIMLB), /* 13 */
INTC_GROUP(_2DG_M, _2DG, RGPVG), /* 12 */
INTC_GROUP(VIN_M, VIN0, VIN1), /* 10 */
INTC_GROUP(TMU_3_M, TMU30, TMU40, TMU50, TMU51,
		TMU60, TMU70, TMU80), /* 2 */
INTC_GROUP(TMU_0_M, TMU00, TMU10, TMU20, TMU21), /* 1 */
	/* Priority group */
INTC_GROUP(RCAN_P, RCAN0, RCAN1), /* INT2PRI5 */
INTC_GROUP(LBSCDMAC_P, LBSCDMAC0, LBSCDMAC1), /* INT2PRI5 */
};
static struct intc_mask_reg mask_registers[] __initdata = {
{ 0xFF804040, 0xFF804044, 32, /* INT2MSKRG / INT2MSKCR */
{ 0,
VEU3F_VE3,
SDHI, /* SDHI 0-2 */
ADMAC,
FLCTL,
RESET_WDT,
HIF,
ADC,
MTU2,
STIF_M, /* STIF 0,1 */
	    GPIO_M, /* GPIO 0-5 */
GETHER,
HPBDMAC_M, /* HPBDMAC 0_3 - 23_25_27_28 */
LBSCDMAC_M, /* LBSCDMAC 0 - 2 */
RCAN_M, /* RCAN, IEBUS */
SRC_M, /* SRC 0,1 */
LBSCATA,
SCIF_M, /* SCIF 0-5, HSCIF */
LCDC_M, /* LCDC, MIMLB */
_2DG_M, /* 2DG, RGPVG */
SPI, /* HSPI, RSPI, QSPI */
VIN_M, /* VIN0, 1 */
SSI, /* SSI 0-3 */
USB,
SHDMAC,
HUDI,
MMC,
RTC,
	    I2C0, /* I2C 0, 1 */
TMU_3_M, /* TMU30 - TMU80 */
TMU_0_M, /* TMU00 - TMU21 */
DU } },
};
static struct intc_prio_reg prio_registers[] __initdata = {
{ 0xFF804000, 0, 32, 8, /* INT2PRI0 */
{ DU, TMU00, TMU10, TMU20 } },
{ 0xFF804004, 0, 32, 8, /* INT2PRI1 */
{ TMU30, TMU60, RTC, SDHI } },
{ 0xFF804008, 0, 32, 8, /* INT2PRI2 */
{ HUDI, SHDMAC, USB, SSI } },
{ 0xFF80400C, 0, 32, 8, /* INT2PRI3 */
{ VIN0, SPI, _2DG, LBSCATA } },
{ 0xFF804010, 0, 32, 8, /* INT2PRI4 */
{ SCIF0, SCIF3, HSCIF, LCDC } },
{ 0xFF804014, 0, 32, 8, /* INT2PRI5 */
{ RCAN_P, LBSCDMAC_P, LBSCDMAC2, MMC } },
{ 0xFF804018, 0, 32, 8, /* INT2PRI6 */
{ HPBDMAC0_3, HPBDMAC4_10, HPBDMAC11_18, HPBDMAC19_22 } },
{ 0xFF80401C, 0, 32, 8, /* INT2PRI7 */
{ HPBDMAC23_25_27_28, I2C0, SRC0, SRC1 } },
{ 0xFF804020, 0, 32, 8, /* INT2PRI8 */
{ 0 /* ADIF */, VIN1, RESET_WDT, HIF } },
{ 0xFF804024, 0, 32, 8, /* INT2PRI9 */
{ ADMAC, FLCTL, GPIO0_3, GPIO4_5 } },
{ 0xFF804028, 0, 32, 8, /* INT2PRI10 */
{ STIF0, STIF1, VEU3F_VE3, GETHER } },
{ 0xFF80402C, 0, 32, 8, /* INT2PRI11 */
{ MTU2, RGPVG, MIMLB, IEBUS } },
};
static DECLARE_INTC_DESC(intc_desc, "sh7734", vectors, groups,
mask_registers, prio_registers, NULL);
/* Support for external interrupt pins in IRQ mode */
static struct intc_vect irq3210_vectors[] __initdata = {
INTC_VECT(IRQ0, 0x240), INTC_VECT(IRQ1, 0x280),
INTC_VECT(IRQ2, 0x2C0), INTC_VECT(IRQ3, 0x300),
};
static struct intc_sense_reg irq3210_sense_registers[] __initdata = {
{ 0xFF80201C, 32, 2, /* ICR1 */
{ IRQ0, IRQ1, IRQ2, IRQ3, } },
};
static struct intc_mask_reg irq3210_ack_registers[] __initdata = {
{ 0xFF802024, 0, 32, /* INTREQ */
{ IRQ0, IRQ1, IRQ2, IRQ3, } },
};
static struct intc_mask_reg irq3210_mask_registers[] __initdata = {
{ 0xFF802044, 0xFF802064, 32, /* INTMSK0 / INTMSKCLR0 */
{ IRQ0, IRQ1, IRQ2, IRQ3, } },
};
static struct intc_prio_reg irq3210_prio_registers[] __initdata = {
{ 0xFF802010, 0, 32, 4, /* INTPRI */
{ IRQ0, IRQ1, IRQ2, IRQ3, } },
};
static DECLARE_INTC_DESC_ACK(intc_desc_irq3210, "sh7734-irq3210",
irq3210_vectors, NULL,
irq3210_mask_registers, irq3210_prio_registers,
irq3210_sense_registers, irq3210_ack_registers);
/* External interrupt pins in IRL mode */
static struct intc_vect vectors_irl3210[] __initdata = {
INTC_VECT(IRL0_LLLL, 0x200), INTC_VECT(IRL0_LLLH, 0x220),
INTC_VECT(IRL0_LLHL, 0x240), INTC_VECT(IRL0_LLHH, 0x260),
INTC_VECT(IRL0_LHLL, 0x280), INTC_VECT(IRL0_LHLH, 0x2a0),
INTC_VECT(IRL0_LHHL, 0x2c0), INTC_VECT(IRL0_LHHH, 0x2e0),
INTC_VECT(IRL0_HLLL, 0x300), INTC_VECT(IRL0_HLLH, 0x320),
INTC_VECT(IRL0_HLHL, 0x340), INTC_VECT(IRL0_HLHH, 0x360),
INTC_VECT(IRL0_HHLL, 0x380), INTC_VECT(IRL0_HHLH, 0x3a0),
INTC_VECT(IRL0_HHHL, 0x3c0),
};
static DECLARE_INTC_DESC(intc_desc_irl3210, "sh7734-irl3210",
vectors_irl3210, NULL, mask_registers, NULL, NULL);
#define INTC_ICR0 0xFF802000
#define INTC_INTMSK0 0xFF802044
#define INTC_INTMSK1 0xFF802048
#define INTC_INTMSKCLR0 0xFF802064
#define INTC_INTMSKCLR1 0xFF802068
void __init plat_irq_setup(void)
{
/* disable IRQ3-0 */
__raw_writel(0xF0000000, INTC_INTMSK0);
/* disable IRL3-0 */
__raw_writel(0x80000000, INTC_INTMSK1);
/* select IRL mode for IRL3-0 */
__raw_writel(__raw_readl(INTC_ICR0) & ~0x00800000, INTC_ICR0);
	/* disable holding function, i.e. enable "SH-4 Mode (LVLMODE)" */
__raw_writel(__raw_readl(INTC_ICR0) | 0x00200000, INTC_ICR0);
register_intc_controller(&intc_desc);
}
void __init plat_irq_setup_pins(int mode)
{
switch (mode) {
case IRQ_MODE_IRQ3210:
/* select IRQ mode for IRL3-0 */
__raw_writel(__raw_readl(INTC_ICR0) | 0x00800000, INTC_ICR0);
register_intc_controller(&intc_desc_irq3210);
break;
case IRQ_MODE_IRL3210:
/* enable IRL0-3 but don't provide any masking */
__raw_writel(0x80000000, INTC_INTMSKCLR1);
__raw_writel(0xf0000000, INTC_INTMSKCLR0);
break;
case IRQ_MODE_IRL3210_MASK:
/* enable IRL0-3 and mask using cpu intc controller */
__raw_writel(0x80000000, INTC_INTMSKCLR0);
register_intc_controller(&intc_desc_irl3210);
break;
default:
BUG();
}
}
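/*
 * Illustrative usage (editorial note, not in the original file): board
 * setup code picks one of the pin modes above according to how the
 * external interrupt pins are wired, e.g.
 *
 *	plat_irq_setup_pins(IRQ_MODE_IRQ3210);
 *
 * for four individual IRQ lines, or IRQ_MODE_IRL3210 /
 * IRQ_MODE_IRL3210_MASK for the encoded IRL input.
 */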
| linux-master | arch/sh/kernel/cpu/sh4a/setup-sh7734.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/cpu/sh4a/clock-sh7785.c
*
* SH7785 support for the clock framework
*
* Copyright (C) 2007 - 2010 Paul Mundt
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/cpufreq.h>
#include <linux/clkdev.h>
#include <asm/clock.h>
#include <asm/freq.h>
#include <cpu/sh7785.h>
/*
* Default rate for the root input clock, reset this with clk_set_rate()
* from the platform code.
*/
static struct clk extal_clk = {
.rate = 33333333,
};
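/*
 * Editorial sketch (kept compiled out): one plausible way for platform
 * code to override the EXTAL default via clk_set_rate(), as the comment
 * above suggests. The 50 MHz figure is a hypothetical board crystal;
 * "extal" matches the CLKDEV_CON_ID entry in the lookup table below.
 */
#if 0
static void __init board_extal_setup(void)
{
	struct clk *clk = clk_get(NULL, "extal");
	if (!IS_ERR(clk)) {
		clk_set_rate(clk, 50000000);	/* hypothetical 50 MHz */
		clk_put(clk);
	}
}
#endif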
static unsigned long pll_recalc(struct clk *clk)
{
int multiplier;
multiplier = test_mode_pin(MODE_PIN4) ? 36 : 72;
return clk->parent->rate * multiplier;
}
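/*
 * Illustrative arithmetic: with the default 33333333 Hz EXTAL this
 * works out to roughly 1.2 GHz (x36) or 2.4 GHz (x72) at the PLL,
 * which the DIV4 dividers below then scale down per clock domain.
 */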
static struct sh_clk_ops pll_clk_ops = {
.recalc = pll_recalc,
};
static struct clk pll_clk = {
.ops = &pll_clk_ops,
.parent = &extal_clk,
.flags = CLK_ENABLE_ON_INIT,
};
static struct clk *clks[] = {
&extal_clk,
&pll_clk,
};
static unsigned int div2[] = { 1, 2, 4, 6, 8, 12, 16, 18,
24, 32, 36, 48 };
static struct clk_div_mult_table div4_div_mult_table = {
.divisors = div2,
.nr_divisors = ARRAY_SIZE(div2),
};
static struct clk_div4_table div4_table = {
.div_mult_table = &div4_div_mult_table,
};
enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_DDR, DIV4_GA,
DIV4_DU, DIV4_P, DIV4_NR };
#define DIV4(_bit, _mask, _flags) \
SH_CLK_DIV4(&pll_clk, FRQMR1, _bit, _mask, _flags)
struct clk div4_clks[DIV4_NR] = {
[DIV4_P] = DIV4(0, 0x0f80, 0),
[DIV4_DU] = DIV4(4, 0x0ff0, 0),
[DIV4_GA] = DIV4(8, 0x0030, 0),
[DIV4_DDR] = DIV4(12, 0x000c, CLK_ENABLE_ON_INIT),
[DIV4_B] = DIV4(16, 0x0fe0, CLK_ENABLE_ON_INIT),
[DIV4_SH] = DIV4(20, 0x000c, CLK_ENABLE_ON_INIT),
[DIV4_U] = DIV4(24, 0x000c, CLK_ENABLE_ON_INIT),
[DIV4_I] = DIV4(28, 0x000e, CLK_ENABLE_ON_INIT),
};
#define MSTPCR0 0xffc80030
#define MSTPCR1 0xffc80034
enum { MSTP029, MSTP028, MSTP027, MSTP026, MSTP025, MSTP024,
MSTP021, MSTP020, MSTP017, MSTP016,
MSTP013, MSTP012, MSTP009, MSTP008, MSTP003, MSTP002,
MSTP119, MSTP117, MSTP105, MSTP104, MSTP100,
MSTP_NR };
static struct clk mstp_clks[MSTP_NR] = {
/* MSTPCR0 */
[MSTP029] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 29, 0),
[MSTP028] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 28, 0),
[MSTP027] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 27, 0),
[MSTP026] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 26, 0),
[MSTP025] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 25, 0),
[MSTP024] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 24, 0),
[MSTP021] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 21, 0),
[MSTP020] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 20, 0),
[MSTP017] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 17, 0),
[MSTP016] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 16, 0),
[MSTP013] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 13, 0),
[MSTP012] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 12, 0),
[MSTP009] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 9, 0),
[MSTP008] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 8, 0),
[MSTP003] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 3, 0),
[MSTP002] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 2, 0),
/* MSTPCR1 */
[MSTP119] = SH_CLK_MSTP32(NULL, MSTPCR1, 19, 0),
[MSTP117] = SH_CLK_MSTP32(NULL, MSTPCR1, 17, 0),
[MSTP105] = SH_CLK_MSTP32(NULL, MSTPCR1, 5, 0),
[MSTP104] = SH_CLK_MSTP32(NULL, MSTPCR1, 4, 0),
[MSTP100] = SH_CLK_MSTP32(NULL, MSTPCR1, 0, 0),
};
static struct clk_lookup lookups[] = {
/* main clocks */
CLKDEV_CON_ID("extal", &extal_clk),
CLKDEV_CON_ID("pll_clk", &pll_clk),
/* DIV4 clocks */
CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
CLKDEV_CON_ID("du_clk", &div4_clks[DIV4_DU]),
CLKDEV_CON_ID("ga_clk", &div4_clks[DIV4_GA]),
CLKDEV_CON_ID("ddr_clk", &div4_clks[DIV4_DDR]),
CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]),
CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]),
CLKDEV_CON_ID("umem_clk", &div4_clks[DIV4_U]),
CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
/* MSTP32 clocks */
CLKDEV_ICK_ID("fck", "sh-sci.5", &mstp_clks[MSTP029]),
CLKDEV_ICK_ID("fck", "sh-sci.4", &mstp_clks[MSTP028]),
CLKDEV_ICK_ID("fck", "sh-sci.3", &mstp_clks[MSTP027]),
CLKDEV_ICK_ID("fck", "sh-sci.2", &mstp_clks[MSTP026]),
CLKDEV_ICK_ID("fck", "sh-sci.1", &mstp_clks[MSTP025]),
CLKDEV_ICK_ID("fck", "sh-sci.0", &mstp_clks[MSTP024]),
CLKDEV_CON_ID("ssi1_fck", &mstp_clks[MSTP021]),
CLKDEV_CON_ID("ssi0_fck", &mstp_clks[MSTP020]),
CLKDEV_CON_ID("hac1_fck", &mstp_clks[MSTP017]),
CLKDEV_CON_ID("hac0_fck", &mstp_clks[MSTP016]),
CLKDEV_CON_ID("mmcif_fck", &mstp_clks[MSTP013]),
CLKDEV_CON_ID("flctl_fck", &mstp_clks[MSTP012]),
CLKDEV_ICK_ID("fck", "sh-tmu.0", &mstp_clks[MSTP008]),
CLKDEV_ICK_ID("fck", "sh-tmu.1", &mstp_clks[MSTP009]),
CLKDEV_CON_ID("siof_fck", &mstp_clks[MSTP003]),
CLKDEV_CON_ID("hspi_fck", &mstp_clks[MSTP002]),
CLKDEV_CON_ID("hudi_fck", &mstp_clks[MSTP119]),
CLKDEV_CON_ID("ubc0", &mstp_clks[MSTP117]),
CLKDEV_CON_ID("dmac_11_6_fck", &mstp_clks[MSTP105]),
CLKDEV_CON_ID("dmac_5_0_fck", &mstp_clks[MSTP104]),
CLKDEV_CON_ID("gdta_fck", &mstp_clks[MSTP100]),
};
int __init arch_clk_init(void)
{
int i, ret = 0;
for (i = 0; i < ARRAY_SIZE(clks); i++)
ret |= clk_register(clks[i]);
clkdev_add_table(lookups, ARRAY_SIZE(lookups));
if (!ret)
ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),
&div4_table);
if (!ret)
ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
return ret;
}
| linux-master | arch/sh/kernel/cpu/sh4a/clock-sh7785.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SH-X3 prototype CPU pinmux
*
* Copyright (C) 2010 Paul Mundt
*/
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <cpu/pfc.h>
static struct resource shx3_pfc_resources[] = {
[0] = {
.start = 0xffc70000,
.end = 0xffc7001f,
.flags = IORESOURCE_MEM,
},
};
static int __init plat_pinmux_setup(void)
{
return sh_pfc_register("pfc-shx3", shx3_pfc_resources,
ARRAY_SIZE(shx3_pfc_resources));
}
arch_initcall(plat_pinmux_setup);
| linux-master | arch/sh/kernel/cpu/sh4a/pinmux-shx3.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SH-X3 SMP
*
* Copyright (C) 2007 - 2010 Paul Mundt
* Copyright (C) 2007 Magnus Damm
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <asm/sections.h>
#define STBCR_REG(phys_id) (0xfe400004 | (phys_id << 12))
#define RESET_REG(phys_id) (0xfe400008 | (phys_id << 12))
#define STBCR_MSTP 0x00000001
#define STBCR_RESET 0x00000002
#define STBCR_SLEEP 0x00000004
#define STBCR_LTSLP 0x80000000
static irqreturn_t ipi_interrupt_handler(int irq, void *arg)
{
unsigned int message = (unsigned int)(long)arg;
unsigned int cpu = hard_smp_processor_id();
unsigned int offs = 4 * cpu;
unsigned int x;
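	/*
	 * Acknowledge the IPI: read this CPU's pending register, isolate
	 * the bit for this message (each message owns a nibble, hence
	 * message << 2), then write it to the matching clear register.
	 */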
	x = __raw_readl(0xfe410070 + offs); /* C0INTICI..CnINTICI */
x &= (1 << (message << 2));
__raw_writel(x, 0xfe410080 + offs); /* C0INTICICLR..CnINTICICLR */
smp_message_recv(message);
return IRQ_HANDLED;
}
static void shx3_smp_setup(void)
{
unsigned int cpu = 0;
int i, num;
init_cpu_possible(cpumask_of(cpu));
/* Enable light sleep for the boot CPU */
__raw_writel(__raw_readl(STBCR_REG(cpu)) | STBCR_LTSLP, STBCR_REG(cpu));
__cpu_number_map[0] = 0;
__cpu_logical_map[0] = 0;
/*
* Do this stupidly for now.. we don't have an easy way to probe
* for the total number of cores.
*/
for (i = 1, num = 0; i < NR_CPUS; i++) {
set_cpu_possible(i, true);
__cpu_number_map[i] = ++num;
__cpu_logical_map[num] = i;
}
printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num);
}
static void shx3_prepare_cpus(unsigned int max_cpus)
{
int i;
BUILD_BUG_ON(SMP_MSG_NR >= 8);
for (i = 0; i < SMP_MSG_NR; i++)
if (request_irq(104 + i, ipi_interrupt_handler,
IRQF_PERCPU, "IPI", (void *)(long)i))
			pr_err("Failed to request irq %d\n", 104 + i);
for (i = 0; i < max_cpus; i++)
set_cpu_present(i, true);
}
static void shx3_start_cpu(unsigned int cpu, unsigned long entry_point)
{
if (__in_29bit_mode())
__raw_writel(entry_point, RESET_REG(cpu));
else
__raw_writel(virt_to_phys(entry_point), RESET_REG(cpu));
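	/*
	 * Force the target core into module standby (MSTP) if it is not
	 * already stopped, and wait for the stop bit to latch before the
	 * reset below releases the core at the new entry point.
	 */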
if (!(__raw_readl(STBCR_REG(cpu)) & STBCR_MSTP))
__raw_writel(STBCR_MSTP, STBCR_REG(cpu));
while (!(__raw_readl(STBCR_REG(cpu)) & STBCR_MSTP))
cpu_relax();
/* Start up secondary processor by sending a reset */
__raw_writel(STBCR_RESET | STBCR_LTSLP, STBCR_REG(cpu));
}
static unsigned int shx3_smp_processor_id(void)
{
return __raw_readl(0xff000048); /* CPIDR */
}
static void shx3_send_ipi(unsigned int cpu, unsigned int message)
{
unsigned long addr = 0xfe410070 + (cpu * 4);
BUG_ON(cpu >= 4);
__raw_writel(1 << (message << 2), addr); /* C0INTICI..CnINTICI */
}
static void shx3_update_boot_vector(unsigned int cpu)
{
__raw_writel(STBCR_MSTP, STBCR_REG(cpu));
while (!(__raw_readl(STBCR_REG(cpu)) & STBCR_MSTP))
cpu_relax();
__raw_writel(STBCR_RESET, STBCR_REG(cpu));
}
static int shx3_cpu_prepare(unsigned int cpu)
{
shx3_update_boot_vector(cpu);
return 0;
}
static int register_shx3_cpu_notifier(void)
{
cpuhp_setup_state_nocalls(CPUHP_SH_SH3X_PREPARE, "sh/shx3:prepare",
shx3_cpu_prepare, NULL);
return 0;
}
late_initcall(register_shx3_cpu_notifier);
struct plat_smp_ops shx3_smp_ops = {
.smp_setup = shx3_smp_setup,
.prepare_cpus = shx3_prepare_cpus,
.start_cpu = shx3_start_cpu,
.smp_processor_id = shx3_smp_processor_id,
.send_ipi = shx3_send_ipi,
.cpu_die = native_cpu_die,
.cpu_disable = native_cpu_disable,
.play_dead = native_play_dead,
};
| linux-master | arch/sh/kernel/cpu/sh4a/smp-shx3.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SH7785 Pinmux
*
* Copyright (C) 2008 Magnus Damm
*/
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <cpu/pfc.h>
static struct resource sh7785_pfc_resources[] = {
[0] = {
.start = 0xffe70000,
.end = 0xffe7008f,
.flags = IORESOURCE_MEM,
},
};
static int __init plat_pinmux_setup(void)
{
return sh_pfc_register("pfc-sh7785", sh7785_pfc_resources,
ARRAY_SIZE(sh7785_pfc_resources));
}
arch_initcall(plat_pinmux_setup);
| linux-master | arch/sh/kernel/cpu/sh4a/pinmux-sh7785.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SH7724 Setup
*
* Copyright (C) 2009 Renesas Solutions Corp.
*
* Kuninori Morimoto <[email protected]>
*
* Based on SH7723 Setup
* Copyright (C) 2008 Paul Mundt
*/
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/mm.h>
#include <linux/serial_sci.h>
#include <linux/uio_driver.h>
#include <linux/sh_dma.h>
#include <linux/sh_timer.h>
#include <linux/sh_intc.h>
#include <linux/io.h>
#include <linux/notifier.h>
#include <asm/suspend.h>
#include <asm/clock.h>
#include <asm/mmzone.h>
#include <asm/platform_early.h>
#include <cpu/dma-register.h>
#include <cpu/sh7724.h>
/* DMA */
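/*
 * Editorial note: the TX slaves below keep the destination fixed and
 * increment the source (DM_FIX | SM_INC) because they stream memory
 * into a peripheral FIFO; the RX slaves are the mirror image
 * (DM_INC | SM_FIX).
 */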
static const struct sh_dmae_slave_config sh7724_dmae_slaves[] = {
{
.slave_id = SHDMA_SLAVE_SCIF0_TX,
.addr = 0xffe0000c,
.chcr = DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x21,
}, {
.slave_id = SHDMA_SLAVE_SCIF0_RX,
.addr = 0xffe00014,
.chcr = DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x22,
}, {
.slave_id = SHDMA_SLAVE_SCIF1_TX,
.addr = 0xffe1000c,
.chcr = DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x25,
}, {
.slave_id = SHDMA_SLAVE_SCIF1_RX,
.addr = 0xffe10014,
.chcr = DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x26,
}, {
.slave_id = SHDMA_SLAVE_SCIF2_TX,
.addr = 0xffe2000c,
.chcr = DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x29,
}, {
.slave_id = SHDMA_SLAVE_SCIF2_RX,
.addr = 0xffe20014,
.chcr = DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x2a,
}, {
.slave_id = SHDMA_SLAVE_SCIF3_TX,
.addr = 0xa4e30020,
.chcr = DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x2d,
}, {
.slave_id = SHDMA_SLAVE_SCIF3_RX,
.addr = 0xa4e30024,
.chcr = DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x2e,
}, {
.slave_id = SHDMA_SLAVE_SCIF4_TX,
.addr = 0xa4e40020,
.chcr = DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x31,
}, {
.slave_id = SHDMA_SLAVE_SCIF4_RX,
.addr = 0xa4e40024,
.chcr = DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x32,
}, {
.slave_id = SHDMA_SLAVE_SCIF5_TX,
.addr = 0xa4e50020,
.chcr = DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x35,
}, {
.slave_id = SHDMA_SLAVE_SCIF5_RX,
.addr = 0xa4e50024,
.chcr = DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x36,
}, {
.slave_id = SHDMA_SLAVE_USB0D0_TX,
.addr = 0xA4D80100,
.chcr = DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_32BIT),
.mid_rid = 0x73,
}, {
.slave_id = SHDMA_SLAVE_USB0D0_RX,
.addr = 0xA4D80100,
.chcr = DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_32BIT),
.mid_rid = 0x73,
}, {
.slave_id = SHDMA_SLAVE_USB0D1_TX,
.addr = 0xA4D80120,
.chcr = DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_32BIT),
.mid_rid = 0x77,
}, {
.slave_id = SHDMA_SLAVE_USB0D1_RX,
.addr = 0xA4D80120,
.chcr = DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_32BIT),
.mid_rid = 0x77,
}, {
.slave_id = SHDMA_SLAVE_USB1D0_TX,
.addr = 0xA4D90100,
.chcr = DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_32BIT),
.mid_rid = 0xab,
}, {
.slave_id = SHDMA_SLAVE_USB1D0_RX,
.addr = 0xA4D90100,
.chcr = DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_32BIT),
.mid_rid = 0xab,
}, {
.slave_id = SHDMA_SLAVE_USB1D1_TX,
.addr = 0xA4D90120,
.chcr = DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_32BIT),
.mid_rid = 0xaf,
}, {
.slave_id = SHDMA_SLAVE_USB1D1_RX,
.addr = 0xA4D90120,
.chcr = DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_32BIT),
.mid_rid = 0xaf,
}, {
.slave_id = SHDMA_SLAVE_SDHI0_TX,
.addr = 0x04ce0030,
.chcr = DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_16BIT),
.mid_rid = 0xc1,
}, {
.slave_id = SHDMA_SLAVE_SDHI0_RX,
.addr = 0x04ce0030,
.chcr = DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_16BIT),
.mid_rid = 0xc2,
}, {
.slave_id = SHDMA_SLAVE_SDHI1_TX,
.addr = 0x04cf0030,
.chcr = DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_16BIT),
.mid_rid = 0xc9,
}, {
.slave_id = SHDMA_SLAVE_SDHI1_RX,
.addr = 0x04cf0030,
.chcr = DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_16BIT),
.mid_rid = 0xca,
},
};
static const struct sh_dmae_channel sh7724_dmae_channels[] = {
{
.offset = 0,
.dmars = 0,
.dmars_bit = 0,
}, {
.offset = 0x10,
.dmars = 0,
.dmars_bit = 8,
}, {
.offset = 0x20,
.dmars = 4,
.dmars_bit = 0,
}, {
.offset = 0x30,
.dmars = 4,
.dmars_bit = 8,
}, {
.offset = 0x50,
.dmars = 8,
.dmars_bit = 0,
}, {
.offset = 0x60,
.dmars = 8,
.dmars_bit = 8,
}
};
static const unsigned int ts_shift[] = TS_SHIFT;
static struct sh_dmae_pdata dma_platform_data = {
.slave = sh7724_dmae_slaves,
.slave_num = ARRAY_SIZE(sh7724_dmae_slaves),
.channel = sh7724_dmae_channels,
.channel_num = ARRAY_SIZE(sh7724_dmae_channels),
.ts_low_shift = CHCR_TS_LOW_SHIFT,
.ts_low_mask = CHCR_TS_LOW_MASK,
.ts_high_shift = CHCR_TS_HIGH_SHIFT,
.ts_high_mask = CHCR_TS_HIGH_MASK,
.ts_shift = ts_shift,
.ts_shift_num = ARRAY_SIZE(ts_shift),
.dmaor_init = DMAOR_INIT,
};
/* Resource order important! */
static struct resource sh7724_dmae0_resources[] = {
{
/* Channel registers and DMAOR */
.start = 0xfe008020,
.end = 0xfe00808f,
.flags = IORESOURCE_MEM,
},
{
/* DMARSx */
.start = 0xfe009000,
.end = 0xfe00900b,
.flags = IORESOURCE_MEM,
},
{
.name = "error_irq",
.start = evt2irq(0xbc0),
.end = evt2irq(0xbc0),
.flags = IORESOURCE_IRQ,
},
{
/* IRQ for channels 0-3 */
.start = evt2irq(0x800),
.end = evt2irq(0x860),
.flags = IORESOURCE_IRQ,
},
{
/* IRQ for channels 4-5 */
.start = evt2irq(0xb80),
.end = evt2irq(0xba0),
.flags = IORESOURCE_IRQ,
},
};
/* Resource order important! */
static struct resource sh7724_dmae1_resources[] = {
{
/* Channel registers and DMAOR */
.start = 0xfdc08020,
.end = 0xfdc0808f,
.flags = IORESOURCE_MEM,
},
{
/* DMARSx */
.start = 0xfdc09000,
.end = 0xfdc0900b,
.flags = IORESOURCE_MEM,
},
{
.name = "error_irq",
.start = evt2irq(0xb40),
.end = evt2irq(0xb40),
.flags = IORESOURCE_IRQ,
},
{
/* IRQ for channels 0-3 */
.start = evt2irq(0x700),
.end = evt2irq(0x760),
.flags = IORESOURCE_IRQ,
},
{
/* IRQ for channels 4-5 */
.start = evt2irq(0xb00),
.end = evt2irq(0xb20),
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device dma0_device = {
.name = "sh-dma-engine",
.id = 0,
.resource = sh7724_dmae0_resources,
.num_resources = ARRAY_SIZE(sh7724_dmae0_resources),
.dev = {
.platform_data = &dma_platform_data,
},
};
static struct platform_device dma1_device = {
.name = "sh-dma-engine",
.id = 1,
.resource = sh7724_dmae1_resources,
.num_resources = ARRAY_SIZE(sh7724_dmae1_resources),
.dev = {
.platform_data = &dma_platform_data,
},
};
/* Serial */
static struct plat_sci_port scif0_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
.regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE,
};
static struct resource scif0_resources[] = {
DEFINE_RES_MEM(0xffe00000, 0x100),
DEFINE_RES_IRQ(evt2irq(0xc00)),
};
static struct platform_device scif0_device = {
.name = "sh-sci",
.id = 0,
.resource = scif0_resources,
.num_resources = ARRAY_SIZE(scif0_resources),
.dev = {
.platform_data = &scif0_platform_data,
},
};
static struct plat_sci_port scif1_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
.regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE,
};
static struct resource scif1_resources[] = {
DEFINE_RES_MEM(0xffe10000, 0x100),
DEFINE_RES_IRQ(evt2irq(0xc20)),
};
static struct platform_device scif1_device = {
.name = "sh-sci",
.id = 1,
.resource = scif1_resources,
.num_resources = ARRAY_SIZE(scif1_resources),
.dev = {
.platform_data = &scif1_platform_data,
},
};
static struct plat_sci_port scif2_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
.regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE,
};
static struct resource scif2_resources[] = {
DEFINE_RES_MEM(0xffe20000, 0x100),
DEFINE_RES_IRQ(evt2irq(0xc40)),
};
static struct platform_device scif2_device = {
.name = "sh-sci",
.id = 2,
.resource = scif2_resources,
.num_resources = ARRAY_SIZE(scif2_resources),
.dev = {
.platform_data = &scif2_platform_data,
},
};
static struct plat_sci_port scif3_platform_data = {
.sampling_rate = 8,
.type = PORT_SCIFA,
};
static struct resource scif3_resources[] = {
DEFINE_RES_MEM(0xa4e30000, 0x100),
DEFINE_RES_IRQ(evt2irq(0x900)),
};
static struct platform_device scif3_device = {
.name = "sh-sci",
.id = 3,
.resource = scif3_resources,
.num_resources = ARRAY_SIZE(scif3_resources),
.dev = {
.platform_data = &scif3_platform_data,
},
};
static struct plat_sci_port scif4_platform_data = {
.sampling_rate = 8,
.type = PORT_SCIFA,
};
static struct resource scif4_resources[] = {
DEFINE_RES_MEM(0xa4e40000, 0x100),
DEFINE_RES_IRQ(evt2irq(0xd00)),
};
static struct platform_device scif4_device = {
.name = "sh-sci",
.id = 4,
.resource = scif4_resources,
.num_resources = ARRAY_SIZE(scif4_resources),
.dev = {
.platform_data = &scif4_platform_data,
},
};
static struct plat_sci_port scif5_platform_data = {
.sampling_rate = 8,
.type = PORT_SCIFA,
};
static struct resource scif5_resources[] = {
DEFINE_RES_MEM(0xa4e50000, 0x100),
DEFINE_RES_IRQ(evt2irq(0xfa0)),
};
static struct platform_device scif5_device = {
.name = "sh-sci",
.id = 5,
.resource = scif5_resources,
.num_resources = ARRAY_SIZE(scif5_resources),
.dev = {
.platform_data = &scif5_platform_data,
},
};
/* RTC */
static struct resource rtc_resources[] = {
[0] = {
.start = 0xa465fec0,
.end = 0xa465fec0 + 0x58 - 1,
.flags = IORESOURCE_IO,
},
[1] = {
/* Period IRQ */
.start = evt2irq(0xaa0),
.flags = IORESOURCE_IRQ,
},
[2] = {
/* Carry IRQ */
.start = evt2irq(0xac0),
.flags = IORESOURCE_IRQ,
},
[3] = {
/* Alarm IRQ */
.start = evt2irq(0xa80),
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device rtc_device = {
.name = "sh-rtc",
.id = -1,
.num_resources = ARRAY_SIZE(rtc_resources),
.resource = rtc_resources,
};
/* I2C0 */
static struct resource iic0_resources[] = {
[0] = {
.name = "IIC0",
.start = 0x04470000,
.end = 0x04470018 - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = evt2irq(0xe00),
.end = evt2irq(0xe60),
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device iic0_device = {
.name = "i2c-sh_mobile",
.id = 0, /* "i2c0" clock */
.num_resources = ARRAY_SIZE(iic0_resources),
.resource = iic0_resources,
};
/* I2C1 */
static struct resource iic1_resources[] = {
[0] = {
.name = "IIC1",
.start = 0x04750000,
.end = 0x04750018 - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = evt2irq(0xd80),
.end = evt2irq(0xde0),
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device iic1_device = {
.name = "i2c-sh_mobile",
.id = 1, /* "i2c1" clock */
.num_resources = ARRAY_SIZE(iic1_resources),
.resource = iic1_resources,
};
/* VPU */
static struct uio_info vpu_platform_data = {
.name = "VPU5F",
.version = "0",
.irq = evt2irq(0x980),
};
static struct resource vpu_resources[] = {
[0] = {
.name = "VPU",
.start = 0xfe900000,
.end = 0xfe902807,
.flags = IORESOURCE_MEM,
},
[1] = {
/* place holder for contiguous memory */
},
};
static struct platform_device vpu_device = {
.name = "uio_pdrv_genirq",
.id = 0,
.dev = {
.platform_data = &vpu_platform_data,
},
.resource = vpu_resources,
.num_resources = ARRAY_SIZE(vpu_resources),
};
/* VEU0 */
static struct uio_info veu0_platform_data = {
.name = "VEU3F0",
.version = "0",
.irq = evt2irq(0xc60),
};
static struct resource veu0_resources[] = {
[0] = {
.name = "VEU3F0",
.start = 0xfe920000,
.end = 0xfe9200cb,
.flags = IORESOURCE_MEM,
},
[1] = {
/* place holder for contiguous memory */
},
};
static struct platform_device veu0_device = {
.name = "uio_pdrv_genirq",
.id = 1,
.dev = {
.platform_data = &veu0_platform_data,
},
.resource = veu0_resources,
.num_resources = ARRAY_SIZE(veu0_resources),
};
/* VEU1 */
static struct uio_info veu1_platform_data = {
.name = "VEU3F1",
.version = "0",
.irq = evt2irq(0x8c0),
};
static struct resource veu1_resources[] = {
[0] = {
.name = "VEU3F1",
.start = 0xfe924000,
.end = 0xfe9240cb,
.flags = IORESOURCE_MEM,
},
[1] = {
/* place holder for contiguous memory */
},
};
static struct platform_device veu1_device = {
.name = "uio_pdrv_genirq",
.id = 2,
.dev = {
.platform_data = &veu1_platform_data,
},
.resource = veu1_resources,
.num_resources = ARRAY_SIZE(veu1_resources),
};
/* BEU0 */
static struct uio_info beu0_platform_data = {
.name = "BEU0",
.version = "0",
.irq = evt2irq(0x8A0),
};
static struct resource beu0_resources[] = {
[0] = {
.name = "BEU0",
.start = 0xfe930000,
.end = 0xfe933400,
.flags = IORESOURCE_MEM,
},
[1] = {
/* place holder for contiguous memory */
},
};
static struct platform_device beu0_device = {
.name = "uio_pdrv_genirq",
.id = 6,
.dev = {
.platform_data = &beu0_platform_data,
},
.resource = beu0_resources,
.num_resources = ARRAY_SIZE(beu0_resources),
};
/* BEU1 */
static struct uio_info beu1_platform_data = {
.name = "BEU1",
.version = "0",
.irq = evt2irq(0xA00),
};
static struct resource beu1_resources[] = {
[0] = {
.name = "BEU1",
.start = 0xfe940000,
.end = 0xfe943400,
.flags = IORESOURCE_MEM,
},
[1] = {
/* place holder for contiguous memory */
},
};
static struct platform_device beu1_device = {
.name = "uio_pdrv_genirq",
.id = 7,
.dev = {
.platform_data = &beu1_platform_data,
},
.resource = beu1_resources,
.num_resources = ARRAY_SIZE(beu1_resources),
};
static struct sh_timer_config cmt_platform_data = {
.channels_mask = 0x20,
};
static struct resource cmt_resources[] = {
DEFINE_RES_MEM(0x044a0000, 0x70),
DEFINE_RES_IRQ(evt2irq(0xf00)),
};
static struct platform_device cmt_device = {
.name = "sh-cmt-32",
.id = 0,
.dev = {
.platform_data = &cmt_platform_data,
},
.resource = cmt_resources,
.num_resources = ARRAY_SIZE(cmt_resources),
};
static struct sh_timer_config tmu0_platform_data = {
.channels_mask = 7,
};
static struct resource tmu0_resources[] = {
DEFINE_RES_MEM(0xffd80000, 0x2c),
DEFINE_RES_IRQ(evt2irq(0x400)),
DEFINE_RES_IRQ(evt2irq(0x420)),
DEFINE_RES_IRQ(evt2irq(0x440)),
};
static struct platform_device tmu0_device = {
.name = "sh-tmu",
.id = 0,
.dev = {
.platform_data = &tmu0_platform_data,
},
.resource = tmu0_resources,
.num_resources = ARRAY_SIZE(tmu0_resources),
};
static struct sh_timer_config tmu1_platform_data = {
.channels_mask = 7,
};
static struct resource tmu1_resources[] = {
DEFINE_RES_MEM(0xffd90000, 0x2c),
DEFINE_RES_IRQ(evt2irq(0x920)),
DEFINE_RES_IRQ(evt2irq(0x940)),
DEFINE_RES_IRQ(evt2irq(0x960)),
};
static struct platform_device tmu1_device = {
.name = "sh-tmu",
.id = 1,
.dev = {
.platform_data = &tmu1_platform_data,
},
.resource = tmu1_resources,
.num_resources = ARRAY_SIZE(tmu1_resources),
};
/* JPU */
static struct uio_info jpu_platform_data = {
.name = "JPU",
.version = "0",
.irq = evt2irq(0x560),
};
static struct resource jpu_resources[] = {
[0] = {
.name = "JPU",
.start = 0xfe980000,
.end = 0xfe9902d3,
.flags = IORESOURCE_MEM,
},
[1] = {
/* place holder for contiguous memory */
},
};
static struct platform_device jpu_device = {
.name = "uio_pdrv_genirq",
.id = 3,
.dev = {
.platform_data = &jpu_platform_data,
},
.resource = jpu_resources,
.num_resources = ARRAY_SIZE(jpu_resources),
};
/* SPU2DSP0 */
static struct uio_info spu0_platform_data = {
.name = "SPU2DSP0",
.version = "0",
.irq = evt2irq(0xcc0),
};
static struct resource spu0_resources[] = {
[0] = {
.name = "SPU2DSP0",
.start = 0xFE200000,
.end = 0xFE2FFFFF,
.flags = IORESOURCE_MEM,
},
[1] = {
/* place holder for contiguous memory */
},
};
static struct platform_device spu0_device = {
.name = "uio_pdrv_genirq",
.id = 4,
.dev = {
.platform_data = &spu0_platform_data,
},
.resource = spu0_resources,
.num_resources = ARRAY_SIZE(spu0_resources),
};
/* SPU2DSP1 */
static struct uio_info spu1_platform_data = {
.name = "SPU2DSP1",
.version = "0",
.irq = evt2irq(0xce0),
};
static struct resource spu1_resources[] = {
[0] = {
.name = "SPU2DSP1",
.start = 0xFE300000,
.end = 0xFE3FFFFF,
.flags = IORESOURCE_MEM,
},
[1] = {
/* place holder for contiguous memory */
},
};
static struct platform_device spu1_device = {
.name = "uio_pdrv_genirq",
.id = 5,
.dev = {
.platform_data = &spu1_platform_data,
},
.resource = spu1_resources,
.num_resources = ARRAY_SIZE(spu1_resources),
};
static struct platform_device *sh7724_devices[] __initdata = {
&scif0_device,
&scif1_device,
&scif2_device,
&scif3_device,
&scif4_device,
&scif5_device,
&cmt_device,
&tmu0_device,
&tmu1_device,
&dma0_device,
&dma1_device,
&rtc_device,
&iic0_device,
&iic1_device,
&vpu_device,
&veu0_device,
&veu1_device,
&beu0_device,
&beu1_device,
&jpu_device,
&spu0_device,
&spu1_device,
};
static int __init sh7724_devices_setup(void)
{
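	/* Reserve 2 MiB (2 << 20) of contiguous memory for each UIO unit */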
platform_resource_setup_memory(&vpu_device, "vpu", 2 << 20);
platform_resource_setup_memory(&veu0_device, "veu0", 2 << 20);
platform_resource_setup_memory(&veu1_device, "veu1", 2 << 20);
platform_resource_setup_memory(&jpu_device, "jpu", 2 << 20);
platform_resource_setup_memory(&spu0_device, "spu0", 2 << 20);
platform_resource_setup_memory(&spu1_device, "spu1", 2 << 20);
return platform_add_devices(sh7724_devices,
ARRAY_SIZE(sh7724_devices));
}
arch_initcall(sh7724_devices_setup);
static struct platform_device *sh7724_early_devices[] __initdata = {
&scif0_device,
&scif1_device,
&scif2_device,
&scif3_device,
&scif4_device,
&scif5_device,
&cmt_device,
&tmu0_device,
&tmu1_device,
};
void __init plat_early_device_setup(void)
{
sh_early_platform_add_devices(sh7724_early_devices,
ARRAY_SIZE(sh7724_early_devices));
}
#define RAMCR_CACHE_L2FC 0x0002
#define RAMCR_CACHE_L2E 0x0001
#define L2_CACHE_ENABLE (RAMCR_CACHE_L2E|RAMCR_CACHE_L2FC)
void l2_cache_init(void)
{
/* Enable L2 cache */
__raw_writel(L2_CACHE_ENABLE, RAMCR);
}
enum {
UNUSED = 0,
ENABLED,
DISABLED,
/* interrupt sources */
IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
HUDI,
DMAC1A_DEI0, DMAC1A_DEI1, DMAC1A_DEI2, DMAC1A_DEI3,
_2DG_TRI, _2DG_INI, _2DG_CEI,
DMAC0A_DEI0, DMAC0A_DEI1, DMAC0A_DEI2, DMAC0A_DEI3,
VIO_CEU0, VIO_BEU0, VIO_VEU1, VIO_VOU,
SCIFA3,
VPU,
TPU,
CEU1,
BEU1,
USB0, USB1,
ATAPI,
RTC_ATI, RTC_PRI, RTC_CUI,
DMAC1B_DEI4, DMAC1B_DEI5, DMAC1B_DADERR,
DMAC0B_DEI4, DMAC0B_DEI5, DMAC0B_DADERR,
KEYSC,
SCIF_SCIF0, SCIF_SCIF1, SCIF_SCIF2,
VEU0,
MSIOF_MSIOFI0, MSIOF_MSIOFI1,
SPU_SPUI0, SPU_SPUI1,
SCIFA4,
ICB,
ETHI,
I2C1_ALI, I2C1_TACKI, I2C1_WAITI, I2C1_DTEI,
I2C0_ALI, I2C0_TACKI, I2C0_WAITI, I2C0_DTEI,
CMT,
TSIF,
FSI,
SCIFA5,
TMU0_TUNI0, TMU0_TUNI1, TMU0_TUNI2,
IRDA,
JPU,
_2DDMAC,
MMC_MMC2I, MMC_MMC3I,
LCDC,
TMU1_TUNI0, TMU1_TUNI1, TMU1_TUNI2,
/* interrupt groups */
DMAC1A, _2DG, DMAC0A, VIO, USB, RTC,
DMAC1B, DMAC0B, I2C0, I2C1, SDHI0, SDHI1, SPU, MMCIF,
};
static struct intc_vect vectors[] __initdata = {
INTC_VECT(IRQ0, 0x600), INTC_VECT(IRQ1, 0x620),
INTC_VECT(IRQ2, 0x640), INTC_VECT(IRQ3, 0x660),
INTC_VECT(IRQ4, 0x680), INTC_VECT(IRQ5, 0x6a0),
INTC_VECT(IRQ6, 0x6c0), INTC_VECT(IRQ7, 0x6e0),
INTC_VECT(DMAC1A_DEI0, 0x700),
INTC_VECT(DMAC1A_DEI1, 0x720),
INTC_VECT(DMAC1A_DEI2, 0x740),
INTC_VECT(DMAC1A_DEI3, 0x760),
INTC_VECT(_2DG_TRI, 0x780),
INTC_VECT(_2DG_INI, 0x7A0),
INTC_VECT(_2DG_CEI, 0x7C0),
INTC_VECT(DMAC0A_DEI0, 0x800),
INTC_VECT(DMAC0A_DEI1, 0x820),
INTC_VECT(DMAC0A_DEI2, 0x840),
INTC_VECT(DMAC0A_DEI3, 0x860),
INTC_VECT(VIO_CEU0, 0x880),
INTC_VECT(VIO_BEU0, 0x8A0),
INTC_VECT(VIO_VEU1, 0x8C0),
INTC_VECT(VIO_VOU, 0x8E0),
INTC_VECT(SCIFA3, 0x900),
INTC_VECT(VPU, 0x980),
INTC_VECT(TPU, 0x9A0),
INTC_VECT(CEU1, 0x9E0),
INTC_VECT(BEU1, 0xA00),
INTC_VECT(USB0, 0xA20),
INTC_VECT(USB1, 0xA40),
INTC_VECT(ATAPI, 0xA60),
INTC_VECT(RTC_ATI, 0xA80),
INTC_VECT(RTC_PRI, 0xAA0),
INTC_VECT(RTC_CUI, 0xAC0),
INTC_VECT(DMAC1B_DEI4, 0xB00),
INTC_VECT(DMAC1B_DEI5, 0xB20),
INTC_VECT(DMAC1B_DADERR, 0xB40),
INTC_VECT(DMAC0B_DEI4, 0xB80),
INTC_VECT(DMAC0B_DEI5, 0xBA0),
INTC_VECT(DMAC0B_DADERR, 0xBC0),
INTC_VECT(KEYSC, 0xBE0),
INTC_VECT(SCIF_SCIF0, 0xC00),
INTC_VECT(SCIF_SCIF1, 0xC20),
INTC_VECT(SCIF_SCIF2, 0xC40),
INTC_VECT(VEU0, 0xC60),
INTC_VECT(MSIOF_MSIOFI0, 0xC80),
INTC_VECT(MSIOF_MSIOFI1, 0xCA0),
INTC_VECT(SPU_SPUI0, 0xCC0),
INTC_VECT(SPU_SPUI1, 0xCE0),
INTC_VECT(SCIFA4, 0xD00),
INTC_VECT(ICB, 0xD20),
INTC_VECT(ETHI, 0xD60),
INTC_VECT(I2C1_ALI, 0xD80),
INTC_VECT(I2C1_TACKI, 0xDA0),
INTC_VECT(I2C1_WAITI, 0xDC0),
INTC_VECT(I2C1_DTEI, 0xDE0),
INTC_VECT(I2C0_ALI, 0xE00),
INTC_VECT(I2C0_TACKI, 0xE20),
INTC_VECT(I2C0_WAITI, 0xE40),
INTC_VECT(I2C0_DTEI, 0xE60),
INTC_VECT(SDHI0, 0xE80),
INTC_VECT(SDHI0, 0xEA0),
INTC_VECT(SDHI0, 0xEC0),
INTC_VECT(SDHI0, 0xEE0),
INTC_VECT(CMT, 0xF00),
INTC_VECT(TSIF, 0xF20),
INTC_VECT(FSI, 0xF80),
INTC_VECT(SCIFA5, 0xFA0),
INTC_VECT(TMU0_TUNI0, 0x400),
INTC_VECT(TMU0_TUNI1, 0x420),
INTC_VECT(TMU0_TUNI2, 0x440),
INTC_VECT(IRDA, 0x480),
INTC_VECT(SDHI1, 0x4E0),
INTC_VECT(SDHI1, 0x500),
INTC_VECT(SDHI1, 0x520),
INTC_VECT(JPU, 0x560),
INTC_VECT(_2DDMAC, 0x4A0),
INTC_VECT(MMC_MMC2I, 0x5A0),
INTC_VECT(MMC_MMC3I, 0x5C0),
INTC_VECT(LCDC, 0xF40),
INTC_VECT(TMU1_TUNI0, 0x920),
INTC_VECT(TMU1_TUNI1, 0x940),
INTC_VECT(TMU1_TUNI2, 0x960),
};
static struct intc_group groups[] __initdata = {
INTC_GROUP(DMAC1A, DMAC1A_DEI0, DMAC1A_DEI1, DMAC1A_DEI2, DMAC1A_DEI3),
INTC_GROUP(_2DG, _2DG_TRI, _2DG_INI, _2DG_CEI),
INTC_GROUP(DMAC0A, DMAC0A_DEI0, DMAC0A_DEI1, DMAC0A_DEI2, DMAC0A_DEI3),
INTC_GROUP(VIO, VIO_CEU0, VIO_BEU0, VIO_VEU1, VIO_VOU),
INTC_GROUP(USB, USB0, USB1),
INTC_GROUP(RTC, RTC_ATI, RTC_PRI, RTC_CUI),
INTC_GROUP(DMAC1B, DMAC1B_DEI4, DMAC1B_DEI5, DMAC1B_DADERR),
INTC_GROUP(DMAC0B, DMAC0B_DEI4, DMAC0B_DEI5, DMAC0B_DADERR),
INTC_GROUP(I2C0, I2C0_ALI, I2C0_TACKI, I2C0_WAITI, I2C0_DTEI),
INTC_GROUP(I2C1, I2C1_ALI, I2C1_TACKI, I2C1_WAITI, I2C1_DTEI),
INTC_GROUP(SPU, SPU_SPUI0, SPU_SPUI1),
INTC_GROUP(MMCIF, MMC_MMC2I, MMC_MMC3I),
};
static struct intc_mask_reg mask_registers[] __initdata = {
{ 0xa4080080, 0xa40800c0, 8, /* IMR0 / IMCR0 */
{ 0, TMU1_TUNI2, TMU1_TUNI1, TMU1_TUNI0,
0, ENABLED, ENABLED, ENABLED } },
{ 0xa4080084, 0xa40800c4, 8, /* IMR1 / IMCR1 */
{ VIO_VOU, VIO_VEU1, VIO_BEU0, VIO_CEU0,
DMAC0A_DEI3, DMAC0A_DEI2, DMAC0A_DEI1, DMAC0A_DEI0 } },
{ 0xa4080088, 0xa40800c8, 8, /* IMR2 / IMCR2 */
{ 0, 0, 0, VPU, ATAPI, ETHI, 0, SCIFA3 } },
{ 0xa408008c, 0xa40800cc, 8, /* IMR3 / IMCR3 */
{ DMAC1A_DEI3, DMAC1A_DEI2, DMAC1A_DEI1, DMAC1A_DEI0,
SPU_SPUI1, SPU_SPUI0, BEU1, IRDA } },
{ 0xa4080090, 0xa40800d0, 8, /* IMR4 / IMCR4 */
{ 0, TMU0_TUNI2, TMU0_TUNI1, TMU0_TUNI0,
JPU, 0, 0, LCDC } },
{ 0xa4080094, 0xa40800d4, 8, /* IMR5 / IMCR5 */
{ KEYSC, DMAC0B_DADERR, DMAC0B_DEI5, DMAC0B_DEI4,
VEU0, SCIF_SCIF2, SCIF_SCIF1, SCIF_SCIF0 } },
{ 0xa4080098, 0xa40800d8, 8, /* IMR6 / IMCR6 */
{ 0, 0, ICB, SCIFA4,
CEU1, 0, MSIOF_MSIOFI1, MSIOF_MSIOFI0 } },
{ 0xa408009c, 0xa40800dc, 8, /* IMR7 / IMCR7 */
{ I2C0_DTEI, I2C0_WAITI, I2C0_TACKI, I2C0_ALI,
I2C1_DTEI, I2C1_WAITI, I2C1_TACKI, I2C1_ALI } },
{ 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */
{ DISABLED, ENABLED, ENABLED, ENABLED,
0, 0, SCIFA5, FSI } },
{ 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */
{ 0, 0, 0, CMT, 0, USB1, USB0, 0 } },
{ 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */
{ 0, DMAC1B_DADERR, DMAC1B_DEI5, DMAC1B_DEI4,
0, RTC_CUI, RTC_PRI, RTC_ATI } },
{ 0xa40800ac, 0xa40800ec, 8, /* IMR11 / IMCR11 */
{ 0, _2DG_CEI, _2DG_INI, _2DG_TRI,
0, TPU, 0, TSIF } },
{ 0xa40800b0, 0xa40800f0, 8, /* IMR12 / IMCR12 */
{ 0, 0, MMC_MMC3I, MMC_MMC2I, 0, 0, 0, _2DDMAC } },
{ 0xa4140044, 0xa4140064, 8, /* INTMSK00 / INTMSKCLR00 */
{ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
};
static struct intc_prio_reg prio_registers[] __initdata = {
{ 0xa4080000, 0, 16, 4, /* IPRA */ { TMU0_TUNI0, TMU0_TUNI1,
TMU0_TUNI2, IRDA } },
{ 0xa4080004, 0, 16, 4, /* IPRB */ { JPU, LCDC, DMAC1A, BEU1 } },
{ 0xa4080008, 0, 16, 4, /* IPRC */ { TMU1_TUNI0, TMU1_TUNI1,
TMU1_TUNI2, SPU } },
{ 0xa408000c, 0, 16, 4, /* IPRD */ { 0, MMCIF, 0, ATAPI } },
{ 0xa4080010, 0, 16, 4, /* IPRE */ { DMAC0A, VIO, SCIFA3, VPU } },
{ 0xa4080014, 0, 16, 4, /* IPRF */ { KEYSC, DMAC0B, USB, CMT } },
{ 0xa4080018, 0, 16, 4, /* IPRG */ { SCIF_SCIF0, SCIF_SCIF1,
SCIF_SCIF2, VEU0 } },
{ 0xa408001c, 0, 16, 4, /* IPRH */ { MSIOF_MSIOFI0, MSIOF_MSIOFI1,
I2C1, I2C0 } },
{ 0xa4080020, 0, 16, 4, /* IPRI */ { SCIFA4, ICB, TSIF, _2DG } },
{ 0xa4080024, 0, 16, 4, /* IPRJ */ { CEU1, ETHI, FSI, SDHI1 } },
{ 0xa4080028, 0, 16, 4, /* IPRK */ { RTC, DMAC1B, 0, SDHI0 } },
{ 0xa408002c, 0, 16, 4, /* IPRL */ { SCIFA5, 0, TPU, _2DDMAC } },
{ 0xa4140010, 0, 32, 4, /* INTPRI00 */
{ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
};
static struct intc_sense_reg sense_registers[] __initdata = {
{ 0xa414001c, 16, 2, /* ICR1 */
{ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
};
static struct intc_mask_reg ack_registers[] __initdata = {
{ 0xa4140024, 0, 8, /* INTREQ00 */
{ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
};
static struct intc_desc intc_desc __initdata = {
.name = "sh7724",
.force_enable = ENABLED,
.force_disable = DISABLED,
.hw = INTC_HW_DESC(vectors, groups, mask_registers,
prio_registers, sense_registers, ack_registers),
};
void __init plat_irq_setup(void)
{
register_intc_controller(&intc_desc);
}
static struct {
/* BSC */
unsigned long mmselr;
unsigned long cs0bcr;
unsigned long cs4bcr;
unsigned long cs5abcr;
unsigned long cs5bbcr;
unsigned long cs6abcr;
unsigned long cs6bbcr;
unsigned long cs4wcr;
unsigned long cs5awcr;
unsigned long cs5bwcr;
unsigned long cs6awcr;
unsigned long cs6bwcr;
/* INTC */
unsigned short ipra;
unsigned short iprb;
unsigned short iprc;
unsigned short iprd;
unsigned short ipre;
unsigned short iprf;
unsigned short iprg;
unsigned short iprh;
unsigned short ipri;
unsigned short iprj;
unsigned short iprk;
unsigned short iprl;
unsigned char imr0;
unsigned char imr1;
unsigned char imr2;
unsigned char imr3;
unsigned char imr4;
unsigned char imr5;
unsigned char imr6;
unsigned char imr7;
unsigned char imr8;
unsigned char imr9;
unsigned char imr10;
unsigned char imr11;
unsigned char imr12;
/* RWDT */
unsigned short rwtcnt;
unsigned short rwtcsr;
/* CPG */
unsigned long irdaclk;
unsigned long spuclk;
} sh7724_rstandby_state;
static int sh7724_pre_sleep_notifier_call(struct notifier_block *nb,
unsigned long flags, void *unused)
{
if (!(flags & SUSP_SH_RSTANDBY))
return NOTIFY_DONE;
/* BCR */
sh7724_rstandby_state.mmselr = __raw_readl(0xff800020); /* MMSELR */
sh7724_rstandby_state.mmselr |= 0xa5a50000;
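	/* The 0xa5a5 upper-half pattern appears to be the write-enable
	 * key MMSELR expects (an assumption, mirroring the RWDT key
	 * handling below); folding it in lets the restore path write
	 * the saved value back verbatim. */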
sh7724_rstandby_state.cs0bcr = __raw_readl(0xfec10004); /* CS0BCR */
sh7724_rstandby_state.cs4bcr = __raw_readl(0xfec10010); /* CS4BCR */
sh7724_rstandby_state.cs5abcr = __raw_readl(0xfec10014); /* CS5ABCR */
sh7724_rstandby_state.cs5bbcr = __raw_readl(0xfec10018); /* CS5BBCR */
sh7724_rstandby_state.cs6abcr = __raw_readl(0xfec1001c); /* CS6ABCR */
sh7724_rstandby_state.cs6bbcr = __raw_readl(0xfec10020); /* CS6BBCR */
sh7724_rstandby_state.cs4wcr = __raw_readl(0xfec10030); /* CS4WCR */
sh7724_rstandby_state.cs5awcr = __raw_readl(0xfec10034); /* CS5AWCR */
sh7724_rstandby_state.cs5bwcr = __raw_readl(0xfec10038); /* CS5BWCR */
sh7724_rstandby_state.cs6awcr = __raw_readl(0xfec1003c); /* CS6AWCR */
sh7724_rstandby_state.cs6bwcr = __raw_readl(0xfec10040); /* CS6BWCR */
/* INTC */
sh7724_rstandby_state.ipra = __raw_readw(0xa4080000); /* IPRA */
sh7724_rstandby_state.iprb = __raw_readw(0xa4080004); /* IPRB */
sh7724_rstandby_state.iprc = __raw_readw(0xa4080008); /* IPRC */
sh7724_rstandby_state.iprd = __raw_readw(0xa408000c); /* IPRD */
sh7724_rstandby_state.ipre = __raw_readw(0xa4080010); /* IPRE */
sh7724_rstandby_state.iprf = __raw_readw(0xa4080014); /* IPRF */
sh7724_rstandby_state.iprg = __raw_readw(0xa4080018); /* IPRG */
sh7724_rstandby_state.iprh = __raw_readw(0xa408001c); /* IPRH */
sh7724_rstandby_state.ipri = __raw_readw(0xa4080020); /* IPRI */
sh7724_rstandby_state.iprj = __raw_readw(0xa4080024); /* IPRJ */
sh7724_rstandby_state.iprk = __raw_readw(0xa4080028); /* IPRK */
sh7724_rstandby_state.iprl = __raw_readw(0xa408002c); /* IPRL */
sh7724_rstandby_state.imr0 = __raw_readb(0xa4080080); /* IMR0 */
sh7724_rstandby_state.imr1 = __raw_readb(0xa4080084); /* IMR1 */
sh7724_rstandby_state.imr2 = __raw_readb(0xa4080088); /* IMR2 */
sh7724_rstandby_state.imr3 = __raw_readb(0xa408008c); /* IMR3 */
sh7724_rstandby_state.imr4 = __raw_readb(0xa4080090); /* IMR4 */
sh7724_rstandby_state.imr5 = __raw_readb(0xa4080094); /* IMR5 */
sh7724_rstandby_state.imr6 = __raw_readb(0xa4080098); /* IMR6 */
sh7724_rstandby_state.imr7 = __raw_readb(0xa408009c); /* IMR7 */
sh7724_rstandby_state.imr8 = __raw_readb(0xa40800a0); /* IMR8 */
sh7724_rstandby_state.imr9 = __raw_readb(0xa40800a4); /* IMR9 */
sh7724_rstandby_state.imr10 = __raw_readb(0xa40800a8); /* IMR10 */
sh7724_rstandby_state.imr11 = __raw_readb(0xa40800ac); /* IMR11 */
sh7724_rstandby_state.imr12 = __raw_readb(0xa40800b0); /* IMR12 */
/* RWDT */
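	/* RWTCNT/RWTCSR writes only take effect with the 0x5a/0xa5 key
	 * in the upper byte; folding the keys into the saved values lets
	 * the restore path write them back verbatim. */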
sh7724_rstandby_state.rwtcnt = __raw_readb(0xa4520000); /* RWTCNT */
sh7724_rstandby_state.rwtcnt |= 0x5a00;
sh7724_rstandby_state.rwtcsr = __raw_readb(0xa4520004); /* RWTCSR */
sh7724_rstandby_state.rwtcsr |= 0xa500;
__raw_writew(sh7724_rstandby_state.rwtcsr & 0x07, 0xa4520004);
/* CPG */
sh7724_rstandby_state.irdaclk = __raw_readl(0xa4150018); /* IRDACLKCR */
sh7724_rstandby_state.spuclk = __raw_readl(0xa415003c); /* SPUCLKCR */
return NOTIFY_DONE;
}
static int sh7724_post_sleep_notifier_call(struct notifier_block *nb,
unsigned long flags, void *unused)
{
if (!(flags & SUSP_SH_RSTANDBY))
return NOTIFY_DONE;
/* BCR */
__raw_writel(sh7724_rstandby_state.mmselr, 0xff800020); /* MMSELR */
__raw_writel(sh7724_rstandby_state.cs0bcr, 0xfec10004); /* CS0BCR */
__raw_writel(sh7724_rstandby_state.cs4bcr, 0xfec10010); /* CS4BCR */
__raw_writel(sh7724_rstandby_state.cs5abcr, 0xfec10014); /* CS5ABCR */
__raw_writel(sh7724_rstandby_state.cs5bbcr, 0xfec10018); /* CS5BBCR */
__raw_writel(sh7724_rstandby_state.cs6abcr, 0xfec1001c); /* CS6ABCR */
__raw_writel(sh7724_rstandby_state.cs6bbcr, 0xfec10020); /* CS6BBCR */
__raw_writel(sh7724_rstandby_state.cs4wcr, 0xfec10030); /* CS4WCR */
__raw_writel(sh7724_rstandby_state.cs5awcr, 0xfec10034); /* CS5AWCR */
__raw_writel(sh7724_rstandby_state.cs5bwcr, 0xfec10038); /* CS5BWCR */
__raw_writel(sh7724_rstandby_state.cs6awcr, 0xfec1003c); /* CS6AWCR */
__raw_writel(sh7724_rstandby_state.cs6bwcr, 0xfec10040); /* CS6BWCR */
/* INTC */
__raw_writew(sh7724_rstandby_state.ipra, 0xa4080000); /* IPRA */
__raw_writew(sh7724_rstandby_state.iprb, 0xa4080004); /* IPRB */
__raw_writew(sh7724_rstandby_state.iprc, 0xa4080008); /* IPRC */
__raw_writew(sh7724_rstandby_state.iprd, 0xa408000c); /* IPRD */
__raw_writew(sh7724_rstandby_state.ipre, 0xa4080010); /* IPRE */
__raw_writew(sh7724_rstandby_state.iprf, 0xa4080014); /* IPRF */
__raw_writew(sh7724_rstandby_state.iprg, 0xa4080018); /* IPRG */
__raw_writew(sh7724_rstandby_state.iprh, 0xa408001c); /* IPRH */
__raw_writew(sh7724_rstandby_state.ipri, 0xa4080020); /* IPRI */
__raw_writew(sh7724_rstandby_state.iprj, 0xa4080024); /* IPRJ */
__raw_writew(sh7724_rstandby_state.iprk, 0xa4080028); /* IPRK */
__raw_writew(sh7724_rstandby_state.iprl, 0xa408002c); /* IPRL */
__raw_writeb(sh7724_rstandby_state.imr0, 0xa4080080); /* IMR0 */
__raw_writeb(sh7724_rstandby_state.imr1, 0xa4080084); /* IMR1 */
__raw_writeb(sh7724_rstandby_state.imr2, 0xa4080088); /* IMR2 */
__raw_writeb(sh7724_rstandby_state.imr3, 0xa408008c); /* IMR3 */
__raw_writeb(sh7724_rstandby_state.imr4, 0xa4080090); /* IMR4 */
__raw_writeb(sh7724_rstandby_state.imr5, 0xa4080094); /* IMR5 */
__raw_writeb(sh7724_rstandby_state.imr6, 0xa4080098); /* IMR6 */
__raw_writeb(sh7724_rstandby_state.imr7, 0xa408009c); /* IMR7 */
__raw_writeb(sh7724_rstandby_state.imr8, 0xa40800a0); /* IMR8 */
__raw_writeb(sh7724_rstandby_state.imr9, 0xa40800a4); /* IMR9 */
__raw_writeb(sh7724_rstandby_state.imr10, 0xa40800a8); /* IMR10 */
__raw_writeb(sh7724_rstandby_state.imr11, 0xa40800ac); /* IMR11 */
__raw_writeb(sh7724_rstandby_state.imr12, 0xa40800b0); /* IMR12 */
/* RWDT */
__raw_writew(sh7724_rstandby_state.rwtcnt, 0xa4520000); /* RWTCNT */
__raw_writew(sh7724_rstandby_state.rwtcsr, 0xa4520004); /* RWTCSR */
/* CPG */
__raw_writel(sh7724_rstandby_state.irdaclk, 0xa4150018); /* IRDACLKCR */
__raw_writel(sh7724_rstandby_state.spuclk, 0xa415003c); /* SPUCLKCR */
return NOTIFY_DONE;
}
static struct notifier_block sh7724_pre_sleep_notifier = {
.notifier_call = sh7724_pre_sleep_notifier_call,
.priority = SH_MOBILE_PRE(SH_MOBILE_SLEEP_CPU),
};
static struct notifier_block sh7724_post_sleep_notifier = {
.notifier_call = sh7724_post_sleep_notifier_call,
.priority = SH_MOBILE_POST(SH_MOBILE_SLEEP_CPU),
};
static int __init sh7724_sleep_setup(void)
{
atomic_notifier_chain_register(&sh_mobile_pre_sleep_notifier_list,
&sh7724_pre_sleep_notifier);
atomic_notifier_chain_register(&sh_mobile_post_sleep_notifier_list,
&sh7724_post_sleep_notifier);
return 0;
}
arch_initcall(sh7724_sleep_setup);
| linux-master | arch/sh/kernel/cpu/sh4a/setup-sh7724.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/cpu/sh4a/clock-sh7722.c
*
* SH7722 clock framework support
*
* Copyright (C) 2009 Magnus Damm
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/clkdev.h>
#include <linux/sh_clk.h>
#include <asm/clock.h>
#include <cpu/sh7722.h>
/* SH7722 registers */
#define FRQCR 0xa4150000
#define VCLKCR 0xa4150004
#define SCLKACR 0xa4150008
#define SCLKBCR 0xa415000c
#define IRDACLKCR 0xa4150018
#define PLLCR 0xa4150024
#define MSTPCR0 0xa4150030
#define MSTPCR1 0xa4150034
#define MSTPCR2 0xa4150038
#define DLLFRQ 0xa4150050
/* Fixed 32 kHz root clock for RTC and power management purposes */
static struct clk r_clk = {
.rate = 32768,
};
/*
* Default rate for the root input clock, reset this with clk_set_rate()
* from the platform code.
*/
struct clk extal_clk = {
.rate = 33333333,
};
/* The DLL block multiplies the 32 kHz r_clk and may be used instead of extal */
static unsigned long dll_recalc(struct clk *clk)
{
unsigned long mult;
if (__raw_readl(PLLCR) & 0x1000)
mult = __raw_readl(DLLFRQ);
else
mult = 0;
return clk->parent->rate * mult;
}
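/*
 * Illustrative arithmetic: the DLL output is the 32768 Hz r_clk times
 * the raw DLLFRQ value, so a DLLFRQ reading of around 1017 would give
 * roughly 33.3 MHz, comparable to the EXTAL default above.
 */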
static struct sh_clk_ops dll_clk_ops = {
.recalc = dll_recalc,
};
static struct clk dll_clk = {
.ops = &dll_clk_ops,
.parent = &r_clk,
.flags = CLK_ENABLE_ON_INIT,
};
static unsigned long pll_recalc(struct clk *clk)
{
unsigned long mult = 1;
unsigned long div = 1;
if (__raw_readl(PLLCR) & 0x4000)
mult = (((__raw_readl(FRQCR) >> 24) & 0x1f) + 1);
else
div = 2;
return (clk->parent->rate * mult) / div;
}
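/*
 * Illustrative arithmetic: with the 33333333 Hz EXTAL parent and a
 * FRQCR[28:24] field of 11 (i.e. a x12 multiplier), this returns about
 * 400 MHz; with PLLCR bit 14 clear the parent rate is simply halved.
 */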
static struct sh_clk_ops pll_clk_ops = {
.recalc = pll_recalc,
};
static struct clk pll_clk = {
.ops = &pll_clk_ops,
.flags = CLK_ENABLE_ON_INIT,
};
struct clk *main_clks[] = {
&r_clk,
&extal_clk,
&dll_clk,
&pll_clk,
};
static int multipliers[] = { 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
static int divisors[] = { 1, 3, 2, 5, 3, 4, 5, 6, 8, 10, 12, 16, 20 };
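/*
 * Read pairwise, these arrays encode the fractional divide ratios
 * 1, 1.5, 2, 2.5, 3, 4, 5, 6, 8, 10, 12, 16 and 20 (ratio =
 * divisor / multiplier), since the div4 framework only takes
 * integer pairs.
 */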
static struct clk_div_mult_table div4_div_mult_table = {
.divisors = divisors,
.nr_divisors = ARRAY_SIZE(divisors),
.multipliers = multipliers,
.nr_multipliers = ARRAY_SIZE(multipliers),
};
static struct clk_div4_table div4_table = {
.div_mult_table = &div4_div_mult_table,
};
#define DIV4(_reg, _bit, _mask, _flags) \
SH_CLK_DIV4(&pll_clk, _reg, _bit, _mask, _flags)
enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_B3, DIV4_P, DIV4_NR };
struct clk div4_clks[DIV4_NR] = {
[DIV4_I] = DIV4(FRQCR, 20, 0x1fef, CLK_ENABLE_ON_INIT),
[DIV4_U] = DIV4(FRQCR, 16, 0x1fff, CLK_ENABLE_ON_INIT),
[DIV4_SH] = DIV4(FRQCR, 12, 0x1fff, CLK_ENABLE_ON_INIT),
[DIV4_B] = DIV4(FRQCR, 8, 0x1fff, CLK_ENABLE_ON_INIT),
[DIV4_B3] = DIV4(FRQCR, 4, 0x1fff, CLK_ENABLE_ON_INIT),
[DIV4_P] = DIV4(FRQCR, 0, 0x1fff, 0),
};
enum { DIV4_IRDA, DIV4_ENABLE_NR };
struct clk div4_enable_clks[DIV4_ENABLE_NR] = {
[DIV4_IRDA] = DIV4(IRDACLKCR, 0, 0x1fff, 0),
};
enum { DIV4_SIUA, DIV4_SIUB, DIV4_REPARENT_NR };
struct clk div4_reparent_clks[DIV4_REPARENT_NR] = {
[DIV4_SIUA] = DIV4(SCLKACR, 0, 0x1fff, 0),
[DIV4_SIUB] = DIV4(SCLKBCR, 0, 0x1fff, 0),
};
enum { DIV6_V, DIV6_NR };
struct clk div6_clks[DIV6_NR] = {
[DIV6_V] = SH_CLK_DIV6(&pll_clk, VCLKCR, 0),
};
static struct clk mstp_clks[HWBLK_NR] = {
[HWBLK_URAM] = SH_CLK_MSTP32(&div4_clks[DIV4_U], MSTPCR0, 28, CLK_ENABLE_ON_INIT),
[HWBLK_XYMEM] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 26, CLK_ENABLE_ON_INIT),
[HWBLK_TMU] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 15, 0),
[HWBLK_CMT] = SH_CLK_MSTP32(&r_clk, MSTPCR0, 14, 0),
[HWBLK_RWDT] = SH_CLK_MSTP32(&r_clk, MSTPCR0, 13, 0),
[HWBLK_FLCTL] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 10, 0),
[HWBLK_SCIF0] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 7, 0),
[HWBLK_SCIF1] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 6, 0),
[HWBLK_SCIF2] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 5, 0),
[HWBLK_IIC] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 9, 0),
[HWBLK_RTC] = SH_CLK_MSTP32(&r_clk, MSTPCR1, 8, 0),
[HWBLK_SDHI] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR2, 18, 0),
[HWBLK_KEYSC] = SH_CLK_MSTP32(&r_clk, MSTPCR2, 14, 0),
[HWBLK_USBF] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR2, 11, 0),
[HWBLK_2DG] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 9, 0),
[HWBLK_SIU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 8, 0),
[HWBLK_JPU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 6, 0),
[HWBLK_VOU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 5, 0),
[HWBLK_BEU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 4, 0),
[HWBLK_CEU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 3, 0),
[HWBLK_VEU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 2, 0),
[HWBLK_VPU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 1, 0),
[HWBLK_LCDC] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR2, 0, 0),
};
static struct clk_lookup lookups[] = {
/* main clocks */
CLKDEV_CON_ID("rclk", &r_clk),
CLKDEV_CON_ID("extal", &extal_clk),
CLKDEV_CON_ID("dll_clk", &dll_clk),
CLKDEV_CON_ID("pll_clk", &pll_clk),
/* DIV4 clocks */
CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
CLKDEV_CON_ID("umem_clk", &div4_clks[DIV4_U]),
CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]),
CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]),
CLKDEV_CON_ID("b3_clk", &div4_clks[DIV4_B3]),
CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
CLKDEV_CON_ID("irda_clk", &div4_enable_clks[DIV4_IRDA]),
CLKDEV_CON_ID("siua_clk", &div4_reparent_clks[DIV4_SIUA]),
CLKDEV_CON_ID("siub_clk", &div4_reparent_clks[DIV4_SIUB]),
/* DIV6 clocks */
CLKDEV_CON_ID("video_clk", &div6_clks[DIV6_V]),
/* MSTP clocks */
CLKDEV_CON_ID("uram0", &mstp_clks[HWBLK_URAM]),
CLKDEV_CON_ID("xymem0", &mstp_clks[HWBLK_XYMEM]),
CLKDEV_ICK_ID("fck", "sh-tmu.0", &mstp_clks[HWBLK_TMU]),
CLKDEV_ICK_ID("fck", "sh-cmt-32.0", &mstp_clks[HWBLK_CMT]),
CLKDEV_DEV_ID("sh-wdt.0", &mstp_clks[HWBLK_RWDT]),
CLKDEV_CON_ID("flctl0", &mstp_clks[HWBLK_FLCTL]),
CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[HWBLK_SCIF0]),
CLKDEV_DEV_ID("sh-sci.1", &mstp_clks[HWBLK_SCIF1]),
CLKDEV_DEV_ID("sh-sci.2", &mstp_clks[HWBLK_SCIF2]),
CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[HWBLK_IIC]),
CLKDEV_CON_ID("rtc0", &mstp_clks[HWBLK_RTC]),
CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[HWBLK_SDHI]),
CLKDEV_DEV_ID("sh_keysc.0", &mstp_clks[HWBLK_KEYSC]),
CLKDEV_CON_ID("usbf0", &mstp_clks[HWBLK_USBF]),
CLKDEV_CON_ID("2dg0", &mstp_clks[HWBLK_2DG]),
CLKDEV_DEV_ID("siu-pcm-audio", &mstp_clks[HWBLK_SIU]),
CLKDEV_DEV_ID("sh-vou.0", &mstp_clks[HWBLK_VOU]),
CLKDEV_CON_ID("jpu0", &mstp_clks[HWBLK_JPU]),
CLKDEV_CON_ID("beu0", &mstp_clks[HWBLK_BEU]),
CLKDEV_DEV_ID("renesas-ceu.0", &mstp_clks[HWBLK_CEU]),
CLKDEV_CON_ID("veu0", &mstp_clks[HWBLK_VEU]),
CLKDEV_CON_ID("vpu0", &mstp_clks[HWBLK_VPU]),
CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[HWBLK_LCDC]),
};
int __init arch_clk_init(void)
{
int k, ret = 0;
/* autodetect extal or dll configuration */
if (__raw_readl(PLLCR) & 0x1000)
pll_clk.parent = &dll_clk;
else
pll_clk.parent = &extal_clk;
for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
ret = clk_register(main_clks[k]);
clkdev_add_table(lookups, ARRAY_SIZE(lookups));
if (!ret)
ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);
if (!ret)
ret = sh_clk_div4_enable_register(div4_enable_clks,
DIV4_ENABLE_NR, &div4_table);
if (!ret)
ret = sh_clk_div4_reparent_register(div4_reparent_clks,
DIV4_REPARENT_NR, &div4_table);
if (!ret)
ret = sh_clk_div6_register(div6_clks, DIV6_NR);
if (!ret)
ret = sh_clk_mstp_register(mstp_clks, HWBLK_NR);
return ret;
}
| linux-master | arch/sh/kernel/cpu/sh4a/clock-sh7722.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <cpu/pfc.h>
static struct resource sh7722_pfc_resources[] = {
[0] = {
.start = 0xa4050100,
.end = 0xa405018f,
.flags = IORESOURCE_MEM,
},
};
static int __init plat_pinmux_setup(void)
{
return sh_pfc_register("pfc-sh7722", sh7722_pfc_resources,
ARRAY_SIZE(sh7722_pfc_resources));
}
arch_initcall(plat_pinmux_setup);
| linux-master | arch/sh/kernel/cpu/sh4a/pinmux-sh7722.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Performance events support for SH-4A performance counters
*
* Copyright (C) 2009, 2010 Paul Mundt
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/perf_event.h>
#include <asm/processor.h>
#define PPC_CCBR(idx) (0xff200800 + (sizeof(u32) * idx))
#define PPC_PMCTR(idx) (0xfc100000 + (sizeof(u32) * idx))
#define CCBR_CIT_MASK (0x7ff << 6)
#define CCBR_DUC (1 << 3)
#define CCBR_CMDS (1 << 1)
#define CCBR_PPCE (1 << 0)
#ifdef CONFIG_CPU_SHX3
/*
* The PMCAT location for SH-X3 CPUs was quietly moved, while the CCBR
* and PMCTR locations remain tentatively constant. This change remains
* wholly undocumented, and was simply found through trial and error.
*
* Early cuts of SH-X3 still appear to use the SH-X/SH-X2 locations, and
* it's unclear when this ceased to be the case. For now we always use
* the new location (if future parts keep up with this trend then
* scanning for them at runtime also remains a viable option).
*
* The gap in the register space also suggests that there are other
* undocumented counters, so this will need to be revisited at a later
* point in time.
*/
#define PPC_PMCAT 0xfc100240
#else
#define PPC_PMCAT 0xfc100080
#endif
#define PMCAT_OVF3 (1 << 27)
#define PMCAT_CNN3 (1 << 26)
#define PMCAT_CLR3 (1 << 25)
#define PMCAT_OVF2 (1 << 19)
#define PMCAT_CLR2 (1 << 17)
#define PMCAT_OVF1 (1 << 11)
#define PMCAT_CNN1 (1 << 10)
#define PMCAT_CLR1 (1 << 9)
#define PMCAT_OVF0 (1 << 3)
#define PMCAT_CLR0 (1 << 1)
static struct sh_pmu sh4a_pmu;
/*
* Supported raw event codes:
*
* Event Code Description
* ---------- -----------
*
* 0x0000 number of elapsed cycles
* 0x0200 number of elapsed cycles in privileged mode
* 0x0280 number of elapsed cycles while SR.BL is asserted
* 0x0202 instruction execution
* 0x0203 instruction execution in parallel
* 0x0204 number of unconditional branches
* 0x0208 number of exceptions
* 0x0209 number of interrupts
* 0x0220 UTLB miss caused by instruction fetch
* 0x0222 UTLB miss caused by operand access
* 0x02a0 number of ITLB misses
* 0x0028 number of accesses to instruction memories
* 0x0029 number of accesses to instruction cache
* 0x002a instruction cache miss
* 0x022e number of accesses to instruction X/Y memory
* 0x0030 number of reads to operand memories
* 0x0038 number of writes to operand memories
* 0x0031 number of operand cache read accesses
* 0x0039 number of operand cache write accesses
* 0x0032 operand cache read miss
* 0x003a operand cache write miss
* 0x0236 number of reads to operand X/Y memory
* 0x023e number of writes to operand X/Y memory
* 0x0237 number of reads to operand U memory
* 0x023f number of writes to operand U memory
* 0x0337 number of U memory read buffer misses
* 0x02b4 number of wait cycles due to operand read access
* 0x02bc number of wait cycles due to operand write access
* 0x0033 number of wait cycles due to operand cache read miss
* 0x003b number of wait cycles due to operand cache write miss
*/
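/*
 * As a usage sketch (assuming a kernel built with perf events and the
 * stock perf tool), any of the codes above can be requested from
 * userspace through the raw event interface, e.g. operand cache read
 * misses (0x0032):
 *
 *	perf stat -e r32 ./workload
 */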
/*
* Special reserved bits used by hardware emulators, read values will
* vary, but writes must always be 0.
*/
#define PMCAT_EMU_CLR_MASK ((1 << 24) | (1 << 16) | (1 << 8) | (1 << 0))
static const int sh4a_general_events[] = {
[PERF_COUNT_HW_CPU_CYCLES] = 0x0000,
[PERF_COUNT_HW_INSTRUCTIONS] = 0x0202,
[PERF_COUNT_HW_CACHE_REFERENCES] = 0x0029, /* I-cache */
[PERF_COUNT_HW_CACHE_MISSES] = 0x002a, /* I-cache */
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x0204,
[PERF_COUNT_HW_BRANCH_MISSES] = -1,
[PERF_COUNT_HW_BUS_CYCLES] = -1,
};
#define C(x) PERF_COUNT_HW_CACHE_##x
static const int sh4a_cache_events
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
[ C(L1D) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x0031,
[ C(RESULT_MISS) ] = 0x0032,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0x0039,
[ C(RESULT_MISS) ] = 0x003a,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0,
[ C(RESULT_MISS) ] = 0,
},
},
[ C(L1I) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x0029,
[ C(RESULT_MISS) ] = 0x002a,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0,
[ C(RESULT_MISS) ] = 0,
},
},
[ C(LL) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x0030,
[ C(RESULT_MISS) ] = 0,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0x0038,
[ C(RESULT_MISS) ] = 0,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0,
[ C(RESULT_MISS) ] = 0,
},
},
[ C(DTLB) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x0222,
[ C(RESULT_MISS) ] = 0x0220,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0,
[ C(RESULT_MISS) ] = 0,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0,
[ C(RESULT_MISS) ] = 0,
},
},
[ C(ITLB) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0,
[ C(RESULT_MISS) ] = 0x02a0,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
},
[ C(BPU) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
},
[ C(NODE) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
},
};
static int sh4a_event_map(int event)
{
return sh4a_general_events[event];
}
static u64 sh4a_pmu_read(int idx)
{
return __raw_readl(PPC_PMCTR(idx));
}
static void sh4a_pmu_disable(struct hw_perf_event *hwc, int idx)
{
unsigned int tmp;
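/* Clearing the event selection (CIT) field and DUC halts this counter */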
tmp = __raw_readl(PPC_CCBR(idx));
tmp &= ~(CCBR_CIT_MASK | CCBR_DUC);
__raw_writel(tmp, PPC_CCBR(idx));
}
static void sh4a_pmu_enable(struct hw_perf_event *hwc, int idx)
{
unsigned int tmp;
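/*
 * Reset the counter through its PMCAT clear bit; the emulator-reserved
 * bits must be written back as zero.
 */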
tmp = __raw_readl(PPC_PMCAT);
tmp &= ~PMCAT_EMU_CLR_MASK;
tmp |= idx ? PMCAT_CLR1 : PMCAT_CLR0;
__raw_writel(tmp, PPC_PMCAT);
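/* Program the event code into the CIT field (bits 16:6) and set the control bits */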
tmp = __raw_readl(PPC_CCBR(idx));
tmp |= (hwc->config << 6) | CCBR_CMDS | CCBR_PPCE;
__raw_writel(tmp, PPC_CCBR(idx));
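/* Setting DUC last starts the counter */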
__raw_writel(__raw_readl(PPC_CCBR(idx)) | CCBR_DUC, PPC_CCBR(idx));
}
static void sh4a_pmu_disable_all(void)
{
int i;
for (i = 0; i < sh4a_pmu.num_events; i++)
__raw_writel(__raw_readl(PPC_CCBR(i)) & ~CCBR_DUC, PPC_CCBR(i));
}
static void sh4a_pmu_enable_all(void)
{
int i;
for (i = 0; i < sh4a_pmu.num_events; i++)
__raw_writel(__raw_readl(PPC_CCBR(i)) | CCBR_DUC, PPC_CCBR(i));
}
static struct sh_pmu sh4a_pmu = {
.name = "sh4a",
.num_events = 2,
.event_map = sh4a_event_map,
.max_events = ARRAY_SIZE(sh4a_general_events),
.raw_event_mask = 0x3ff,
.cache_events = &sh4a_cache_events,
.read = sh4a_pmu_read,
.disable = sh4a_pmu_disable,
.enable = sh4a_pmu_enable,
.disable_all = sh4a_pmu_disable_all,
.enable_all = sh4a_pmu_enable_all,
};
static int __init sh4a_pmu_init(void)
{
/*
* Make sure this CPU actually has perf counters.
*/
if (!(boot_cpu_data.flags & CPU_HAS_PERF_COUNTER)) {
pr_notice("HW perf events unsupported, software events only.\n");
return -ENODEV;
}
return register_sh_pmu(&sh4a_pmu);
}
early_initcall(sh4a_pmu_init);
| linux-master | arch/sh/kernel/cpu/sh4a/perf_event.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/serial_sci.h>
#include <linux/serial_core.h>
#include <linux/io.h>
#define PSCR 0xA405011E
static void sh7722_sci_init_pins(struct uart_port *port, unsigned int cflag)
{
unsigned short data;
if (port->mapbase == 0xffe00000) {
data = __raw_readw(PSCR);
data &= ~0x03cf;
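/*
 * Without hardware flow control, reclaim the RTS/CTS pin functions
 * (the 0x0340 value is assumed to follow the SH7722 PSCR layout).
 */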
if (!(cflag & CRTSCTS))
data |= 0x0340;
__raw_writew(data, PSCR);
}
}
struct plat_sci_port_ops sh7722_sci_port_ops = {
.init_pins = sh7722_sci_init_pins,
};
| linux-master | arch/sh/kernel/cpu/sh4a/serial-sh7722.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SH7786 Setup
*
* Copyright (C) 2009 - 2011 Renesas Solutions Corp.
* Kuninori Morimoto <[email protected]>
* Paul Mundt <[email protected]>
*
* Based on SH7785 Setup
*
* Copyright (C) 2007 Paul Mundt
*/
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/serial_sci.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/sh_timer.h>
#include <linux/sh_dma.h>
#include <linux/sh_intc.h>
#include <linux/usb/ohci_pdriver.h>
#include <cpu/dma-register.h>
#include <asm/mmzone.h>
#include <asm/platform_early.h>
static struct plat_sci_port scif0_platform_data = {
.scscr = SCSCR_REIE | SCSCR_CKE1,
.type = PORT_SCIF,
.regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
};
static struct resource scif0_resources[] = {
DEFINE_RES_MEM(0xffea0000, 0x100),
DEFINE_RES_IRQ(evt2irq(0x700)),
DEFINE_RES_IRQ(evt2irq(0x720)),
DEFINE_RES_IRQ(evt2irq(0x760)),
DEFINE_RES_IRQ(evt2irq(0x740)),
};
static struct platform_device scif0_device = {
.name = "sh-sci",
.id = 0,
.resource = scif0_resources,
.num_resources = ARRAY_SIZE(scif0_resources),
.dev = {
.platform_data = &scif0_platform_data,
},
};
/*
* The rest of these all have multiplexed IRQs
*/
static struct plat_sci_port scif1_platform_data = {
.scscr = SCSCR_REIE | SCSCR_CKE1,
.type = PORT_SCIF,
.regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
};
static struct resource scif1_resources[] = {
DEFINE_RES_MEM(0xffeb0000, 0x100),
DEFINE_RES_IRQ(evt2irq(0x780)),
};
static struct resource scif1_demux_resources[] = {
DEFINE_RES_MEM(0xffeb0000, 0x100),
/* Placeholders, see sh7786_devices_setup() */
DEFINE_RES_IRQ(0),
DEFINE_RES_IRQ(0),
DEFINE_RES_IRQ(0),
DEFINE_RES_IRQ(0),
};
static struct platform_device scif1_device = {
.name = "sh-sci",
.id = 1,
.resource = scif1_resources,
.num_resources = ARRAY_SIZE(scif1_resources),
.dev = {
.platform_data = &scif1_platform_data,
},
};
static struct plat_sci_port scif2_platform_data = {
.scscr = SCSCR_REIE | SCSCR_CKE1,
.type = PORT_SCIF,
.regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
};
static struct resource scif2_resources[] = {
DEFINE_RES_MEM(0xffec0000, 0x100),
DEFINE_RES_IRQ(evt2irq(0x840)),
};
static struct platform_device scif2_device = {
.name = "sh-sci",
.id = 2,
.resource = scif2_resources,
.num_resources = ARRAY_SIZE(scif2_resources),
.dev = {
.platform_data = &scif2_platform_data,
},
};
static struct plat_sci_port scif3_platform_data = {
.scscr = SCSCR_REIE | SCSCR_CKE1,
.type = PORT_SCIF,
.regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
};
static struct resource scif3_resources[] = {
DEFINE_RES_MEM(0xffed0000, 0x100),
DEFINE_RES_IRQ(evt2irq(0x860)),
};
static struct platform_device scif3_device = {
.name = "sh-sci",
.id = 3,
.resource = scif3_resources,
.num_resources = ARRAY_SIZE(scif3_resources),
.dev = {
.platform_data = &scif3_platform_data,
},
};
static struct plat_sci_port scif4_platform_data = {
.scscr = SCSCR_REIE | SCSCR_CKE1,
.type = PORT_SCIF,
.regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
};
static struct resource scif4_resources[] = {
DEFINE_RES_MEM(0xffee0000, 0x100),
DEFINE_RES_IRQ(evt2irq(0x880)),
};
static struct platform_device scif4_device = {
.name = "sh-sci",
.id = 4,
.resource = scif4_resources,
.num_resources = ARRAY_SIZE(scif4_resources),
.dev = {
.platform_data = &scif4_platform_data,
},
};
static struct plat_sci_port scif5_platform_data = {
.scscr = SCSCR_REIE | SCSCR_CKE1,
.type = PORT_SCIF,
.regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
};
static struct resource scif5_resources[] = {
DEFINE_RES_MEM(0xffef0000, 0x100),
DEFINE_RES_IRQ(evt2irq(0x8a0)),
};
static struct platform_device scif5_device = {
.name = "sh-sci",
.id = 5,
.resource = scif5_resources,
.num_resources = ARRAY_SIZE(scif5_resources),
.dev = {
.platform_data = &scif5_platform_data,
},
};
static struct sh_timer_config tmu0_platform_data = {
.channels_mask = 7,
};
static struct resource tmu0_resources[] = {
DEFINE_RES_MEM(0xffd80000, 0x30),
DEFINE_RES_IRQ(evt2irq(0x400)),
DEFINE_RES_IRQ(evt2irq(0x420)),
DEFINE_RES_IRQ(evt2irq(0x440)),
};
static struct platform_device tmu0_device = {
.name = "sh-tmu",
.id = 0,
.dev = {
.platform_data = &tmu0_platform_data,
},
.resource = tmu0_resources,
.num_resources = ARRAY_SIZE(tmu0_resources),
};
static struct sh_timer_config tmu1_platform_data = {
.channels_mask = 7,
};
static struct resource tmu1_resources[] = {
DEFINE_RES_MEM(0xffda0000, 0x2c),
DEFINE_RES_IRQ(evt2irq(0x480)),
DEFINE_RES_IRQ(evt2irq(0x4a0)),
DEFINE_RES_IRQ(evt2irq(0x4c0)),
};
static struct platform_device tmu1_device = {
.name = "sh-tmu",
.id = 1,
.dev = {
.platform_data = &tmu1_platform_data,
},
.resource = tmu1_resources,
.num_resources = ARRAY_SIZE(tmu1_resources),
};
static struct sh_timer_config tmu2_platform_data = {
.channels_mask = 7,
};
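/*
 * The three TMU2 (and TMU3) channels share a single multiplexed
 * vector, hence the repeated IRQ resources below.
 */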
static struct resource tmu2_resources[] = {
DEFINE_RES_MEM(0xffdc0000, 0x2c),
DEFINE_RES_IRQ(evt2irq(0x7a0)),
DEFINE_RES_IRQ(evt2irq(0x7a0)),
DEFINE_RES_IRQ(evt2irq(0x7a0)),
};
static struct platform_device tmu2_device = {
.name = "sh-tmu",
.id = 2,
.dev = {
.platform_data = &tmu2_platform_data,
},
.resource = tmu2_resources,
.num_resources = ARRAY_SIZE(tmu2_resources),
};
static struct sh_timer_config tmu3_platform_data = {
.channels_mask = 7,
};
static struct resource tmu3_resources[] = {
DEFINE_RES_MEM(0xffde0000, 0x2c),
DEFINE_RES_IRQ(evt2irq(0x7c0)),
DEFINE_RES_IRQ(evt2irq(0x7c0)),
DEFINE_RES_IRQ(evt2irq(0x7c0)),
};
static struct platform_device tmu3_device = {
.name = "sh-tmu",
.id = 3,
.dev = {
.platform_data = &tmu3_platform_data,
},
.resource = tmu3_resources,
.num_resources = ARRAY_SIZE(tmu3_resources),
};
static const struct sh_dmae_channel dmac0_channels[] = {
{
.offset = 0,
.dmars = 0,
.dmars_bit = 0,
}, {
.offset = 0x10,
.dmars = 0,
.dmars_bit = 8,
}, {
.offset = 0x20,
.dmars = 4,
.dmars_bit = 0,
}, {
.offset = 0x30,
.dmars = 4,
.dmars_bit = 8,
}, {
.offset = 0x50,
.dmars = 8,
.dmars_bit = 0,
}, {
.offset = 0x60,
.dmars = 8,
.dmars_bit = 8,
}
};
static const unsigned int ts_shift[] = TS_SHIFT;
static struct sh_dmae_pdata dma0_platform_data = {
.channel = dmac0_channels,
.channel_num = ARRAY_SIZE(dmac0_channels),
.ts_low_shift = CHCR_TS_LOW_SHIFT,
.ts_low_mask = CHCR_TS_LOW_MASK,
.ts_high_shift = CHCR_TS_HIGH_SHIFT,
.ts_high_mask = CHCR_TS_HIGH_MASK,
.ts_shift = ts_shift,
.ts_shift_num = ARRAY_SIZE(ts_shift),
.dmaor_init = DMAOR_INIT,
};
/* Resource order important! */
static struct resource dmac0_resources[] = {
{
/* Channel registers and DMAOR */
.start = 0xfe008020,
.end = 0xfe00808f,
.flags = IORESOURCE_MEM,
}, {
/* DMARSx */
.start = 0xfe009000,
.end = 0xfe00900b,
.flags = IORESOURCE_MEM,
}, {
.name = "error_irq",
.start = evt2irq(0x5c0),
.end = evt2irq(0x5c0),
.flags = IORESOURCE_IRQ,
}, {
/* IRQ for channels 0-5 */
.start = evt2irq(0x500),
.end = evt2irq(0x5a0),
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device dma0_device = {
.name = "sh-dma-engine",
.id = 0,
.resource = dmac0_resources,
.num_resources = ARRAY_SIZE(dmac0_resources),
.dev = {
.platform_data = &dma0_platform_data,
},
};
#define USB_EHCI_START 0xffe70000
#define USB_OHCI_START 0xffe70400
static struct resource usb_ehci_resources[] = {
[0] = {
.start = USB_EHCI_START,
.end = USB_EHCI_START + 0x3ff,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = evt2irq(0xba0),
.end = evt2irq(0xba0),
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device usb_ehci_device = {
.name = "sh_ehci",
.id = -1,
.dev = {
.dma_mask = &usb_ehci_device.dev.coherent_dma_mask,
.coherent_dma_mask = DMA_BIT_MASK(32),
},
.num_resources = ARRAY_SIZE(usb_ehci_resources),
.resource = usb_ehci_resources,
};
static struct resource usb_ohci_resources[] = {
[0] = {
.start = USB_OHCI_START,
.end = USB_OHCI_START + 0x3ff,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = evt2irq(0xba0),
.end = evt2irq(0xba0),
.flags = IORESOURCE_IRQ,
},
};
static struct usb_ohci_pdata usb_ohci_pdata;
static struct platform_device usb_ohci_device = {
.name = "ohci-platform",
.id = -1,
.dev = {
.dma_mask = &usb_ohci_device.dev.coherent_dma_mask,
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &usb_ohci_pdata,
},
.num_resources = ARRAY_SIZE(usb_ohci_resources),
.resource = usb_ohci_resources,
};
static struct platform_device *sh7786_early_devices[] __initdata = {
&scif0_device,
&scif1_device,
&scif2_device,
&scif3_device,
&scif4_device,
&scif5_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
&tmu3_device,
};
static struct platform_device *sh7786_devices[] __initdata = {
&dma0_device,
&usb_ehci_device,
&usb_ohci_device,
};
/*
* Please call this function if your platform board
* uses an external clock for USB.
*/
#define USBCTL0 0xffe70858
#define CLOCK_MODE_MASK 0xffffff7f
#define EXT_CLOCK_MODE 0x00000080
void __init sh7786_usb_use_exclock(void)
{
u32 val = __raw_readl(USBCTL0) & CLOCK_MODE_MASK;
__raw_writel(val | EXT_CLOCK_MODE, USBCTL0);
}
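/*
 * A board setup file would call the above before the USB controllers
 * probe; a hypothetical example (myboard_setup() is an illustrative
 * name, not an existing board):
 *
 *	static void __init myboard_setup(char **cmdline_p)
 *	{
 *		sh7786_usb_use_exclock();
 *	}
 */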
#define USBINITREG1 0xffe70094
#define USBINITREG2 0xffe7009c
#define USBINITVAL1 0x00ff0040
#define USBINITVAL2 0x00000001
#define USBPCTL1 0xffe70804
#define USBST 0xffe70808
#define PHY_ENB 0x00000001
#define PLL_ENB 0x00000002
#define PHY_RST 0x00000004
#define ACT_PLL_STATUS 0xc0000000
static void __init sh7786_usb_setup(void)
{
int i = 1000000;
/*
* USB initial settings
*
* The following settings are necessary
* for using the USB modules.
*
* see "USB Initial Settings" for detail
*/
__raw_writel(USBINITVAL1, USBINITREG1);
__raw_writel(USBINITVAL2, USBINITREG2);
/*
* Set the PHY and PLL enable bit
*/
__raw_writel(PHY_ENB | PLL_ENB, USBPCTL1);
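/* Poll (with a bounded budget) until the PLL reports lock */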
while (i--) {
if (ACT_PLL_STATUS == (__raw_readl(USBST) & ACT_PLL_STATUS)) {
/* Set the PHY RST bit */
__raw_writel(PHY_ENB | PLL_ENB | PHY_RST, USBPCTL1);
printk(KERN_INFO "sh7786 usb setup done\n");
break;
}
cpu_relax();
}
}
enum {
UNUSED = 0,
/* interrupt sources */
IRL0_LLLL, IRL0_LLLH, IRL0_LLHL, IRL0_LLHH,
IRL0_LHLL, IRL0_LHLH, IRL0_LHHL, IRL0_LHHH,
IRL0_HLLL, IRL0_HLLH, IRL0_HLHL, IRL0_HLHH,
IRL0_HHLL, IRL0_HHLH, IRL0_HHHL,
IRL4_LLLL, IRL4_LLLH, IRL4_LLHL, IRL4_LLHH,
IRL4_LHLL, IRL4_LHLH, IRL4_LHHL, IRL4_LHHH,
IRL4_HLLL, IRL4_HLLH, IRL4_HLHL, IRL4_HLHH,
IRL4_HHLL, IRL4_HHLH, IRL4_HHHL,
IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
WDT,
TMU0_0, TMU0_1, TMU0_2, TMU0_3,
TMU1_0, TMU1_1, TMU1_2,
DMAC0_0, DMAC0_1, DMAC0_2, DMAC0_3, DMAC0_4, DMAC0_5, DMAC0_6,
HUDI1, HUDI0,
DMAC1_0, DMAC1_1, DMAC1_2, DMAC1_3,
HPB_0, HPB_1, HPB_2,
SCIF0_0, SCIF0_1, SCIF0_2, SCIF0_3,
SCIF1,
TMU2, TMU3,
SCIF2, SCIF3, SCIF4, SCIF5,
Eth_0, Eth_1,
PCIeC0_0, PCIeC0_1, PCIeC0_2,
PCIeC1_0, PCIeC1_1, PCIeC1_2,
USB,
I2C0, I2C1,
DU,
SSI0, SSI1, SSI2, SSI3,
PCIeC2_0, PCIeC2_1, PCIeC2_2,
HAC0, HAC1,
FLCTL,
HSPI,
GPIO0, GPIO1,
Thermal,
INTICI0, INTICI1, INTICI2, INTICI3,
INTICI4, INTICI5, INTICI6, INTICI7,
/* Muxed sub-events */
TXI1, BRI1, RXI1, ERI1,
};
static struct intc_vect sh7786_vectors[] __initdata = {
INTC_VECT(WDT, 0x3e0),
INTC_VECT(TMU0_0, 0x400), INTC_VECT(TMU0_1, 0x420),
INTC_VECT(TMU0_2, 0x440), INTC_VECT(TMU0_3, 0x460),
INTC_VECT(TMU1_0, 0x480), INTC_VECT(TMU1_1, 0x4a0),
INTC_VECT(TMU1_2, 0x4c0),
INTC_VECT(DMAC0_0, 0x500), INTC_VECT(DMAC0_1, 0x520),
INTC_VECT(DMAC0_2, 0x540), INTC_VECT(DMAC0_3, 0x560),
INTC_VECT(DMAC0_4, 0x580), INTC_VECT(DMAC0_5, 0x5a0),
INTC_VECT(DMAC0_6, 0x5c0),
INTC_VECT(HUDI1, 0x5e0), INTC_VECT(HUDI0, 0x600),
INTC_VECT(DMAC1_0, 0x620), INTC_VECT(DMAC1_1, 0x640),
INTC_VECT(DMAC1_2, 0x660), INTC_VECT(DMAC1_3, 0x680),
INTC_VECT(HPB_0, 0x6a0), INTC_VECT(HPB_1, 0x6c0),
INTC_VECT(HPB_2, 0x6e0),
INTC_VECT(SCIF0_0, 0x700), INTC_VECT(SCIF0_1, 0x720),
INTC_VECT(SCIF0_2, 0x740), INTC_VECT(SCIF0_3, 0x760),
INTC_VECT(SCIF1, 0x780),
INTC_VECT(TMU2, 0x7a0), INTC_VECT(TMU3, 0x7c0),
INTC_VECT(SCIF2, 0x840), INTC_VECT(SCIF3, 0x860),
INTC_VECT(SCIF4, 0x880), INTC_VECT(SCIF5, 0x8a0),
INTC_VECT(Eth_0, 0x8c0), INTC_VECT(Eth_1, 0x8e0),
INTC_VECT(PCIeC0_0, 0xae0), INTC_VECT(PCIeC0_1, 0xb00),
INTC_VECT(PCIeC0_2, 0xb20),
INTC_VECT(PCIeC1_0, 0xb40), INTC_VECT(PCIeC1_1, 0xb60),
INTC_VECT(PCIeC1_2, 0xb80),
INTC_VECT(USB, 0xba0),
INTC_VECT(I2C0, 0xcc0), INTC_VECT(I2C1, 0xce0),
INTC_VECT(DU, 0xd00),
INTC_VECT(SSI0, 0xd20), INTC_VECT(SSI1, 0xd40),
INTC_VECT(SSI2, 0xd60), INTC_VECT(SSI3, 0xd80),
INTC_VECT(PCIeC2_0, 0xda0), INTC_VECT(PCIeC2_1, 0xdc0),
INTC_VECT(PCIeC2_2, 0xde0),
INTC_VECT(HAC0, 0xe00), INTC_VECT(HAC1, 0xe20),
INTC_VECT(FLCTL, 0xe40),
INTC_VECT(HSPI, 0xe80),
INTC_VECT(GPIO0, 0xea0), INTC_VECT(GPIO1, 0xec0),
INTC_VECT(Thermal, 0xee0),
INTC_VECT(INTICI0, 0xf00), INTC_VECT(INTICI1, 0xf20),
INTC_VECT(INTICI2, 0xf40), INTC_VECT(INTICI3, 0xf60),
INTC_VECT(INTICI4, 0xf80), INTC_VECT(INTICI5, 0xfa0),
INTC_VECT(INTICI6, 0xfc0), INTC_VECT(INTICI7, 0xfe0),
};
#define CnINTMSK0 0xfe410030
#define CnINTMSK1 0xfe410040
#define CnINTMSKCLR0 0xfe410050
#define CnINTMSKCLR1 0xfe410060
#define CnINT2MSKR0 0xfe410a20
#define CnINT2MSKR1 0xfe410a24
#define CnINT2MSKR2 0xfe410a28
#define CnINT2MSKR3 0xfe410a2c
#define CnINT2MSKCR0 0xfe410a30
#define CnINT2MSKCR1 0xfe410a34
#define CnINT2MSKCR2 0xfe410a38
#define CnINT2MSKCR3 0xfe410a3c
#define INTMSK2 0xfe410068
#define INTMSKCLR2 0xfe41006c
#define INTDISTCR0 0xfe4100b0
#define INTDISTCR1 0xfe4100b4
#define INT2DISTCR0 0xfe410900
#define INT2DISTCR1 0xfe410904
#define INT2DISTCR2 0xfe410908
#define INT2DISTCR3 0xfe41090c
static struct intc_mask_reg sh7786_mask_registers[] __initdata = {
{ CnINTMSK0, CnINTMSKCLR0, 32,
{ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 },
INTC_SMP_BALANCING(INTDISTCR0) },
{ INTMSK2, INTMSKCLR2, 32,
{ IRL0_LLLL, IRL0_LLLH, IRL0_LLHL, IRL0_LLHH,
IRL0_LHLL, IRL0_LHLH, IRL0_LHHL, IRL0_LHHH,
IRL0_HLLL, IRL0_HLLH, IRL0_HLHL, IRL0_HLHH,
IRL0_HHLL, IRL0_HHLH, IRL0_HHHL, 0,
IRL4_LLLL, IRL4_LLLH, IRL4_LLHL, IRL4_LLHH,
IRL4_LHLL, IRL4_LHLH, IRL4_LHHL, IRL4_LHHH,
IRL4_HLLL, IRL4_HLLH, IRL4_HLHL, IRL4_HLHH,
IRL4_HHLL, IRL4_HHLH, IRL4_HHHL, 0, } },
{ CnINT2MSKR0, CnINT2MSKCR0 , 32,
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, WDT },
INTC_SMP_BALANCING(INT2DISTCR0) },
{ CnINT2MSKR1, CnINT2MSKCR1, 32,
{ TMU0_0, TMU0_1, TMU0_2, TMU0_3, TMU1_0, TMU1_1, TMU1_2, 0,
DMAC0_0, DMAC0_1, DMAC0_2, DMAC0_3, DMAC0_4, DMAC0_5, DMAC0_6,
HUDI1, HUDI0,
DMAC1_0, DMAC1_1, DMAC1_2, DMAC1_3,
HPB_0, HPB_1, HPB_2,
SCIF0_0, SCIF0_1, SCIF0_2, SCIF0_3,
SCIF1,
TMU2, TMU3, 0, }, INTC_SMP_BALANCING(INT2DISTCR1) },
{ CnINT2MSKR2, CnINT2MSKCR2, 32,
{ 0, 0, SCIF2, SCIF3, SCIF4, SCIF5,
Eth_0, Eth_1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
PCIeC0_0, PCIeC0_1, PCIeC0_2,
PCIeC1_0, PCIeC1_1, PCIeC1_2,
USB, 0, 0 }, INTC_SMP_BALANCING(INT2DISTCR2) },
{ CnINT2MSKR3, CnINT2MSKCR3, 32,
{ 0, 0, 0, 0, 0, 0,
I2C0, I2C1,
DU, SSI0, SSI1, SSI2, SSI3,
PCIeC2_0, PCIeC2_1, PCIeC2_2,
HAC0, HAC1,
FLCTL, 0,
HSPI, GPIO0, GPIO1, Thermal,
0, 0, 0, 0, 0, 0, 0, 0 }, INTC_SMP_BALANCING(INT2DISTCR3) },
};
static struct intc_prio_reg sh7786_prio_registers[] __initdata = {
{ 0xfe410010, 0, 32, 4, /* INTPRI */ { IRQ0, IRQ1, IRQ2, IRQ3,
IRQ4, IRQ5, IRQ6, IRQ7 } },
{ 0xfe410800, 0, 32, 8, /* INT2PRI0 */ { 0, 0, 0, WDT } },
{ 0xfe410804, 0, 32, 8, /* INT2PRI1 */ { TMU0_0, TMU0_1,
TMU0_2, TMU0_3 } },
{ 0xfe410808, 0, 32, 8, /* INT2PRI2 */ { TMU1_0, TMU1_1,
TMU1_2, 0 } },
{ 0xfe41080c, 0, 32, 8, /* INT2PRI3 */ { DMAC0_0, DMAC0_1,
DMAC0_2, DMAC0_3 } },
{ 0xfe410810, 0, 32, 8, /* INT2PRI4 */ { DMAC0_4, DMAC0_5,
DMAC0_6, HUDI1 } },
{ 0xfe410814, 0, 32, 8, /* INT2PRI5 */ { HUDI0, DMAC1_0,
DMAC1_1, DMAC1_2 } },
{ 0xfe410818, 0, 32, 8, /* INT2PRI6 */ { DMAC1_3, HPB_0,
HPB_1, HPB_2 } },
{ 0xfe41081c, 0, 32, 8, /* INT2PRI7 */ { SCIF0_0, SCIF0_1,
SCIF0_2, SCIF0_3 } },
{ 0xfe410820, 0, 32, 8, /* INT2PRI8 */ { SCIF1, TMU2, TMU3, 0 } },
{ 0xfe410824, 0, 32, 8, /* INT2PRI9 */ { 0, 0, SCIF2, SCIF3 } },
{ 0xfe410828, 0, 32, 8, /* INT2PRI10 */ { SCIF4, SCIF5,
Eth_0, Eth_1 } },
{ 0xfe41082c, 0, 32, 8, /* INT2PRI11 */ { 0, 0, 0, 0 } },
{ 0xfe410830, 0, 32, 8, /* INT2PRI12 */ { 0, 0, 0, 0 } },
{ 0xfe410834, 0, 32, 8, /* INT2PRI13 */ { 0, 0, 0, 0 } },
{ 0xfe410838, 0, 32, 8, /* INT2PRI14 */ { 0, 0, 0, PCIeC0_0 } },
{ 0xfe41083c, 0, 32, 8, /* INT2PRI15 */ { PCIeC0_1, PCIeC0_2,
PCIeC1_0, PCIeC1_1 } },
{ 0xfe410840, 0, 32, 8, /* INT2PRI16 */ { PCIeC1_2, USB, 0, 0 } },
{ 0xfe410844, 0, 32, 8, /* INT2PRI17 */ { 0, 0, 0, 0 } },
{ 0xfe410848, 0, 32, 8, /* INT2PRI18 */ { 0, 0, I2C0, I2C1 } },
{ 0xfe41084c, 0, 32, 8, /* INT2PRI19 */ { DU, SSI0, SSI1, SSI2 } },
{ 0xfe410850, 0, 32, 8, /* INT2PRI20 */ { SSI3, PCIeC2_0,
PCIeC2_1, PCIeC2_2 } },
{ 0xfe410854, 0, 32, 8, /* INT2PRI21 */ { HAC0, HAC1, FLCTL, 0 } },
{ 0xfe410858, 0, 32, 8, /* INT2PRI22 */ { HSPI, GPIO0,
GPIO1, Thermal } },
{ 0xfe41085c, 0, 32, 8, /* INT2PRI23 */ { 0, 0, 0, 0 } },
{ 0xfe410860, 0, 32, 8, /* INT2PRI24 */ { 0, 0, 0, 0 } },
{ 0xfe410090, 0xfe4100a0, 32, 4, /* CnICIPRI / CnICIPRICLR */
{ INTICI7, INTICI6, INTICI5, INTICI4,
INTICI3, INTICI2, INTICI1, INTICI0 }, INTC_SMP(4, 2) },
};
static struct intc_subgroup sh7786_subgroups[] __initdata = {
{ 0xfe410c20, 32, SCIF1,
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, TXI1, BRI1, RXI1, ERI1 } },
};
static struct intc_desc sh7786_intc_desc __initdata = {
.name = "sh7786",
.hw = {
.vectors = sh7786_vectors,
.nr_vectors = ARRAY_SIZE(sh7786_vectors),
.mask_regs = sh7786_mask_registers,
.nr_mask_regs = ARRAY_SIZE(sh7786_mask_registers),
.subgroups = sh7786_subgroups,
.nr_subgroups = ARRAY_SIZE(sh7786_subgroups),
.prio_regs = sh7786_prio_registers,
.nr_prio_regs = ARRAY_SIZE(sh7786_prio_registers),
},
};
/* Support for external interrupt pins in IRQ mode */
static struct intc_vect vectors_irq0123[] __initdata = {
INTC_VECT(IRQ0, 0x200), INTC_VECT(IRQ1, 0x240),
INTC_VECT(IRQ2, 0x280), INTC_VECT(IRQ3, 0x2c0),
};
static struct intc_vect vectors_irq4567[] __initdata = {
INTC_VECT(IRQ4, 0x300), INTC_VECT(IRQ5, 0x340),
INTC_VECT(IRQ6, 0x380), INTC_VECT(IRQ7, 0x3c0),
};
static struct intc_sense_reg sh7786_sense_registers[] __initdata = {
{ 0xfe41001c, 32, 2, /* ICR1 */ { IRQ0, IRQ1, IRQ2, IRQ3,
IRQ4, IRQ5, IRQ6, IRQ7 } },
};
static struct intc_mask_reg sh7786_ack_registers[] __initdata = {
{ 0xfe410024, 0, 32, /* INTREQ */
{ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
};
static DECLARE_INTC_DESC_ACK(intc_desc_irq0123, "sh7786-irq0123",
vectors_irq0123, NULL, sh7786_mask_registers,
sh7786_prio_registers, sh7786_sense_registers,
sh7786_ack_registers);
static DECLARE_INTC_DESC_ACK(intc_desc_irq4567, "sh7786-irq4567",
vectors_irq4567, NULL, sh7786_mask_registers,
sh7786_prio_registers, sh7786_sense_registers,
sh7786_ack_registers);
/* External interrupt pins in IRL mode */
static struct intc_vect vectors_irl0123[] __initdata = {
INTC_VECT(IRL0_LLLL, 0x200), INTC_VECT(IRL0_LLLH, 0x220),
INTC_VECT(IRL0_LLHL, 0x240), INTC_VECT(IRL0_LLHH, 0x260),
INTC_VECT(IRL0_LHLL, 0x280), INTC_VECT(IRL0_LHLH, 0x2a0),
INTC_VECT(IRL0_LHHL, 0x2c0), INTC_VECT(IRL0_LHHH, 0x2e0),
INTC_VECT(IRL0_HLLL, 0x300), INTC_VECT(IRL0_HLLH, 0x320),
INTC_VECT(IRL0_HLHL, 0x340), INTC_VECT(IRL0_HLHH, 0x360),
INTC_VECT(IRL0_HHLL, 0x380), INTC_VECT(IRL0_HHLH, 0x3a0),
INTC_VECT(IRL0_HHHL, 0x3c0),
};
static struct intc_vect vectors_irl4567[] __initdata = {
INTC_VECT(IRL4_LLLL, 0x900), INTC_VECT(IRL4_LLLH, 0x920),
INTC_VECT(IRL4_LLHL, 0x940), INTC_VECT(IRL4_LLHH, 0x960),
INTC_VECT(IRL4_LHLL, 0x980), INTC_VECT(IRL4_LHLH, 0x9a0),
INTC_VECT(IRL4_LHHL, 0x9c0), INTC_VECT(IRL4_LHHH, 0x9e0),
INTC_VECT(IRL4_HLLL, 0xa00), INTC_VECT(IRL4_HLLH, 0xa20),
INTC_VECT(IRL4_HLHL, 0xa40), INTC_VECT(IRL4_HLHH, 0xa60),
INTC_VECT(IRL4_HHLL, 0xa80), INTC_VECT(IRL4_HHLH, 0xaa0),
INTC_VECT(IRL4_HHHL, 0xac0),
};
static DECLARE_INTC_DESC(intc_desc_irl0123, "sh7786-irl0123", vectors_irl0123,
NULL, sh7786_mask_registers, NULL, NULL);
static DECLARE_INTC_DESC(intc_desc_irl4567, "sh7786-irl4567", vectors_irl4567,
NULL, sh7786_mask_registers, NULL, NULL);
#define INTC_ICR0 0xfe410000
#define INTC_INTMSK0 CnINTMSK0
#define INTC_INTMSK1 CnINTMSK1
#define INTC_INTMSK2 INTMSK2
#define INTC_INTMSKCLR1 CnINTMSKCLR1
#define INTC_INTMSKCLR2 INTMSKCLR2
void __init plat_irq_setup(void)
{
/* disable IRQ3-0 + IRQ7-4 */
__raw_writel(0xff000000, INTC_INTMSK0);
/* disable IRL3-0 + IRL7-4 */
__raw_writel(0xc0000000, INTC_INTMSK1);
__raw_writel(0xfffefffe, INTC_INTMSK2);
/* select IRL mode for IRL3-0 + IRL7-4 */
__raw_writel(__raw_readl(INTC_ICR0) & ~0x00c00000, INTC_ICR0);
register_intc_controller(&sh7786_intc_desc);
}
void __init plat_irq_setup_pins(int mode)
{
switch (mode) {
case IRQ_MODE_IRQ7654:
/* select IRQ mode for IRL7-4 */
__raw_writel(__raw_readl(INTC_ICR0) | 0x00400000, INTC_ICR0);
register_intc_controller(&intc_desc_irq4567);
break;
case IRQ_MODE_IRQ3210:
/* select IRQ mode for IRL3-0 */
__raw_writel(__raw_readl(INTC_ICR0) | 0x00800000, INTC_ICR0);
register_intc_controller(&intc_desc_irq0123);
break;
case IRQ_MODE_IRL7654:
/* enable IRL7-4 but don't provide any masking */
__raw_writel(0x40000000, INTC_INTMSKCLR1);
__raw_writel(0x0000fffe, INTC_INTMSKCLR2);
break;
case IRQ_MODE_IRL3210:
/* enable IRL0-3 but don't provide any masking */
__raw_writel(0x80000000, INTC_INTMSKCLR1);
__raw_writel(0xfffe0000, INTC_INTMSKCLR2);
break;
case IRQ_MODE_IRL7654_MASK:
/* enable IRL7-4 and mask using cpu intc controller */
__raw_writel(0x40000000, INTC_INTMSKCLR1);
register_intc_controller(&intc_desc_irl4567);
break;
case IRQ_MODE_IRL3210_MASK:
/* enable IRL0-3 and mask using cpu intc controller */
__raw_writel(0x80000000, INTC_INTMSKCLR1);
register_intc_controller(&intc_desc_irl0123);
break;
default:
BUG();
}
}
void __init plat_mem_setup(void)
{
}
static int __init sh7786_devices_setup(void)
{
int ret, irq;
sh7786_usb_setup();
/*
* De-mux SCIF1 IRQs if possible
*/
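/*
 * The demuxed resources must stay in ERI, RXI, TXI, BRI order to
 * match the IRQ layout the sh-sci driver expects.
 */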
irq = intc_irq_lookup(sh7786_intc_desc.name, TXI1);
if (irq > 0) {
scif1_demux_resources[1].start =
intc_irq_lookup(sh7786_intc_desc.name, ERI1);
scif1_demux_resources[2].start =
intc_irq_lookup(sh7786_intc_desc.name, RXI1);
scif1_demux_resources[3].start = irq;
scif1_demux_resources[4].start =
intc_irq_lookup(sh7786_intc_desc.name, BRI1);
scif1_device.resource = scif1_demux_resources;
scif1_device.num_resources = ARRAY_SIZE(scif1_demux_resources);
}
ret = platform_add_devices(sh7786_early_devices,
ARRAY_SIZE(sh7786_early_devices));
if (unlikely(ret != 0))
return ret;
return platform_add_devices(sh7786_devices,
ARRAY_SIZE(sh7786_devices));
}
arch_initcall(sh7786_devices_setup);
void __init plat_early_device_setup(void)
{
sh_early_platform_add_devices(sh7786_early_devices,
ARRAY_SIZE(sh7786_early_devices));
}
| linux-master | arch/sh/kernel/cpu/sh4a/setup-sh7786.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/cpu/sh4a/clock-sh7763.c
*
* SH7763 support for the clock framework
*
* Copyright (C) 2005 Paul Mundt
* Copyright (C) 2007 Yoshihiro Shimoda
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/clkdev.h>
#include <asm/clock.h>
#include <asm/freq.h>
static int bfc_divisors[] = { 1, 1, 1, 8, 1, 1, 1, 1 };
static int p0fc_divisors[] = { 1, 1, 1, 8, 1, 1, 1, 1 };
static int cfc_divisors[] = { 1, 1, 4, 1, 1, 1, 1, 1 };
static void master_clk_init(struct clk *clk)
{
clk->rate *= p0fc_divisors[(__raw_readl(FRQCR) >> 4) & 0x07];
}
static struct sh_clk_ops sh7763_master_clk_ops = {
.init = master_clk_init,
};
static unsigned long module_clk_recalc(struct clk *clk)
{
int idx = ((__raw_readl(FRQCR) >> 4) & 0x07);
return clk->parent->rate / p0fc_divisors[idx];
}
static struct sh_clk_ops sh7763_module_clk_ops = {
.recalc = module_clk_recalc,
};
static unsigned long bus_clk_recalc(struct clk *clk)
{
int idx = ((__raw_readl(FRQCR) >> 16) & 0x07);
return clk->parent->rate / bfc_divisors[idx];
}
static struct sh_clk_ops sh7763_bus_clk_ops = {
.recalc = bus_clk_recalc,
};
static struct sh_clk_ops sh7763_cpu_clk_ops = {
.recalc = followparent_recalc,
};
static struct sh_clk_ops *sh7763_clk_ops[] = {
&sh7763_master_clk_ops,
&sh7763_module_clk_ops,
&sh7763_bus_clk_ops,
&sh7763_cpu_clk_ops,
};
void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
{
if (idx < ARRAY_SIZE(sh7763_clk_ops))
*ops = sh7763_clk_ops[idx];
}
static unsigned long shyway_clk_recalc(struct clk *clk)
{
int idx = ((__raw_readl(FRQCR) >> 20) & 0x07);
return clk->parent->rate / cfc_divisors[idx];
}
static struct sh_clk_ops sh7763_shyway_clk_ops = {
.recalc = shyway_clk_recalc,
};
static struct clk sh7763_shyway_clk = {
.flags = CLK_ENABLE_ON_INIT,
.ops = &sh7763_shyway_clk_ops,
};
/*
* Additional SH7763-specific on-chip clocks that aren't already part of the
* clock framework
*/
static struct clk *sh7763_onchip_clocks[] = {
&sh7763_shyway_clk,
};
static struct clk_lookup lookups[] = {
/* main clocks */
CLKDEV_CON_ID("shyway_clk", &sh7763_shyway_clk),
};
int __init arch_clk_init(void)
{
struct clk *clk;
int i, ret = 0;
cpg_clk_init();
clk = clk_get(NULL, "master_clk");
for (i = 0; i < ARRAY_SIZE(sh7763_onchip_clocks); i++) {
struct clk *clkp = sh7763_onchip_clocks[i];
clkp->parent = clk;
ret |= clk_register(clkp);
}
clk_put(clk);
clkdev_add_table(lookups, ARRAY_SIZE(lookups));
return ret;
}
| linux-master | arch/sh/kernel/cpu/sh4a/clock-sh7763.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/cpu/sh4a/clock-sh7723.c
*
* SH7723 clock framework support
*
* Copyright (C) 2009 Magnus Damm
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/sh_clk.h>
#include <asm/clock.h>
#include <cpu/sh7723.h>
/* SH7723 registers */
#define FRQCR 0xa4150000
#define VCLKCR 0xa4150004
#define SCLKACR 0xa4150008
#define SCLKBCR 0xa415000c
#define IRDACLKCR 0xa4150018
#define PLLCR 0xa4150024
#define MSTPCR0 0xa4150030
#define MSTPCR1 0xa4150034
#define MSTPCR2 0xa4150038
#define DLLFRQ 0xa4150050
/* Fixed 32 kHz root clock for RTC and power management purposes */
static struct clk r_clk = {
.rate = 32768,
};
/*
* Default rate for the root input clock, reset this with clk_set_rate()
* from the platform code.
*/
struct clk extal_clk = {
.rate = 33333333,
};
/* The DLL multiplies the 32 kHz r_clk and may be used instead of extal */
static unsigned long dll_recalc(struct clk *clk)
{
unsigned long mult;
if (__raw_readl(PLLCR) & 0x1000)
mult = __raw_readl(DLLFRQ);
else
mult = 0;
return clk->parent->rate * mult;
}
static struct sh_clk_ops dll_clk_ops = {
.recalc = dll_recalc,
};
static struct clk dll_clk = {
.ops = &dll_clk_ops,
.parent = &r_clk,
.flags = CLK_ENABLE_ON_INIT,
};
static unsigned long pll_recalc(struct clk *clk)
{
unsigned long mult = 1;
unsigned long div = 1;
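/*
 * When the PLL is engaged (PLLCR bit 14, assumed to be PLLE), the
 * multiplier comes from the FRQCR STC field plus one; otherwise the
 * input clock is simply halved.
 */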
if (__raw_readl(PLLCR) & 0x4000)
mult = (((__raw_readl(FRQCR) >> 24) & 0x1f) + 1);
else
div = 2;
return (clk->parent->rate * mult) / div;
}
static struct sh_clk_ops pll_clk_ops = {
.recalc = pll_recalc,
};
static struct clk pll_clk = {
.ops = &pll_clk_ops,
.flags = CLK_ENABLE_ON_INIT,
};
struct clk *main_clks[] = {
&r_clk,
&extal_clk,
&dll_clk,
&pll_clk,
};
static int multipliers[] = { 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
static int divisors[] = { 1, 3, 2, 5, 3, 4, 5, 6, 8, 10, 12, 16, 20 };
static struct clk_div_mult_table div4_div_mult_table = {
.divisors = divisors,
.nr_divisors = ARRAY_SIZE(divisors),
.multipliers = multipliers,
.nr_multipliers = ARRAY_SIZE(multipliers),
};
static struct clk_div4_table div4_table = {
.div_mult_table = &div4_div_mult_table,
};
enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_B3, DIV4_P, DIV4_NR };
#define DIV4(_reg, _bit, _mask, _flags) \
SH_CLK_DIV4(&pll_clk, _reg, _bit, _mask, _flags)
struct clk div4_clks[DIV4_NR] = {
[DIV4_I] = DIV4(FRQCR, 20, 0x0dbf, CLK_ENABLE_ON_INIT),
[DIV4_U] = DIV4(FRQCR, 16, 0x0dbf, CLK_ENABLE_ON_INIT),
[DIV4_SH] = DIV4(FRQCR, 12, 0x0dbf, CLK_ENABLE_ON_INIT),
[DIV4_B] = DIV4(FRQCR, 8, 0x0dbf, CLK_ENABLE_ON_INIT),
[DIV4_B3] = DIV4(FRQCR, 4, 0x0db4, CLK_ENABLE_ON_INIT),
[DIV4_P] = DIV4(FRQCR, 0, 0x0dbf, 0),
};
enum { DIV4_IRDA, DIV4_ENABLE_NR };
struct clk div4_enable_clks[DIV4_ENABLE_NR] = {
[DIV4_IRDA] = DIV4(IRDACLKCR, 0, 0x0dbf, 0),
};
enum { DIV4_SIUA, DIV4_SIUB, DIV4_REPARENT_NR };
struct clk div4_reparent_clks[DIV4_REPARENT_NR] = {
[DIV4_SIUA] = DIV4(SCLKACR, 0, 0x0dbf, 0),
[DIV4_SIUB] = DIV4(SCLKBCR, 0, 0x0dbf, 0),
};
enum { DIV6_V, DIV6_NR };
struct clk div6_clks[DIV6_NR] = {
[DIV6_V] = SH_CLK_DIV6(&pll_clk, VCLKCR, 0),
};
static struct clk mstp_clks[] = {
/* See page 60 of Datasheet V1.0: Overview -> Block Diagram */
[HWBLK_TLB] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 31, CLK_ENABLE_ON_INIT),
[HWBLK_IC] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 30, CLK_ENABLE_ON_INIT),
[HWBLK_OC] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 29, CLK_ENABLE_ON_INIT),
[HWBLK_L2C] = SH_CLK_MSTP32(&div4_clks[DIV4_SH], MSTPCR0, 28, CLK_ENABLE_ON_INIT),
[HWBLK_ILMEM] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 27, CLK_ENABLE_ON_INIT),
[HWBLK_FPU] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 24, CLK_ENABLE_ON_INIT),
[HWBLK_INTC] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 22, CLK_ENABLE_ON_INIT),
[HWBLK_DMAC0] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 21, 0),
[HWBLK_SHYWAY] = SH_CLK_MSTP32(&div4_clks[DIV4_SH], MSTPCR0, 20, CLK_ENABLE_ON_INIT),
[HWBLK_HUDI] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 19, 0),
[HWBLK_UBC] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 17, 0),
[HWBLK_TMU0] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 15, 0),
[HWBLK_CMT] = SH_CLK_MSTP32(&r_clk, MSTPCR0, 14, 0),
[HWBLK_RWDT] = SH_CLK_MSTP32(&r_clk, MSTPCR0, 13, 0),
[HWBLK_DMAC1] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 12, 0),
[HWBLK_TMU1] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 11, 0),
[HWBLK_FLCTL] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 10, 0),
[HWBLK_SCIF0] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 9, 0),
[HWBLK_SCIF1] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 8, 0),
[HWBLK_SCIF2] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 7, 0),
[HWBLK_SCIF3] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 6, 0),
[HWBLK_SCIF4] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 5, 0),
[HWBLK_SCIF5] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 4, 0),
[HWBLK_MSIOF0] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 2, 0),
[HWBLK_MSIOF1] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 1, 0),
[HWBLK_MERAM] = SH_CLK_MSTP32(&div4_clks[DIV4_SH], MSTPCR0, 0, 0),
[HWBLK_IIC] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 9, 0),
[HWBLK_RTC] = SH_CLK_MSTP32(&r_clk, MSTPCR1, 8, 0),
[HWBLK_ATAPI] = SH_CLK_MSTP32(&div4_clks[DIV4_SH], MSTPCR2, 28, 0),
[HWBLK_ADC] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR2, 27, 0),
[HWBLK_TPU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 25, 0),
[HWBLK_IRDA] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR2, 24, 0),
[HWBLK_TSIF] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 22, 0),
[HWBLK_ICB] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 21, CLK_ENABLE_ON_INIT),
[HWBLK_SDHI0] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 18, 0),
[HWBLK_SDHI1] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 17, 0),
[HWBLK_KEYSC] = SH_CLK_MSTP32(&r_clk, MSTPCR2, 14, 0),
[HWBLK_USB] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 11, 0),
[HWBLK_2DG] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 10, 0),
[HWBLK_SIU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 8, 0),
[HWBLK_VEU2H1] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 6, 0),
[HWBLK_VOU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 5, 0),
[HWBLK_BEU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 4, 0),
[HWBLK_CEU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 3, 0),
[HWBLK_VEU2H0] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 2, 0),
[HWBLK_VPU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 1, 0),
[HWBLK_LCDC] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 0, 0),
};
static struct clk_lookup lookups[] = {
/* main clocks */
CLKDEV_CON_ID("rclk", &r_clk),
CLKDEV_CON_ID("extal", &extal_clk),
CLKDEV_CON_ID("dll_clk", &dll_clk),
CLKDEV_CON_ID("pll_clk", &pll_clk),
/* DIV4 clocks */
CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
CLKDEV_CON_ID("umem_clk", &div4_clks[DIV4_U]),
CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]),
CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]),
CLKDEV_CON_ID("b3_clk", &div4_clks[DIV4_B3]),
CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
CLKDEV_CON_ID("irda_clk", &div4_enable_clks[DIV4_IRDA]),
CLKDEV_CON_ID("siua_clk", &div4_reparent_clks[DIV4_SIUA]),
CLKDEV_CON_ID("siub_clk", &div4_reparent_clks[DIV4_SIUB]),
/* DIV6 clocks */
CLKDEV_CON_ID("video_clk", &div6_clks[DIV6_V]),
/* MSTP clocks */
CLKDEV_CON_ID("tlb0", &mstp_clks[HWBLK_TLB]),
CLKDEV_CON_ID("ic0", &mstp_clks[HWBLK_IC]),
CLKDEV_CON_ID("oc0", &mstp_clks[HWBLK_OC]),
CLKDEV_CON_ID("l2c0", &mstp_clks[HWBLK_L2C]),
CLKDEV_CON_ID("ilmem0", &mstp_clks[HWBLK_ILMEM]),
CLKDEV_CON_ID("fpu0", &mstp_clks[HWBLK_FPU]),
CLKDEV_CON_ID("intc0", &mstp_clks[HWBLK_INTC]),
CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[HWBLK_DMAC0]),
CLKDEV_CON_ID("sh0", &mstp_clks[HWBLK_SHYWAY]),
CLKDEV_CON_ID("hudi0", &mstp_clks[HWBLK_HUDI]),
CLKDEV_CON_ID("ubc0", &mstp_clks[HWBLK_UBC]),
CLKDEV_ICK_ID("fck", "sh-cmt-32.0", &mstp_clks[HWBLK_CMT]),
CLKDEV_DEV_ID("sh-wdt.0", &mstp_clks[HWBLK_RWDT]),
CLKDEV_DEV_ID("sh-dma-engine.1", &mstp_clks[HWBLK_DMAC1]),
CLKDEV_CON_ID("flctl0", &mstp_clks[HWBLK_FLCTL]),
CLKDEV_DEV_ID("spi_sh_msiof.0", &mstp_clks[HWBLK_MSIOF0]),
CLKDEV_DEV_ID("spi_sh_msiof.1", &mstp_clks[HWBLK_MSIOF1]),
CLKDEV_DEV_ID("sh_mobile_meram.0", &mstp_clks[HWBLK_MERAM]),
CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[HWBLK_IIC]),
CLKDEV_CON_ID("rtc0", &mstp_clks[HWBLK_RTC]),
CLKDEV_CON_ID("atapi0", &mstp_clks[HWBLK_ATAPI]),
CLKDEV_CON_ID("adc0", &mstp_clks[HWBLK_ADC]),
CLKDEV_CON_ID("tpu0", &mstp_clks[HWBLK_TPU]),
CLKDEV_CON_ID("irda0", &mstp_clks[HWBLK_IRDA]),
CLKDEV_CON_ID("tsif0", &mstp_clks[HWBLK_TSIF]),
CLKDEV_CON_ID("icb0", &mstp_clks[HWBLK_ICB]),
CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[HWBLK_SDHI0]),
CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[HWBLK_SDHI1]),
CLKDEV_DEV_ID("sh_keysc.0", &mstp_clks[HWBLK_KEYSC]),
CLKDEV_CON_ID("usb0", &mstp_clks[HWBLK_USB]),
CLKDEV_CON_ID("2dg0", &mstp_clks[HWBLK_2DG]),
CLKDEV_DEV_ID("siu-pcm-audio", &mstp_clks[HWBLK_SIU]),
CLKDEV_CON_ID("veu1", &mstp_clks[HWBLK_VEU2H1]),
CLKDEV_DEV_ID("sh-vou.0", &mstp_clks[HWBLK_VOU]),
CLKDEV_CON_ID("beu0", &mstp_clks[HWBLK_BEU]),
CLKDEV_DEV_ID("ceu.0", &mstp_clks[HWBLK_CEU]),
CLKDEV_CON_ID("veu0", &mstp_clks[HWBLK_VEU2H0]),
CLKDEV_CON_ID("vpu0", &mstp_clks[HWBLK_VPU]),
CLKDEV_ICK_ID("fck", "sh-tmu.0", &mstp_clks[HWBLK_TMU0]),
CLKDEV_ICK_ID("fck", "sh-tmu.1", &mstp_clks[HWBLK_TMU1]),
CLKDEV_ICK_ID("fck", "sh-sci.0", &mstp_clks[HWBLK_SCIF0]),
CLKDEV_ICK_ID("fck", "sh-sci.1", &mstp_clks[HWBLK_SCIF1]),
CLKDEV_ICK_ID("fck", "sh-sci.2", &mstp_clks[HWBLK_SCIF2]),
CLKDEV_ICK_ID("fck", "sh-sci.3", &mstp_clks[HWBLK_SCIF3]),
CLKDEV_ICK_ID("fck", "sh-sci.4", &mstp_clks[HWBLK_SCIF4]),
CLKDEV_ICK_ID("fck", "sh-sci.5", &mstp_clks[HWBLK_SCIF5]),
CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[HWBLK_LCDC]),
};
int __init arch_clk_init(void)
{
int k, ret = 0;
/* autodetect extal or dll configuration */
if (__raw_readl(PLLCR) & 0x1000)
pll_clk.parent = &dll_clk;
else
pll_clk.parent = &extal_clk;
for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
ret |= clk_register(main_clks[k]);
clkdev_add_table(lookups, ARRAY_SIZE(lookups));
if (!ret)
ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);
if (!ret)
ret = sh_clk_div4_enable_register(div4_enable_clks,
DIV4_ENABLE_NR, &div4_table);
if (!ret)
ret = sh_clk_div4_reparent_register(div4_reparent_clks,
DIV4_REPARENT_NR, &div4_table);
if (!ret)
ret = sh_clk_div6_register(div6_clks, DIV6_NR);
if (!ret)
ret = sh_clk_mstp_register(mstp_clks, HWBLK_NR);
return ret;
}
| linux-master | arch/sh/kernel/cpu/sh4a/clock-sh7723.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/cpu/sh4a/clock-sh7780.c
*
* SH7780 support for the clock framework
*
* Copyright (C) 2005 Paul Mundt
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/clkdev.h>
#include <asm/clock.h>
#include <asm/freq.h>
static int ifc_divisors[] = { 2, 4 };
static int bfc_divisors[] = { 1, 1, 1, 8, 12, 16, 24, 1 };
static int pfc_divisors[] = { 1, 24, 24, 1 };
static int cfc_divisors[] = { 1, 1, 4, 1, 6, 1, 1, 1 };
static void master_clk_init(struct clk *clk)
{
clk->rate *= pfc_divisors[__raw_readl(FRQCR) & 0x0003];
}
static struct sh_clk_ops sh7780_master_clk_ops = {
.init = master_clk_init,
};
static unsigned long module_clk_recalc(struct clk *clk)
{
int idx = (__raw_readl(FRQCR) & 0x0003);
return clk->parent->rate / pfc_divisors[idx];
}
static struct sh_clk_ops sh7780_module_clk_ops = {
.recalc = module_clk_recalc,
};
static unsigned long bus_clk_recalc(struct clk *clk)
{
int idx = ((__raw_readl(FRQCR) >> 16) & 0x0007);
return clk->parent->rate / bfc_divisors[idx];
}
static struct sh_clk_ops sh7780_bus_clk_ops = {
.recalc = bus_clk_recalc,
};
static unsigned long cpu_clk_recalc(struct clk *clk)
{
int idx = ((__raw_readl(FRQCR) >> 24) & 0x0001);
return clk->parent->rate / ifc_divisors[idx];
}
static struct sh_clk_ops sh7780_cpu_clk_ops = {
.recalc = cpu_clk_recalc,
};
static struct sh_clk_ops *sh7780_clk_ops[] = {
&sh7780_master_clk_ops,
&sh7780_module_clk_ops,
&sh7780_bus_clk_ops,
&sh7780_cpu_clk_ops,
};
void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
{
if (idx < ARRAY_SIZE(sh7780_clk_ops))
*ops = sh7780_clk_ops[idx];
}
static unsigned long shyway_clk_recalc(struct clk *clk)
{
int idx = ((__raw_readl(FRQCR) >> 20) & 0x0007);
return clk->parent->rate / cfc_divisors[idx];
}
static struct sh_clk_ops sh7780_shyway_clk_ops = {
.recalc = shyway_clk_recalc,
};
static struct clk sh7780_shyway_clk = {
.flags = CLK_ENABLE_ON_INIT,
.ops = &sh7780_shyway_clk_ops,
};
/*
* Additional SH7780-specific on-chip clocks that aren't already part of the
* clock framework
*/
static struct clk *sh7780_onchip_clocks[] = {
&sh7780_shyway_clk,
};
static struct clk_lookup lookups[] = {
/* main clocks */
CLKDEV_CON_ID("shyway_clk", &sh7780_shyway_clk),
};
int __init arch_clk_init(void)
{
struct clk *clk;
int i, ret = 0;
cpg_clk_init();
clk = clk_get(NULL, "master_clk");
for (i = 0; i < ARRAY_SIZE(sh7780_onchip_clocks); i++) {
struct clk *clkp = sh7780_onchip_clocks[i];
clkp->parent = clk;
ret |= clk_register(clkp);
}
clk_put(clk);
clkdev_add_table(lookups, ARRAY_SIZE(lookups));
return ret;
}
| linux-master | arch/sh/kernel/cpu/sh4a/clock-sh7780.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SH7723 Pinmux
*
* Copyright (C) 2008 Magnus Damm
*/
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <cpu/pfc.h>
static struct resource sh7723_pfc_resources[] = {
[0] = {
.start = 0xa4050100,
.end = 0xa405016f,
.flags = IORESOURCE_MEM,
},
};
static int __init plat_pinmux_setup(void)
{
return sh_pfc_register("pfc-sh7723", sh7723_pfc_resources,
ARRAY_SIZE(sh7723_pfc_resources));
}
arch_initcall(plat_pinmux_setup);
| linux-master | arch/sh/kernel/cpu/sh4a/pinmux-sh7723.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SH7343 Setup
*
* Copyright (C) 2006 Paul Mundt
*/
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/serial_sci.h>
#include <linux/uio_driver.h>
#include <linux/sh_timer.h>
#include <linux/sh_intc.h>
#include <asm/clock.h>
#include <asm/platform_early.h>
/* Serial */
static struct plat_sci_port scif0_platform_data = {
.scscr = SCSCR_CKE1,
.type = PORT_SCIF,
};
static struct resource scif0_resources[] = {
DEFINE_RES_MEM(0xffe00000, 0x100),
DEFINE_RES_IRQ(evt2irq(0xc00)),
};
static struct platform_device scif0_device = {
.name = "sh-sci",
.id = 0,
.resource = scif0_resources,
.num_resources = ARRAY_SIZE(scif0_resources),
.dev = {
.platform_data = &scif0_platform_data,
},
};
static struct plat_sci_port scif1_platform_data = {
.scscr = SCSCR_CKE1,
.type = PORT_SCIF,
};
static struct resource scif1_resources[] = {
DEFINE_RES_MEM(0xffe10000, 0x100),
DEFINE_RES_IRQ(evt2irq(0xc20)),
};
static struct platform_device scif1_device = {
.name = "sh-sci",
.id = 1,
.resource = scif1_resources,
.num_resources = ARRAY_SIZE(scif1_resources),
.dev = {
.platform_data = &scif1_platform_data,
},
};
static struct plat_sci_port scif2_platform_data = {
.scscr = SCSCR_CKE1,
.type = PORT_SCIF,
};
static struct resource scif2_resources[] = {
DEFINE_RES_MEM(0xffe20000, 0x100),
DEFINE_RES_IRQ(evt2irq(0xc40)),
};
static struct platform_device scif2_device = {
.name = "sh-sci",
.id = 2,
.resource = scif2_resources,
.num_resources = ARRAY_SIZE(scif2_resources),
.dev = {
.platform_data = &scif2_platform_data,
},
};
static struct plat_sci_port scif3_platform_data = {
.scscr = SCSCR_CKE1,
.type = PORT_SCIF,
};
static struct resource scif3_resources[] = {
DEFINE_RES_MEM(0xffe30000, 0x100),
DEFINE_RES_IRQ(evt2irq(0xc60)),
};
static struct platform_device scif3_device = {
.name = "sh-sci",
.id = 3,
.resource = scif3_resources,
.num_resources = ARRAY_SIZE(scif3_resources),
.dev = {
.platform_data = &scif3_platform_data,
},
};
static struct resource iic0_resources[] = {
[0] = {
.name = "IIC0",
.start = 0x04470000,
.end = 0x04470017,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = evt2irq(0xe00),
.end = evt2irq(0xe60),
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device iic0_device = {
.name = "i2c-sh_mobile",
.id = 0, /* "i2c0" clock */
.num_resources = ARRAY_SIZE(iic0_resources),
.resource = iic0_resources,
};
static struct resource iic1_resources[] = {
[0] = {
.name = "IIC1",
.start = 0x04750000,
.end = 0x04750017,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = evt2irq(0x780),
.end = evt2irq(0x7e0),
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device iic1_device = {
.name = "i2c-sh_mobile",
.id = 1, /* "i2c1" clock */
.num_resources = ARRAY_SIZE(iic1_resources),
.resource = iic1_resources,
};
static struct uio_info vpu_platform_data = {
.name = "VPU4",
.version = "0",
.irq = evt2irq(0x980),
};
static struct resource vpu_resources[] = {
[0] = {
.name = "VPU",
.start = 0xfe900000,
.end = 0xfe9022eb,
.flags = IORESOURCE_MEM,
},
[1] = {
/* placeholder for contiguous memory */
},
};
static struct platform_device vpu_device = {
.name = "uio_pdrv_genirq",
.id = 0,
.dev = {
.platform_data = &vpu_platform_data,
},
.resource = vpu_resources,
.num_resources = ARRAY_SIZE(vpu_resources),
};
static struct uio_info veu_platform_data = {
.name = "VEU",
.version = "0",
.irq = evt2irq(0x8c0),
};
static struct resource veu_resources[] = {
[0] = {
.name = "VEU",
.start = 0xfe920000,
.end = 0xfe9200b7,
.flags = IORESOURCE_MEM,
},
[1] = {
/* placeholder for contiguous memory */
},
};
static struct platform_device veu_device = {
.name = "uio_pdrv_genirq",
.id = 1,
.dev = {
.platform_data = &veu_platform_data,
},
.resource = veu_resources,
.num_resources = ARRAY_SIZE(veu_resources),
};
static struct uio_info jpu_platform_data = {
.name = "JPU",
.version = "0",
.irq = evt2irq(0x560),
};
static struct resource jpu_resources[] = {
[0] = {
.name = "JPU",
.start = 0xfea00000,
.end = 0xfea102d3,
.flags = IORESOURCE_MEM,
},
[1] = {
/* placeholder for contiguous memory */
},
};
static struct platform_device jpu_device = {
.name = "uio_pdrv_genirq",
.id = 2,
.dev = {
.platform_data = &jpu_platform_data,
},
.resource = jpu_resources,
.num_resources = ARRAY_SIZE(jpu_resources),
};
static struct sh_timer_config cmt_platform_data = {
.channels_mask = 0x20,
};
static struct resource cmt_resources[] = {
DEFINE_RES_MEM(0x044a0000, 0x70),
DEFINE_RES_IRQ(evt2irq(0xf00)),
};
static struct platform_device cmt_device = {
.name = "sh-cmt-32",
.id = 0,
.dev = {
.platform_data = &cmt_platform_data,
},
.resource = cmt_resources,
.num_resources = ARRAY_SIZE(cmt_resources),
};
static struct sh_timer_config tmu0_platform_data = {
.channels_mask = 7,
};
static struct resource tmu0_resources[] = {
DEFINE_RES_MEM(0xffd80000, 0x2c),
DEFINE_RES_IRQ(evt2irq(0x400)),
DEFINE_RES_IRQ(evt2irq(0x420)),
DEFINE_RES_IRQ(evt2irq(0x440)),
};
static struct platform_device tmu0_device = {
.name = "sh-tmu",
.id = 0,
.dev = {
.platform_data = &tmu0_platform_data,
},
.resource = tmu0_resources,
.num_resources = ARRAY_SIZE(tmu0_resources),
};
static struct platform_device *sh7343_devices[] __initdata = {
&scif0_device,
&scif1_device,
&scif2_device,
&scif3_device,
&cmt_device,
&tmu0_device,
&iic0_device,
&iic1_device,
&vpu_device,
&veu_device,
&jpu_device,
};
static int __init sh7343_devices_setup(void)
{
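/* Back the UIO placeholder resources with contiguous memory */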
platform_resource_setup_memory(&vpu_device, "vpu", 1 << 20);
platform_resource_setup_memory(&veu_device, "veu", 2 << 20);
platform_resource_setup_memory(&jpu_device, "jpu", 2 << 20);
return platform_add_devices(sh7343_devices,
ARRAY_SIZE(sh7343_devices));
}
arch_initcall(sh7343_devices_setup);
static struct platform_device *sh7343_early_devices[] __initdata = {
&scif0_device,
&scif1_device,
&scif2_device,
&scif3_device,
&cmt_device,
&tmu0_device,
};
void __init plat_early_device_setup(void)
{
sh_early_platform_add_devices(sh7343_early_devices,
ARRAY_SIZE(sh7343_early_devices));
}
enum {
UNUSED = 0,
ENABLED,
DISABLED,
/* interrupt sources */
IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
DMAC0, DMAC1, DMAC2, DMAC3,
VIO_CEUI, VIO_BEUI, VIO_VEUI, VOU,
MFI, VPU, TPU, Z3D4, USBI0, USBI1,
MMC_ERR, MMC_TRAN, MMC_FSTAT, MMC_FRDY,
DMAC4, DMAC5, DMAC_DADERR,
KEYSC,
SCIF, SCIF1, SCIF2, SCIF3,
SIOF0, SIOF1, SIO,
FLCTL_FLSTEI, FLCTL_FLENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I,
I2C0_ALI, I2C0_TACKI, I2C0_WAITI, I2C0_DTEI,
I2C1_ALI, I2C1_TACKI, I2C1_WAITI, I2C1_DTEI,
SIM_TEI, SIM_TXI, SIM_RXI, SIM_ERI,
IRDA, SDHI, CMT, TSIF, SIU,
TMU0, TMU1, TMU2,
JPU, LCDC,
/* interrupt groups */
DMAC0123, VIOVOU, MMC, DMAC45, FLCTL, I2C0, I2C1, SIM, USB,
};
static struct intc_vect vectors[] __initdata = {
INTC_VECT(IRQ0, 0x600), INTC_VECT(IRQ1, 0x620),
INTC_VECT(IRQ2, 0x640), INTC_VECT(IRQ3, 0x660),
INTC_VECT(IRQ4, 0x680), INTC_VECT(IRQ5, 0x6a0),
INTC_VECT(IRQ6, 0x6c0), INTC_VECT(IRQ7, 0x6e0),
INTC_VECT(I2C1_ALI, 0x780), INTC_VECT(I2C1_TACKI, 0x7a0),
INTC_VECT(I2C1_WAITI, 0x7c0), INTC_VECT(I2C1_DTEI, 0x7e0),
INTC_VECT(DMAC0, 0x800), INTC_VECT(DMAC1, 0x820),
INTC_VECT(DMAC2, 0x840), INTC_VECT(DMAC3, 0x860),
INTC_VECT(VIO_CEUI, 0x880), INTC_VECT(VIO_BEUI, 0x8a0),
INTC_VECT(VIO_VEUI, 0x8c0), INTC_VECT(VOU, 0x8e0),
INTC_VECT(MFI, 0x900), INTC_VECT(VPU, 0x980),
INTC_VECT(TPU, 0x9a0), INTC_VECT(Z3D4, 0x9e0),
INTC_VECT(USBI0, 0xa20), INTC_VECT(USBI1, 0xa40),
INTC_VECT(MMC_ERR, 0xb00), INTC_VECT(MMC_TRAN, 0xb20),
INTC_VECT(MMC_FSTAT, 0xb40), INTC_VECT(MMC_FRDY, 0xb60),
INTC_VECT(DMAC4, 0xb80), INTC_VECT(DMAC5, 0xba0),
INTC_VECT(DMAC_DADERR, 0xbc0), INTC_VECT(KEYSC, 0xbe0),
INTC_VECT(SCIF, 0xc00), INTC_VECT(SCIF1, 0xc20),
INTC_VECT(SCIF2, 0xc40), INTC_VECT(SCIF3, 0xc60),
INTC_VECT(SIOF0, 0xc80), INTC_VECT(SIOF1, 0xca0),
INTC_VECT(SIO, 0xd00),
INTC_VECT(FLCTL_FLSTEI, 0xd80), INTC_VECT(FLCTL_FLENDI, 0xda0),
INTC_VECT(FLCTL_FLTREQ0I, 0xdc0), INTC_VECT(FLCTL_FLTREQ1I, 0xde0),
INTC_VECT(I2C0_ALI, 0xe00), INTC_VECT(I2C0_TACKI, 0xe20),
INTC_VECT(I2C0_WAITI, 0xe40), INTC_VECT(I2C0_DTEI, 0xe60),
INTC_VECT(SDHI, 0xe80), INTC_VECT(SDHI, 0xea0),
INTC_VECT(SDHI, 0xec0), INTC_VECT(SDHI, 0xee0),
INTC_VECT(CMT, 0xf00), INTC_VECT(TSIF, 0xf20),
INTC_VECT(SIU, 0xf80),
INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
INTC_VECT(TMU2, 0x440),
INTC_VECT(JPU, 0x560), INTC_VECT(LCDC, 0x580),
};
static struct intc_group groups[] __initdata = {
INTC_GROUP(DMAC0123, DMAC0, DMAC1, DMAC2, DMAC3),
INTC_GROUP(VIOVOU, VIO_CEUI, VIO_BEUI, VIO_VEUI, VOU),
INTC_GROUP(MMC, MMC_FRDY, MMC_FSTAT, MMC_TRAN, MMC_ERR),
INTC_GROUP(DMAC45, DMAC4, DMAC5, DMAC_DADERR),
INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLENDI,
FLCTL_FLTREQ0I, FLCTL_FLTREQ1I),
INTC_GROUP(I2C0, I2C0_ALI, I2C0_TACKI, I2C0_WAITI, I2C0_DTEI),
INTC_GROUP(I2C1, I2C1_ALI, I2C1_TACKI, I2C1_WAITI, I2C1_DTEI),
INTC_GROUP(SIM, SIM_TEI, SIM_TXI, SIM_RXI, SIM_ERI),
INTC_GROUP(USB, USBI0, USBI1),
};
static struct intc_mask_reg mask_registers[] __initdata = {
{ 0xa4080084, 0xa40800c4, 8, /* IMR1 / IMCR1 */
{ VOU, VIO_VEUI, VIO_BEUI, VIO_CEUI, DMAC3, DMAC2, DMAC1, DMAC0 } },
{ 0xa4080088, 0xa40800c8, 8, /* IMR2 / IMCR2 */
{ 0, 0, 0, VPU, 0, 0, 0, MFI } },
{ 0xa408008c, 0xa40800cc, 8, /* IMR3 / IMCR3 */
{ SIM_TEI, SIM_TXI, SIM_RXI, SIM_ERI, 0, 0, 0, IRDA } },
{ 0xa4080090, 0xa40800d0, 8, /* IMR4 / IMCR4 */
{ 0, TMU2, TMU1, TMU0, JPU, 0, 0, LCDC } },
{ 0xa4080094, 0xa40800d4, 8, /* IMR5 / IMCR5 */
{ KEYSC, DMAC_DADERR, DMAC5, DMAC4, SCIF3, SCIF2, SCIF1, SCIF } },
{ 0xa4080098, 0xa40800d8, 8, /* IMR6 / IMCR6 */
{ 0, 0, 0, SIO, Z3D4, 0, SIOF1, SIOF0 } },
{ 0xa408009c, 0xa40800dc, 8, /* IMR7 / IMCR7 */
{ I2C0_DTEI, I2C0_WAITI, I2C0_TACKI, I2C0_ALI,
FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLENDI, FLCTL_FLSTEI } },
{ 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */
{ DISABLED, ENABLED, ENABLED, ENABLED, 0, 0, 0, SIU } },
{ 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */
{ 0, 0, 0, CMT, 0, USBI1, USBI0 } },
{ 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */
{ MMC_FRDY, MMC_FSTAT, MMC_TRAN, MMC_ERR } },
{ 0xa40800ac, 0xa40800ec, 8, /* IMR11 / IMCR11 */
{ I2C1_DTEI, I2C1_WAITI, I2C1_TACKI, I2C1_ALI, TPU, 0, 0, TSIF } },
{ 0xa4140044, 0xa4140064, 8, /* INTMSK00 / INTMSKCLR00 */
{ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
};
static struct intc_prio_reg prio_registers[] __initdata = {
{ 0xa4080000, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2 } },
{ 0xa4080004, 0, 16, 4, /* IPRB */ { JPU, LCDC, SIM } },
{ 0xa4080010, 0, 16, 4, /* IPRE */ { DMAC0123, VIOVOU, MFI, VPU } },
{ 0xa4080014, 0, 16, 4, /* IPRF */ { KEYSC, DMAC45, USB, CMT } },
{ 0xa4080018, 0, 16, 4, /* IPRG */ { SCIF, SCIF1, SCIF2, SCIF3 } },
{ 0xa408001c, 0, 16, 4, /* IPRH */ { SIOF0, SIOF1, FLCTL, I2C0 } },
{ 0xa4080020, 0, 16, 4, /* IPRI */ { SIO, 0, TSIF, I2C1 } },
{ 0xa4080024, 0, 16, 4, /* IPRJ */ { Z3D4, 0, SIU } },
{ 0xa4080028, 0, 16, 4, /* IPRK */ { 0, MMC, 0, SDHI } },
{ 0xa408002c, 0, 16, 4, /* IPRL */ { 0, 0, TPU } },
{ 0xa4140010, 0, 32, 4, /* INTPRI00 */
{ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
};
static struct intc_sense_reg sense_registers[] __initdata = {
{ 0xa414001c, 16, 2, /* ICR1 */
{ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
};
static struct intc_mask_reg ack_registers[] __initdata = {
{ 0xa4140024, 0, 8, /* INTREQ00 */
{ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
};
static struct intc_desc intc_desc __initdata = {
.name = "sh7343",
.force_enable = ENABLED,
.force_disable = DISABLED,
.hw = INTC_HW_DESC(vectors, groups, mask_registers,
prio_registers, sense_registers, ack_registers),
};
void __init plat_irq_setup(void)
{
register_intc_controller(&intc_desc);
}
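/*
 * A quick sanity check on the vector numbers above, assuming the
 * generic SH INTC mapping from <linux/sh_intc.h>, where
 * evt2irq(evt) == ((evt) >> 5) - 16:
 *
 *	evt2irq(0x400) == (0x400 >> 5) - 16 == 32 - 16 == 16	(TMU0)
 *	evt2irq(0xc00) == (0xc00 >> 5) - 16 == 96 - 16 == 80	(SCIF)
 *
 * This is the same conversion the peripheral resource tables use via
 * DEFINE_RES_IRQ(evt2irq(...)).
 */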
| linux-master | arch/sh/kernel/cpu/sh4a/setup-sh7343.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SH7366 Setup
*
* Copyright (C) 2008 Renesas Solutions
*
* Based on linux/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
*/
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/serial_sci.h>
#include <linux/uio_driver.h>
#include <linux/sh_timer.h>
#include <linux/sh_intc.h>
#include <linux/usb/r8a66597.h>
#include <asm/clock.h>
#include <asm/platform_early.h>
static struct plat_sci_port scif0_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
};
static struct resource scif0_resources[] = {
DEFINE_RES_MEM(0xffe00000, 0x100),
DEFINE_RES_IRQ(evt2irq(0xc00)),
};
static struct platform_device scif0_device = {
.name = "sh-sci",
.id = 0,
.resource = scif0_resources,
.num_resources = ARRAY_SIZE(scif0_resources),
.dev = {
.platform_data = &scif0_platform_data,
},
};
static struct resource iic_resources[] = {
[0] = {
.name = "IIC",
.start = 0x04470000,
.end = 0x04470017,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = evt2irq(0xe00),
.end = evt2irq(0xe60),
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device iic_device = {
.name = "i2c-sh_mobile",
.id = 0, /* "i2c0" clock */
.num_resources = ARRAY_SIZE(iic_resources),
.resource = iic_resources,
};
static struct r8a66597_platdata r8a66597_data = {
.on_chip = 1,
};
static struct resource usb_host_resources[] = {
[0] = {
.start = 0xa4d80000,
.end = 0xa4d800ff,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = evt2irq(0xa20),
.end = evt2irq(0xa20),
.flags = IORESOURCE_IRQ | IRQF_TRIGGER_LOW,
},
};
static struct platform_device usb_host_device = {
.name = "r8a66597_hcd",
.id = -1,
.dev = {
.dma_mask = NULL,
.coherent_dma_mask = 0xffffffff,
.platform_data = &r8a66597_data,
},
.num_resources = ARRAY_SIZE(usb_host_resources),
.resource = usb_host_resources,
};
static struct uio_info vpu_platform_data = {
.name = "VPU5",
.version = "0",
.irq = evt2irq(0x980),
};
static struct resource vpu_resources[] = {
[0] = {
.name = "VPU",
.start = 0xfe900000,
.end = 0xfe902807,
.flags = IORESOURCE_MEM,
},
[1] = {
/* placeholder for contiguous memory */
},
};
static struct platform_device vpu_device = {
.name = "uio_pdrv_genirq",
.id = 0,
.dev = {
.platform_data = &vpu_platform_data,
},
.resource = vpu_resources,
.num_resources = ARRAY_SIZE(vpu_resources),
};
static struct uio_info veu0_platform_data = {
.name = "VEU",
.version = "0",
.irq = evt2irq(0x8c0),
};
static struct resource veu0_resources[] = {
[0] = {
.name = "VEU(1)",
.start = 0xfe920000,
.end = 0xfe9200b7,
.flags = IORESOURCE_MEM,
},
[1] = {
/* placeholder for contiguous memory */
},
};
static struct platform_device veu0_device = {
.name = "uio_pdrv_genirq",
.id = 1,
.dev = {
.platform_data = &veu0_platform_data,
},
.resource = veu0_resources,
.num_resources = ARRAY_SIZE(veu0_resources),
};
static struct uio_info veu1_platform_data = {
.name = "VEU",
.version = "0",
.irq = evt2irq(0x560),
};
static struct resource veu1_resources[] = {
[0] = {
.name = "VEU(2)",
.start = 0xfe924000,
.end = 0xfe9240b7,
.flags = IORESOURCE_MEM,
},
[1] = {
/* placeholder for contiguous memory */
},
};
static struct platform_device veu1_device = {
.name = "uio_pdrv_genirq",
.id = 2,
.dev = {
.platform_data = &veu1_platform_data,
},
.resource = veu1_resources,
.num_resources = ARRAY_SIZE(veu1_resources),
};
static struct sh_timer_config cmt_platform_data = {
.channels_mask = 0x20,
};
static struct resource cmt_resources[] = {
DEFINE_RES_MEM(0x044a0000, 0x70),
DEFINE_RES_IRQ(evt2irq(0xf00)),
};
static struct platform_device cmt_device = {
.name = "sh-cmt-32",
.id = 0,
.dev = {
.platform_data = &cmt_platform_data,
},
.resource = cmt_resources,
.num_resources = ARRAY_SIZE(cmt_resources),
};
static struct sh_timer_config tmu0_platform_data = {
.channels_mask = 7,
};
static struct resource tmu0_resources[] = {
DEFINE_RES_MEM(0xffd80000, 0x2c),
DEFINE_RES_IRQ(evt2irq(0x400)),
DEFINE_RES_IRQ(evt2irq(0x420)),
DEFINE_RES_IRQ(evt2irq(0x440)),
};
static struct platform_device tmu0_device = {
.name = "sh-tmu",
.id = 0,
.dev = {
.platform_data = &tmu0_platform_data,
},
.resource = tmu0_resources,
.num_resources = ARRAY_SIZE(tmu0_resources),
};
static struct platform_device *sh7366_devices[] __initdata = {
&scif0_device,
&cmt_device,
&tmu0_device,
&iic_device,
&usb_host_device,
&vpu_device,
&veu0_device,
&veu1_device,
};
static int __init sh7366_devices_setup(void)
{
platform_resource_setup_memory(&vpu_device, "vpu", 2 << 20);
platform_resource_setup_memory(&veu0_device, "veu0", 2 << 20);
platform_resource_setup_memory(&veu1_device, "veu1", 2 << 20);
return platform_add_devices(sh7366_devices,
ARRAY_SIZE(sh7366_devices));
}
arch_initcall(sh7366_devices_setup);
static struct platform_device *sh7366_early_devices[] __initdata = {
&scif0_device,
&cmt_device,
&tmu0_device,
};
void __init plat_early_device_setup(void)
{
sh_early_platform_add_devices(sh7366_early_devices,
ARRAY_SIZE(sh7366_early_devices));
}
enum {
UNUSED = 0,
ENABLED,
DISABLED,
/* interrupt sources */
IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
ICB,
DMAC0, DMAC1, DMAC2, DMAC3,
VIO_CEUI, VIO_BEUI, VIO_VEUI, VOU,
MFI, VPU, USB,
MMC_MMC1I, MMC_MMC2I, MMC_MMC3I,
DMAC4, DMAC5, DMAC_DADERR,
SCIF, SCIFA1, SCIFA2,
DENC, MSIOF,
FLCTL_FLSTEI, FLCTL_FLENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I,
I2C_ALI, I2C_TACKI, I2C_WAITI, I2C_DTEI,
SDHI, CMT, TSIF, SIU,
TMU0, TMU1, TMU2,
VEU2, LCDC,
/* interrupt groups */
DMAC0123, VIOVOU, MMC, DMAC45, FLCTL, I2C,
};
static struct intc_vect vectors[] __initdata = {
INTC_VECT(IRQ0, 0x600), INTC_VECT(IRQ1, 0x620),
INTC_VECT(IRQ2, 0x640), INTC_VECT(IRQ3, 0x660),
INTC_VECT(IRQ4, 0x680), INTC_VECT(IRQ5, 0x6a0),
INTC_VECT(IRQ6, 0x6c0), INTC_VECT(IRQ7, 0x6e0),
INTC_VECT(ICB, 0x700),
INTC_VECT(DMAC0, 0x800), INTC_VECT(DMAC1, 0x820),
INTC_VECT(DMAC2, 0x840), INTC_VECT(DMAC3, 0x860),
INTC_VECT(VIO_CEUI, 0x880), INTC_VECT(VIO_BEUI, 0x8a0),
INTC_VECT(VIO_VEUI, 0x8c0), INTC_VECT(VOU, 0x8e0),
INTC_VECT(MFI, 0x900), INTC_VECT(VPU, 0x980), INTC_VECT(USB, 0xa20),
INTC_VECT(MMC_MMC1I, 0xb00), INTC_VECT(MMC_MMC2I, 0xb20),
INTC_VECT(MMC_MMC3I, 0xb40),
INTC_VECT(DMAC4, 0xb80), INTC_VECT(DMAC5, 0xba0),
INTC_VECT(DMAC_DADERR, 0xbc0),
INTC_VECT(SCIF, 0xc00), INTC_VECT(SCIFA1, 0xc20),
INTC_VECT(SCIFA2, 0xc40),
INTC_VECT(DENC, 0xc60), INTC_VECT(MSIOF, 0xc80),
INTC_VECT(FLCTL_FLSTEI, 0xd80), INTC_VECT(FLCTL_FLENDI, 0xda0),
INTC_VECT(FLCTL_FLTREQ0I, 0xdc0), INTC_VECT(FLCTL_FLTREQ1I, 0xde0),
INTC_VECT(I2C_ALI, 0xe00), INTC_VECT(I2C_TACKI, 0xe20),
INTC_VECT(I2C_WAITI, 0xe40), INTC_VECT(I2C_DTEI, 0xe60),
INTC_VECT(SDHI, 0xe80), INTC_VECT(SDHI, 0xea0),
INTC_VECT(SDHI, 0xec0), INTC_VECT(SDHI, 0xee0),
INTC_VECT(CMT, 0xf00), INTC_VECT(TSIF, 0xf20),
INTC_VECT(SIU, 0xf80),
INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
INTC_VECT(TMU2, 0x440),
INTC_VECT(VEU2, 0x560), INTC_VECT(LCDC, 0x580),
};
static struct intc_group groups[] __initdata = {
INTC_GROUP(DMAC0123, DMAC0, DMAC1, DMAC2, DMAC3),
INTC_GROUP(VIOVOU, VIO_CEUI, VIO_BEUI, VIO_VEUI, VOU),
INTC_GROUP(MMC, MMC_MMC1I, MMC_MMC2I, MMC_MMC3I),
INTC_GROUP(DMAC45, DMAC4, DMAC5, DMAC_DADERR),
INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLENDI,
FLCTL_FLTREQ0I, FLCTL_FLTREQ1I),
INTC_GROUP(I2C, I2C_ALI, I2C_TACKI, I2C_WAITI, I2C_DTEI),
};
static struct intc_mask_reg mask_registers[] __initdata = {
{ 0xa4080080, 0xa40800c0, 8, /* IMR0 / IMCR0 */
{ } },
{ 0xa4080084, 0xa40800c4, 8, /* IMR1 / IMCR1 */
{ VOU, VIO_VEUI, VIO_BEUI, VIO_CEUI, DMAC3, DMAC2, DMAC1, DMAC0 } },
{ 0xa4080088, 0xa40800c8, 8, /* IMR2 / IMCR2 */
{ 0, 0, 0, VPU, 0, 0, 0, MFI } },
{ 0xa408008c, 0xa40800cc, 8, /* IMR3 / IMCR3 */
{ 0, 0, 0, ICB } },
{ 0xa4080090, 0xa40800d0, 8, /* IMR4 / IMCR4 */
{ 0, TMU2, TMU1, TMU0, VEU2, 0, 0, LCDC } },
{ 0xa4080094, 0xa40800d4, 8, /* IMR5 / IMCR5 */
{ 0, DMAC_DADERR, DMAC5, DMAC4, DENC, SCIFA2, SCIFA1, SCIF } },
{ 0xa4080098, 0xa40800d8, 8, /* IMR6 / IMCR6 */
{ 0, 0, 0, 0, 0, 0, 0, MSIOF } },
{ 0xa408009c, 0xa40800dc, 8, /* IMR7 / IMCR7 */
{ I2C_DTEI, I2C_WAITI, I2C_TACKI, I2C_ALI,
FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLENDI, FLCTL_FLSTEI } },
{ 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */
{ DISABLED, ENABLED, ENABLED, ENABLED, 0, 0, 0, SIU } },
{ 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */
{ 0, 0, 0, CMT, 0, USB, } },
{ 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */
{ 0, MMC_MMC3I, MMC_MMC2I, MMC_MMC1I } },
{ 0xa40800ac, 0xa40800ec, 8, /* IMR11 / IMCR11 */
{ 0, 0, 0, 0, 0, 0, 0, TSIF } },
{ 0xa4140044, 0xa4140064, 8, /* INTMSK00 / INTMSKCLR00 */
{ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
};
static struct intc_prio_reg prio_registers[] __initdata = {
{ 0xa4080000, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2 } },
{ 0xa4080004, 0, 16, 4, /* IPRB */ { VEU2, LCDC, ICB } },
{ 0xa4080008, 0, 16, 4, /* IPRC */ { } },
{ 0xa408000c, 0, 16, 4, /* IPRD */ { } },
{ 0xa4080010, 0, 16, 4, /* IPRE */ { DMAC0123, VIOVOU, MFI, VPU } },
{ 0xa4080014, 0, 16, 4, /* IPRF */ { 0, DMAC45, USB, CMT } },
{ 0xa4080018, 0, 16, 4, /* IPRG */ { SCIF, SCIFA1, SCIFA2, DENC } },
{ 0xa408001c, 0, 16, 4, /* IPRH */ { MSIOF, 0, FLCTL, I2C } },
{ 0xa4080020, 0, 16, 4, /* IPRI */ { 0, 0, TSIF, } },
{ 0xa4080024, 0, 16, 4, /* IPRJ */ { 0, 0, SIU } },
{ 0xa4080028, 0, 16, 4, /* IPRK */ { 0, MMC, 0, SDHI } },
{ 0xa408002c, 0, 16, 4, /* IPRL */ { } },
{ 0xa4140010, 0, 32, 4, /* INTPRI00 */
{ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
};
static struct intc_sense_reg sense_registers[] __initdata = {
{ 0xa414001c, 16, 2, /* ICR1 */
{ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
};
static struct intc_mask_reg ack_registers[] __initdata = {
{ 0xa4140024, 0, 8, /* INTREQ00 */
{ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
};
static struct intc_desc intc_desc __initdata = {
.name = "sh7366",
.force_enable = ENABLED,
.force_disable = DISABLED,
.hw = INTC_HW_DESC(vectors, groups, mask_registers,
prio_registers, sense_registers, ack_registers),
};
void __init plat_irq_setup(void)
{
register_intc_controller(&intc_desc);
}
void __init plat_mem_setup(void)
{
/* TODO: Register Node 1 */
}
| linux-master | arch/sh/kernel/cpu/sh4a/setup-sh7366.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Shared support for SH-X3 interrupt controllers.
*
* Copyright (C) 2009 - 2010 Paul Mundt
*/
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/init.h>
#define INTACK 0xfe4100b8
#define INTACKCLR 0xfe4100bc
#define INTC_USERIMASK 0xfe411000
#ifdef CONFIG_INTC_BALANCING
unsigned int irq_lookup(unsigned int irq)
{
return __raw_readl(INTACK) & 1 ? irq : NO_IRQ_IGNORE;
}
void irq_finish(unsigned int irq)
{
__raw_writel(irq2evt(irq), INTACKCLR);
}
#endif
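/*
 * With CONFIG_INTC_BALANCING enabled, the core IRQ entry path calls
 * irq_lookup()/irq_finish() around each handler: irq_lookup() peeks at
 * INTACK to see whether this CPU actually won the distributed
 * interrupt (anything else yields NO_IRQ_IGNORE and the CPU backs
 * off), while irq_finish() writes the event code back to INTACKCLR to
 * re-arm distribution. Assuming the generic <linux/sh_intc.h>
 * definitions, irq2evt() is simply the inverse of evt2irq():
 *
 *	irq2evt(irq) == ((irq) + 16) << 5
 */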
static int __init shx3_irq_setup(void)
{
return register_intc_userimask(INTC_USERIMASK);
}
arch_initcall(shx3_irq_setup);
| linux-master | arch/sh/kernel/cpu/sh4a/intc-shx3.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/cpu/sh4a/clock-sh7734.c
*
* Clock framework for SH7734
*
* Copyright (C) 2011, 2012 Nobuhiro Iwamatsu <[email protected]>
* Copyright (C) 2011, 2012 Renesas Solutions Corp.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/clkdev.h>
#include <linux/delay.h>
#include <asm/clock.h>
#include <asm/freq.h>
static struct clk extal_clk = {
.rate = 33333333,
};
#define MODEMR (0xFFCC0020)
#define MODEMR_MASK (0x6)
#define MODEMR_533MHZ (0x2)
static unsigned long pll_recalc(struct clk *clk)
{
int mode = 12;
u32 r = __raw_readl(MODEMR);
if ((r & MODEMR_MASK) & MODEMR_533MHZ)
mode = 16;
return clk->parent->rate * mode;
}
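/*
 * Worked example: with the 33.33 MHz EXTAL above, the default x12
 * multiplier yields roughly 400 MHz, while the 533 MHz clock mode
 * (MODEMR bit 1 set) selects x16:
 *
 *	33333333 * 12 = 399999996 Hz (~400 MHz)
 *	33333333 * 16 = 533333328 Hz (~533 MHz)
 */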
static struct sh_clk_ops pll_clk_ops = {
.recalc = pll_recalc,
};
static struct clk pll_clk = {
.ops = &pll_clk_ops,
.parent = &extal_clk,
.flags = CLK_ENABLE_ON_INIT,
};
static struct clk *main_clks[] = {
&extal_clk,
&pll_clk,
};
static int multipliers[] = { 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
static int divisors[] = { 1, 3, 2, 3, 4, 6, 8, 9, 12, 16, 18, 24 };
static struct clk_div_mult_table div4_div_mult_table = {
.divisors = divisors,
.nr_divisors = ARRAY_SIZE(divisors),
.multipliers = multipliers,
.nr_multipliers = ARRAY_SIZE(multipliers),
};
static struct clk_div4_table div4_table = {
.div_mult_table = &div4_div_mult_table,
};
enum { DIV4_I, DIV4_S, DIV4_B, DIV4_M, DIV4_S1, DIV4_P, DIV4_NR };
#define DIV4(_reg, _bit, _mask, _flags) \
SH_CLK_DIV4(&pll_clk, _reg, _bit, _mask, _flags)
struct clk div4_clks[DIV4_NR] = {
[DIV4_I] = DIV4(FRQMR1, 28, 0x0003, CLK_ENABLE_ON_INIT),
[DIV4_S] = DIV4(FRQMR1, 20, 0x000C, CLK_ENABLE_ON_INIT),
[DIV4_B] = DIV4(FRQMR1, 16, 0x0140, CLK_ENABLE_ON_INIT),
[DIV4_M] = DIV4(FRQMR1, 12, 0x0004, CLK_ENABLE_ON_INIT),
[DIV4_S1] = DIV4(FRQMR1, 4, 0x0030, CLK_ENABLE_ON_INIT),
[DIV4_P] = DIV4(FRQMR1, 0, 0x0140, CLK_ENABLE_ON_INIT),
};
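/*
 * In each SH_CLK_DIV4() entry above, the third argument is a bitmap of
 * which indices of the divisor/multiplier tables are valid for that
 * clock; assuming the usual drivers/sh/clk semantics, bit n set means
 * index n of the table is a legal ratio. So DIV4_I (mask 0x0003)
 * permits only table indices 0 and 1, while DIV4_P (mask 0x0140)
 * permits indices 6 and 8.
 */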
#define MSTPCR0 0xFFC80030
#define MSTPCR1 0xFFC80034
#define MSTPCR3 0xFFC8003C
enum {
MSTP030, MSTP029, /* IIC */
MSTP026, MSTP025, MSTP024, /* SCIF */
MSTP023,
MSTP022, MSTP021,
MSTP019, /* HSCIF */
MSTP016, MSTP015, MSTP014, /* TMU / TIMER */
MSTP012, MSTP011, MSTP010, MSTP009, MSTP008, /* SSI */
MSTP007, /* HSPI */
MSTP115, /* ADMAC */
MSTP114, /* GETHER */
MSTP111, /* DMAC */
MSTP109, /* VIDEOIN1 */
MSTP108, /* VIDEOIN0 */
MSTP107, /* RGPVBG */
MSTP106, /* 2DG */
MSTP103, /* VIEW */
MSTP100, /* USB */
MSTP331, /* MMC */
MSTP330, /* MIMLB */
MSTP323, /* SDHI0 */
MSTP322, /* SDHI1 */
MSTP321, /* SDHI2 */
MSTP320, /* RQSPI */
MSTP319, /* SRC0 */
MSTP318, /* SRC1 */
MSTP317, /* RSPI */
MSTP316, /* RCAN0 */
MSTP315, /* RCAN1 */
MSTP314, /* FLTCL */
MSTP313, /* ADC */
MSTP312, /* MTU */
MSTP304, /* IE-BUS */
MSTP303, /* RTC */
MSTP302, /* HIF */
MSTP301, /* STIF0 */
MSTP300, /* STIF1 */
MSTP_NR };
static struct clk mstp_clks[MSTP_NR] = {
/* MSTPCR0 */
[MSTP030] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 30, 0),
[MSTP029] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 29, 0),
[MSTP026] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 26, 0),
[MSTP025] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 25, 0),
[MSTP024] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 24, 0),
[MSTP023] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 23, 0),
[MSTP022] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 22, 0),
[MSTP021] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 21, 0),
[MSTP019] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 19, 0),
[MSTP016] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 16, 0),
[MSTP015] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 15, 0),
[MSTP014] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 14, 0),
[MSTP012] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 12, 0),
[MSTP011] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 11, 0),
[MSTP010] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 10, 0),
[MSTP009] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 9, 0),
[MSTP008] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 8, 0),
[MSTP007] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 7, 0),
/* MSTPCR1 */
[MSTP115] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 15, 0),
[MSTP114] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 14, 0),
[MSTP111] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 11, 0),
[MSTP109] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 9, 0),
[MSTP108] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 8, 0),
[MSTP107] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 7, 0),
[MSTP106] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 6, 0),
[MSTP103] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 3, 0),
[MSTP100] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 0, 0),
/* MSTPCR3 */
[MSTP331] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 31, 0),
[MSTP330] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 30, 0),
[MSTP323] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 23, 0),
[MSTP322] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 22, 0),
[MSTP321] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 21, 0),
[MSTP320] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 20, 0),
[MSTP319] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 19, 0),
[MSTP318] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 18, 0),
[MSTP317] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 17, 0),
[MSTP316] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 16, 0),
[MSTP315] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 15, 0),
[MSTP314] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 14, 0),
[MSTP313] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 13, 0),
[MSTP312] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 12, 0),
[MSTP304] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 4, 0),
[MSTP303] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 3, 0),
[MSTP302] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 2, 0),
[MSTP301] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 1, 0),
[MSTP300] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 0, 0),
};
static struct clk_lookup lookups[] = {
/* main clocks */
CLKDEV_CON_ID("extal", &extal_clk),
CLKDEV_CON_ID("pll_clk", &pll_clk),
/* clocks */
CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_S]),
CLKDEV_CON_ID("ddr_clk", &div4_clks[DIV4_M]),
CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]),
CLKDEV_CON_ID("shyway_clk1", &div4_clks[DIV4_S1]),
CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
/* MSTP32 clocks */
CLKDEV_DEV_ID("i2c-sh7734.0", &mstp_clks[MSTP030]),
CLKDEV_DEV_ID("i2c-sh7734.1", &mstp_clks[MSTP029]),
CLKDEV_ICK_ID("fck", "sh-sci.0", &mstp_clks[MSTP026]),
CLKDEV_ICK_ID("fck", "sh-sci.1", &mstp_clks[MSTP025]),
CLKDEV_ICK_ID("fck", "sh-sci.2", &mstp_clks[MSTP024]),
CLKDEV_ICK_ID("fck", "sh-sci.3", &mstp_clks[MSTP023]),
CLKDEV_ICK_ID("fck", "sh-sci.4", &mstp_clks[MSTP022]),
CLKDEV_ICK_ID("fck", "sh-sci.5", &mstp_clks[MSTP021]),
CLKDEV_CON_ID("hscif", &mstp_clks[MSTP019]),
CLKDEV_ICK_ID("fck", "sh-tmu.0", &mstp_clks[MSTP016]),
CLKDEV_ICK_ID("fck", "sh-tmu.1", &mstp_clks[MSTP015]),
CLKDEV_ICK_ID("fck", "sh-tmu.2", &mstp_clks[MSTP014]),
CLKDEV_CON_ID("ssi0", &mstp_clks[MSTP012]),
CLKDEV_CON_ID("ssi1", &mstp_clks[MSTP011]),
CLKDEV_CON_ID("ssi2", &mstp_clks[MSTP010]),
CLKDEV_CON_ID("ssi3", &mstp_clks[MSTP009]),
CLKDEV_CON_ID("sss", &mstp_clks[MSTP008]),
CLKDEV_CON_ID("hspi", &mstp_clks[MSTP007]),
CLKDEV_CON_ID("usb_fck", &mstp_clks[MSTP100]),
CLKDEV_CON_ID("videoin0", &mstp_clks[MSTP109]),
CLKDEV_CON_ID("videoin1", &mstp_clks[MSTP108]),
CLKDEV_CON_ID("rgpvg", &mstp_clks[MSTP107]),
CLKDEV_CON_ID("2dg", &mstp_clks[MSTP106]),
CLKDEV_CON_ID("view", &mstp_clks[MSTP103]),
CLKDEV_CON_ID("mmc0", &mstp_clks[MSTP331]),
CLKDEV_CON_ID("mimlb0", &mstp_clks[MSTP330]),
CLKDEV_CON_ID("sdhi0", &mstp_clks[MSTP323]),
CLKDEV_CON_ID("sdhi1", &mstp_clks[MSTP322]),
CLKDEV_CON_ID("sdhi2", &mstp_clks[MSTP321]),
CLKDEV_CON_ID("rqspi0", &mstp_clks[MSTP320]),
CLKDEV_CON_ID("src0", &mstp_clks[MSTP319]),
CLKDEV_CON_ID("src1", &mstp_clks[MSTP318]),
CLKDEV_CON_ID("rsp0", &mstp_clks[MSTP317]),
CLKDEV_CON_ID("rcan0", &mstp_clks[MSTP316]),
CLKDEV_CON_ID("rcan1", &mstp_clks[MSTP315]),
CLKDEV_CON_ID("fltcl0", &mstp_clks[MSTP314]),
CLKDEV_CON_ID("adc0", &mstp_clks[MSTP313]),
CLKDEV_CON_ID("mtu0", &mstp_clks[MSTP312]),
CLKDEV_CON_ID("iebus0", &mstp_clks[MSTP304]),
CLKDEV_DEV_ID("sh7734-gether.0", &mstp_clks[MSTP114]),
CLKDEV_CON_ID("rtc0", &mstp_clks[MSTP303]),
CLKDEV_CON_ID("hif0", &mstp_clks[MSTP302]),
CLKDEV_CON_ID("stif0", &mstp_clks[MSTP301]),
CLKDEV_CON_ID("stif1", &mstp_clks[MSTP300]),
};
int __init arch_clk_init(void)
{
int i, ret = 0;
for (i = 0; i < ARRAY_SIZE(main_clks); i++)
ret |= clk_register(main_clks[i]);
clkdev_add_table(lookups, ARRAY_SIZE(lookups));
if (!ret)
ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),
&div4_table);
if (!ret)
ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
return ret;
}
| linux-master | arch/sh/kernel/cpu/sh4a/clock-sh7734.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SH7757 Setup
*
* Copyright (C) 2009, 2011 Renesas Solutions Corp.
*
* Based on setup-sh7785.c: Copyright (C) 2007 Paul Mundt
*/
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/serial_sci.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/sh_timer.h>
#include <linux/sh_dma.h>
#include <linux/sh_intc.h>
#include <linux/usb/ohci_pdriver.h>
#include <cpu/dma-register.h>
#include <cpu/sh7757.h>
#include <asm/platform_early.h>
static struct plat_sci_port scif2_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
};
static struct resource scif2_resources[] = {
DEFINE_RES_MEM(0xfe4b0000, 0x100), /* SCIF2 */
DEFINE_RES_IRQ(evt2irq(0x700)),
};
static struct platform_device scif2_device = {
.name = "sh-sci",
.id = 0,
.resource = scif2_resources,
.num_resources = ARRAY_SIZE(scif2_resources),
.dev = {
.platform_data = &scif2_platform_data,
},
};
static struct plat_sci_port scif3_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
};
static struct resource scif3_resources[] = {
DEFINE_RES_MEM(0xfe4c0000, 0x100), /* SCIF3 */
DEFINE_RES_IRQ(evt2irq(0xb80)),
};
static struct platform_device scif3_device = {
.name = "sh-sci",
.id = 1,
.resource = scif3_resources,
.num_resources = ARRAY_SIZE(scif3_resources),
.dev = {
.platform_data = &scif3_platform_data,
},
};
static struct plat_sci_port scif4_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
};
static struct resource scif4_resources[] = {
DEFINE_RES_MEM(0xfe4d0000, 0x100), /* SCIF4 */
DEFINE_RES_IRQ(evt2irq(0xf00)),
};
static struct platform_device scif4_device = {
.name = "sh-sci",
.id = 2,
.resource = scif4_resources,
.num_resources = ARRAY_SIZE(scif4_resources),
.dev = {
.platform_data = &scif4_platform_data,
},
};
static struct sh_timer_config tmu0_platform_data = {
.channels_mask = 3,
};
static struct resource tmu0_resources[] = {
DEFINE_RES_MEM(0xfe430000, 0x20),
DEFINE_RES_IRQ(evt2irq(0x580)),
DEFINE_RES_IRQ(evt2irq(0x5a0)),
};
static struct platform_device tmu0_device = {
.name = "sh-tmu",
.id = 0,
.dev = {
.platform_data = &tmu0_platform_data,
},
.resource = tmu0_resources,
.num_resources = ARRAY_SIZE(tmu0_resources),
};
static struct resource spi0_resources[] = {
[0] = {
.start = 0xfe002000,
.end = 0xfe0020ff,
.flags = IORESOURCE_MEM | IORESOURCE_MEM_32BIT,
},
[1] = {
.start = evt2irq(0xcc0),
.flags = IORESOURCE_IRQ,
},
};
/* DMA */
static const struct sh_dmae_slave_config sh7757_dmae0_slaves[] = {
{
.slave_id = SHDMA_SLAVE_SDHI_TX,
.addr = 0x1fe50030,
.chcr = SM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_16BIT),
.mid_rid = 0xc5,
},
{
.slave_id = SHDMA_SLAVE_SDHI_RX,
.addr = 0x1fe50030,
.chcr = DM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_16BIT),
.mid_rid = 0xc6,
},
{
.slave_id = SHDMA_SLAVE_MMCIF_TX,
.addr = 0x1fcb0034,
.chcr = SM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_32BIT),
.mid_rid = 0xd3,
},
{
.slave_id = SHDMA_SLAVE_MMCIF_RX,
.addr = 0x1fcb0034,
.chcr = DM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_32BIT),
.mid_rid = 0xd7,
},
};
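/*
 * Each slave entry follows the usual shdma conventions: .addr is the
 * fixed data-register address of the peripheral FIFO, .chcr is the
 * CHCR template (SM_INC/DM_INC pick which side auto-increments, i.e.
 * memory-to-device vs. device-to-memory, RS_ERS selects external
 * request mode, and TS_INDEX2VAL() encodes the transfer width), and
 * .mid_rid is the MID/RID request-source code programmed into the
 * matching DMARS register when the channel is bound.
 */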
static const struct sh_dmae_slave_config sh7757_dmae1_slaves[] = {
{
.slave_id = SHDMA_SLAVE_SCIF2_TX,
.addr = 0x1f4b000c,
.chcr = SM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x21,
},
{
.slave_id = SHDMA_SLAVE_SCIF2_RX,
.addr = 0x1f4b0014,
.chcr = DM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x22,
},
{
.slave_id = SHDMA_SLAVE_SCIF3_TX,
.addr = 0x1f4c000c,
.chcr = SM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x29,
},
{
.slave_id = SHDMA_SLAVE_SCIF3_RX,
.addr = 0x1f4c0014,
.chcr = DM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x2a,
},
{
.slave_id = SHDMA_SLAVE_SCIF4_TX,
.addr = 0x1f4d000c,
.chcr = SM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x41,
},
{
.slave_id = SHDMA_SLAVE_SCIF4_RX,
.addr = 0x1f4d0014,
.chcr = DM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x42,
},
{
.slave_id = SHDMA_SLAVE_RSPI_TX,
.addr = 0xfe480004,
.chcr = SM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_16BIT),
.mid_rid = 0xc1,
},
{
.slave_id = SHDMA_SLAVE_RSPI_RX,
.addr = 0xfe480004,
.chcr = DM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_16BIT),
.mid_rid = 0xc2,
},
};
static const struct sh_dmae_slave_config sh7757_dmae2_slaves[] = {
{
.slave_id = SHDMA_SLAVE_RIIC0_TX,
.addr = 0x1e500012,
.chcr = SM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x21,
},
{
.slave_id = SHDMA_SLAVE_RIIC0_RX,
.addr = 0x1e500013,
.chcr = DM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x22,
},
{
.slave_id = SHDMA_SLAVE_RIIC1_TX,
.addr = 0x1e510012,
.chcr = SM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x29,
},
{
.slave_id = SHDMA_SLAVE_RIIC1_RX,
.addr = 0x1e510013,
.chcr = DM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x2a,
},
{
.slave_id = SHDMA_SLAVE_RIIC2_TX,
.addr = 0x1e520012,
.chcr = SM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0xa1,
},
{
.slave_id = SHDMA_SLAVE_RIIC2_RX,
.addr = 0x1e520013,
.chcr = DM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0xa2,
},
{
.slave_id = SHDMA_SLAVE_RIIC3_TX,
.addr = 0x1e530012,
.chcr = SM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0xa9,
},
{
.slave_id = SHDMA_SLAVE_RIIC3_RX,
.addr = 0x1e530013,
.chcr = DM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0xaf,
},
{
.slave_id = SHDMA_SLAVE_RIIC4_TX,
.addr = 0x1e540012,
.chcr = SM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0xc5,
},
{
.slave_id = SHDMA_SLAVE_RIIC4_RX,
.addr = 0x1e540013,
.chcr = DM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0xc6,
},
};
static const struct sh_dmae_slave_config sh7757_dmae3_slaves[] = {
{
.slave_id = SHDMA_SLAVE_RIIC5_TX,
.addr = 0x1e550012,
.chcr = SM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x21,
},
{
.slave_id = SHDMA_SLAVE_RIIC5_RX,
.addr = 0x1e550013,
.chcr = DM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x22,
},
{
.slave_id = SHDMA_SLAVE_RIIC6_TX,
.addr = 0x1e560012,
.chcr = SM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x29,
},
{
.slave_id = SHDMA_SLAVE_RIIC6_RX,
.addr = 0x1e560013,
.chcr = DM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x2a,
},
{
.slave_id = SHDMA_SLAVE_RIIC7_TX,
.addr = 0x1e570012,
.chcr = SM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x41,
},
{
.slave_id = SHDMA_SLAVE_RIIC7_RX,
.addr = 0x1e570013,
.chcr = DM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x42,
},
{
.slave_id = SHDMA_SLAVE_RIIC8_TX,
.addr = 0x1e580012,
.chcr = SM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x45,
},
{
.slave_id = SHDMA_SLAVE_RIIC8_RX,
.addr = 0x1e580013,
.chcr = DM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x46,
},
{
.slave_id = SHDMA_SLAVE_RIIC9_TX,
.addr = 0x1e590012,
.chcr = SM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x51,
},
{
.slave_id = SHDMA_SLAVE_RIIC9_RX,
.addr = 0x1e590013,
.chcr = DM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x52,
},
};
static const struct sh_dmae_channel sh7757_dmae_channels[] = {
{
.offset = 0,
.dmars = 0,
.dmars_bit = 0,
}, {
.offset = 0x10,
.dmars = 0,
.dmars_bit = 8,
}, {
.offset = 0x20,
.dmars = 4,
.dmars_bit = 0,
}, {
.offset = 0x30,
.dmars = 4,
.dmars_bit = 8,
}, {
.offset = 0x50,
.dmars = 8,
.dmars_bit = 0,
}, {
.offset = 0x60,
.dmars = 8,
.dmars_bit = 8,
}
};
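/*
 * The channel table pairs up with the DMARS layout visible above:
 * channels at register offsets 0x00/0x10 share the 16-bit DMARS word
 * at offset 0, 0x20/0x30 share offset 4, and 0x50/0x60 share offset 8,
 * with .dmars_bit (0 or 8) selecting which byte lane of that word
 * receives the slave's mid_rid value.
 */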
static const unsigned int ts_shift[] = TS_SHIFT;
static struct sh_dmae_pdata dma0_platform_data = {
.slave = sh7757_dmae0_slaves,
.slave_num = ARRAY_SIZE(sh7757_dmae0_slaves),
.channel = sh7757_dmae_channels,
.channel_num = ARRAY_SIZE(sh7757_dmae_channels),
.ts_low_shift = CHCR_TS_LOW_SHIFT,
.ts_low_mask = CHCR_TS_LOW_MASK,
.ts_high_shift = CHCR_TS_HIGH_SHIFT,
.ts_high_mask = CHCR_TS_HIGH_MASK,
.ts_shift = ts_shift,
.ts_shift_num = ARRAY_SIZE(ts_shift),
.dmaor_init = DMAOR_INIT,
};
static struct sh_dmae_pdata dma1_platform_data = {
.slave = sh7757_dmae1_slaves,
.slave_num = ARRAY_SIZE(sh7757_dmae1_slaves),
.channel = sh7757_dmae_channels,
.channel_num = ARRAY_SIZE(sh7757_dmae_channels),
.ts_low_shift = CHCR_TS_LOW_SHIFT,
.ts_low_mask = CHCR_TS_LOW_MASK,
.ts_high_shift = CHCR_TS_HIGH_SHIFT,
.ts_high_mask = CHCR_TS_HIGH_MASK,
.ts_shift = ts_shift,
.ts_shift_num = ARRAY_SIZE(ts_shift),
.dmaor_init = DMAOR_INIT,
};
static struct sh_dmae_pdata dma2_platform_data = {
.slave = sh7757_dmae2_slaves,
.slave_num = ARRAY_SIZE(sh7757_dmae2_slaves),
.channel = sh7757_dmae_channels,
.channel_num = ARRAY_SIZE(sh7757_dmae_channels),
.ts_low_shift = CHCR_TS_LOW_SHIFT,
.ts_low_mask = CHCR_TS_LOW_MASK,
.ts_high_shift = CHCR_TS_HIGH_SHIFT,
.ts_high_mask = CHCR_TS_HIGH_MASK,
.ts_shift = ts_shift,
.ts_shift_num = ARRAY_SIZE(ts_shift),
.dmaor_init = DMAOR_INIT,
};
static struct sh_dmae_pdata dma3_platform_data = {
.slave = sh7757_dmae3_slaves,
.slave_num = ARRAY_SIZE(sh7757_dmae3_slaves),
.channel = sh7757_dmae_channels,
.channel_num = ARRAY_SIZE(sh7757_dmae_channels),
.ts_low_shift = CHCR_TS_LOW_SHIFT,
.ts_low_mask = CHCR_TS_LOW_MASK,
.ts_high_shift = CHCR_TS_HIGH_SHIFT,
.ts_high_mask = CHCR_TS_HIGH_MASK,
.ts_shift = ts_shift,
.ts_shift_num = ARRAY_SIZE(ts_shift),
.dmaor_init = DMAOR_INIT,
};
/* channels 0 to 5 */
static struct resource sh7757_dmae0_resources[] = {
[0] = {
/* Channel registers and DMAOR */
.start = 0xff608020,
.end = 0xff60808f,
.flags = IORESOURCE_MEM,
},
[1] = {
/* DMARSx */
.start = 0xff609000,
.end = 0xff60900b,
.flags = IORESOURCE_MEM,
},
{
.name = "error_irq",
.start = evt2irq(0x640),
.end = evt2irq(0x640),
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
},
};
/* channels 6 to 11 */
static struct resource sh7757_dmae1_resources[] = {
[0] = {
/* Channel registers and DMAOR */
.start = 0xff618020,
.end = 0xff61808f,
.flags = IORESOURCE_MEM,
},
[1] = {
/* DMARSx */
.start = 0xff619000,
.end = 0xff61900b,
.flags = IORESOURCE_MEM,
},
{
.name = "error_irq",
.start = evt2irq(0x640),
.end = evt2irq(0x640),
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
},
{
/* IRQ for channel 4 */
.start = evt2irq(0x7c0),
.end = evt2irq(0x7c0),
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
},
{
/* IRQ for channel 5 */
.start = evt2irq(0x7c0),
.end = evt2irq(0x7c0),
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
},
{
/* IRQ for channel 6 */
.start = evt2irq(0xd00),
.end = evt2irq(0xd00),
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
},
{
/* IRQ for channel 7 */
.start = evt2irq(0xd00),
.end = evt2irq(0xd00),
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
},
{
/* IRQ for channel 8 */
.start = evt2irq(0xd00),
.end = evt2irq(0xd00),
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
},
{
/* IRQ for channel 9 */
.start = evt2irq(0xd00),
.end = evt2irq(0xd00),
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
},
{
/* IRQ for channel 10 */
.start = evt2irq(0xd00),
.end = evt2irq(0xd00),
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
},
{
/* IRQ for channel 11 */
.start = evt2irq(0xd00),
.end = evt2irq(0xd00),
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
},
};
/* channels 12 to 17 */
static struct resource sh7757_dmae2_resources[] = {
[0] = {
/* Channel registers and DMAOR */
.start = 0xff708020,
.end = 0xff70808f,
.flags = IORESOURCE_MEM,
},
[1] = {
/* DMARSx */
.start = 0xff709000,
.end = 0xff70900b,
.flags = IORESOURCE_MEM,
},
{
.name = "error_irq",
.start = evt2irq(0x2a60),
.end = evt2irq(0x2a60),
.flags = IORESOURCE_IRQ,
},
{
/* IRQ for channels 12 to 16 */
.start = evt2irq(0x2400),
.end = evt2irq(0x2480),
.flags = IORESOURCE_IRQ,
},
{
/* IRQ for channel 17 */
.start = evt2irq(0x24e0),
.end = evt2irq(0x24e0),
.flags = IORESOURCE_IRQ,
},
};
/* channels 18 to 23 */
static struct resource sh7757_dmae3_resources[] = {
[0] = {
/* Channel registers and DMAOR */
.start = 0xff718020,
.end = 0xff71808f,
.flags = IORESOURCE_MEM,
},
[1] = {
/* DMARSx */
.start = 0xff719000,
.end = 0xff71900b,
.flags = IORESOURCE_MEM,
},
{
.name = "error_irq",
.start = evt2irq(0x2a80),
.end = evt2irq(0x2a80),
.flags = IORESOURCE_IRQ,
},
{
/* IRQ for channels 18 to 22 */
.start = evt2irq(0x2500),
.end = evt2irq(0x2580),
.flags = IORESOURCE_IRQ,
},
{
/* IRQ for channel 23 */
.start = evt2irq(0x2600),
.end = evt2irq(0x2600),
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device dma0_device = {
.name = "sh-dma-engine",
.id = 0,
.resource = sh7757_dmae0_resources,
.num_resources = ARRAY_SIZE(sh7757_dmae0_resources),
.dev = {
.platform_data = &dma0_platform_data,
},
};
static struct platform_device dma1_device = {
.name = "sh-dma-engine",
.id = 1,
.resource = sh7757_dmae1_resources,
.num_resources = ARRAY_SIZE(sh7757_dmae1_resources),
.dev = {
.platform_data = &dma1_platform_data,
},
};
static struct platform_device dma2_device = {
.name = "sh-dma-engine",
.id = 2,
.resource = sh7757_dmae2_resources,
.num_resources = ARRAY_SIZE(sh7757_dmae2_resources),
.dev = {
.platform_data = &dma2_platform_data,
},
};
static struct platform_device dma3_device = {
.name = "sh-dma-engine",
.id = 3,
.resource = sh7757_dmae3_resources,
.num_resources = ARRAY_SIZE(sh7757_dmae3_resources),
.dev = {
.platform_data = &dma3_platform_data,
},
};
static struct platform_device spi0_device = {
.name = "sh_spi",
.id = 0,
.dev = {
.dma_mask = NULL,
.coherent_dma_mask = 0xffffffff,
},
.num_resources = ARRAY_SIZE(spi0_resources),
.resource = spi0_resources,
};
static struct resource spi1_resources[] = {
{
.start = 0xffd8ee70,
.end = 0xffd8eeff,
.flags = IORESOURCE_MEM | IORESOURCE_MEM_8BIT,
},
{
.start = evt2irq(0x8c0),
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device spi1_device = {
.name = "sh_spi",
.id = 1,
.num_resources = ARRAY_SIZE(spi1_resources),
.resource = spi1_resources,
};
static struct resource rspi_resources[] = {
{
.start = 0xfe480000,
.end = 0xfe4800ff,
.flags = IORESOURCE_MEM,
},
{
.start = evt2irq(0x1d80),
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device rspi_device = {
.name = "rspi",
.id = 2,
.num_resources = ARRAY_SIZE(rspi_resources),
.resource = rspi_resources,
};
static struct resource usb_ehci_resources[] = {
[0] = {
.start = 0xfe4f1000,
.end = 0xfe4f10ff,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = evt2irq(0x920),
.end = evt2irq(0x920),
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device usb_ehci_device = {
.name = "sh_ehci",
.id = -1,
.dev = {
.dma_mask = &usb_ehci_device.dev.coherent_dma_mask,
.coherent_dma_mask = DMA_BIT_MASK(32),
},
.num_resources = ARRAY_SIZE(usb_ehci_resources),
.resource = usb_ehci_resources,
};
static struct resource usb_ohci_resources[] = {
[0] = {
.start = 0xfe4f1800,
.end = 0xfe4f18ff,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = evt2irq(0x920),
.end = evt2irq(0x920),
.flags = IORESOURCE_IRQ,
},
};
static struct usb_ohci_pdata usb_ohci_pdata;
static struct platform_device usb_ohci_device = {
.name = "ohci-platform",
.id = -1,
.dev = {
.dma_mask = &usb_ohci_device.dev.coherent_dma_mask,
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &usb_ohci_pdata,
},
.num_resources = ARRAY_SIZE(usb_ohci_resources),
.resource = usb_ohci_resources,
};
static struct platform_device *sh7757_devices[] __initdata = {
&scif2_device,
&scif3_device,
&scif4_device,
&tmu0_device,
&dma0_device,
&dma1_device,
&dma2_device,
&dma3_device,
&spi0_device,
&spi1_device,
&rspi_device,
&usb_ehci_device,
&usb_ohci_device,
};
static int __init sh7757_devices_setup(void)
{
return platform_add_devices(sh7757_devices,
ARRAY_SIZE(sh7757_devices));
}
arch_initcall(sh7757_devices_setup);
static struct platform_device *sh7757_early_devices[] __initdata = {
&scif2_device,
&scif3_device,
&scif4_device,
&tmu0_device,
};
void __init plat_early_device_setup(void)
{
sh_early_platform_add_devices(sh7757_early_devices,
ARRAY_SIZE(sh7757_early_devices));
}
enum {
UNUSED = 0,
/* interrupt sources */
IRL0_LLLL, IRL0_LLLH, IRL0_LLHL, IRL0_LLHH,
IRL0_LHLL, IRL0_LHLH, IRL0_LHHL, IRL0_LHHH,
IRL0_HLLL, IRL0_HLLH, IRL0_HLHL, IRL0_HLHH,
IRL0_HHLL, IRL0_HHLH, IRL0_HHHL,
IRL4_LLLL, IRL4_LLLH, IRL4_LLHL, IRL4_LLHH,
IRL4_LHLL, IRL4_LHLH, IRL4_LHHL, IRL4_LHHH,
IRL4_HLLL, IRL4_HLLH, IRL4_HLHL, IRL4_HLHH,
IRL4_HHLL, IRL4_HHLH, IRL4_HHHL,
IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
SDHI, DVC,
IRQ8, IRQ9, IRQ11, IRQ10, IRQ12, IRQ13, IRQ14, IRQ15,
TMU0, TMU1, TMU2, TMU2_TICPI, TMU3, TMU4, TMU5,
HUDI,
ARC4,
DMAC0_5, DMAC6_7, DMAC8_11,
SCIF0, SCIF1, SCIF2, SCIF3, SCIF4,
USB0, USB1,
JMC,
SPI0, SPI1,
TMR01, TMR23, TMR45,
FRT,
LPC, LPC5, LPC6, LPC7, LPC8,
PECI0, PECI1, PECI2, PECI3, PECI4, PECI5,
ETHERC,
ADC0, ADC1,
SIM,
IIC0_0, IIC0_1, IIC0_2, IIC0_3,
IIC1_0, IIC1_1, IIC1_2, IIC1_3,
IIC2_0, IIC2_1, IIC2_2, IIC2_3,
IIC3_0, IIC3_1, IIC3_2, IIC3_3,
IIC4_0, IIC4_1, IIC4_2, IIC4_3,
IIC5_0, IIC5_1, IIC5_2, IIC5_3,
IIC6_0, IIC6_1, IIC6_2, IIC6_3,
IIC7_0, IIC7_1, IIC7_2, IIC7_3,
IIC8_0, IIC8_1, IIC8_2, IIC8_3,
IIC9_0, IIC9_1, IIC9_2, IIC9_3,
ONFICTL,
MMC1, MMC2,
ECCU,
PCIC,
G200,
RSPI,
SGPIO,
DMINT12, DMINT13, DMINT14, DMINT15, DMINT16, DMINT17, DMINT18, DMINT19,
DMINT20, DMINT21, DMINT22, DMINT23,
DDRECC,
TSIP,
PCIE_BRIDGE,
WDT0B, WDT1B, WDT2B, WDT3B, WDT4B, WDT5B, WDT6B, WDT7B, WDT8B,
GETHER0, GETHER1, GETHER2,
PBIA, PBIB, PBIC,
DMAE2, DMAE3,
SERMUX2, SERMUX3,
/* interrupt groups */
TMU012, TMU345,
};
static struct intc_vect vectors[] __initdata = {
INTC_VECT(SDHI, 0x480), INTC_VECT(SDHI, 0x4a0),
INTC_VECT(SDHI, 0x4c0),
INTC_VECT(DVC, 0x4e0),
INTC_VECT(IRQ8, 0x500), INTC_VECT(IRQ9, 0x520),
INTC_VECT(IRQ10, 0x540),
INTC_VECT(TMU0, 0x580), INTC_VECT(TMU1, 0x5a0),
INTC_VECT(TMU2, 0x5c0), INTC_VECT(TMU2_TICPI, 0x5e0),
INTC_VECT(HUDI, 0x600),
INTC_VECT(ARC4, 0x620),
INTC_VECT(DMAC0_5, 0x640), INTC_VECT(DMAC0_5, 0x660),
INTC_VECT(DMAC0_5, 0x680), INTC_VECT(DMAC0_5, 0x6a0),
INTC_VECT(DMAC0_5, 0x6c0),
INTC_VECT(IRQ11, 0x6e0),
INTC_VECT(SCIF2, 0x700), INTC_VECT(SCIF2, 0x720),
INTC_VECT(SCIF2, 0x740), INTC_VECT(SCIF2, 0x760),
INTC_VECT(DMAC0_5, 0x780), INTC_VECT(DMAC0_5, 0x7a0),
INTC_VECT(DMAC6_7, 0x7c0), INTC_VECT(DMAC6_7, 0x7e0),
INTC_VECT(USB0, 0x840),
INTC_VECT(IRQ12, 0x880),
INTC_VECT(JMC, 0x8a0),
INTC_VECT(SPI1, 0x8c0),
INTC_VECT(IRQ13, 0x8e0), INTC_VECT(IRQ14, 0x900),
INTC_VECT(USB1, 0x920),
INTC_VECT(TMR01, 0xa00), INTC_VECT(TMR23, 0xa20),
INTC_VECT(TMR45, 0xa40),
INTC_VECT(FRT, 0xa80),
INTC_VECT(LPC, 0xaa0), INTC_VECT(LPC, 0xac0),
INTC_VECT(LPC, 0xae0), INTC_VECT(LPC, 0xb00),
INTC_VECT(LPC, 0xb20),
INTC_VECT(SCIF0, 0xb40), INTC_VECT(SCIF1, 0xb60),
INTC_VECT(SCIF3, 0xb80), INTC_VECT(SCIF3, 0xba0),
INTC_VECT(SCIF3, 0xbc0), INTC_VECT(SCIF3, 0xbe0),
INTC_VECT(PECI0, 0xc00), INTC_VECT(PECI1, 0xc20),
INTC_VECT(PECI2, 0xc40),
INTC_VECT(IRQ15, 0xc60),
INTC_VECT(ETHERC, 0xc80), INTC_VECT(ETHERC, 0xca0),
INTC_VECT(SPI0, 0xcc0),
INTC_VECT(ADC1, 0xce0),
INTC_VECT(DMAC8_11, 0xd00), INTC_VECT(DMAC8_11, 0xd20),
INTC_VECT(DMAC8_11, 0xd40), INTC_VECT(DMAC8_11, 0xd60),
INTC_VECT(SIM, 0xd80), INTC_VECT(SIM, 0xda0),
INTC_VECT(SIM, 0xdc0), INTC_VECT(SIM, 0xde0),
INTC_VECT(TMU3, 0xe00), INTC_VECT(TMU4, 0xe20),
INTC_VECT(TMU5, 0xe40),
INTC_VECT(ADC0, 0xe60),
INTC_VECT(SCIF4, 0xf00), INTC_VECT(SCIF4, 0xf20),
INTC_VECT(SCIF4, 0xf40), INTC_VECT(SCIF4, 0xf60),
INTC_VECT(IIC0_0, 0x1400), INTC_VECT(IIC0_1, 0x1420),
INTC_VECT(IIC0_2, 0x1440), INTC_VECT(IIC0_3, 0x1460),
INTC_VECT(IIC1_0, 0x1480), INTC_VECT(IIC1_1, 0x14e0),
INTC_VECT(IIC1_2, 0x1500), INTC_VECT(IIC1_3, 0x1520),
INTC_VECT(IIC2_0, 0x1540), INTC_VECT(IIC2_1, 0x1560),
INTC_VECT(IIC2_2, 0x1580), INTC_VECT(IIC2_3, 0x1600),
INTC_VECT(IIC3_0, 0x1620), INTC_VECT(IIC3_1, 0x1640),
INTC_VECT(IIC3_2, 0x16e0), INTC_VECT(IIC3_3, 0x1700),
INTC_VECT(IIC4_0, 0x17c0), INTC_VECT(IIC4_1, 0x1800),
INTC_VECT(IIC4_2, 0x1820), INTC_VECT(IIC4_3, 0x1840),
INTC_VECT(IIC5_0, 0x1860), INTC_VECT(IIC5_1, 0x1880),
INTC_VECT(IIC5_2, 0x18a0), INTC_VECT(IIC5_3, 0x18c0),
INTC_VECT(IIC6_0, 0x18e0), INTC_VECT(IIC6_1, 0x1900),
INTC_VECT(IIC6_2, 0x1920),
INTC_VECT(ONFICTL, 0x1960),
INTC_VECT(IIC6_3, 0x1980),
INTC_VECT(IIC7_0, 0x19a0), INTC_VECT(IIC7_1, 0x1a00),
INTC_VECT(IIC7_2, 0x1a20), INTC_VECT(IIC7_3, 0x1a40),
INTC_VECT(IIC8_0, 0x1a60), INTC_VECT(IIC8_1, 0x1a80),
INTC_VECT(IIC8_2, 0x1aa0), INTC_VECT(IIC8_3, 0x1b40),
INTC_VECT(IIC9_0, 0x1b60), INTC_VECT(IIC9_1, 0x1b80),
INTC_VECT(IIC9_2, 0x1c00), INTC_VECT(IIC9_3, 0x1c20),
INTC_VECT(MMC1, 0x1c60), INTC_VECT(MMC2, 0x1c80),
INTC_VECT(ECCU, 0x1cc0),
INTC_VECT(PCIC, 0x1ce0),
INTC_VECT(G200, 0x1d00),
INTC_VECT(RSPI, 0x1d80), INTC_VECT(RSPI, 0x1da0),
INTC_VECT(RSPI, 0x1dc0), INTC_VECT(RSPI, 0x1de0),
INTC_VECT(PECI3, 0x1ec0), INTC_VECT(PECI4, 0x1ee0),
INTC_VECT(PECI5, 0x1f00),
INTC_VECT(SGPIO, 0x1f80), INTC_VECT(SGPIO, 0x1fa0),
INTC_VECT(SGPIO, 0x1fc0),
INTC_VECT(DMINT12, 0x2400), INTC_VECT(DMINT13, 0x2420),
INTC_VECT(DMINT14, 0x2440), INTC_VECT(DMINT15, 0x2460),
INTC_VECT(DMINT16, 0x2480), INTC_VECT(DMINT17, 0x24e0),
INTC_VECT(DMINT18, 0x2500), INTC_VECT(DMINT19, 0x2520),
INTC_VECT(DMINT20, 0x2540), INTC_VECT(DMINT21, 0x2560),
INTC_VECT(DMINT22, 0x2580), INTC_VECT(DMINT23, 0x2600),
INTC_VECT(DDRECC, 0x2620),
INTC_VECT(TSIP, 0x2640),
INTC_VECT(PCIE_BRIDGE, 0x27c0),
INTC_VECT(WDT0B, 0x2800), INTC_VECT(WDT1B, 0x2820),
INTC_VECT(WDT2B, 0x2840), INTC_VECT(WDT3B, 0x2860),
INTC_VECT(WDT4B, 0x2880), INTC_VECT(WDT5B, 0x28a0),
INTC_VECT(WDT6B, 0x28c0), INTC_VECT(WDT7B, 0x28e0),
INTC_VECT(WDT8B, 0x2900),
INTC_VECT(GETHER0, 0x2960), INTC_VECT(GETHER1, 0x2980),
INTC_VECT(GETHER2, 0x29a0),
INTC_VECT(PBIA, 0x2a00), INTC_VECT(PBIB, 0x2a20),
INTC_VECT(PBIC, 0x2a40),
INTC_VECT(DMAE2, 0x2a60), INTC_VECT(DMAE3, 0x2a80),
INTC_VECT(SERMUX2, 0x2aa0), INTC_VECT(SERMUX3, 0x2b40),
INTC_VECT(LPC5, 0x2b60), INTC_VECT(LPC6, 0x2b80),
INTC_VECT(LPC7, 0x2c00), INTC_VECT(LPC8, 0x2c20),
};
static struct intc_group groups[] __initdata = {
INTC_GROUP(TMU012, TMU0, TMU1, TMU2, TMU2_TICPI),
INTC_GROUP(TMU345, TMU3, TMU4, TMU5),
};
static struct intc_mask_reg mask_registers[] __initdata = {
{ 0xffd00044, 0xffd00064, 32, /* INTMSK0 / INTMSKCLR0 */
{ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
{ 0xffd40080, 0xffd40084, 32, /* INTMSK2 / INTMSKCLR2 */
{ IRL0_LLLL, IRL0_LLLH, IRL0_LLHL, IRL0_LLHH,
IRL0_LHLL, IRL0_LHLH, IRL0_LHHL, IRL0_LHHH,
IRL0_HLLL, IRL0_HLLH, IRL0_HLHL, IRL0_HLHH,
IRL0_HHLL, IRL0_HHLH, IRL0_HHHL, 0,
IRL4_LLLL, IRL4_LLLH, IRL4_LLHL, IRL4_LLHH,
IRL4_LHLL, IRL4_LHLH, IRL4_LHHL, IRL4_LHHH,
IRL4_HLLL, IRL4_HLLH, IRL4_HLHL, IRL4_HLHH,
IRL4_HHLL, IRL4_HHLH, IRL4_HHHL, 0, } },
{ 0xffd40038, 0xffd4003c, 32, /* INT2MSKR / INT2MSKCR */
{ 0, 0, 0, 0, 0, 0, 0, 0,
0, DMAC8_11, 0, PECI0, LPC, FRT, 0, TMR45,
TMR23, TMR01, 0, 0, 0, 0, 0, DMAC0_5,
HUDI, 0, 0, SCIF3, SCIF2, SDHI, TMU345, TMU012
} },
{ 0xffd400d0, 0xffd400d4, 32, /* INT2MSKR1 / INT2MSKCR1 */
{ IRQ15, IRQ14, IRQ13, IRQ12, IRQ11, IRQ10, SCIF4, ETHERC,
IRQ9, IRQ8, SCIF1, SCIF0, USB0, 0, 0, USB1,
ADC1, 0, DMAC6_7, ADC0, SPI0, SIM, PECI2, PECI1,
ARC4, 0, SPI1, JMC, 0, 0, 0, DVC
} },
{ 0xffd10038, 0xffd1003c, 32, /* INT2MSKR2 / INT2MSKCR2 */
{ IIC4_1, IIC4_2, IIC5_0, ONFICTL, 0, 0, SGPIO, 0,
0, G200, 0, IIC9_2, IIC8_2, IIC8_1, IIC8_0, IIC7_3,
IIC7_2, IIC7_1, IIC6_3, IIC0_0, IIC0_1, IIC0_2, IIC0_3, IIC3_1,
IIC2_3, 0, IIC2_1, IIC9_1, IIC3_3, IIC1_0, 0, IIC2_2
} },
{ 0xffd100d0, 0xffd100d4, 32, /* INT2MSKR3 / INT2MSKCR3 */
{ MMC1, IIC6_1, IIC6_0, IIC5_1, IIC3_2, IIC2_0, PECI5, MMC2,
IIC1_3, IIC1_2, IIC9_0, IIC8_3, IIC4_3, IIC7_0, 0, IIC6_2,
PCIC, 0, IIC4_0, 0, ECCU, RSPI, 0, IIC9_3,
IIC3_0, 0, IIC5_3, IIC5_2, 0, 0, 0, IIC1_1
} },
{ 0xffd20038, 0xffd2003c, 32, /* INT2MSKR4 / INT2MSKCR4 */
{ WDT0B, WDT1B, WDT3B, GETHER0, 0, 0, 0, 0,
0, 0, 0, LPC7, SERMUX2, DMAE3, DMAE2, PBIC,
PBIB, PBIA, GETHER1, DMINT12, DMINT13, DMINT14, DMINT15, TSIP,
DMINT23, 0, DMINT21, LPC6, 0, DMINT16, 0, DMINT22
} },
{ 0xffd200d0, 0xffd200d4, 32, /* INT2MSKR5 / INT2MSKCR5 */
{ 0, WDT8B, WDT7B, WDT4B, 0, DMINT20, 0, 0,
DMINT19, DMINT18, LPC5, SERMUX3, WDT2B, GETHER2, 0, 0,
0, 0, PCIE_BRIDGE, 0, 0, 0, 0, LPC8,
DDRECC, 0, WDT6B, WDT5B, 0, 0, 0, DMINT17
} },
};
#define INTPRI 0xffd00010
#define INT2PRI0 0xffd40000
#define INT2PRI1 0xffd40004
#define INT2PRI2 0xffd40008
#define INT2PRI3 0xffd4000c
#define INT2PRI4 0xffd40010
#define INT2PRI5 0xffd40014
#define INT2PRI6 0xffd40018
#define INT2PRI7 0xffd4001c
#define INT2PRI8 0xffd400a0
#define INT2PRI9 0xffd400a4
#define INT2PRI10 0xffd400a8
#define INT2PRI11 0xffd400ac
#define INT2PRI12 0xffd400b0
#define INT2PRI13 0xffd400b4
#define INT2PRI14 0xffd400b8
#define INT2PRI15 0xffd400bc
#define INT2PRI16 0xffd10000
#define INT2PRI17 0xffd10004
#define INT2PRI18 0xffd10008
#define INT2PRI19 0xffd1000c
#define INT2PRI20 0xffd10010
#define INT2PRI21 0xffd10014
#define INT2PRI22 0xffd10018
#define INT2PRI23 0xffd1001c
#define INT2PRI24 0xffd100a0
#define INT2PRI25 0xffd100a4
#define INT2PRI26 0xffd100a8
#define INT2PRI27 0xffd100ac
#define INT2PRI28 0xffd100b0
#define INT2PRI29 0xffd100b4
#define INT2PRI30 0xffd100b8
#define INT2PRI31 0xffd100bc
#define INT2PRI32 0xffd20000
#define INT2PRI33 0xffd20004
#define INT2PRI34 0xffd20008
#define INT2PRI35 0xffd2000c
#define INT2PRI36 0xffd20010
#define INT2PRI37 0xffd20014
#define INT2PRI38 0xffd20018
#define INT2PRI39 0xffd2001c
#define INT2PRI40 0xffd200a0
#define INT2PRI41 0xffd200a4
#define INT2PRI42 0xffd200a8
#define INT2PRI43 0xffd200ac
#define INT2PRI44 0xffd200b0
#define INT2PRI45 0xffd200b4
#define INT2PRI46 0xffd200b8
#define INT2PRI47 0xffd200bc
static struct intc_prio_reg prio_registers[] __initdata = {
{ INTPRI, 0, 32, 4, { IRQ0, IRQ1, IRQ2, IRQ3,
IRQ4, IRQ5, IRQ6, IRQ7 } },
{ INT2PRI0, 0, 32, 8, { TMU0, TMU1, TMU2, TMU2_TICPI } },
{ INT2PRI1, 0, 32, 8, { TMU3, TMU4, TMU5, SDHI } },
{ INT2PRI2, 0, 32, 8, { SCIF2, SCIF3, 0, IRQ8 } },
{ INT2PRI3, 0, 32, 8, { HUDI, DMAC0_5, ADC0, IRQ9 } },
{ INT2PRI4, 0, 32, 8, { IRQ10, 0, TMR01, TMR23 } },
{ INT2PRI5, 0, 32, 8, { TMR45, 0, FRT, LPC } },
{ INT2PRI6, 0, 32, 8, { PECI0, ETHERC, DMAC8_11, 0 } },
{ INT2PRI7, 0, 32, 8, { SCIF4, 0, IRQ11, IRQ12 } },
{ INT2PRI8, 0, 32, 8, { 0, 0, 0, DVC } },
{ INT2PRI9, 0, 32, 8, { ARC4, 0, SPI1, JMC } },
{ INT2PRI10, 0, 32, 8, { SPI0, SIM, PECI2, PECI1 } },
{ INT2PRI11, 0, 32, 8, { ADC1, IRQ13, DMAC6_7, IRQ14 } },
{ INT2PRI12, 0, 32, 8, { USB0, 0, IRQ15, USB1 } },
{ INT2PRI13, 0, 32, 8, { 0, 0, SCIF1, SCIF0 } },
{ INT2PRI16, 0, 32, 8, { IIC2_2, 0, 0, 0 } },
{ INT2PRI17, 0, 32, 8, { 0, 0, 0, IIC1_0 } },
{ INT2PRI18, 0, 32, 8, { IIC3_3, IIC9_1, IIC2_1, IIC1_2 } },
{ INT2PRI19, 0, 32, 8, { IIC2_3, IIC3_1, 0, IIC1_3 } },
{ INT2PRI20, 0, 32, 8, { IIC2_0, IIC6_3, IIC7_1, IIC7_2 } },
{ INT2PRI21, 0, 32, 8, { IIC7_3, IIC8_0, IIC8_1, IIC8_2 } },
{ INT2PRI22, 0, 32, 8, { IIC9_2, MMC2, G200, 0 } },
{ INT2PRI23, 0, 32, 8, { PECI5, SGPIO, IIC3_2, IIC5_1 } },
{ INT2PRI24, 0, 32, 8, { PECI4, PECI3, 0, IIC1_1 } },
{ INT2PRI25, 0, 32, 8, { IIC3_0, 0, IIC5_3, IIC5_2 } },
{ INT2PRI26, 0, 32, 8, { ECCU, RSPI, 0, IIC9_3 } },
{ INT2PRI27, 0, 32, 8, { PCIC, IIC6_0, IIC4_0, IIC6_1 } },
{ INT2PRI28, 0, 32, 8, { IIC4_3, IIC7_0, MMC1, IIC6_2 } },
{ INT2PRI29, 0, 32, 8, { 0, 0, IIC9_0, IIC8_3 } },
{ INT2PRI30, 0, 32, 8, { IIC4_1, IIC4_2, IIC5_0, ONFICTL } },
{ INT2PRI31, 0, 32, 8, { IIC0_0, IIC0_1, IIC0_2, IIC0_3 } },
{ INT2PRI32, 0, 32, 8, { DMINT22, 0, 0, 0 } },
{ INT2PRI33, 0, 32, 8, { 0, 0, 0, DMINT16 } },
{ INT2PRI34, 0, 32, 8, { 0, LPC6, DMINT21, DMINT18 } },
{ INT2PRI35, 0, 32, 8, { DMINT23, TSIP, 0, DMINT19 } },
{ INT2PRI36, 0, 32, 8, { DMINT20, GETHER1, PBIA, PBIB } },
{ INT2PRI37, 0, 32, 8, { PBIC, DMAE2, DMAE3, SERMUX2 } },
{ INT2PRI38, 0, 32, 8, { LPC7, 0, 0, 0 } },
{ INT2PRI39, 0, 32, 8, { 0, 0, 0, WDT4B } },
{ INT2PRI40, 0, 32, 8, { 0, 0, 0, DMINT17 } },
{ INT2PRI41, 0, 32, 8, { DDRECC, 0, WDT6B, WDT5B } },
{ INT2PRI42, 0, 32, 8, { 0, 0, 0, LPC8 } },
{ INT2PRI43, 0, 32, 8, { 0, WDT7B, PCIE_BRIDGE, WDT8B } },
{ INT2PRI44, 0, 32, 8, { WDT2B, GETHER2, 0, 0 } },
{ INT2PRI45, 0, 32, 8, { 0, 0, LPC5, SERMUX3 } },
{ INT2PRI46, 0, 32, 8, { WDT0B, WDT1B, WDT3B, GETHER0 } },
{ INT2PRI47, 0, 32, 8, { DMINT12, DMINT13, DMINT14, DMINT15 } },
};
static struct intc_sense_reg sense_registers_irq8to15[] __initdata = {
{ 0xffd100f8, 32, 2, /* ICR2 */ { IRQ15, IRQ14, IRQ13, IRQ12,
IRQ11, IRQ10, IRQ9, IRQ8 } },
};
static DECLARE_INTC_DESC(intc_desc, "sh7757", vectors, groups,
mask_registers, prio_registers,
sense_registers_irq8to15);
/* Support for external interrupt pins in IRQ mode */
static struct intc_vect vectors_irq0123[] __initdata = {
INTC_VECT(IRQ0, 0x200), INTC_VECT(IRQ1, 0x240),
INTC_VECT(IRQ2, 0x280), INTC_VECT(IRQ3, 0x2c0),
};
static struct intc_vect vectors_irq4567[] __initdata = {
INTC_VECT(IRQ4, 0x300), INTC_VECT(IRQ5, 0x340),
INTC_VECT(IRQ6, 0x380), INTC_VECT(IRQ7, 0x3c0),
};
static struct intc_sense_reg sense_registers[] __initdata = {
{ 0xffd0001c, 32, 2, /* ICR1 */ { IRQ0, IRQ1, IRQ2, IRQ3,
IRQ4, IRQ5, IRQ6, IRQ7 } },
};
static struct intc_mask_reg ack_registers[] __initdata = {
{ 0xffd00024, 0, 32, /* INTREQ */
{ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
};
static DECLARE_INTC_DESC_ACK(intc_desc_irq0123, "sh7757-irq0123",
vectors_irq0123, NULL, mask_registers,
prio_registers, sense_registers, ack_registers);
static DECLARE_INTC_DESC_ACK(intc_desc_irq4567, "sh7757-irq4567",
vectors_irq4567, NULL, mask_registers,
prio_registers, sense_registers, ack_registers);
/* External interrupt pins in IRL mode */
static struct intc_vect vectors_irl0123[] __initdata = {
INTC_VECT(IRL0_LLLL, 0x200), INTC_VECT(IRL0_LLLH, 0x220),
INTC_VECT(IRL0_LLHL, 0x240), INTC_VECT(IRL0_LLHH, 0x260),
INTC_VECT(IRL0_LHLL, 0x280), INTC_VECT(IRL0_LHLH, 0x2a0),
INTC_VECT(IRL0_LHHL, 0x2c0), INTC_VECT(IRL0_LHHH, 0x2e0),
INTC_VECT(IRL0_HLLL, 0x300), INTC_VECT(IRL0_HLLH, 0x320),
INTC_VECT(IRL0_HLHL, 0x340), INTC_VECT(IRL0_HLHH, 0x360),
INTC_VECT(IRL0_HHLL, 0x380), INTC_VECT(IRL0_HHLH, 0x3a0),
INTC_VECT(IRL0_HHHL, 0x3c0),
};
static struct intc_vect vectors_irl4567[] __initdata = {
INTC_VECT(IRL4_LLLL, 0x200), INTC_VECT(IRL4_LLLH, 0x220),
INTC_VECT(IRL4_LLHL, 0x240), INTC_VECT(IRL4_LLHH, 0x260),
INTC_VECT(IRL4_LHLL, 0x280), INTC_VECT(IRL4_LHLH, 0x2a0),
INTC_VECT(IRL4_LHHL, 0x2c0), INTC_VECT(IRL4_LHHH, 0x2e0),
INTC_VECT(IRL4_HLLL, 0x300), INTC_VECT(IRL4_HLLH, 0x320),
INTC_VECT(IRL4_HLHL, 0x340), INTC_VECT(IRL4_HLHH, 0x360),
INTC_VECT(IRL4_HHLL, 0x380), INTC_VECT(IRL4_HHLH, 0x3a0),
INTC_VECT(IRL4_HHHL, 0x3c0),
};
static DECLARE_INTC_DESC(intc_desc_irl0123, "sh7757-irl0123", vectors_irl0123,
NULL, mask_registers, NULL, NULL);
static DECLARE_INTC_DESC(intc_desc_irl4567, "sh7757-irl4567", vectors_irl4567,
NULL, mask_registers, NULL, NULL);
#define INTC_ICR0 0xffd00000
#define INTC_INTMSK0 0xffd00044
#define INTC_INTMSK1 0xffd00048
#define INTC_INTMSK2 0xffd40080
#define INTC_INTMSKCLR1 0xffd00068
#define INTC_INTMSKCLR2 0xffd40084
void __init plat_irq_setup(void)
{
/* disable IRQ3-0 + IRQ7-4 */
__raw_writel(0xff000000, INTC_INTMSK0);
/* disable IRL3-0 + IRL7-4 */
__raw_writel(0xc0000000, INTC_INTMSK1);
__raw_writel(0xfffefffe, INTC_INTMSK2);
/* select IRL mode for IRL3-0 + IRL7-4 */
__raw_writel(__raw_readl(INTC_ICR0) & ~0x00c00000, INTC_ICR0);
/* disable the holding function, i.e. enable "SH-4 Mode" */
__raw_writel(__raw_readl(INTC_ICR0) | 0x00200000, INTC_ICR0);
register_intc_controller(&intc_desc);
}
void __init plat_irq_setup_pins(int mode)
{
switch (mode) {
case IRQ_MODE_IRQ7654:
/* select IRQ mode for IRL7-4 */
__raw_writel(__raw_readl(INTC_ICR0) | 0x00400000, INTC_ICR0);
register_intc_controller(&intc_desc_irq4567);
break;
case IRQ_MODE_IRQ3210:
/* select IRQ mode for IRL3-0 */
__raw_writel(__raw_readl(INTC_ICR0) | 0x00800000, INTC_ICR0);
register_intc_controller(&intc_desc_irq0123);
break;
case IRQ_MODE_IRL7654:
/* enable IRL7-4 but don't provide any masking */
__raw_writel(0x40000000, INTC_INTMSKCLR1);
__raw_writel(0x0000fffe, INTC_INTMSKCLR2);
break;
case IRQ_MODE_IRL3210:
/* enable IRL0-3 but don't provide any masking */
__raw_writel(0x80000000, INTC_INTMSKCLR1);
__raw_writel(0xfffe0000, INTC_INTMSKCLR2);
break;
case IRQ_MODE_IRL7654_MASK:
/* enable IRL7-4 and mask using cpu intc controller */
__raw_writel(0x40000000, INTC_INTMSKCLR1);
register_intc_controller(&intc_desc_irl4567);
break;
case IRQ_MODE_IRL3210_MASK:
/* enable IRL0-3 and mask using cpu intc controller */
__raw_writel(0x80000000, INTC_INTMSKCLR1);
register_intc_controller(&intc_desc_irl0123);
break;
default:
BUG();
}
}
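/*
 * A minimal usage sketch (hypothetical board code; the function name
 * is illustrative): a board that wants IRQ0-IRQ3 as individually
 * sense-configurable pins would call this from its machvec init_irq
 * hook:
 *
 *	static void __init myboard_init_irq(void)
 *	{
 *		plat_irq_setup_pins(IRQ_MODE_IRQ3210);
 *	}
 */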
void __init plat_mem_setup(void)
{
}
| linux-master | arch/sh/kernel/cpu/sh4a/setup-sh7757.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SH7734 processor support - PFC hardware block
*
* Copyright (C) 2012 Renesas Solutions Corp.
* Copyright (C) 2012 Nobuhiro Iwamatsu <[email protected]>
*/
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <cpu/pfc.h>
static struct resource sh7734_pfc_resources[] = {
[0] = { /* PFC */
.start = 0xFFFC0000,
.end = 0xFFFC011C,
.flags = IORESOURCE_MEM,
},
[1] = { /* GPIO */
.start = 0xFFC40000,
.end = 0xFFC4502B,
.flags = IORESOURCE_MEM,
}
};
static int __init plat_pinmux_setup(void)
{
return sh_pfc_register("pfc-sh7734", sh7734_pfc_resources,
ARRAY_SIZE(sh7734_pfc_resources));
}
arch_initcall(plat_pinmux_setup);
| linux-master | arch/sh/kernel/cpu/sh4a/pinmux-sh7734.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/cpu/sh4a/clock-sh7770.c
*
* SH7770 support for the clock framework
*
* Copyright (C) 2005 Paul Mundt
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/clock.h>
#include <asm/freq.h>
#include <asm/io.h>
static int ifc_divisors[] = { 1, 1, 1, 1, 1, 1, 1, 1 };
static int bfc_divisors[] = { 1, 1, 1, 1, 1, 8, 12, 1 };
static int pfc_divisors[] = { 1, 8, 1, 10, 12, 16, 1, 1 };
static void master_clk_init(struct clk *clk)
{
clk->rate *= pfc_divisors[(__raw_readl(FRQCR) >> 28) & 0x000f];
}
static struct sh_clk_ops sh7770_master_clk_ops = {
.init = master_clk_init,
};
static unsigned long module_clk_recalc(struct clk *clk)
{
int idx = ((__raw_readl(FRQCR) >> 28) & 0x000f);
return clk->parent->rate / pfc_divisors[idx];
}
static struct sh_clk_ops sh7770_module_clk_ops = {
.recalc = module_clk_recalc,
};
static unsigned long bus_clk_recalc(struct clk *clk)
{
int idx = (__raw_readl(FRQCR) & 0x000f);
return clk->parent->rate / bfc_divisors[idx];
}
static struct sh_clk_ops sh7770_bus_clk_ops = {
.recalc = bus_clk_recalc,
};
static unsigned long cpu_clk_recalc(struct clk *clk)
{
int idx = ((__raw_readl(FRQCR) >> 24) & 0x000f);
return clk->parent->rate / ifc_divisors[idx];
}
static struct sh_clk_ops sh7770_cpu_clk_ops = {
.recalc = cpu_clk_recalc,
};
static struct sh_clk_ops *sh7770_clk_ops[] = {
&sh7770_master_clk_ops,
&sh7770_module_clk_ops,
&sh7770_bus_clk_ops,
&sh7770_cpu_clk_ops,
};
void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
{
if (idx < ARRAY_SIZE(sh7770_clk_ops))
*ops = sh7770_clk_ops[idx];
}
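/*
 * The idx argument indexes the boot-time clock list in the order the
 * legacy SH clock core registers it, which (going by the other CPU
 * subtype files) is master, module, bus, cpu; sh7770_clk_ops[] above
 * must keep exactly that order.
 */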
| linux-master | arch/sh/kernel/cpu/sh4a/clock-sh7770.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/cpu/sh4a/clock-sh7724.c
*
* SH7724 clock framework support
*
* Copyright (C) 2009 Magnus Damm
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/sh_clk.h>
#include <asm/clock.h>
#include <cpu/sh7724.h>
/* SH7724 registers */
#define FRQCRA 0xa4150000
#define FRQCRB 0xa4150004
#define VCLKCR 0xa4150048
#define FCLKACR 0xa4150008
#define FCLKBCR 0xa415000c
#define IRDACLKCR 0xa4150018
#define PLLCR 0xa4150024
#define MSTPCR0 0xa4150030
#define MSTPCR1 0xa4150034
#define MSTPCR2 0xa4150038
#define SPUCLKCR 0xa415003c
#define FLLFRQ 0xa4150050
#define LSTATS 0xa4150060
/* Fixed 32 kHz root clock for RTC and Power Management purposes */
static struct clk r_clk = {
.rate = 32768,
};
/*
* Default rate for the root input clock, reset this with clk_set_rate()
* from the platform code.
*/
static struct clk extal_clk = {
.rate = 33333333,
};
/* The FLL multiplies the 32 kHz r_clk and may be used instead of extal */
static unsigned long fll_recalc(struct clk *clk)
{
unsigned long mult = 0;
unsigned long div = 1;
if (__raw_readl(PLLCR) & 0x1000)
mult = __raw_readl(FLLFRQ) & 0x3ff;
if (__raw_readl(FLLFRQ) & 0x4000)
div = 2;
return (clk->parent->rate * mult) / div;
}
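/*
 * Worked example (hypothetical register values, for illustration only):
 * with the FLL enabled (PLLCR bit 12 set), FLLFRQ[9:0] = 900 and the
 * divide-by-2 bit (FLLFRQ bit 14) set, the FLL output is
 * 32768 Hz * 900 / 2 = 14745600 Hz, i.e. roughly 14.75 MHz.
 */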
static struct sh_clk_ops fll_clk_ops = {
.recalc = fll_recalc,
};
static struct clk fll_clk = {
.ops = &fll_clk_ops,
.parent = &r_clk,
.flags = CLK_ENABLE_ON_INIT,
};
static unsigned long pll_recalc(struct clk *clk)
{
unsigned long mult = 1;
if (__raw_readl(PLLCR) & 0x4000)
mult = (((__raw_readl(FRQCRA) >> 24) & 0x3f) + 1) * 2;
return clk->parent->rate * mult;
}
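/*
 * Worked example (hypothetical register values, for illustration only):
 * with the PLL enabled (PLLCR bit 14 set) and FRQCRA[29:24] = 11, the
 * multiplier is (11 + 1) * 2 = 24, so a 33.333333 MHz EXTAL parent
 * yields roughly 800 MHz.
 */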
static struct sh_clk_ops pll_clk_ops = {
.recalc = pll_recalc,
};
static struct clk pll_clk = {
.ops = &pll_clk_ops,
.flags = CLK_ENABLE_ON_INIT,
};
/* A fixed divide-by-3 block used by the div6 clocks */
static unsigned long div3_recalc(struct clk *clk)
{
return clk->parent->rate / 3;
}
static struct sh_clk_ops div3_clk_ops = {
.recalc = div3_recalc,
};
static struct clk div3_clk = {
.ops = &div3_clk_ops,
.parent = &pll_clk,
};
/* External input clock (pin name: FSIMCKA/FSIMCKB/DV_CLKI) */
struct clk sh7724_fsimcka_clk = {
};
struct clk sh7724_fsimckb_clk = {
};
struct clk sh7724_dv_clki = {
};
static struct clk *main_clks[] = {
&r_clk,
&extal_clk,
&fll_clk,
&pll_clk,
&div3_clk,
&sh7724_fsimcka_clk,
&sh7724_fsimckb_clk,
&sh7724_dv_clki,
};
static void div4_kick(struct clk *clk)
{
unsigned long value;
/* set KICK bit in FRQCRA to update hardware setting */
value = __raw_readl(FRQCRA);
value |= (1 << 31);
__raw_writel(value, FRQCRA);
}
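/*
 * Sketch (assumption, not part of this file): rate changes on the div4
 * clocks below go through the generic CPG div4 helpers, which write the
 * new divider field and then call .kick above to latch it, e.g.:
 *
 *	struct clk *clk = clk_get(NULL, "cpu_clk");
 *
 *	if (!IS_ERR(clk))
 *		clk_set_rate(clk, clk_round_rate(clk, 400000000));
 */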
static int divisors[] = { 2, 3, 4, 6, 8, 12, 16, 0, 24, 32, 36, 48, 0, 72 };
static struct clk_div_mult_table div4_div_mult_table = {
.divisors = divisors,
.nr_divisors = ARRAY_SIZE(divisors),
};
static struct clk_div4_table div4_table = {
.div_mult_table = &div4_div_mult_table,
.kick = div4_kick,
};
enum { DIV4_I, DIV4_SH, DIV4_B, DIV4_P, DIV4_M1, DIV4_NR };
#define DIV4(_reg, _bit, _mask, _flags) \
SH_CLK_DIV4(&pll_clk, _reg, _bit, _mask, _flags)
struct clk div4_clks[DIV4_NR] = {
[DIV4_I] = DIV4(FRQCRA, 20, 0x2f7d, CLK_ENABLE_ON_INIT),
[DIV4_SH] = DIV4(FRQCRA, 12, 0x2f7c, CLK_ENABLE_ON_INIT),
[DIV4_B] = DIV4(FRQCRA, 8, 0x2f7c, CLK_ENABLE_ON_INIT),
[DIV4_P] = DIV4(FRQCRA, 0, 0x2f7c, 0),
[DIV4_M1] = DIV4(FRQCRB, 4, 0x2f7c, CLK_ENABLE_ON_INIT),
};
enum { DIV6_V, DIV6_I, DIV6_S, DIV6_FA, DIV6_FB, DIV6_NR };
/* Indices are important - they are the actual src selecting values */
static struct clk *common_parent[] = {
[0] = &div3_clk,
[1] = NULL,
};
static struct clk *vclkcr_parent[8] = {
[0] = &div3_clk,
[2] = &sh7724_dv_clki,
[4] = &extal_clk,
};
static struct clk *fclkacr_parent[] = {
[0] = &div3_clk,
[1] = NULL,
[2] = &sh7724_fsimcka_clk,
[3] = NULL,
};
static struct clk *fclkbcr_parent[] = {
[0] = &div3_clk,
[1] = NULL,
[2] = &sh7724_fsimckb_clk,
[3] = NULL,
};
static struct clk div6_clks[DIV6_NR] = {
[DIV6_V] = SH_CLK_DIV6_EXT(VCLKCR, 0,
vclkcr_parent, ARRAY_SIZE(vclkcr_parent), 12, 3),
[DIV6_I] = SH_CLK_DIV6_EXT(IRDACLKCR, 0,
common_parent, ARRAY_SIZE(common_parent), 6, 1),
[DIV6_S] = SH_CLK_DIV6_EXT(SPUCLKCR, CLK_ENABLE_ON_INIT,
common_parent, ARRAY_SIZE(common_parent), 6, 1),
[DIV6_FA] = SH_CLK_DIV6_EXT(FCLKACR, 0,
fclkacr_parent, ARRAY_SIZE(fclkacr_parent), 6, 2),
[DIV6_FB] = SH_CLK_DIV6_EXT(FCLKBCR, 0,
fclkbcr_parent, ARRAY_SIZE(fclkbcr_parent), 6, 2),
};
static struct clk mstp_clks[HWBLK_NR] = {
[HWBLK_TLB] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 31, CLK_ENABLE_ON_INIT),
[HWBLK_IC] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 30, CLK_ENABLE_ON_INIT),
[HWBLK_OC] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 29, CLK_ENABLE_ON_INIT),
[HWBLK_RSMEM] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 28, CLK_ENABLE_ON_INIT),
[HWBLK_ILMEM] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 27, CLK_ENABLE_ON_INIT),
[HWBLK_L2C] = SH_CLK_MSTP32(&div4_clks[DIV4_SH], MSTPCR0, 26, CLK_ENABLE_ON_INIT),
[HWBLK_FPU] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 24, CLK_ENABLE_ON_INIT),
[HWBLK_INTC] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 22, CLK_ENABLE_ON_INIT),
[HWBLK_DMAC0] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 21, 0),
[HWBLK_SHYWAY] = SH_CLK_MSTP32(&div4_clks[DIV4_SH], MSTPCR0, 20, CLK_ENABLE_ON_INIT),
[HWBLK_HUDI] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 19, 0),
[HWBLK_UBC] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 17, 0),
[HWBLK_TMU0] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 15, 0),
[HWBLK_CMT] = SH_CLK_MSTP32(&r_clk, MSTPCR0, 14, 0),
[HWBLK_RWDT] = SH_CLK_MSTP32(&r_clk, MSTPCR0, 13, 0),
[HWBLK_DMAC1] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 12, 0),
[HWBLK_TMU1] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 10, 0),
[HWBLK_SCIF0] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 9, 0),
[HWBLK_SCIF1] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 8, 0),
[HWBLK_SCIF2] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 7, 0),
[HWBLK_SCIF3] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 6, 0),
[HWBLK_SCIF4] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 5, 0),
[HWBLK_SCIF5] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 4, 0),
[HWBLK_MSIOF0] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 2, 0),
[HWBLK_MSIOF1] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 1, 0),
[HWBLK_KEYSC] = SH_CLK_MSTP32(&r_clk, MSTPCR1, 12, 0),
[HWBLK_RTC] = SH_CLK_MSTP32(&r_clk, MSTPCR1, 11, 0),
[HWBLK_IIC0] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 9, 0),
[HWBLK_IIC1] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 8, 0),
[HWBLK_MMC] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 29, 0),
[HWBLK_ETHER] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 28, 0),
[HWBLK_ATAPI] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 26, 0),
[HWBLK_TPU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 25, 0),
[HWBLK_IRDA] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR2, 24, 0),
[HWBLK_TSIF] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 22, 0),
[HWBLK_USB1] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 21, 0),
[HWBLK_USB0] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 20, 0),
[HWBLK_2DG] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 19, 0),
[HWBLK_SDHI0] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 18, 0),
[HWBLK_SDHI1] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 17, 0),
[HWBLK_VEU1] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 15, 0),
[HWBLK_CEU1] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 13, 0),
[HWBLK_BEU1] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 12, 0),
[HWBLK_2DDMAC] = SH_CLK_MSTP32(&div4_clks[DIV4_SH], MSTPCR2, 10, 0),
[HWBLK_SPU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 9, 0),
[HWBLK_JPU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 6, 0),
[HWBLK_VOU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 5, 0),
[HWBLK_BEU0] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 4, 0),
[HWBLK_CEU0] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 3, 0),
[HWBLK_VEU0] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 2, 0),
[HWBLK_VPU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 1, 0),
[HWBLK_LCDC] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 0, 0),
};
static struct clk_lookup lookups[] = {
/* main clocks */
CLKDEV_CON_ID("rclk", &r_clk),
CLKDEV_CON_ID("extal", &extal_clk),
CLKDEV_CON_ID("fll_clk", &fll_clk),
CLKDEV_CON_ID("pll_clk", &pll_clk),
CLKDEV_CON_ID("div3_clk", &div3_clk),
/* DIV4 clocks */
CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]),
CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]),
CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
CLKDEV_CON_ID("vpu_clk", &div4_clks[DIV4_M1]),
/* DIV6 clocks */
CLKDEV_CON_ID("video_clk", &div6_clks[DIV6_V]),
CLKDEV_CON_ID("fsia_clk", &div6_clks[DIV6_FA]),
CLKDEV_CON_ID("fsib_clk", &div6_clks[DIV6_FB]),
CLKDEV_CON_ID("irda_clk", &div6_clks[DIV6_I]),
CLKDEV_CON_ID("spu_clk", &div6_clks[DIV6_S]),
/* MSTP clocks */
CLKDEV_CON_ID("tlb0", &mstp_clks[HWBLK_TLB]),
CLKDEV_CON_ID("ic0", &mstp_clks[HWBLK_IC]),
CLKDEV_CON_ID("oc0", &mstp_clks[HWBLK_OC]),
CLKDEV_CON_ID("rs0", &mstp_clks[HWBLK_RSMEM]),
CLKDEV_CON_ID("ilmem0", &mstp_clks[HWBLK_ILMEM]),
CLKDEV_CON_ID("l2c0", &mstp_clks[HWBLK_L2C]),
CLKDEV_CON_ID("fpu0", &mstp_clks[HWBLK_FPU]),
CLKDEV_CON_ID("intc0", &mstp_clks[HWBLK_INTC]),
CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[HWBLK_DMAC0]),
CLKDEV_CON_ID("sh0", &mstp_clks[HWBLK_SHYWAY]),
CLKDEV_CON_ID("hudi0", &mstp_clks[HWBLK_HUDI]),
CLKDEV_CON_ID("ubc0", &mstp_clks[HWBLK_UBC]),
CLKDEV_ICK_ID("fck", "sh-tmu.0", &mstp_clks[HWBLK_TMU0]),
CLKDEV_ICK_ID("fck", "sh-tmu.1", &mstp_clks[HWBLK_TMU1]),
CLKDEV_ICK_ID("fck", "sh-cmt-32.0", &mstp_clks[HWBLK_CMT]),
CLKDEV_DEV_ID("sh-wdt.0", &mstp_clks[HWBLK_RWDT]),
CLKDEV_DEV_ID("sh-dma-engine.1", &mstp_clks[HWBLK_DMAC1]),
CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[HWBLK_SCIF0]),
CLKDEV_DEV_ID("sh-sci.1", &mstp_clks[HWBLK_SCIF1]),
CLKDEV_DEV_ID("sh-sci.2", &mstp_clks[HWBLK_SCIF2]),
CLKDEV_DEV_ID("sh-sci.3", &mstp_clks[HWBLK_SCIF3]),
CLKDEV_DEV_ID("sh-sci.4", &mstp_clks[HWBLK_SCIF4]),
CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[HWBLK_SCIF5]),
CLKDEV_DEV_ID("spi_sh_msiof.0", &mstp_clks[HWBLK_MSIOF0]),
CLKDEV_DEV_ID("spi_sh_msiof.1", &mstp_clks[HWBLK_MSIOF1]),
CLKDEV_DEV_ID("sh_keysc.0", &mstp_clks[HWBLK_KEYSC]),
CLKDEV_CON_ID("rtc0", &mstp_clks[HWBLK_RTC]),
CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[HWBLK_IIC0]),
CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[HWBLK_IIC1]),
CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[HWBLK_MMC]),
CLKDEV_DEV_ID("sh7724-ether.0", &mstp_clks[HWBLK_ETHER]),
CLKDEV_CON_ID("atapi0", &mstp_clks[HWBLK_ATAPI]),
CLKDEV_CON_ID("tpu0", &mstp_clks[HWBLK_TPU]),
CLKDEV_CON_ID("irda0", &mstp_clks[HWBLK_IRDA]),
CLKDEV_CON_ID("tsif0", &mstp_clks[HWBLK_TSIF]),
CLKDEV_DEV_ID("renesas_usbhs.1", &mstp_clks[HWBLK_USB1]),
CLKDEV_DEV_ID("renesas_usbhs.0", &mstp_clks[HWBLK_USB0]),
CLKDEV_CON_ID("usb1", &mstp_clks[HWBLK_USB1]),
CLKDEV_CON_ID("usb0", &mstp_clks[HWBLK_USB0]),
CLKDEV_CON_ID("2dg0", &mstp_clks[HWBLK_2DG]),
CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[HWBLK_SDHI0]),
CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[HWBLK_SDHI1]),
CLKDEV_CON_ID("veu1", &mstp_clks[HWBLK_VEU1]),
CLKDEV_DEV_ID("renesas-ceu.1", &mstp_clks[HWBLK_CEU1]),
CLKDEV_CON_ID("beu1", &mstp_clks[HWBLK_BEU1]),
CLKDEV_CON_ID("2ddmac0", &mstp_clks[HWBLK_2DDMAC]),
CLKDEV_DEV_ID("sh_fsi.0", &mstp_clks[HWBLK_SPU]),
CLKDEV_CON_ID("jpu0", &mstp_clks[HWBLK_JPU]),
CLKDEV_DEV_ID("sh-vou", &mstp_clks[HWBLK_VOU]),
CLKDEV_CON_ID("beu0", &mstp_clks[HWBLK_BEU0]),
CLKDEV_DEV_ID("renesas-ceu.0", &mstp_clks[HWBLK_CEU0]),
CLKDEV_CON_ID("veu0", &mstp_clks[HWBLK_VEU0]),
CLKDEV_CON_ID("vpu0", &mstp_clks[HWBLK_VPU]),
CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[HWBLK_LCDC]),
};
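/*
 * Illustrative sketch (not part of this file): the clkdev table above is
 * what makes clk_get() resolve. A driver bound to platform device
 * "sh-sci.0" picks up its MSTP clock with:
 *
 *	struct clk *clk = clk_get(&pdev->dev, NULL);
 *
 * while con_id-only entries resolve by name, e.g. clk_get(NULL, "rclk").
 */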
int __init arch_clk_init(void)
{
int k, ret = 0;
/* autodetect extal or fll configuration */
if (__raw_readl(PLLCR) & 0x1000)
pll_clk.parent = &fll_clk;
else
pll_clk.parent = &extal_clk;
for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
ret = clk_register(main_clks[k]);
clkdev_add_table(lookups, ARRAY_SIZE(lookups));
if (!ret)
ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);
if (!ret)
ret = sh_clk_div6_reparent_register(div6_clks, DIV6_NR);
if (!ret)
ret = sh_clk_mstp_register(mstp_clks, HWBLK_NR);
return ret;
}
| linux-master | arch/sh/kernel/cpu/sh4a/clock-sh7724.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SH7724 Pinmux
*
* Copyright (C) 2009 Renesas Solutions Corp.
*
* Kuninori Morimoto <[email protected]>
*
* Based on SH7723 Pinmux
* Copyright (C) 2008 Magnus Damm
*/
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <cpu/pfc.h>
static struct resource sh7724_pfc_resources[] = {
[0] = {
.start = 0xa4050100,
.end = 0xa405016f,
.flags = IORESOURCE_MEM,
},
};
static int __init plat_pinmux_setup(void)
{
return sh_pfc_register("pfc-sh7724", sh7724_pfc_resources,
ARRAY_SIZE(sh7724_pfc_resources));
}
arch_initcall(plat_pinmux_setup);
| linux-master | arch/sh/kernel/cpu/sh4a/pinmux-sh7724.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SH7785 Setup
*
* Copyright (C) 2007 Paul Mundt
*/
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/serial_sci.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/sh_dma.h>
#include <linux/sh_timer.h>
#include <linux/sh_intc.h>
#include <asm/mmzone.h>
#include <asm/platform_early.h>
#include <cpu/dma-register.h>
static struct plat_sci_port scif0_platform_data = {
.scscr = SCSCR_REIE | SCSCR_CKE1,
.type = PORT_SCIF,
.regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
};
static struct resource scif0_resources[] = {
DEFINE_RES_MEM(0xffea0000, 0x100),
DEFINE_RES_IRQ(evt2irq(0x700)),
};
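/*
 * Note (assumption about the generic helper, not defined in this file):
 * on parts with INTEVT, evt2irq() in <linux/sh_intc.h> is typically
 * ((evt) >> 5) - 16, so the 0x700 exception code above would map to
 * Linux IRQ 40.
 */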
static struct platform_device scif0_device = {
.name = "sh-sci",
.id = 0,
.resource = scif0_resources,
.num_resources = ARRAY_SIZE(scif0_resources),
.dev = {
.platform_data = &scif0_platform_data,
},
};
static struct plat_sci_port scif1_platform_data = {
.scscr = SCSCR_REIE | SCSCR_CKE1,
.type = PORT_SCIF,
.regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
};
static struct resource scif1_resources[] = {
DEFINE_RES_MEM(0xffeb0000, 0x100),
DEFINE_RES_IRQ(evt2irq(0x780)),
};
static struct platform_device scif1_device = {
.name = "sh-sci",
.id = 1,
.resource = scif1_resources,
.num_resources = ARRAY_SIZE(scif1_resources),
.dev = {
.platform_data = &scif1_platform_data,
},
};
static struct plat_sci_port scif2_platform_data = {
.scscr = SCSCR_REIE | SCSCR_CKE1,
.type = PORT_SCIF,
.regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
};
static struct resource scif2_resources[] = {
DEFINE_RES_MEM(0xffec0000, 0x100),
DEFINE_RES_IRQ(evt2irq(0x980)),
};
static struct platform_device scif2_device = {
.name = "sh-sci",
.id = 2,
.resource = scif2_resources,
.num_resources = ARRAY_SIZE(scif2_resources),
.dev = {
.platform_data = &scif2_platform_data,
},
};
static struct plat_sci_port scif3_platform_data = {
.scscr = SCSCR_REIE | SCSCR_CKE1,
.type = PORT_SCIF,
.regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
};
static struct resource scif3_resources[] = {
DEFINE_RES_MEM(0xffed0000, 0x100),
DEFINE_RES_IRQ(evt2irq(0x9a0)),
};
static struct platform_device scif3_device = {
.name = "sh-sci",
.id = 3,
.resource = scif3_resources,
.num_resources = ARRAY_SIZE(scif3_resources),
.dev = {
.platform_data = &scif3_platform_data,
},
};
static struct plat_sci_port scif4_platform_data = {
.scscr = SCSCR_REIE | SCSCR_CKE1,
.type = PORT_SCIF,
.regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
};
static struct resource scif4_resources[] = {
DEFINE_RES_MEM(0xffee0000, 0x100),
DEFINE_RES_IRQ(evt2irq(0x9c0)),
};
static struct platform_device scif4_device = {
.name = "sh-sci",
.id = 4,
.resource = scif4_resources,
.num_resources = ARRAY_SIZE(scif4_resources),
.dev = {
.platform_data = &scif4_platform_data,
},
};
static struct plat_sci_port scif5_platform_data = {
.scscr = SCSCR_REIE | SCSCR_CKE1,
.type = PORT_SCIF,
.regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
};
static struct resource scif5_resources[] = {
DEFINE_RES_MEM(0xffef0000, 0x100),
DEFINE_RES_IRQ(evt2irq(0x9e0)),
};
static struct platform_device scif5_device = {
.name = "sh-sci",
.id = 5,
.resource = scif5_resources,
.num_resources = ARRAY_SIZE(scif5_resources),
.dev = {
.platform_data = &scif5_platform_data,
},
};
static struct sh_timer_config tmu0_platform_data = {
.channels_mask = 7,
};
static struct resource tmu0_resources[] = {
DEFINE_RES_MEM(0xffd80000, 0x30),
DEFINE_RES_IRQ(evt2irq(0x580)),
DEFINE_RES_IRQ(evt2irq(0x5a0)),
DEFINE_RES_IRQ(evt2irq(0x5c0)),
};
static struct platform_device tmu0_device = {
.name = "sh-tmu",
.id = 0,
.dev = {
.platform_data = &tmu0_platform_data,
},
.resource = tmu0_resources,
.num_resources = ARRAY_SIZE(tmu0_resources),
};
static struct sh_timer_config tmu1_platform_data = {
.channels_mask = 7,
};
static struct resource tmu1_resources[] = {
DEFINE_RES_MEM(0xffdc0000, 0x2c),
DEFINE_RES_IRQ(evt2irq(0xe00)),
DEFINE_RES_IRQ(evt2irq(0xe20)),
DEFINE_RES_IRQ(evt2irq(0xe40)),
};
static struct platform_device tmu1_device = {
.name = "sh-tmu",
.id = 1,
.dev = {
.platform_data = &tmu1_platform_data,
},
.resource = tmu1_resources,
.num_resources = ARRAY_SIZE(tmu1_resources),
};
/* DMA */
static const struct sh_dmae_channel sh7785_dmae0_channels[] = {
{
.offset = 0,
.dmars = 0,
.dmars_bit = 0,
}, {
.offset = 0x10,
.dmars = 0,
.dmars_bit = 8,
}, {
.offset = 0x20,
.dmars = 4,
.dmars_bit = 0,
}, {
.offset = 0x30,
.dmars = 4,
.dmars_bit = 8,
}, {
.offset = 0x50,
.dmars = 8,
.dmars_bit = 0,
}, {
.offset = 0x60,
.dmars = 8,
.dmars_bit = 8,
}
};
static const struct sh_dmae_channel sh7785_dmae1_channels[] = {
{
.offset = 0,
}, {
.offset = 0x10,
}, {
.offset = 0x20,
}, {
.offset = 0x30,
}, {
.offset = 0x50,
}, {
.offset = 0x60,
}
};
static const unsigned int ts_shift[] = TS_SHIFT;
static struct sh_dmae_pdata dma0_platform_data = {
.channel = sh7785_dmae0_channels,
.channel_num = ARRAY_SIZE(sh7785_dmae0_channels),
.ts_low_shift = CHCR_TS_LOW_SHIFT,
.ts_low_mask = CHCR_TS_LOW_MASK,
.ts_high_shift = CHCR_TS_HIGH_SHIFT,
.ts_high_mask = CHCR_TS_HIGH_MASK,
.ts_shift = ts_shift,
.ts_shift_num = ARRAY_SIZE(ts_shift),
.dmaor_init = DMAOR_INIT,
};
static struct sh_dmae_pdata dma1_platform_data = {
.channel = sh7785_dmae1_channels,
.channel_num = ARRAY_SIZE(sh7785_dmae1_channels),
.ts_low_shift = CHCR_TS_LOW_SHIFT,
.ts_low_mask = CHCR_TS_LOW_MASK,
.ts_high_shift = CHCR_TS_HIGH_SHIFT,
.ts_high_mask = CHCR_TS_HIGH_MASK,
.ts_shift = ts_shift,
.ts_shift_num = ARRAY_SIZE(ts_shift),
.dmaor_init = DMAOR_INIT,
};
static struct resource sh7785_dmae0_resources[] = {
[0] = {
/* Channel registers and DMAOR */
.start = 0xfc808020,
.end = 0xfc80808f,
.flags = IORESOURCE_MEM,
},
[1] = {
/* DMARSx */
.start = 0xfc809000,
.end = 0xfc80900b,
.flags = IORESOURCE_MEM,
},
{
/*
* Real DMA error vector is 0x6e0, and channel
* vectors are 0x620-0x6c0
*/
.name = "error_irq",
.start = evt2irq(0x620),
.end = evt2irq(0x620),
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
},
};
static struct resource sh7785_dmae1_resources[] = {
[0] = {
/* Channel registers and DMAOR */
.start = 0xfcc08020,
.end = 0xfcc0808f,
.flags = IORESOURCE_MEM,
},
/* DMAC1 has no DMARS */
{
/*
* Real DMA error vector is 0x940, and channel
* vectors are 0x880-0x920
*/
.name = "error_irq",
.start = evt2irq(0x880),
.end = evt2irq(0x880),
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
},
};
static struct platform_device dma0_device = {
.name = "sh-dma-engine",
.id = 0,
.resource = sh7785_dmae0_resources,
.num_resources = ARRAY_SIZE(sh7785_dmae0_resources),
.dev = {
.platform_data = &dma0_platform_data,
},
};
static struct platform_device dma1_device = {
.name = "sh-dma-engine",
.id = 1,
.resource = sh7785_dmae1_resources,
.num_resources = ARRAY_SIZE(sh7785_dmae1_resources),
.dev = {
.platform_data = &dma1_platform_data,
},
};
static struct platform_device *sh7785_devices[] __initdata = {
&scif0_device,
&scif1_device,
&scif2_device,
&scif3_device,
&scif4_device,
&scif5_device,
&tmu0_device,
&tmu1_device,
&dma0_device,
&dma1_device,
};
static int __init sh7785_devices_setup(void)
{
return platform_add_devices(sh7785_devices,
ARRAY_SIZE(sh7785_devices));
}
arch_initcall(sh7785_devices_setup);
static struct platform_device *sh7785_early_devices[] __initdata = {
&scif0_device,
&scif1_device,
&scif2_device,
&scif3_device,
&scif4_device,
&scif5_device,
&tmu0_device,
&tmu1_device,
};
void __init plat_early_device_setup(void)
{
sh_early_platform_add_devices(sh7785_early_devices,
ARRAY_SIZE(sh7785_early_devices));
}
enum {
UNUSED = 0,
/* interrupt sources */
IRL0_LLLL, IRL0_LLLH, IRL0_LLHL, IRL0_LLHH,
IRL0_LHLL, IRL0_LHLH, IRL0_LHHL, IRL0_LHHH,
IRL0_HLLL, IRL0_HLLH, IRL0_HLHL, IRL0_HLHH,
IRL0_HHLL, IRL0_HHLH, IRL0_HHHL,
IRL4_LLLL, IRL4_LLLH, IRL4_LLHL, IRL4_LLHH,
IRL4_LHLL, IRL4_LHLH, IRL4_LHHL, IRL4_LHHH,
IRL4_HLLL, IRL4_HLLH, IRL4_HLHL, IRL4_HLHH,
IRL4_HHLL, IRL4_HHLH, IRL4_HHHL,
IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
WDT, TMU0, TMU1, TMU2, TMU2_TICPI,
HUDI, DMAC0, SCIF0, SCIF1, DMAC1, HSPI,
SCIF2, SCIF3, SCIF4, SCIF5,
PCISERR, PCIINTA, PCIINTB, PCIINTC, PCIINTD, PCIC5,
SIOF, MMCIF, DU, GDTA,
TMU3, TMU4, TMU5,
SSI0, SSI1,
HAC0, HAC1,
FLCTL, GPIO,
/* interrupt groups */
TMU012, TMU345
};
static struct intc_vect vectors[] __initdata = {
INTC_VECT(WDT, 0x560),
INTC_VECT(TMU0, 0x580), INTC_VECT(TMU1, 0x5a0),
INTC_VECT(TMU2, 0x5c0), INTC_VECT(TMU2_TICPI, 0x5e0),
INTC_VECT(HUDI, 0x600),
INTC_VECT(DMAC0, 0x620), INTC_VECT(DMAC0, 0x640),
INTC_VECT(DMAC0, 0x660), INTC_VECT(DMAC0, 0x680),
INTC_VECT(DMAC0, 0x6a0), INTC_VECT(DMAC0, 0x6c0),
INTC_VECT(DMAC0, 0x6e0),
INTC_VECT(SCIF0, 0x700), INTC_VECT(SCIF0, 0x720),
INTC_VECT(SCIF0, 0x740), INTC_VECT(SCIF0, 0x760),
INTC_VECT(SCIF1, 0x780), INTC_VECT(SCIF1, 0x7a0),
INTC_VECT(SCIF1, 0x7c0), INTC_VECT(SCIF1, 0x7e0),
INTC_VECT(DMAC1, 0x880), INTC_VECT(DMAC1, 0x8a0),
INTC_VECT(DMAC1, 0x8c0), INTC_VECT(DMAC1, 0x8e0),
INTC_VECT(DMAC1, 0x900), INTC_VECT(DMAC1, 0x920),
INTC_VECT(DMAC1, 0x940),
INTC_VECT(HSPI, 0x960),
INTC_VECT(SCIF2, 0x980), INTC_VECT(SCIF3, 0x9a0),
INTC_VECT(SCIF4, 0x9c0), INTC_VECT(SCIF5, 0x9e0),
INTC_VECT(PCISERR, 0xa00), INTC_VECT(PCIINTA, 0xa20),
INTC_VECT(PCIINTB, 0xa40), INTC_VECT(PCIINTC, 0xa60),
INTC_VECT(PCIINTD, 0xa80), INTC_VECT(PCIC5, 0xaa0),
INTC_VECT(PCIC5, 0xac0), INTC_VECT(PCIC5, 0xae0),
INTC_VECT(PCIC5, 0xb00), INTC_VECT(PCIC5, 0xb20),
INTC_VECT(SIOF, 0xc00),
INTC_VECT(MMCIF, 0xd00), INTC_VECT(MMCIF, 0xd20),
INTC_VECT(MMCIF, 0xd40), INTC_VECT(MMCIF, 0xd60),
INTC_VECT(DU, 0xd80),
INTC_VECT(GDTA, 0xda0), INTC_VECT(GDTA, 0xdc0),
INTC_VECT(GDTA, 0xde0),
INTC_VECT(TMU3, 0xe00), INTC_VECT(TMU4, 0xe20),
INTC_VECT(TMU5, 0xe40),
INTC_VECT(SSI0, 0xe80), INTC_VECT(SSI1, 0xea0),
INTC_VECT(HAC0, 0xec0), INTC_VECT(HAC1, 0xee0),
INTC_VECT(FLCTL, 0xf00), INTC_VECT(FLCTL, 0xf20),
INTC_VECT(FLCTL, 0xf40), INTC_VECT(FLCTL, 0xf60),
INTC_VECT(GPIO, 0xf80), INTC_VECT(GPIO, 0xfa0),
INTC_VECT(GPIO, 0xfc0), INTC_VECT(GPIO, 0xfe0),
};
static struct intc_group groups[] __initdata = {
INTC_GROUP(TMU012, TMU0, TMU1, TMU2, TMU2_TICPI),
INTC_GROUP(TMU345, TMU3, TMU4, TMU5),
};
static struct intc_mask_reg mask_registers[] __initdata = {
{ 0xffd00044, 0xffd00064, 32, /* INTMSK0 / INTMSKCLR0 */
{ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
{ 0xffd40080, 0xffd40084, 32, /* INTMSK2 / INTMSKCLR2 */
{ IRL0_LLLL, IRL0_LLLH, IRL0_LLHL, IRL0_LLHH,
IRL0_LHLL, IRL0_LHLH, IRL0_LHHL, IRL0_LHHH,
IRL0_HLLL, IRL0_HLLH, IRL0_HLHL, IRL0_HLHH,
IRL0_HHLL, IRL0_HHLH, IRL0_HHHL, 0,
IRL4_LLLL, IRL4_LLLH, IRL4_LLHL, IRL4_LLHH,
IRL4_LHLL, IRL4_LHLH, IRL4_LHHL, IRL4_LHHH,
IRL4_HLLL, IRL4_HLLH, IRL4_HLHL, IRL4_HLHH,
IRL4_HHLL, IRL4_HHLH, IRL4_HHHL, 0, } },
{ 0xffd40038, 0xffd4003c, 32, /* INT2MSKR / INT2MSKCR */
{ 0, 0, 0, GDTA, DU, SSI0, SSI1, GPIO,
FLCTL, MMCIF, HSPI, SIOF, PCIC5, PCIINTD, PCIINTC, PCIINTB,
PCIINTA, PCISERR, HAC1, HAC0, DMAC1, DMAC0, HUDI, WDT,
SCIF5, SCIF4, SCIF3, SCIF2, SCIF1, SCIF0, TMU345, TMU012 } },
};
static struct intc_prio_reg prio_registers[] __initdata = {
{ 0xffd00010, 0, 32, 4, /* INTPRI */ { IRQ0, IRQ1, IRQ2, IRQ3,
IRQ4, IRQ5, IRQ6, IRQ7 } },
{ 0xffd40000, 0, 32, 8, /* INT2PRI0 */ { TMU0, TMU1,
TMU2, TMU2_TICPI } },
{ 0xffd40004, 0, 32, 8, /* INT2PRI1 */ { TMU3, TMU4, TMU5, } },
{ 0xffd40008, 0, 32, 8, /* INT2PRI2 */ { SCIF0, SCIF1,
SCIF2, SCIF3 } },
{ 0xffd4000c, 0, 32, 8, /* INT2PRI3 */ { SCIF4, SCIF5, WDT, } },
{ 0xffd40010, 0, 32, 8, /* INT2PRI4 */ { HUDI, DMAC0, DMAC1, } },
{ 0xffd40014, 0, 32, 8, /* INT2PRI5 */ { HAC0, HAC1,
PCISERR, PCIINTA } },
{ 0xffd40018, 0, 32, 8, /* INT2PRI6 */ { PCIINTB, PCIINTC,
PCIINTD, PCIC5 } },
{ 0xffd4001c, 0, 32, 8, /* INT2PRI7 */ { SIOF, HSPI, MMCIF, } },
{ 0xffd40020, 0, 32, 8, /* INT2PRI8 */ { FLCTL, GPIO, SSI0, SSI1, } },
{ 0xffd40024, 0, 32, 8, /* INT2PRI9 */ { DU, GDTA, } },
};
static DECLARE_INTC_DESC(intc_desc, "sh7785", vectors, groups,
mask_registers, prio_registers, NULL);
/* Support for external interrupt pins in IRQ mode */
static struct intc_vect vectors_irq0123[] __initdata = {
INTC_VECT(IRQ0, 0x240), INTC_VECT(IRQ1, 0x280),
INTC_VECT(IRQ2, 0x2c0), INTC_VECT(IRQ3, 0x300),
};
static struct intc_vect vectors_irq4567[] __initdata = {
INTC_VECT(IRQ4, 0x340), INTC_VECT(IRQ5, 0x380),
INTC_VECT(IRQ6, 0x3c0), INTC_VECT(IRQ7, 0x200),
};
static struct intc_sense_reg sense_registers[] __initdata = {
{ 0xffd0001c, 32, 2, /* ICR1 */ { IRQ0, IRQ1, IRQ2, IRQ3,
IRQ4, IRQ5, IRQ6, IRQ7 } },
};
static struct intc_mask_reg ack_registers[] __initdata = {
{ 0xffd00024, 0, 32, /* INTREQ */
{ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
};
static DECLARE_INTC_DESC_ACK(intc_desc_irq0123, "sh7785-irq0123",
vectors_irq0123, NULL, mask_registers,
prio_registers, sense_registers, ack_registers);
static DECLARE_INTC_DESC_ACK(intc_desc_irq4567, "sh7785-irq4567",
vectors_irq4567, NULL, mask_registers,
prio_registers, sense_registers, ack_registers);
/* External interrupt pins in IRL mode */
static struct intc_vect vectors_irl0123[] __initdata = {
INTC_VECT(IRL0_LLLL, 0x200), INTC_VECT(IRL0_LLLH, 0x220),
INTC_VECT(IRL0_LLHL, 0x240), INTC_VECT(IRL0_LLHH, 0x260),
INTC_VECT(IRL0_LHLL, 0x280), INTC_VECT(IRL0_LHLH, 0x2a0),
INTC_VECT(IRL0_LHHL, 0x2c0), INTC_VECT(IRL0_LHHH, 0x2e0),
INTC_VECT(IRL0_HLLL, 0x300), INTC_VECT(IRL0_HLLH, 0x320),
INTC_VECT(IRL0_HLHL, 0x340), INTC_VECT(IRL0_HLHH, 0x360),
INTC_VECT(IRL0_HHLL, 0x380), INTC_VECT(IRL0_HHLH, 0x3a0),
INTC_VECT(IRL0_HHHL, 0x3c0),
};
static struct intc_vect vectors_irl4567[] __initdata = {
INTC_VECT(IRL4_LLLL, 0xb00), INTC_VECT(IRL4_LLLH, 0xb20),
INTC_VECT(IRL4_LLHL, 0xb40), INTC_VECT(IRL4_LLHH, 0xb60),
INTC_VECT(IRL4_LHLL, 0xb80), INTC_VECT(IRL4_LHLH, 0xba0),
INTC_VECT(IRL4_LHHL, 0xbc0), INTC_VECT(IRL4_LHHH, 0xbe0),
INTC_VECT(IRL4_HLLL, 0xc00), INTC_VECT(IRL4_HLLH, 0xc20),
INTC_VECT(IRL4_HLHL, 0xc40), INTC_VECT(IRL4_HLHH, 0xc60),
INTC_VECT(IRL4_HHLL, 0xc80), INTC_VECT(IRL4_HHLH, 0xca0),
INTC_VECT(IRL4_HHHL, 0xcc0),
};
static DECLARE_INTC_DESC(intc_desc_irl0123, "sh7785-irl0123", vectors_irl0123,
NULL, mask_registers, NULL, NULL);
static DECLARE_INTC_DESC(intc_desc_irl4567, "sh7785-irl4567", vectors_irl4567,
NULL, mask_registers, NULL, NULL);
#define INTC_ICR0 0xffd00000
#define INTC_INTMSK0 0xffd00044
#define INTC_INTMSK1 0xffd00048
#define INTC_INTMSK2 0xffd40080
#define INTC_INTMSKCLR1 0xffd00068
#define INTC_INTMSKCLR2 0xffd40084
void __init plat_irq_setup(void)
{
/* disable IRQ3-0 + IRQ7-4 */
__raw_writel(0xff000000, INTC_INTMSK0);
/* disable IRL3-0 + IRL7-4 */
__raw_writel(0xc0000000, INTC_INTMSK1);
__raw_writel(0xfffefffe, INTC_INTMSK2);
/* select IRL mode for IRL3-0 + IRL7-4 */
__raw_writel(__raw_readl(INTC_ICR0) & ~0x00c00000, INTC_ICR0);
	/* disable holding function, i.e. enable "SH-4 Mode" */
__raw_writel(__raw_readl(INTC_ICR0) | 0x00200000, INTC_ICR0);
register_intc_controller(&intc_desc);
}
void __init plat_irq_setup_pins(int mode)
{
switch (mode) {
case IRQ_MODE_IRQ7654:
/* select IRQ mode for IRL7-4 */
__raw_writel(__raw_readl(INTC_ICR0) | 0x00400000, INTC_ICR0);
register_intc_controller(&intc_desc_irq4567);
break;
case IRQ_MODE_IRQ3210:
/* select IRQ mode for IRL3-0 */
__raw_writel(__raw_readl(INTC_ICR0) | 0x00800000, INTC_ICR0);
register_intc_controller(&intc_desc_irq0123);
break;
case IRQ_MODE_IRL7654:
/* enable IRL7-4 but don't provide any masking */
__raw_writel(0x40000000, INTC_INTMSKCLR1);
__raw_writel(0x0000fffe, INTC_INTMSKCLR2);
break;
case IRQ_MODE_IRL3210:
/* enable IRL0-3 but don't provide any masking */
__raw_writel(0x80000000, INTC_INTMSKCLR1);
__raw_writel(0xfffe0000, INTC_INTMSKCLR2);
break;
case IRQ_MODE_IRL7654_MASK:
/* enable IRL7-4 and mask using cpu intc controller */
__raw_writel(0x40000000, INTC_INTMSKCLR1);
register_intc_controller(&intc_desc_irl4567);
break;
case IRQ_MODE_IRL3210_MASK:
/* enable IRL0-3 and mask using cpu intc controller */
__raw_writel(0x80000000, INTC_INTMSKCLR1);
register_intc_controller(&intc_desc_irl0123);
break;
default:
BUG();
}
}
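/*
 * Sketch (assumption, hypothetical board name): board setup code selects
 * one of the modes above from its IRQ init hook, e.g.:
 *
 *	static void __init myboard_init_irq(void)
 *	{
 *		plat_irq_setup_pins(IRQ_MODE_IRL3210);
 *	}
 */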
void __init plat_mem_setup(void)
{
/* Register the URAM space as Node 1 */
setup_bootmem_node(1, 0xe55f0000, 0xe5610000);
}
| linux-master | arch/sh/kernel/cpu/sh4a/setup-sh7785.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SH7722 Setup
*
* Copyright (C) 2006 - 2008 Paul Mundt
*/
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/platform_device.h>
#include <linux/serial.h>
#include <linux/serial_sci.h>
#include <linux/sh_dma.h>
#include <linux/sh_timer.h>
#include <linux/sh_intc.h>
#include <linux/uio_driver.h>
#include <linux/usb/m66592.h>
#include <asm/clock.h>
#include <asm/mmzone.h>
#include <asm/siu.h>
#include <asm/platform_early.h>
#include <cpu/dma-register.h>
#include <cpu/sh7722.h>
#include <cpu/serial.h>
static const struct sh_dmae_slave_config sh7722_dmae_slaves[] = {
{
.slave_id = SHDMA_SLAVE_SCIF0_TX,
.addr = 0xffe0000c,
.chcr = DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x21,
}, {
.slave_id = SHDMA_SLAVE_SCIF0_RX,
.addr = 0xffe00014,
.chcr = DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x22,
}, {
.slave_id = SHDMA_SLAVE_SCIF1_TX,
.addr = 0xffe1000c,
.chcr = DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x25,
}, {
.slave_id = SHDMA_SLAVE_SCIF1_RX,
.addr = 0xffe10014,
.chcr = DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x26,
}, {
.slave_id = SHDMA_SLAVE_SCIF2_TX,
.addr = 0xffe2000c,
.chcr = DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x29,
}, {
.slave_id = SHDMA_SLAVE_SCIF2_RX,
.addr = 0xffe20014,
.chcr = DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x2a,
}, {
.slave_id = SHDMA_SLAVE_SIUA_TX,
.addr = 0xa454c098,
.chcr = DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_32BIT),
.mid_rid = 0xb1,
}, {
.slave_id = SHDMA_SLAVE_SIUA_RX,
.addr = 0xa454c090,
.chcr = DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_32BIT),
.mid_rid = 0xb2,
}, {
.slave_id = SHDMA_SLAVE_SIUB_TX,
.addr = 0xa454c09c,
.chcr = DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_32BIT),
.mid_rid = 0xb5,
}, {
.slave_id = SHDMA_SLAVE_SIUB_RX,
.addr = 0xa454c094,
.chcr = DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_32BIT),
.mid_rid = 0xb6,
}, {
.slave_id = SHDMA_SLAVE_SDHI0_TX,
.addr = 0x04ce0030,
.chcr = DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_16BIT),
.mid_rid = 0xc1,
}, {
.slave_id = SHDMA_SLAVE_SDHI0_RX,
.addr = 0x04ce0030,
.chcr = DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_16BIT),
.mid_rid = 0xc2,
},
};
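/*
 * Illustrative sketch (assumption, not part of this file): a peripheral
 * driver claims one of the slave channels above through the dmaengine
 * API, e.g.:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, shdma_chan_filter,
 *				   (void *)(uintptr_t)SHDMA_SLAVE_SCIF0_TX);
 */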
static const struct sh_dmae_channel sh7722_dmae_channels[] = {
{
.offset = 0,
.dmars = 0,
.dmars_bit = 0,
}, {
.offset = 0x10,
.dmars = 0,
.dmars_bit = 8,
}, {
.offset = 0x20,
.dmars = 4,
.dmars_bit = 0,
}, {
.offset = 0x30,
.dmars = 4,
.dmars_bit = 8,
}, {
.offset = 0x50,
.dmars = 8,
.dmars_bit = 0,
}, {
.offset = 0x60,
.dmars = 8,
.dmars_bit = 8,
}
};
static const unsigned int ts_shift[] = TS_SHIFT;
static struct sh_dmae_pdata dma_platform_data = {
.slave = sh7722_dmae_slaves,
.slave_num = ARRAY_SIZE(sh7722_dmae_slaves),
.channel = sh7722_dmae_channels,
.channel_num = ARRAY_SIZE(sh7722_dmae_channels),
.ts_low_shift = CHCR_TS_LOW_SHIFT,
.ts_low_mask = CHCR_TS_LOW_MASK,
.ts_high_shift = CHCR_TS_HIGH_SHIFT,
.ts_high_mask = CHCR_TS_HIGH_MASK,
.ts_shift = ts_shift,
.ts_shift_num = ARRAY_SIZE(ts_shift),
.dmaor_init = DMAOR_INIT,
};
static struct resource sh7722_dmae_resources[] = {
[0] = {
/* Channel registers and DMAOR */
.start = 0xfe008020,
.end = 0xfe00808f,
.flags = IORESOURCE_MEM,
},
[1] = {
/* DMARSx */
.start = 0xfe009000,
.end = 0xfe00900b,
.flags = IORESOURCE_MEM,
},
{
.name = "error_irq",
.start = evt2irq(0xbc0),
.end = evt2irq(0xbc0),
.flags = IORESOURCE_IRQ,
},
{
/* IRQ for channels 0-3 */
.start = evt2irq(0x800),
.end = evt2irq(0x860),
.flags = IORESOURCE_IRQ,
},
{
/* IRQ for channels 4-5 */
.start = evt2irq(0xb80),
.end = evt2irq(0xba0),
.flags = IORESOURCE_IRQ,
},
};
struct platform_device dma_device = {
.name = "sh-dma-engine",
.id = -1,
.resource = sh7722_dmae_resources,
.num_resources = ARRAY_SIZE(sh7722_dmae_resources),
.dev = {
.platform_data = &dma_platform_data,
},
};
/* Serial */
static struct plat_sci_port scif0_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
.ops = &sh7722_sci_port_ops,
.regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE,
};
static struct resource scif0_resources[] = {
DEFINE_RES_MEM(0xffe00000, 0x100),
DEFINE_RES_IRQ(evt2irq(0xc00)),
};
static struct platform_device scif0_device = {
.name = "sh-sci",
.id = 0,
.resource = scif0_resources,
.num_resources = ARRAY_SIZE(scif0_resources),
.dev = {
.platform_data = &scif0_platform_data,
},
};
static struct plat_sci_port scif1_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
.ops = &sh7722_sci_port_ops,
.regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE,
};
static struct resource scif1_resources[] = {
DEFINE_RES_MEM(0xffe10000, 0x100),
DEFINE_RES_IRQ(evt2irq(0xc20)),
};
static struct platform_device scif1_device = {
.name = "sh-sci",
.id = 1,
.resource = scif1_resources,
.num_resources = ARRAY_SIZE(scif1_resources),
.dev = {
.platform_data = &scif1_platform_data,
},
};
static struct plat_sci_port scif2_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
.ops = &sh7722_sci_port_ops,
.regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE,
};
static struct resource scif2_resources[] = {
DEFINE_RES_MEM(0xffe20000, 0x100),
DEFINE_RES_IRQ(evt2irq(0xc40)),
};
static struct platform_device scif2_device = {
.name = "sh-sci",
.id = 2,
.resource = scif2_resources,
.num_resources = ARRAY_SIZE(scif2_resources),
.dev = {
.platform_data = &scif2_platform_data,
},
};
static struct resource rtc_resources[] = {
[0] = {
.start = 0xa465fec0,
.end = 0xa465fec0 + 0x58 - 1,
.flags = IORESOURCE_IO,
},
[1] = {
		/* Periodic IRQ */
.start = evt2irq(0x7a0),
.flags = IORESOURCE_IRQ,
},
[2] = {
/* Carry IRQ */
.start = evt2irq(0x7c0),
.flags = IORESOURCE_IRQ,
},
[3] = {
/* Alarm IRQ */
.start = evt2irq(0x780),
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device rtc_device = {
.name = "sh-rtc",
.id = -1,
.num_resources = ARRAY_SIZE(rtc_resources),
.resource = rtc_resources,
};
static struct m66592_platdata usbf_platdata = {
.on_chip = 1,
};
static struct resource usbf_resources[] = {
[0] = {
.name = "USBF",
.start = 0x04480000,
.end = 0x044800FF,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = evt2irq(0xa20),
.end = evt2irq(0xa20),
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device usbf_device = {
.name = "m66592_udc",
.id = 0, /* "usbf0" clock */
.dev = {
.dma_mask = NULL,
.coherent_dma_mask = 0xffffffff,
.platform_data = &usbf_platdata,
},
.num_resources = ARRAY_SIZE(usbf_resources),
.resource = usbf_resources,
};
static struct resource iic_resources[] = {
[0] = {
.name = "IIC",
.start = 0x04470000,
.end = 0x04470017,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = evt2irq(0xe00),
.end = evt2irq(0xe60),
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device iic_device = {
.name = "i2c-sh_mobile",
.id = 0, /* "i2c0" clock */
.num_resources = ARRAY_SIZE(iic_resources),
.resource = iic_resources,
};
static struct uio_info vpu_platform_data = {
.name = "VPU4",
.version = "0",
.irq = evt2irq(0x980),
};
static struct resource vpu_resources[] = {
[0] = {
.name = "VPU",
.start = 0xfe900000,
.end = 0xfe9022eb,
.flags = IORESOURCE_MEM,
},
[1] = {
		/* placeholder for contiguous memory */
},
};
static struct platform_device vpu_device = {
.name = "uio_pdrv_genirq",
.id = 0,
.dev = {
.platform_data = &vpu_platform_data,
},
.resource = vpu_resources,
.num_resources = ARRAY_SIZE(vpu_resources),
};
static struct uio_info veu_platform_data = {
.name = "VEU",
.version = "0",
.irq = evt2irq(0x8c0),
};
static struct resource veu_resources[] = {
[0] = {
.name = "VEU",
.start = 0xfe920000,
.end = 0xfe9200b7,
.flags = IORESOURCE_MEM,
},
[1] = {
		/* placeholder for contiguous memory */
},
};
static struct platform_device veu_device = {
.name = "uio_pdrv_genirq",
.id = 1,
.dev = {
.platform_data = &veu_platform_data,
},
.resource = veu_resources,
.num_resources = ARRAY_SIZE(veu_resources),
};
static struct uio_info jpu_platform_data = {
.name = "JPU",
.version = "0",
.irq = evt2irq(0x560),
};
static struct resource jpu_resources[] = {
[0] = {
.name = "JPU",
.start = 0xfea00000,
.end = 0xfea102d3,
.flags = IORESOURCE_MEM,
},
[1] = {
		/* placeholder for contiguous memory */
},
};
static struct platform_device jpu_device = {
.name = "uio_pdrv_genirq",
.id = 2,
.dev = {
.platform_data = &jpu_platform_data,
},
.resource = jpu_resources,
.num_resources = ARRAY_SIZE(jpu_resources),
};
static struct sh_timer_config cmt_platform_data = {
.channels_mask = 0x20,
};
static struct resource cmt_resources[] = {
DEFINE_RES_MEM(0x044a0000, 0x70),
DEFINE_RES_IRQ(evt2irq(0xf00)),
};
static struct platform_device cmt_device = {
.name = "sh-cmt-32",
.id = 0,
.dev = {
.platform_data = &cmt_platform_data,
},
.resource = cmt_resources,
.num_resources = ARRAY_SIZE(cmt_resources),
};
static struct sh_timer_config tmu0_platform_data = {
.channels_mask = 7,
};
static struct resource tmu0_resources[] = {
DEFINE_RES_MEM(0xffd80000, 0x2c),
DEFINE_RES_IRQ(evt2irq(0x400)),
DEFINE_RES_IRQ(evt2irq(0x420)),
DEFINE_RES_IRQ(evt2irq(0x440)),
};
static struct platform_device tmu0_device = {
.name = "sh-tmu",
.id = 0,
.dev = {
.platform_data = &tmu0_platform_data,
},
.resource = tmu0_resources,
.num_resources = ARRAY_SIZE(tmu0_resources),
};
static struct siu_platform siu_platform_data = {
.dma_slave_tx_a = SHDMA_SLAVE_SIUA_TX,
.dma_slave_rx_a = SHDMA_SLAVE_SIUA_RX,
.dma_slave_tx_b = SHDMA_SLAVE_SIUB_TX,
.dma_slave_rx_b = SHDMA_SLAVE_SIUB_RX,
};
static struct resource siu_resources[] = {
[0] = {
.start = 0xa4540000,
.end = 0xa454c10f,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = evt2irq(0xf80),
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device siu_device = {
.name = "siu-pcm-audio",
.id = -1,
.dev = {
.platform_data = &siu_platform_data,
},
.resource = siu_resources,
.num_resources = ARRAY_SIZE(siu_resources),
};
static struct platform_device *sh7722_devices[] __initdata = {
&scif0_device,
&scif1_device,
&scif2_device,
&cmt_device,
&tmu0_device,
&rtc_device,
&usbf_device,
&iic_device,
&vpu_device,
&veu_device,
&jpu_device,
&siu_device,
&dma_device,
};
static int __init sh7722_devices_setup(void)
{
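	/*
	 * Reserve contiguous memory for the UIO devices; this fills in the
	 * empty placeholder resource declared for each device above
	 * (1 MiB for the VPU, 2 MiB each for the VEU and JPU).
	 */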
platform_resource_setup_memory(&vpu_device, "vpu", 1 << 20);
platform_resource_setup_memory(&veu_device, "veu", 2 << 20);
platform_resource_setup_memory(&jpu_device, "jpu", 2 << 20);
return platform_add_devices(sh7722_devices,
ARRAY_SIZE(sh7722_devices));
}
arch_initcall(sh7722_devices_setup);
static struct platform_device *sh7722_early_devices[] __initdata = {
&scif0_device,
&scif1_device,
&scif2_device,
&cmt_device,
&tmu0_device,
};
void __init plat_early_device_setup(void)
{
sh_early_platform_add_devices(sh7722_early_devices,
ARRAY_SIZE(sh7722_early_devices));
}
enum {
	UNUSED = 0,
ENABLED,
DISABLED,
/* interrupt sources */
IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
HUDI,
SIM_ERI, SIM_RXI, SIM_TXI, SIM_TEI,
RTC_ATI, RTC_PRI, RTC_CUI,
DMAC0, DMAC1, DMAC2, DMAC3,
VIO_CEUI, VIO_BEUI, VIO_VEUI, VOU,
VPU, TPU,
USB_USBI0, USB_USBI1,
DMAC4, DMAC5, DMAC_DADERR,
KEYSC,
SCIF0, SCIF1, SCIF2, SIOF0, SIOF1, SIO,
FLCTL_FLSTEI, FLCTL_FLENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I,
I2C_ALI, I2C_TACKI, I2C_WAITI, I2C_DTEI,
CMT, TSIF, SIU, TWODG,
TMU0, TMU1, TMU2,
IRDA, JPU, LCDC,
/* interrupt groups */
SIM, RTC, DMAC0123, VIOVOU, USB, DMAC45, FLCTL, I2C, SDHI,
};
static struct intc_vect vectors[] __initdata = {
INTC_VECT(IRQ0, 0x600), INTC_VECT(IRQ1, 0x620),
INTC_VECT(IRQ2, 0x640), INTC_VECT(IRQ3, 0x660),
INTC_VECT(IRQ4, 0x680), INTC_VECT(IRQ5, 0x6a0),
INTC_VECT(IRQ6, 0x6c0), INTC_VECT(IRQ7, 0x6e0),
INTC_VECT(SIM_ERI, 0x700), INTC_VECT(SIM_RXI, 0x720),
INTC_VECT(SIM_TXI, 0x740), INTC_VECT(SIM_TEI, 0x760),
INTC_VECT(RTC_ATI, 0x780), INTC_VECT(RTC_PRI, 0x7a0),
INTC_VECT(RTC_CUI, 0x7c0),
INTC_VECT(DMAC0, 0x800), INTC_VECT(DMAC1, 0x820),
INTC_VECT(DMAC2, 0x840), INTC_VECT(DMAC3, 0x860),
INTC_VECT(VIO_CEUI, 0x880), INTC_VECT(VIO_BEUI, 0x8a0),
INTC_VECT(VIO_VEUI, 0x8c0), INTC_VECT(VOU, 0x8e0),
INTC_VECT(VPU, 0x980), INTC_VECT(TPU, 0x9a0),
INTC_VECT(USB_USBI0, 0xa20), INTC_VECT(USB_USBI1, 0xa40),
INTC_VECT(DMAC4, 0xb80), INTC_VECT(DMAC5, 0xba0),
INTC_VECT(DMAC_DADERR, 0xbc0), INTC_VECT(KEYSC, 0xbe0),
INTC_VECT(SCIF0, 0xc00), INTC_VECT(SCIF1, 0xc20),
INTC_VECT(SCIF2, 0xc40), INTC_VECT(SIOF0, 0xc80),
INTC_VECT(SIOF1, 0xca0), INTC_VECT(SIO, 0xd00),
INTC_VECT(FLCTL_FLSTEI, 0xd80), INTC_VECT(FLCTL_FLENDI, 0xda0),
INTC_VECT(FLCTL_FLTREQ0I, 0xdc0), INTC_VECT(FLCTL_FLTREQ1I, 0xde0),
INTC_VECT(I2C_ALI, 0xe00), INTC_VECT(I2C_TACKI, 0xe20),
INTC_VECT(I2C_WAITI, 0xe40), INTC_VECT(I2C_DTEI, 0xe60),
INTC_VECT(SDHI, 0xe80), INTC_VECT(SDHI, 0xea0),
INTC_VECT(SDHI, 0xec0), INTC_VECT(SDHI, 0xee0),
INTC_VECT(CMT, 0xf00), INTC_VECT(TSIF, 0xf20),
INTC_VECT(SIU, 0xf80), INTC_VECT(TWODG, 0xfa0),
INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
INTC_VECT(TMU2, 0x440), INTC_VECT(IRDA, 0x480),
INTC_VECT(JPU, 0x560), INTC_VECT(LCDC, 0x580),
};
static struct intc_group groups[] __initdata = {
INTC_GROUP(SIM, SIM_ERI, SIM_RXI, SIM_TXI, SIM_TEI),
INTC_GROUP(RTC, RTC_ATI, RTC_PRI, RTC_CUI),
INTC_GROUP(DMAC0123, DMAC0, DMAC1, DMAC2, DMAC3),
INTC_GROUP(VIOVOU, VIO_CEUI, VIO_BEUI, VIO_VEUI, VOU),
INTC_GROUP(USB, USB_USBI0, USB_USBI1),
INTC_GROUP(DMAC45, DMAC4, DMAC5, DMAC_DADERR),
INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLENDI,
FLCTL_FLTREQ0I, FLCTL_FLTREQ1I),
INTC_GROUP(I2C, I2C_ALI, I2C_TACKI, I2C_WAITI, I2C_DTEI),
};
static struct intc_mask_reg mask_registers[] __initdata = {
{ 0xa4080080, 0xa40800c0, 8, /* IMR0 / IMCR0 */
{ } },
{ 0xa4080084, 0xa40800c4, 8, /* IMR1 / IMCR1 */
{ VOU, VIO_VEUI, VIO_BEUI, VIO_CEUI, DMAC3, DMAC2, DMAC1, DMAC0 } },
{ 0xa4080088, 0xa40800c8, 8, /* IMR2 / IMCR2 */
{ 0, 0, 0, VPU, } },
{ 0xa408008c, 0xa40800cc, 8, /* IMR3 / IMCR3 */
{ SIM_TEI, SIM_TXI, SIM_RXI, SIM_ERI, 0, 0, 0, IRDA } },
{ 0xa4080090, 0xa40800d0, 8, /* IMR4 / IMCR4 */
{ 0, TMU2, TMU1, TMU0, JPU, 0, 0, LCDC } },
{ 0xa4080094, 0xa40800d4, 8, /* IMR5 / IMCR5 */
{ KEYSC, DMAC_DADERR, DMAC5, DMAC4, 0, SCIF2, SCIF1, SCIF0 } },
{ 0xa4080098, 0xa40800d8, 8, /* IMR6 / IMCR6 */
{ 0, 0, 0, SIO, 0, 0, SIOF1, SIOF0 } },
{ 0xa408009c, 0xa40800dc, 8, /* IMR7 / IMCR7 */
{ I2C_DTEI, I2C_WAITI, I2C_TACKI, I2C_ALI,
FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLENDI, FLCTL_FLSTEI } },
{ 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */
{ DISABLED, ENABLED, ENABLED, ENABLED, 0, 0, TWODG, SIU } },
{ 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */
{ 0, 0, 0, CMT, 0, USB_USBI1, USB_USBI0, } },
{ 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */
{ } },
{ 0xa40800ac, 0xa40800ec, 8, /* IMR11 / IMCR11 */
{ 0, RTC_CUI, RTC_PRI, RTC_ATI, 0, TPU, 0, TSIF } },
{ 0xa4140044, 0xa4140064, 8, /* INTMSK00 / INTMSKCLR00 */
{ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
};
static struct intc_prio_reg prio_registers[] __initdata = {
{ 0xa4080000, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, IRDA } },
{ 0xa4080004, 0, 16, 4, /* IPRB */ { JPU, LCDC, SIM } },
{ 0xa4080008, 0, 16, 4, /* IPRC */ { } },
{ 0xa408000c, 0, 16, 4, /* IPRD */ { } },
{ 0xa4080010, 0, 16, 4, /* IPRE */ { DMAC0123, VIOVOU, 0, VPU } },
{ 0xa4080014, 0, 16, 4, /* IPRF */ { KEYSC, DMAC45, USB, CMT } },
{ 0xa4080018, 0, 16, 4, /* IPRG */ { SCIF0, SCIF1, SCIF2 } },
{ 0xa408001c, 0, 16, 4, /* IPRH */ { SIOF0, SIOF1, FLCTL, I2C } },
{ 0xa4080020, 0, 16, 4, /* IPRI */ { SIO, 0, TSIF, RTC } },
{ 0xa4080024, 0, 16, 4, /* IPRJ */ { 0, 0, SIU } },
{ 0xa4080028, 0, 16, 4, /* IPRK */ { 0, 0, 0, SDHI } },
{ 0xa408002c, 0, 16, 4, /* IPRL */ { TWODG, 0, TPU } },
{ 0xa4140010, 0, 32, 4, /* INTPRI00 */
{ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
};
static struct intc_sense_reg sense_registers[] __initdata = {
{ 0xa414001c, 16, 2, /* ICR1 */
{ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
};
static struct intc_mask_reg ack_registers[] __initdata = {
{ 0xa4140024, 0, 8, /* INTREQ00 */
{ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
};
static struct intc_desc intc_desc __initdata = {
.name = "sh7722",
.force_enable = ENABLED,
.force_disable = DISABLED,
.hw = INTC_HW_DESC(vectors, groups, mask_registers,
prio_registers, sense_registers, ack_registers),
};
void __init plat_irq_setup(void)
{
register_intc_controller(&intc_desc);
}
void __init plat_mem_setup(void)
{
/* Register the URAM space as Node 1 */
setup_bootmem_node(1, 0x055f0000, 0x05610000);
}
| linux-master | arch/sh/kernel/cpu/sh4a/setup-sh7722.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/vsyscall/vsyscall.c
*
* Copyright (C) 2006 Paul Mundt
*
* vDSO randomization
* Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar
*/
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/elf.h>
#include <linux/sched.h>
#include <linux/err.h>
/*
* Should the kernel map a VDSO page into processes and pass its
* address down to glibc upon exec()?
*/
unsigned int __read_mostly vdso_enabled = 1;
EXPORT_SYMBOL_GPL(vdso_enabled);
static int __init vdso_setup(char *s)
{
vdso_enabled = simple_strtoul(s, NULL, 0);
return 1;
}
__setup("vdso=", vdso_setup);
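/*
 * Usage example: booting with "vdso=0" clears vdso_enabled, which the
 * arch ELF code is expected to consult (e.g. when deciding whether to
 * advertise the vDSO through the auxiliary vector); any value accepted
 * by simple_strtoul() works here.
 */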
/*
* These symbols are defined by vsyscall.o to mark the bounds
* of the ELF DSO images included therein.
*/
extern const char vsyscall_trapa_start, vsyscall_trapa_end;
static struct page *syscall_pages[1];
int __init vsyscall_init(void)
{
void *syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);
syscall_pages[0] = virt_to_page(syscall_page);
/*
* XXX: Map this page to a fixmap entry if we get around
* to adding the page to ELF core dumps
*/
memcpy(syscall_page,
&vsyscall_trapa_start,
&vsyscall_trapa_end - &vsyscall_trapa_start);
return 0;
}
/* Setup a VMA at program startup for the vsyscall page */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
struct mm_struct *mm = current->mm;
unsigned long addr;
int ret;
if (mmap_write_lock_killable(mm))
return -EINTR;
addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
if (IS_ERR_VALUE(addr)) {
ret = addr;
goto up_fail;
}
ret = install_special_mapping(mm, addr, PAGE_SIZE,
VM_READ | VM_EXEC |
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
syscall_pages);
if (unlikely(ret))
goto up_fail;
current->mm->context.vdso = (void *)addr;
up_fail:
mmap_write_unlock(mm);
return ret;
}
const char *arch_vma_name(struct vm_area_struct *vma)
{
if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
return "[vdso]";
return NULL;
}
| linux-master | arch/sh/kernel/vsyscall/vsyscall.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Suspend support specific for i386/x86-64.
*
* Copyright (c) 2007 Rafael J. Wysocki <[email protected]>
* Copyright (c) 2002 Pavel Machek <[email protected]>
* Copyright (c) 2001 Patrick Mochel <[email protected]>
*/
#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pgtable.h>
#include <asm/proto.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/mce.h>
#include <asm/suspend.h>
#include <asm/fpu/api.h>
#include <asm/debugreg.h>
#include <asm/cpu.h>
#include <asm/cacheinfo.h>
#include <asm/mmu_context.h>
#include <asm/cpu_device_id.h>
#include <asm/microcode.h>
#ifdef CONFIG_X86_32
__visible unsigned long saved_context_ebx;
__visible unsigned long saved_context_esp, saved_context_ebp;
__visible unsigned long saved_context_esi, saved_context_edi;
__visible unsigned long saved_context_eflags;
#endif
struct saved_context saved_context;
static void msr_save_context(struct saved_context *ctxt)
{
struct saved_msr *msr = ctxt->saved_msrs.array;
struct saved_msr *end = msr + ctxt->saved_msrs.num;
while (msr < end) {
if (msr->valid)
rdmsrl(msr->info.msr_no, msr->info.reg.q);
msr++;
}
}
static void msr_restore_context(struct saved_context *ctxt)
{
struct saved_msr *msr = ctxt->saved_msrs.array;
struct saved_msr *end = msr + ctxt->saved_msrs.num;
while (msr < end) {
if (msr->valid)
wrmsrl(msr->info.msr_no, msr->info.reg.q);
msr++;
}
}
/**
* __save_processor_state() - Save CPU registers before creating a
* hibernation image and before restoring
* the memory state from it
* @ctxt: Structure to store the registers contents in.
*
* NOTE: If there is a CPU register the modification of which by the
 * boot kernel (i.e. the kernel used for loading the hibernation image)
 * might affect the operations of the restored target kernel (i.e. the one
 * saved in the hibernation image), then its contents must be saved by this
 * function. In other words, if kernel A is hibernated and a different
 * kernel B is used for loading the hibernation image into memory, the
* kernel A's __save_processor_state() function must save all registers
* needed by kernel A, so that it can operate correctly after the resume
* regardless of what kernel B does in the meantime.
*/
static void __save_processor_state(struct saved_context *ctxt)
{
#ifdef CONFIG_X86_32
mtrr_save_fixed_ranges(NULL);
#endif
kernel_fpu_begin();
/*
* descriptor tables
*/
store_idt(&ctxt->idt);
/*
* We save it here, but restore it only in the hibernate case.
* For ACPI S3 resume, this is loaded via 'early_gdt_desc' in 64-bit
* mode in "secondary_startup_64". In 32-bit mode it is done via
* 'pmode_gdt' in wakeup_start.
*/
ctxt->gdt_desc.size = GDT_SIZE - 1;
ctxt->gdt_desc.address = (unsigned long)get_cpu_gdt_rw(smp_processor_id());
store_tr(ctxt->tr);
/* XMM0..XMM15 should be handled by kernel_fpu_begin(). */
/*
* segment registers
*/
savesegment(gs, ctxt->gs);
#ifdef CONFIG_X86_64
savesegment(fs, ctxt->fs);
savesegment(ds, ctxt->ds);
savesegment(es, ctxt->es);
rdmsrl(MSR_FS_BASE, ctxt->fs_base);
rdmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
rdmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
mtrr_save_fixed_ranges(NULL);
rdmsrl(MSR_EFER, ctxt->efer);
#endif
/*
* control registers
*/
ctxt->cr0 = read_cr0();
ctxt->cr2 = read_cr2();
ctxt->cr3 = __read_cr3();
ctxt->cr4 = __read_cr4();
ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE,
&ctxt->misc_enable);
msr_save_context(ctxt);
}
/* Needed by apm.c */
void save_processor_state(void)
{
__save_processor_state(&saved_context);
x86_platform.save_sched_clock_state();
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(save_processor_state);
#endif
static void do_fpu_end(void)
{
/*
* Restore FPU regs if necessary.
*/
kernel_fpu_end();
}
static void fix_processor_context(void)
{
int cpu = smp_processor_id();
#ifdef CONFIG_X86_64
struct desc_struct *desc = get_cpu_gdt_rw(cpu);
tss_desc tss;
#endif
/*
* We need to reload TR, which requires that we change the
* GDT entry to indicate "available" first.
*
* XXX: This could probably all be replaced by a call to
* force_reload_TR().
*/
set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
#ifdef CONFIG_X86_64
memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
	tss.type = 0x9;	/* The available 64-bit TSS (see AMD vol 2, pg 91) */
write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
syscall_init(); /* This sets MSR_*STAR and related */
#else
if (boot_cpu_has(X86_FEATURE_SEP))
enable_sep_cpu();
#endif
load_TR_desc(); /* This does ltr */
load_mm_ldt(current->active_mm); /* This does lldt */
initialize_tlbstate_and_flush();
fpu__resume_cpu();
/* The processor is back on the direct GDT, load back the fixmap */
load_fixmap_gdt(cpu);
}
/**
* __restore_processor_state() - Restore the contents of CPU registers saved
* by __save_processor_state()
* @ctxt: Structure to load the registers contents from.
*
* The asm code that gets us here will have restored a usable GDT, although
* it will be pointing to the wrong alias.
*/
static void notrace __restore_processor_state(struct saved_context *ctxt)
{
struct cpuinfo_x86 *c;
if (ctxt->misc_enable_saved)
wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable);
/*
* control registers
*/
/* cr4 was introduced in the Pentium CPU */
#ifdef CONFIG_X86_32
if (ctxt->cr4)
__write_cr4(ctxt->cr4);
#else
/* CONFIG X86_64 */
wrmsrl(MSR_EFER, ctxt->efer);
__write_cr4(ctxt->cr4);
#endif
write_cr3(ctxt->cr3);
write_cr2(ctxt->cr2);
write_cr0(ctxt->cr0);
/* Restore the IDT. */
load_idt(&ctxt->idt);
/*
* Just in case the asm code got us here with the SS, DS, or ES
* out of sync with the GDT, update them.
*/
loadsegment(ss, __KERNEL_DS);
loadsegment(ds, __USER_DS);
loadsegment(es, __USER_DS);
/*
* Restore percpu access. Percpu access can happen in exception
* handlers or in complicated helpers like load_gs_index().
*/
#ifdef CONFIG_X86_64
wrmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
#else
loadsegment(fs, __KERNEL_PERCPU);
#endif
/* Restore the TSS, RO GDT, LDT, and usermode-relevant MSRs. */
fix_processor_context();
/*
* Now that we have descriptor tables fully restored and working
* exception handling, restore the usermode segments.
*/
#ifdef CONFIG_X86_64
	loadsegment(ds, ctxt->ds);
loadsegment(es, ctxt->es);
loadsegment(fs, ctxt->fs);
load_gs_index(ctxt->gs);
/*
* Restore FSBASE and GSBASE after restoring the selectors, since
* restoring the selectors clobbers the bases. Keep in mind
* that MSR_KERNEL_GS_BASE is horribly misnamed.
*/
wrmsrl(MSR_FS_BASE, ctxt->fs_base);
wrmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
#else
loadsegment(gs, ctxt->gs);
#endif
do_fpu_end();
tsc_verify_tsc_adjust(true);
x86_platform.restore_sched_clock_state();
cache_bp_restore();
perf_restore_debug_store();
c = &cpu_data(smp_processor_id());
if (cpu_has(c, X86_FEATURE_MSR_IA32_FEAT_CTL))
init_ia32_feat_ctl(c);
microcode_bsp_resume();
/*
* This needs to happen after the microcode has been updated upon resume
* because some of the MSRs are "emulated" in microcode.
*/
msr_restore_context(ctxt);
}
/* Needed by apm.c */
void notrace restore_processor_state(void)
{
__restore_processor_state(&saved_context);
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(restore_processor_state);
#endif
#if defined(CONFIG_HIBERNATION) && defined(CONFIG_HOTPLUG_CPU)
static void __noreturn resume_play_dead(void)
{
play_dead_common();
tboot_shutdown(TB_SHUTDOWN_WFS);
hlt_play_dead();
}
int hibernate_resume_nonboot_cpu_disable(void)
{
void (*play_dead)(void) = smp_ops.play_dead;
int ret;
/*
* Ensure that MONITOR/MWAIT will not be used in the "play dead" loop
* during hibernate image restoration, because it is likely that the
 * monitored address will actually be written to at that time and then
* the "dead" CPU will attempt to execute instructions again, but the
* address in its instruction pointer may not be possible to resolve
* any more at that point (the page tables used by it previously may
* have been overwritten by hibernate image data).
*
* First, make sure that we wake up all the potentially disabled SMT
* threads which have been initially brought up and then put into
* mwait/cpuidle sleep.
* Those will be put to proper (not interfering with hibernation
* resume) sleep afterwards, and the resumed kernel will decide itself
* what to do with them.
*/
ret = cpuhp_smt_enable();
if (ret)
return ret;
smp_ops.play_dead = resume_play_dead;
ret = freeze_secondary_cpus(0);
smp_ops.play_dead = play_dead;
return ret;
}
#endif
/*
* When bsp_check() is called in hibernate and suspend, cpu hotplug
* is disabled already. So it's unnecessary to handle race condition between
* cpumask query and cpu hotplug.
*/
static int bsp_check(void)
{
if (cpumask_first(cpu_online_mask) != 0) {
pr_warn("CPU0 is offline.\n");
return -ENODEV;
}
return 0;
}
static int bsp_pm_callback(struct notifier_block *nb, unsigned long action,
void *ptr)
{
int ret = 0;
switch (action) {
case PM_SUSPEND_PREPARE:
case PM_HIBERNATION_PREPARE:
ret = bsp_check();
break;
default:
break;
}
return notifier_from_errno(ret);
}
static int __init bsp_pm_check_init(void)
{
/*
* Set this bsp_pm_callback as lower priority than
* cpu_hotplug_pm_callback. So cpu_hotplug_pm_callback will be called
* earlier to disable cpu hotplug before bsp online check.
*/
pm_notifier(bsp_pm_callback, -INT_MAX);
return 0;
}
core_initcall(bsp_pm_check_init);
static int msr_build_context(const u32 *msr_id, const int num)
{
struct saved_msrs *saved_msrs = &saved_context.saved_msrs;
struct saved_msr *msr_array;
int total_num;
int i, j;
total_num = saved_msrs->num + num;
msr_array = kmalloc_array(total_num, sizeof(struct saved_msr), GFP_KERNEL);
if (!msr_array) {
pr_err("x86/pm: Can not allocate memory to save/restore MSRs during suspend.\n");
return -ENOMEM;
}
if (saved_msrs->array) {
/*
* Multiple callbacks can invoke this function, so copy any
* MSR save requests from previous invocations.
*/
memcpy(msr_array, saved_msrs->array,
sizeof(struct saved_msr) * saved_msrs->num);
kfree(saved_msrs->array);
}
for (i = saved_msrs->num, j = 0; i < total_num; i++, j++) {
u64 dummy;
msr_array[i].info.msr_no = msr_id[j];
msr_array[i].valid = !rdmsrl_safe(msr_id[j], &dummy);
msr_array[i].info.reg.q = 0;
}
saved_msrs->num = total_num;
saved_msrs->array = msr_array;
return 0;
}
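/*
 * Usage sketch, assuming a hypothetical quirk callback: repeated calls
 * append to saved_context.saved_msrs, so several quirks can each
 * register their own MSR list.
 */
#if 0
static int example_quirk(void)
{
static const u32 example_ids[] = { MSR_IA32_THERM_CONTROL };
return msr_build_context(example_ids, ARRAY_SIZE(example_ids));
}
#endif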
/*
* The following sections are a quirk framework for problematic BIOSen:
* sometimes MSRs are modified by the BIOS after the system has been
* suspended to RAM, which can cause unexpected behavior after wakeup.
* Thus we save/restore the specified MSRs across suspend/resume in
* order to work around it.
*
* For any further problematic BIOSen/platforms,
* please add your own function similar to msr_initialize_bdw.
*/
static int msr_initialize_bdw(const struct dmi_system_id *d)
{
/* Add any extra MSR ids into this array. */
u32 bdw_msr_id[] = { MSR_IA32_THERM_CONTROL };
pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n", d->ident);
return msr_build_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
}
static const struct dmi_system_id msr_save_dmi_table[] = {
{
.callback = msr_initialize_bdw,
.ident = "BROADWELL BDX_EP",
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "GRANTLEY"),
DMI_MATCH(DMI_PRODUCT_VERSION, "E63448-400"),
},
},
{}
};
static int msr_save_cpuid_features(const struct x86_cpu_id *c)
{
u32 cpuid_msr_id[] = {
MSR_AMD64_CPUID_FN_1,
};
pr_info("x86/pm: family %#hx cpu detected, MSR saving is needed during suspending.\n",
c->family);
return msr_build_context(cpuid_msr_id, ARRAY_SIZE(cpuid_msr_id));
}
static const struct x86_cpu_id msr_save_cpu_table[] = {
X86_MATCH_VENDOR_FAM(AMD, 0x15, &msr_save_cpuid_features),
X86_MATCH_VENDOR_FAM(AMD, 0x16, &msr_save_cpuid_features),
{}
};
typedef int (*pm_cpu_match_t)(const struct x86_cpu_id *);
static int pm_cpu_check(const struct x86_cpu_id *c)
{
const struct x86_cpu_id *m;
int ret = 0;
m = x86_match_cpu(msr_save_cpu_table);
if (m) {
pm_cpu_match_t fn;
fn = (pm_cpu_match_t)m->driver_data;
ret = fn(m);
}
return ret;
}
static void pm_save_spec_msr(void)
{
struct msr_enumeration {
u32 msr_no;
u32 feature;
} msr_enum[] = {
{ MSR_IA32_SPEC_CTRL, X86_FEATURE_MSR_SPEC_CTRL },
{ MSR_IA32_TSX_CTRL, X86_FEATURE_MSR_TSX_CTRL },
{ MSR_TSX_FORCE_ABORT, X86_FEATURE_TSX_FORCE_ABORT },
{ MSR_IA32_MCU_OPT_CTRL, X86_FEATURE_SRBDS_CTRL },
{ MSR_AMD64_LS_CFG, X86_FEATURE_LS_CFG_SSBD },
{ MSR_AMD64_DE_CFG, X86_FEATURE_LFENCE_RDTSC },
};
int i;
for (i = 0; i < ARRAY_SIZE(msr_enum); i++) {
if (boot_cpu_has(msr_enum[i].feature))
msr_build_context(&msr_enum[i].msr_no, 1);
}
}
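/*
 * Note: each speculation-control MSR above is saved only when the
 * corresponding feature bit is set, so msr_build_context() is never
 * asked to touch an MSR the CPU does not implement.
 */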
static int pm_check_save_msr(void)
{
dmi_check_system(msr_save_dmi_table);
pm_cpu_check(msr_save_cpu_table);
pm_save_spec_msr();
return 0;
}
device_initcall(pm_check_save_msr);
| linux-master | arch/x86/power/cpu.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Hibernation support for x86-64
*
* Copyright (c) 2007 Rafael J. Wysocki <[email protected]>
* Copyright (c) 2002 Pavel Machek <[email protected]>
* Copyright (c) 2001 Patrick Mochel <[email protected]>
*/
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/suspend.h>
#include <linux/scatterlist.h>
#include <linux/kdebug.h>
#include <linux/pgtable.h>
#include <crypto/hash.h>
#include <asm/e820/api.h>
#include <asm/init.h>
#include <asm/proto.h>
#include <asm/page.h>
#include <asm/mtrr.h>
#include <asm/sections.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>
static int set_up_temporary_text_mapping(pgd_t *pgd)
{
pmd_t *pmd;
pud_t *pud;
p4d_t *p4d = NULL;
pgprot_t pgtable_prot = __pgprot(_KERNPG_TABLE);
pgprot_t pmd_text_prot = __pgprot(__PAGE_KERNEL_LARGE_EXEC);
/* Filter out unsupported __PAGE_KERNEL* bits: */
pgprot_val(pmd_text_prot) &= __default_kernel_pte_mask;
pgprot_val(pgtable_prot) &= __default_kernel_pte_mask;
/*
* The new mapping only has to cover the page containing the image
* kernel's entry point (jump_address_phys), because the switch over to
* it is carried out by relocated code running from a page allocated
* specifically for this purpose and covered by the identity mapping, so
* the temporary kernel text mapping is only needed for the final jump.
* Moreover, in that mapping the virtual address of the image kernel's
* entry point must be the same as its virtual address in the image
* kernel (restore_jump_address), so the image kernel's
* restore_registers() code doesn't find itself in a different area of
* the virtual address space after switching over to the original page
* tables used by the image kernel.
*/
if (pgtable_l5_enabled()) {
p4d = (p4d_t *)get_safe_page(GFP_ATOMIC);
if (!p4d)
return -ENOMEM;
}
pud = (pud_t *)get_safe_page(GFP_ATOMIC);
if (!pud)
return -ENOMEM;
pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
if (!pmd)
return -ENOMEM;
set_pmd(pmd + pmd_index(restore_jump_address),
__pmd((jump_address_phys & PMD_MASK) | pgprot_val(pmd_text_prot)));
set_pud(pud + pud_index(restore_jump_address),
__pud(__pa(pmd) | pgprot_val(pgtable_prot)));
if (p4d) {
p4d_t new_p4d = __p4d(__pa(pud) | pgprot_val(pgtable_prot));
pgd_t new_pgd = __pgd(__pa(p4d) | pgprot_val(pgtable_prot));
set_p4d(p4d + p4d_index(restore_jump_address), new_p4d);
set_pgd(pgd + pgd_index(restore_jump_address), new_pgd);
} else {
/* No p4d for 4-level paging: point the pgd to the pud page table */
pgd_t new_pgd = __pgd(__pa(pud) | pgprot_val(pgtable_prot));
set_pgd(pgd + pgd_index(restore_jump_address), new_pgd);
}
return 0;
}
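/*
 * Resulting temporary mapping for restore_jump_address, sketched (the
 * p4d step only exists with 5-level paging):
 *
 *   pgd -> [p4d ->] pud -> pmd (2M large page) -> jump_address_phys
 *
 * Only the single PMD-sized region covering the image kernel's entry
 * point is mapped here; everything else runs from the identity mapping
 * built in set_up_temporary_mappings() below.
 */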
static void *alloc_pgt_page(void *context)
{
return (void *)get_safe_page(GFP_ATOMIC);
}
static int set_up_temporary_mappings(void)
{
struct x86_mapping_info info = {
.alloc_pgt_page = alloc_pgt_page,
.page_flag = __PAGE_KERNEL_LARGE_EXEC,
.offset = __PAGE_OFFSET,
};
unsigned long mstart, mend;
pgd_t *pgd;
int result;
int i;
pgd = (pgd_t *)get_safe_page(GFP_ATOMIC);
if (!pgd)
return -ENOMEM;
/* Prepare a temporary mapping for the kernel text */
result = set_up_temporary_text_mapping(pgd);
if (result)
return result;
/* Set up the direct mapping from scratch */
for (i = 0; i < nr_pfn_mapped; i++) {
mstart = pfn_mapped[i].start << PAGE_SHIFT;
mend = pfn_mapped[i].end << PAGE_SHIFT;
result = kernel_ident_mapping_init(&info, pgd, mstart, mend);
if (result)
return result;
}
temp_pgt = __pa(pgd);
return 0;
}
asmlinkage int swsusp_arch_resume(void)
{
int error;
/* We have got enough memory and from now on we cannot recover */
error = set_up_temporary_mappings();
if (error)
return error;
error = relocate_restore_code();
if (error)
return error;
restore_image();
return 0;
}
| linux-master | arch/x86/power/hibernate_64.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Hibernation support for x86
*
* Copyright (c) 2007 Rafael J. Wysocki <[email protected]>
* Copyright (c) 2002 Pavel Machek <[email protected]>
* Copyright (c) 2001 Patrick Mochel <[email protected]>
*/
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/suspend.h>
#include <linux/scatterlist.h>
#include <linux/kdebug.h>
#include <linux/cpu.h>
#include <linux/pgtable.h>
#include <linux/types.h>
#include <linux/crc32.h>
#include <asm/e820/api.h>
#include <asm/init.h>
#include <asm/proto.h>
#include <asm/page.h>
#include <asm/mtrr.h>
#include <asm/sections.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>
/*
* Address to jump to in the last phase of restore in order to get to the image
* kernel's text (this value is passed in the image header).
*/
unsigned long restore_jump_address __visible;
unsigned long jump_address_phys;
/*
* Value of the cr3 register from before the hibernation (this value is passed
* in the image header).
*/
unsigned long restore_cr3 __visible;
unsigned long temp_pgt __visible;
unsigned long relocated_restore_code __visible;
/**
* pfn_is_nosave - check if given pfn is in the 'nosave' section
*/
int pfn_is_nosave(unsigned long pfn)
{
unsigned long nosave_begin_pfn;
unsigned long nosave_end_pfn;
nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT;
nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT;
return pfn >= nosave_begin_pfn && pfn < nosave_end_pfn;
}
struct restore_data_record {
unsigned long jump_address;
unsigned long jump_address_phys;
unsigned long cr3;
unsigned long magic;
unsigned long e820_checksum;
};
/**
* compute_e820_crc32 - calculate crc32 of a given e820 table
*
* @table: the e820 table to be calculated
*
* Return: the resulting checksum
*/
static inline u32 compute_e820_crc32(struct e820_table *table)
{
int size = offsetof(struct e820_table, entries) +
sizeof(struct e820_entry) * table->nr_entries;
return ~crc32_le(~0, (unsigned char const *)table, size);
}
#ifdef CONFIG_X86_64
#define RESTORE_MAGIC 0x23456789ABCDEF02UL
#else
#define RESTORE_MAGIC 0x12345679UL
#endif
/**
* arch_hibernation_header_save - populate the architecture specific part
* of a hibernation image header
* @addr: address to save the data at
* @max_size: maximum size of the buffer at @addr
*/
int arch_hibernation_header_save(void *addr, unsigned int max_size)
{
struct restore_data_record *rdr = addr;
if (max_size < sizeof(struct restore_data_record))
return -EOVERFLOW;
rdr->magic = RESTORE_MAGIC;
rdr->jump_address = (unsigned long)restore_registers;
rdr->jump_address_phys = __pa_symbol(restore_registers);
/*
* The restore code fixes up CR3 and CR4 in the following sequence:
*
* [in hibernation asm]
* 1. CR3 <= temporary page tables
* 2. CR4 <= mmu_cr4_features (from the kernel that restores us)
* 3. CR3 <= rdr->cr3
* 4. CR4 <= mmu_cr4_features (from us, i.e. the image kernel)
* [in restore_processor_state()]
* 5. CR4 <= saved CR4
* 6. CR3 <= saved CR3
*
* Our mmu_cr4_features has CR4.PCIDE=0, and toggling
* CR4.PCIDE while CR3's PCID bits are nonzero is illegal, so
* rdr->cr3 needs to point to valid page tables but must not
* have any of the PCID bits set.
*/
rdr->cr3 = restore_cr3 & ~CR3_PCID_MASK;
rdr->e820_checksum = compute_e820_crc32(e820_table_firmware);
return 0;
}
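/*
 * Worked example for the PCID masking above, assuming CR3_PCID_MASK
 * covers the low 12 bits: a saved CR3 of 0x112345001 (PCID 0x001) is
 * stored as rdr->cr3 = 0x112345000 - the same page-table root with the
 * PCID bits cleared, which keeps the CR4.PCIDE toggling in steps 2 and
 * 4 of the sequence above legal.
 */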
/**
* arch_hibernation_header_restore - read the architecture specific data
* from the hibernation image header
* @addr: address to read the data from
*/
int arch_hibernation_header_restore(void *addr)
{
struct restore_data_record *rdr = addr;
if (rdr->magic != RESTORE_MAGIC) {
pr_crit("Unrecognized hibernate image header format!\n");
return -EINVAL;
}
restore_jump_address = rdr->jump_address;
jump_address_phys = rdr->jump_address_phys;
restore_cr3 = rdr->cr3;
if (rdr->e820_checksum != compute_e820_crc32(e820_table_firmware)) {
pr_crit("Hibernate inconsistent memory map detected!\n");
return -ENODEV;
}
return 0;
}
int relocate_restore_code(void)
{
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
relocated_restore_code = get_safe_page(GFP_ATOMIC);
if (!relocated_restore_code)
return -ENOMEM;
__memcpy((void *)relocated_restore_code, core_restore_code, PAGE_SIZE);
/* Make the page containing the relocated code executable */
pgd = (pgd_t *)__va(read_cr3_pa()) +
pgd_index(relocated_restore_code);
p4d = p4d_offset(pgd, relocated_restore_code);
if (p4d_large(*p4d)) {
set_p4d(p4d, __p4d(p4d_val(*p4d) & ~_PAGE_NX));
goto out;
}
pud = pud_offset(p4d, relocated_restore_code);
if (pud_large(*pud)) {
set_pud(pud, __pud(pud_val(*pud) & ~_PAGE_NX));
goto out;
}
pmd = pmd_offset(pud, relocated_restore_code);
if (pmd_large(*pmd)) {
set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_NX));
goto out;
}
pte = pte_offset_kernel(pmd, relocated_restore_code);
set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_NX));
out:
__flush_tlb_all();
return 0;
}
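/*
 * The walk above clears _PAGE_NX at whichever level actually maps the
 * relocated page - a large p4d/pud/pmd entry if one is used, the 4kB
 * PTE otherwise - and then flushes the TLB so the copied restore code
 * becomes executable.
 */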
int arch_resume_nosmt(void)
{
int ret = 0;
/*
* We reached this while coming out of hibernation. This means
* that SMT siblings are sleeping in hlt, as mwait is not safe
* against control transition during resume (see comment in
* hibernate_resume_nonboot_cpu_disable()).
*
* If the resumed kernel has SMT disabled, we have to take all the
* SMT siblings out of hlt, and offline them again so that they
* end up in mwait proper.
*
* Called with hotplug disabled.
*/
cpu_hotplug_enable();
if (cpu_smt_control == CPU_SMT_DISABLED ||
cpu_smt_control == CPU_SMT_FORCE_DISABLED) {
enum cpuhp_smt_control old = cpu_smt_control;
ret = cpuhp_smt_enable();
if (ret)
goto out;
ret = cpuhp_smt_disable(old);
if (ret)
goto out;
}
out:
cpu_hotplug_disable();
return ret;
}
| linux-master | arch/x86/power/hibernate.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Hibernation support specific for i386 - temporary page tables
*
* Copyright (c) 2006 Rafael J. Wysocki <[email protected]>
*/
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>
#include <asm/page.h>
#include <asm/mmzone.h>
#include <asm/sections.h>
#include <asm/suspend.h>
/* Pointer to the temporary resume page tables */
pgd_t *resume_pg_dir;
/* The following three functions are based on the analogous code in
* arch/x86/mm/init_32.c
*/
/*
* Create a middle page table on a resume-safe page and put a pointer to it in
* the given page global directory entry. In non-PAE builds this simply
* returns the pmd folded into that pgd entry, since the middle layer is
* not a separate table there.
*/
static pmd_t *resume_one_md_table_init(pgd_t *pgd)
{
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd_table;
#ifdef CONFIG_X86_PAE
pmd_table = (pmd_t *)get_safe_page(GFP_ATOMIC);
if (!pmd_table)
return NULL;
set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
p4d = p4d_offset(pgd, 0);
pud = pud_offset(p4d, 0);
BUG_ON(pmd_table != pmd_offset(pud, 0));
#else
p4d = p4d_offset(pgd, 0);
pud = pud_offset(p4d, 0);
pmd_table = pmd_offset(pud, 0);
#endif
return pmd_table;
}
/*
* Create a page table on a resume-safe page and place a pointer to it in
* a middle page directory entry.
*/
static pte_t *resume_one_page_table_init(pmd_t *pmd)
{
if (pmd_none(*pmd)) {
pte_t *page_table = (pte_t *)get_safe_page(GFP_ATOMIC);
if (!page_table)
return NULL;
set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
BUG_ON(page_table != pte_offset_kernel(pmd, 0));
return page_table;
}
return pte_offset_kernel(pmd, 0);
}
/*
* This maps the physical memory to kernel virtual address space, a total
* of max_low_pfn pages, by creating page tables starting from address
* PAGE_OFFSET. The page tables are allocated out of resume-safe pages.
*/
static int resume_physical_mapping_init(pgd_t *pgd_base)
{
unsigned long pfn;
pgd_t *pgd;
pmd_t *pmd;
pte_t *pte;
int pgd_idx, pmd_idx;
pgd_idx = pgd_index(PAGE_OFFSET);
pgd = pgd_base + pgd_idx;
pfn = 0;
for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
pmd = resume_one_md_table_init(pgd);
if (!pmd)
return -ENOMEM;
if (pfn >= max_low_pfn)
continue;
for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD; pmd++, pmd_idx++) {
if (pfn >= max_low_pfn)
break;
/* Map with big pages if possible, otherwise create
* normal page tables.
* NOTE: We can mark everything as executable here
*/
if (boot_cpu_has(X86_FEATURE_PSE)) {
set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
pfn += PTRS_PER_PTE;
} else {
pte_t *max_pte;
pte = resume_one_page_table_init(pmd);
if (!pte)
return -ENOMEM;
max_pte = pte + PTRS_PER_PTE;
for (; pte < max_pte; pte++, pfn++) {
if (pfn >= max_low_pfn)
break;
set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
}
}
}
}
return 0;
}
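/*
 * With PSE, each set_pmd() above maps PTRS_PER_PTE consecutive pfns in
 * one large page (512 pfns / 2MB with PAE, 1024 pfns / 4MB without),
 * which is why pfn advances by PTRS_PER_PTE per iteration in the
 * large-page branch.
 */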
static inline void resume_init_first_level_page_table(pgd_t *pg_dir)
{
#ifdef CONFIG_X86_PAE
int i;
/* Init entries of the first-level page table to the zero page */
for (i = 0; i < PTRS_PER_PGD; i++)
set_pgd(pg_dir + i,
__pgd(__pa(empty_zero_page) | _PAGE_PRESENT));
#endif
}
static int set_up_temporary_text_mapping(pgd_t *pgd_base)
{
pgd_t *pgd;
pmd_t *pmd;
pte_t *pte;
pgd = pgd_base + pgd_index(restore_jump_address);
pmd = resume_one_md_table_init(pgd);
if (!pmd)
return -ENOMEM;
if (boot_cpu_has(X86_FEATURE_PSE)) {
set_pmd(pmd + pmd_index(restore_jump_address),
__pmd((jump_address_phys & PMD_MASK) | pgprot_val(PAGE_KERNEL_LARGE_EXEC)));
} else {
pte = resume_one_page_table_init(pmd);
if (!pte)
return -ENOMEM;
set_pte(pte + pte_index(restore_jump_address),
__pte((jump_address_phys & PAGE_MASK) | pgprot_val(PAGE_KERNEL_EXEC)));
}
return 0;
}
asmlinkage int swsusp_arch_resume(void)
{
int error;
resume_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
if (!resume_pg_dir)
return -ENOMEM;
resume_init_first_level_page_table(resume_pg_dir);
error = set_up_temporary_text_mapping(resume_pg_dir);
if (error)
return error;
error = resume_physical_mapping_init(resume_pg_dir);
if (error)
return error;
temp_pgt = __pa(resume_pg_dir);
error = relocate_restore_code();
if (error)
return error;
/* We have got enough memory and from now on we cannot recover */
restore_image();
return 0;
}
| linux-master | arch/x86/power/hibernate_32.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/ftrace.h>
#include <xen/interface/xen.h>
#include <xen/interface/xen-mca.h>
#define HYPERCALL(x) [__HYPERVISOR_##x] = "("#x")",
static const char *xen_hypercall_names[] = {
#include <asm/xen-hypercalls.h>
};
#undef HYPERCALL
static const char *xen_hypercall_name(unsigned op)
{
if (op < ARRAY_SIZE(xen_hypercall_names) && xen_hypercall_names[op] != NULL)
return xen_hypercall_names[op];
return "";
}
#define CREATE_TRACE_POINTS
#include <trace/events/xen.h>
| linux-master | arch/x86/xen/trace.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/interrupt.h>
#include <asm/xen/hypercall.h>
#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>
#include <xen/interface/xenpmu.h>
#include "xen-ops.h"
#include "pmu.h"
/* x86_pmu.handle_irq definition */
#include "../events/perf_event.h"
#define XENPMU_IRQ_PROCESSING 1
struct xenpmu {
/* Shared page between hypervisor and domain */
struct xen_pmu_data *xenpmu_data;
uint8_t flags;
};
static DEFINE_PER_CPU(struct xenpmu, xenpmu_shared);
#define get_xenpmu_data() (this_cpu_ptr(&xenpmu_shared)->xenpmu_data)
#define get_xenpmu_flags() (this_cpu_ptr(&xenpmu_shared)->flags)
/* Macro for computing address of a PMU MSR bank */
#define field_offset(ctxt, field) ((void *)((uintptr_t)ctxt + \
(uintptr_t)ctxt->field))
/* AMD PMU */
#define F15H_NUM_COUNTERS 6
#define F10H_NUM_COUNTERS 4
static __read_mostly uint32_t amd_counters_base;
static __read_mostly uint32_t amd_ctrls_base;
static __read_mostly int amd_msr_step;
static __read_mostly int k7_counters_mirrored;
static __read_mostly int amd_num_counters;
/* Intel PMU */
#define MSR_TYPE_COUNTER 0
#define MSR_TYPE_CTRL 1
#define MSR_TYPE_GLOBAL 2
#define MSR_TYPE_ARCH_COUNTER 3
#define MSR_TYPE_ARCH_CTRL 4
/* Number of general pmu registers (CPUID.EAX[0xa].EAX[8..15]) */
#define PMU_GENERAL_NR_SHIFT 8
#define PMU_GENERAL_NR_BITS 8
#define PMU_GENERAL_NR_MASK (((1 << PMU_GENERAL_NR_BITS) - 1) \
<< PMU_GENERAL_NR_SHIFT)
/* Number of fixed pmu registers (CPUID.EDX[0xa].EDX[0..4]) */
#define PMU_FIXED_NR_SHIFT 0
#define PMU_FIXED_NR_BITS 5
#define PMU_FIXED_NR_MASK (((1 << PMU_FIXED_NR_BITS) - 1) \
<< PMU_FIXED_NR_SHIFT)
/* Alias registers (0x4c1) for full-width writes to PMCs */
#define MSR_PMC_ALIAS_MASK (~(MSR_IA32_PERFCTR0 ^ MSR_IA32_PMC0))
#define INTEL_PMC_TYPE_SHIFT 30
static __read_mostly int intel_num_arch_counters, intel_num_fixed_counters;
static void xen_pmu_arch_init(void)
{
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
switch (boot_cpu_data.x86) {
case 0x15:
amd_num_counters = F15H_NUM_COUNTERS;
amd_counters_base = MSR_F15H_PERF_CTR;
amd_ctrls_base = MSR_F15H_PERF_CTL;
amd_msr_step = 2;
k7_counters_mirrored = 1;
break;
case 0x10:
case 0x12:
case 0x14:
case 0x16:
default:
amd_num_counters = F10H_NUM_COUNTERS;
amd_counters_base = MSR_K7_PERFCTR0;
amd_ctrls_base = MSR_K7_EVNTSEL0;
amd_msr_step = 1;
k7_counters_mirrored = 0;
break;
}
} else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
amd_num_counters = F10H_NUM_COUNTERS;
amd_counters_base = MSR_K7_PERFCTR0;
amd_ctrls_base = MSR_K7_EVNTSEL0;
amd_msr_step = 1;
k7_counters_mirrored = 0;
} else {
uint32_t eax, ebx, ecx, edx;
cpuid(0xa, &eax, &ebx, &ecx, &edx);
intel_num_arch_counters = (eax & PMU_GENERAL_NR_MASK) >>
PMU_GENERAL_NR_SHIFT;
intel_num_fixed_counters = (edx & PMU_FIXED_NR_MASK) >>
PMU_FIXED_NR_SHIFT;
}
}
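/*
 * Example decode for the Intel branch above: with CPUID.0xA returning
 * eax = 0x07300804, (eax & PMU_GENERAL_NR_MASK) >> PMU_GENERAL_NR_SHIFT
 * yields 8 general counters; with edx = 0x00000603,
 * (edx & PMU_FIXED_NR_MASK) yields 3 fixed counters.
 */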
static inline uint32_t get_fam15h_addr(u32 addr)
{
switch (addr) {
case MSR_K7_PERFCTR0:
case MSR_K7_PERFCTR1:
case MSR_K7_PERFCTR2:
case MSR_K7_PERFCTR3:
return MSR_F15H_PERF_CTR + (addr - MSR_K7_PERFCTR0);
case MSR_K7_EVNTSEL0:
case MSR_K7_EVNTSEL1:
case MSR_K7_EVNTSEL2:
case MSR_K7_EVNTSEL3:
return MSR_F15H_PERF_CTL + (addr - MSR_K7_EVNTSEL0);
default:
break;
}
return addr;
}
static inline bool is_amd_pmu_msr(unsigned int msr)
{
if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
return false;
if ((msr >= MSR_F15H_PERF_CTL &&
msr < MSR_F15H_PERF_CTR + (amd_num_counters * 2)) ||
(msr >= MSR_K7_EVNTSEL0 &&
msr < MSR_K7_PERFCTR0 + amd_num_counters))
return true;
return false;
}
static bool is_intel_pmu_msr(u32 msr_index, int *type, int *index)
{
u32 msr_index_pmc;
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL &&
boot_cpu_data.x86_vendor != X86_VENDOR_CENTAUR &&
boot_cpu_data.x86_vendor != X86_VENDOR_ZHAOXIN)
return false;
switch (msr_index) {
case MSR_CORE_PERF_FIXED_CTR_CTRL:
case MSR_IA32_DS_AREA:
case MSR_IA32_PEBS_ENABLE:
*type = MSR_TYPE_CTRL;
return true;
case MSR_CORE_PERF_GLOBAL_CTRL:
case MSR_CORE_PERF_GLOBAL_STATUS:
case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
*type = MSR_TYPE_GLOBAL;
return true;
default:
if ((msr_index >= MSR_CORE_PERF_FIXED_CTR0) &&
(msr_index < MSR_CORE_PERF_FIXED_CTR0 +
intel_num_fixed_counters)) {
*index = msr_index - MSR_CORE_PERF_FIXED_CTR0;
*type = MSR_TYPE_COUNTER;
return true;
}
if ((msr_index >= MSR_P6_EVNTSEL0) &&
(msr_index < MSR_P6_EVNTSEL0 + intel_num_arch_counters)) {
*index = msr_index - MSR_P6_EVNTSEL0;
*type = MSR_TYPE_ARCH_CTRL;
return true;
}
msr_index_pmc = msr_index & MSR_PMC_ALIAS_MASK;
if ((msr_index_pmc >= MSR_IA32_PERFCTR0) &&
(msr_index_pmc < MSR_IA32_PERFCTR0 +
intel_num_arch_counters)) {
*type = MSR_TYPE_ARCH_COUNTER;
*index = msr_index_pmc - MSR_IA32_PERFCTR0;
return true;
}
return false;
}
}
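/*
 * Note on MSR_PMC_ALIAS_MASK used above: MSR_IA32_PMC0 (0x4c1) is the
 * full-width alias of MSR_IA32_PERFCTR0 (0xc1); masking off the single
 * differing bit folds both ranges together, so e.g. 0xc1 + n and
 * 0x4c1 + n both resolve to arch counter index n.
 */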
static bool xen_intel_pmu_emulate(unsigned int msr, u64 *val, int type,
int index, bool is_read)
{
uint64_t *reg = NULL;
struct xen_pmu_intel_ctxt *ctxt;
uint64_t *fix_counters;
struct xen_pmu_cntr_pair *arch_cntr_pair;
struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
uint8_t xenpmu_flags = get_xenpmu_flags();
if (!xenpmu_data || !(xenpmu_flags & XENPMU_IRQ_PROCESSING))
return false;
ctxt = &xenpmu_data->pmu.c.intel;
switch (msr) {
case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
reg = &ctxt->global_ovf_ctrl;
break;
case MSR_CORE_PERF_GLOBAL_STATUS:
reg = &ctxt->global_status;
break;
case MSR_CORE_PERF_GLOBAL_CTRL:
reg = &ctxt->global_ctrl;
break;
case MSR_CORE_PERF_FIXED_CTR_CTRL:
reg = &ctxt->fixed_ctrl;
break;
default:
switch (type) {
case MSR_TYPE_COUNTER:
fix_counters = field_offset(ctxt, fixed_counters);
reg = &fix_counters[index];
break;
case MSR_TYPE_ARCH_COUNTER:
arch_cntr_pair = field_offset(ctxt, arch_counters);
reg = &arch_cntr_pair[index].counter;
break;
case MSR_TYPE_ARCH_CTRL:
arch_cntr_pair = field_offset(ctxt, arch_counters);
reg = &arch_cntr_pair[index].control;
break;
default:
return false;
}
}
if (reg) {
if (is_read)
*val = *reg;
else {
*reg = *val;
if (msr == MSR_CORE_PERF_GLOBAL_OVF_CTRL)
ctxt->global_status &= (~(*val));
}
return true;
}
return false;
}
static bool xen_amd_pmu_emulate(unsigned int msr, u64 *val, bool is_read)
{
uint64_t *reg = NULL;
int i, off = 0;
struct xen_pmu_amd_ctxt *ctxt;
uint64_t *counter_regs, *ctrl_regs;
struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
uint8_t xenpmu_flags = get_xenpmu_flags();
if (!xenpmu_data || !(xenpmu_flags & XENPMU_IRQ_PROCESSING))
return false;
if (k7_counters_mirrored &&
((msr >= MSR_K7_EVNTSEL0) && (msr <= MSR_K7_PERFCTR3)))
msr = get_fam15h_addr(msr);
ctxt = &xenpmu_data->pmu.c.amd;
for (i = 0; i < amd_num_counters; i++) {
if (msr == amd_ctrls_base + off) {
ctrl_regs = field_offset(ctxt, ctrls);
reg = &ctrl_regs[i];
break;
} else if (msr == amd_counters_base + off) {
counter_regs = field_offset(ctxt, counters);
reg = &counter_regs[i];
break;
}
off += amd_msr_step;
}
if (reg) {
if (is_read)
*val = *reg;
else
*reg = *val;
return true;
}
return false;
}
static bool pmu_msr_chk_emulated(unsigned int msr, uint64_t *val, bool is_read,
bool *emul)
{
int type, index = 0;
if (is_amd_pmu_msr(msr))
*emul = xen_amd_pmu_emulate(msr, val, is_read);
else if (is_intel_pmu_msr(msr, &type, &index))
*emul = xen_intel_pmu_emulate(msr, val, type, index, is_read);
else
return false;
return true;
}
bool pmu_msr_read(unsigned int msr, uint64_t *val, int *err)
{
bool emulated;
if (!pmu_msr_chk_emulated(msr, val, true, &emulated))
return false;
if (!emulated) {
*val = err ? native_read_msr_safe(msr, err)
: native_read_msr(msr);
}
return true;
}
bool pmu_msr_write(unsigned int msr, uint32_t low, uint32_t high, int *err)
{
uint64_t val = ((uint64_t)high << 32) | low;
bool emulated;
if (!pmu_msr_chk_emulated(msr, &val, false, &emulated))
return false;
if (!emulated) {
if (err)
*err = native_write_msr_safe(msr, low, high);
else
native_write_msr(msr, low, high);
}
return true;
}
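/*
 * Call-site sketch (hypothetical, mirroring how the Xen MSR hooks are
 * expected to use these helpers): try the PMU emulation first and fall
 * back to the native access when the MSR is not a PMU register.
 */
#if 0
uint64_t val;
int err;
if (!pmu_msr_read(msr, &val, &err))
val = native_read_msr_safe(msr, &err);
#endif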
static unsigned long long xen_amd_read_pmc(int counter)
{
struct xen_pmu_amd_ctxt *ctxt;
uint64_t *counter_regs;
struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
uint8_t xenpmu_flags = get_xenpmu_flags();
if (!xenpmu_data || !(xenpmu_flags & XENPMU_IRQ_PROCESSING)) {
uint32_t msr;
int err;
msr = amd_counters_base + (counter * amd_msr_step);
return native_read_msr_safe(msr, &err);
}
ctxt = &xenpmu_data->pmu.c.amd;
counter_regs = field_offset(ctxt, counters);
return counter_regs[counter];
}
static unsigned long long xen_intel_read_pmc(int counter)
{
struct xen_pmu_intel_ctxt *ctxt;
uint64_t *fixed_counters;
struct xen_pmu_cntr_pair *arch_cntr_pair;
struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
uint8_t xenpmu_flags = get_xenpmu_flags();
if (!xenpmu_data || !(xenpmu_flags & XENPMU_IRQ_PROCESSING)) {
uint32_t msr;
int err;
if (counter & (1 << INTEL_PMC_TYPE_SHIFT))
msr = MSR_CORE_PERF_FIXED_CTR0 + (counter & 0xffff);
else
msr = MSR_IA32_PERFCTR0 + counter;
return native_read_msr_safe(msr, &err);
}
ctxt = &xenpmu_data->pmu.c.intel;
if (counter & (1 << INTEL_PMC_TYPE_SHIFT)) {
fixed_counters = field_offset(ctxt, fixed_counters);
return fixed_counters[counter & 0xffff];
}
arch_cntr_pair = field_offset(ctxt, arch_counters);
return arch_cntr_pair[counter].counter;
}
unsigned long long xen_read_pmc(int counter)
{
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
return xen_amd_read_pmc(counter);
else
return xen_intel_read_pmc(counter);
}
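/*
 * Counter encoding used above: bit 30 (INTEL_PMC_TYPE_SHIFT) selects
 * the fixed-counter file and the low bits index into it, so e.g.
 * counter 0x40000001 reads fixed counter 1 while counter 0x1 reads
 * architectural counter 1.
 */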
int pmu_apic_update(uint32_t val)
{
int ret;
struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
if (!xenpmu_data) {
pr_warn_once("%s: pmudata not initialized\n", __func__);
return -EINVAL;
}
xenpmu_data->pmu.l.lapic_lvtpc = val;
if (get_xenpmu_flags() & XENPMU_IRQ_PROCESSING)
return 0;
ret = HYPERVISOR_xenpmu_op(XENPMU_lvtpc_set, NULL);
return ret;
}
/* perf callbacks */
static unsigned int xen_guest_state(void)
{
const struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
unsigned int state = 0;
if (!xenpmu_data) {
pr_warn_once("%s: pmudata not initialized\n", __func__);
return state;
}
if (!xen_initial_domain() || (xenpmu_data->domain_id >= DOMID_SELF))
return state;
state |= PERF_GUEST_ACTIVE;
if (xenpmu_data->pmu.pmu_flags & PMU_SAMPLE_PV) {
if (xenpmu_data->pmu.pmu_flags & PMU_SAMPLE_USER)
state |= PERF_GUEST_USER;
} else if (xenpmu_data->pmu.r.regs.cpl & 3) {
state |= PERF_GUEST_USER;
}
return state;
}
static unsigned long xen_get_guest_ip(void)
{
const struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
if (!xenpmu_data) {
pr_warn_once("%s: pmudata not initialized\n", __func__);
return 0;
}
return xenpmu_data->pmu.r.regs.ip;
}
static struct perf_guest_info_callbacks xen_guest_cbs = {
.state = xen_guest_state,
.get_ip = xen_get_guest_ip,
};
/* Convert registers from Xen's format to Linux' */
static void xen_convert_regs(const struct xen_pmu_regs *xen_regs,
struct pt_regs *regs, uint64_t pmu_flags)
{
regs->ip = xen_regs->ip;
regs->cs = xen_regs->cs;
regs->sp = xen_regs->sp;
if (pmu_flags & PMU_SAMPLE_PV) {
if (pmu_flags & PMU_SAMPLE_USER)
regs->cs |= 3;
else
regs->cs &= ~3;
} else {
if (xen_regs->cpl)
regs->cs |= 3;
else
regs->cs &= ~3;
}
}
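/*
 * The low two bits of %cs encode the privilege level, so forcing them
 * to 3 or 0 above is enough for perf to attribute a sample to user or
 * kernel mode respectively.
 */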
irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id)
{
int err, ret = IRQ_NONE;
struct pt_regs regs = {0};
const struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
uint8_t xenpmu_flags = get_xenpmu_flags();
if (!xenpmu_data) {
pr_warn_once("%s: pmudata not initialized\n", __func__);
return ret;
}
this_cpu_ptr(&xenpmu_shared)->flags =
xenpmu_flags | XENPMU_IRQ_PROCESSING;
xen_convert_regs(&xenpmu_data->pmu.r.regs, &regs,
xenpmu_data->pmu.pmu_flags);
if (x86_pmu.handle_irq(&regs))
ret = IRQ_HANDLED;
/* Write out cached context to HW */
err = HYPERVISOR_xenpmu_op(XENPMU_flush, NULL);
this_cpu_ptr(&xenpmu_shared)->flags = xenpmu_flags;
if (err) {
pr_warn_once("%s: failed hypercall, err: %d\n", __func__, err);
return IRQ_NONE;
}
return ret;
}
bool is_xen_pmu;
void xen_pmu_init(int cpu)
{
int err;
struct xen_pmu_params xp;
unsigned long pfn;
struct xen_pmu_data *xenpmu_data;
BUILD_BUG_ON(sizeof(struct xen_pmu_data) > PAGE_SIZE);
if (xen_hvm_domain() || (cpu != 0 && !is_xen_pmu))
return;
xenpmu_data = (struct xen_pmu_data *)get_zeroed_page(GFP_KERNEL);
if (!xenpmu_data) {
pr_err("VPMU init: No memory\n");
return;
}
pfn = virt_to_pfn(xenpmu_data);
xp.val = pfn_to_mfn(pfn);
xp.vcpu = cpu;
xp.version.maj = XENPMU_VER_MAJ;
xp.version.min = XENPMU_VER_MIN;
err = HYPERVISOR_xenpmu_op(XENPMU_init, &xp);
if (err)
goto fail;
per_cpu(xenpmu_shared, cpu).xenpmu_data = xenpmu_data;
per_cpu(xenpmu_shared, cpu).flags = 0;
if (!is_xen_pmu) {
is_xen_pmu = true;
perf_register_guest_info_callbacks(&xen_guest_cbs);
xen_pmu_arch_init();
}
return;
fail:
if (err == -EOPNOTSUPP || err == -ENOSYS)
pr_info_once("VPMU disabled by hypervisor.\n");
else
pr_info_once("Could not initialize VPMU for cpu %d, error %d\n",
cpu, err);
free_pages((unsigned long)xenpmu_data, 0);
}
void xen_pmu_finish(int cpu)
{
struct xen_pmu_params xp;
if (xen_hvm_domain())
return;
xp.vcpu = cpu;
xp.version.maj = XENPMU_VER_MAJ;
xp.version.min = XENPMU_VER_MIN;
(void)HYPERVISOR_xenpmu_op(XENPMU_finish, &xp);
free_pages((unsigned long)per_cpu(xenpmu_shared, cpu).xenpmu_data, 0);
per_cpu(xenpmu_shared, cpu).xenpmu_data = NULL;
}
| linux-master | arch/x86/xen/pmu.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/hardirq.h>
#include <asm/x86_init.h>
#include <xen/interface/xen.h>
#include <xen/interface/sched.h>
#include <xen/interface/vcpu.h>
#include <xen/features.h>
#include <xen/events.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#include "xen-ops.h"
/*
* Force a proper event-channel callback from Xen after clearing the
* callback mask. We do this in a very simple manner, by making a call
* down into Xen. The pending flag will be checked by Xen on return.
*/
noinstr void xen_force_evtchn_callback(void)
{
(void)HYPERVISOR_xen_version(0, NULL);
}
static noinstr void xen_safe_halt(void)
{
/* Blocking includes an implicit local_irq_enable(). */
if (HYPERVISOR_sched_op(SCHEDOP_block, NULL) != 0)
BUG();
}
static void xen_halt(void)
{
if (irqs_disabled())
HYPERVISOR_vcpu_op(VCPUOP_down,
xen_vcpu_nr(smp_processor_id()), NULL);
else
xen_safe_halt();
}
static const typeof(pv_ops) xen_irq_ops __initconst = {
.irq = {
/* Initial interrupt flag handling is only called while interrupts are off. */
.save_fl = __PV_IS_CALLEE_SAVE(paravirt_ret0),
.irq_disable = __PV_IS_CALLEE_SAVE(paravirt_nop),
.irq_enable = __PV_IS_CALLEE_SAVE(paravirt_BUG),
.safe_halt = xen_safe_halt,
.halt = xen_halt,
},
};
void __init xen_init_irq_ops(void)
{
pv_ops.irq = xen_irq_ops.irq;
x86_init.irqs.intr_init = xen_init_IRQ;
}
| linux-master | arch/x86/xen/irq.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include "debugfs.h"
static struct dentry *d_xen_debug;
struct dentry * __init xen_init_debugfs(void)
{
if (!d_xen_debug)
d_xen_debug = debugfs_create_dir("xen", NULL);
return d_xen_debug;
}
| linux-master | arch/x86/xen/debugfs.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/tick.h>
#include <linux/percpu-defs.h>
#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/grant_table.h>
#include <xen/events.h>
#include <asm/cpufeatures.h>
#include <asm/msr-index.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>
#include <asm/fixmap.h>
#include "xen-ops.h"
#include "mmu.h"
#include "pmu.h"
static DEFINE_PER_CPU(u64, spec_ctrl);
void xen_arch_pre_suspend(void)
{
xen_save_time_memory_area();
if (xen_pv_domain())
xen_pv_pre_suspend();
}
void xen_arch_post_suspend(int cancelled)
{
if (xen_pv_domain())
xen_pv_post_suspend(cancelled);
else
xen_hvm_post_suspend(cancelled);
xen_restore_time_memory_area();
}
static void xen_vcpu_notify_restore(void *data)
{
if (xen_pv_domain() && boot_cpu_has(X86_FEATURE_SPEC_CTRL))
wrmsrl(MSR_IA32_SPEC_CTRL, this_cpu_read(spec_ctrl));
/* Boot processor notified via generic timekeeping_resume() */
if (smp_processor_id() == 0)
return;
tick_resume_local();
}
static void xen_vcpu_notify_suspend(void *data)
{
u64 tmp;
tick_suspend_local();
if (xen_pv_domain() && boot_cpu_has(X86_FEATURE_SPEC_CTRL)) {
rdmsrl(MSR_IA32_SPEC_CTRL, tmp);
this_cpu_write(spec_ctrl, tmp);
wrmsrl(MSR_IA32_SPEC_CTRL, 0);
}
}
void xen_arch_resume(void)
{
int cpu;
on_each_cpu(xen_vcpu_notify_restore, NULL, 1);
for_each_online_cpu(cpu)
xen_pmu_init(cpu);
}
void xen_arch_suspend(void)
{
int cpu;
for_each_online_cpu(cpu)
xen_pmu_finish(cpu);
on_each_cpu(xen_vcpu_notify_suspend, NULL, 1);
}
| linux-master | arch/x86/xen/suspend.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Xen SMP support
*
* This file implements the Xen versions of smp_ops. SMP under Xen is
* very straightforward. Bringing a CPU up is simply a matter of
* loading its initial context and setting it running.
*
* IPIs are handled through the Xen event mechanism.
*
* Because virtual CPUs can be scheduled onto any real CPU, there's no
* useful topology information for the kernel to make use of. As a
* result, all CPUs are treated as if they're single-core and
* single-threaded.
*/
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/irq_work.h>
#include <linux/tick.h>
#include <linux/nmi.h>
#include <linux/cpuhotplug.h>
#include <linux/stackprotector.h>
#include <linux/pgtable.h>
#include <asm/paravirt.h>
#include <asm/idtentry.h>
#include <asm/desc.h>
#include <asm/cpu.h>
#include <asm/io_apic.h>
#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>
#include <xen/interface/xenpmu.h>
#include <asm/spec-ctrl.h>
#include <asm/xen/interface.h>
#include <asm/xen/hypercall.h>
#include <xen/xen.h>
#include <xen/page.h>
#include <xen/events.h>
#include <xen/hvc-console.h>
#include "xen-ops.h"
#include "mmu.h"
#include "smp.h"
#include "pmu.h"
cpumask_var_t xen_cpu_initialized_map;
static DEFINE_PER_CPU(struct xen_common_irq, xen_irq_work) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_pmu_irq) = { .irq = -1 };
static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id);
static void cpu_bringup(void)
{
int cpu;
cr4_init();
cpuhp_ap_sync_alive();
cpu_init();
fpu__init_cpu();
touch_softlockup_watchdog();
/* PVH runs in ring 0 and allows us to do native syscalls. Yay! */
if (!xen_feature(XENFEAT_supervisor_mode_kernel)) {
xen_enable_sysenter();
xen_enable_syscall();
}
cpu = smp_processor_id();
smp_store_cpu_info(cpu);
cpu_data(cpu).x86_max_cores = 1;
set_cpu_sibling_map(cpu);
speculative_store_bypass_ht_init();
xen_setup_cpu_clockevents();
notify_cpu_starting(cpu);
set_cpu_online(cpu, true);
smp_mb();
/* We can take interrupts now: we're officially "up". */
local_irq_enable();
}
asmlinkage __visible void cpu_bringup_and_idle(void)
{
cpu_bringup();
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
void xen_smp_intr_free_pv(unsigned int cpu)
{
kfree(per_cpu(xen_irq_work, cpu).name);
per_cpu(xen_irq_work, cpu).name = NULL;
if (per_cpu(xen_irq_work, cpu).irq >= 0) {
unbind_from_irqhandler(per_cpu(xen_irq_work, cpu).irq, NULL);
per_cpu(xen_irq_work, cpu).irq = -1;
}
kfree(per_cpu(xen_pmu_irq, cpu).name);
per_cpu(xen_pmu_irq, cpu).name = NULL;
if (per_cpu(xen_pmu_irq, cpu).irq >= 0) {
unbind_from_irqhandler(per_cpu(xen_pmu_irq, cpu).irq, NULL);
per_cpu(xen_pmu_irq, cpu).irq = -1;
}
}
int xen_smp_intr_init_pv(unsigned int cpu)
{
int rc;
char *callfunc_name, *pmu_name;
callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
per_cpu(xen_irq_work, cpu).name = callfunc_name;
rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR,
cpu,
xen_irq_work_interrupt,
IRQF_PERCPU|IRQF_NOBALANCING,
callfunc_name,
NULL);
if (rc < 0)
goto fail;
per_cpu(xen_irq_work, cpu).irq = rc;
if (is_xen_pmu) {
pmu_name = kasprintf(GFP_KERNEL, "pmu%d", cpu);
per_cpu(xen_pmu_irq, cpu).name = pmu_name;
rc = bind_virq_to_irqhandler(VIRQ_XENPMU, cpu,
xen_pmu_irq_handler,
IRQF_PERCPU|IRQF_NOBALANCING,
pmu_name, NULL);
if (rc < 0)
goto fail;
per_cpu(xen_pmu_irq, cpu).irq = rc;
}
return 0;
fail:
xen_smp_intr_free_pv(cpu);
return rc;
}
static void __init _get_smp_config(unsigned int early)
{
int i, rc;
unsigned int subtract = 0;
if (early)
return;
num_processors = 0;
disabled_cpus = 0;
for (i = 0; i < nr_cpu_ids; i++) {
rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
if (rc >= 0) {
num_processors++;
set_cpu_possible(i, true);
} else {
set_cpu_possible(i, false);
set_cpu_present(i, false);
subtract++;
}
}
#ifdef CONFIG_HOTPLUG_CPU
/* This is akin to using 'nr_cpus' on the Linux command line, which is
* OK: when we use 'dom0_max_vcpus=X' we can only have up to X VCPUs,
* while nr_cpu_ids may be greater than X. This is normally not a
* problem, except when CPU hotplugging is involved and there might
* then be more than X CPUs in the guest - which will not work, as
* there is no hypercall to expand the maximum number of VCPUs an
* already running guest has. So cap it at X. */
if (subtract)
set_nr_cpu_ids(nr_cpu_ids - subtract);
#endif
/* Pretend to be a proper enumerated system */
smp_found_config = 1;
}
static void __init xen_pv_smp_prepare_boot_cpu(void)
{
BUG_ON(smp_processor_id() != 0);
native_smp_prepare_boot_cpu();
if (!xen_feature(XENFEAT_writable_page_tables))
/* We've switched to the "real" per-cpu gdt, so make
* sure the old memory can be recycled. */
make_lowmem_page_readwrite(xen_initial_gdt);
xen_setup_vcpu_info_placement();
/*
* The alternatives logic (which patches the unlock/lock paths) runs
* before the SMP bootup code is activated, hence we need to set this
* up while the core kernel is being patched. Otherwise we would have
* only modules patched but not the core code.
*/
xen_init_spinlocks();
}
static void __init xen_pv_smp_prepare_cpus(unsigned int max_cpus)
{
unsigned cpu;
if (ioapic_is_disabled) {
char *m = (max_cpus == 0) ?
"The nosmp parameter is incompatible with Xen; " \
"use Xen dom0_max_vcpus=1 parameter" :
"The noapic parameter is incompatible with Xen";
xen_raw_printk(m);
panic(m);
}
xen_init_lock_cpu(0);
smp_prepare_cpus_common();
cpu_data(0).x86_max_cores = 1;
speculative_store_bypass_ht_init();
xen_pmu_init(0);
if (xen_smp_intr_init(0) || xen_smp_intr_init_pv(0))
BUG();
if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
panic("could not allocate xen_cpu_initialized_map\n");
cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));
/* Restrict the possible_map according to max_cpus. */
while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
continue;
set_cpu_possible(cpu, false);
}
for_each_possible_cpu(cpu)
set_cpu_present(cpu, true);
}
static int
cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
{
struct vcpu_guest_context *ctxt;
struct desc_struct *gdt;
unsigned long gdt_mfn;
if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
return 0;
ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
if (ctxt == NULL) {
cpumask_clear_cpu(cpu, xen_cpu_initialized_map);
return -ENOMEM;
}
gdt = get_cpu_gdt_rw(cpu);
/*
* Bring up the CPU in cpu_bringup_and_idle() with the stack
* pointing just below where pt_regs would be if it were a normal
* kernel entry.
*/
ctxt->user_regs.eip = (unsigned long)asm_cpu_bringup_and_idle;
ctxt->flags = VGCF_IN_KERNEL;
ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
ctxt->user_regs.ds = __USER_DS;
ctxt->user_regs.es = __USER_DS;
ctxt->user_regs.ss = __KERNEL_DS;
ctxt->user_regs.cs = __KERNEL_CS;
ctxt->user_regs.esp = (unsigned long)task_pt_regs(idle);
xen_copy_trap_info(ctxt->trap_ctxt);
BUG_ON((unsigned long)gdt & ~PAGE_MASK);
gdt_mfn = arbitrary_virt_to_mfn(gdt);
make_lowmem_page_readonly(gdt);
make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));
ctxt->gdt_frames[0] = gdt_mfn;
ctxt->gdt_ents = GDT_ENTRIES;
/*
* Set SS:SP that Xen will use when entering guest kernel mode
* from guest user mode. Subsequent calls to load_sp0() can
* change this value.
*/
ctxt->kernel_ss = __KERNEL_DS;
ctxt->kernel_sp = task_top_of_stack(idle);
ctxt->gs_base_kernel = per_cpu_offset(cpu);
ctxt->event_callback_eip =
(unsigned long)xen_asm_exc_xen_hypervisor_callback;
ctxt->failsafe_callback_eip =
(unsigned long)xen_failsafe_callback;
per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_gfn(swapper_pg_dir));
if (HYPERVISOR_vcpu_op(VCPUOP_initialise, xen_vcpu_nr(cpu), ctxt))
BUG();
kfree(ctxt);
return 0;
}
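/*
 * Note: Xen requires the frames backing a guest GDT to be mapped
 * read-only, which is why the page is write-protected through both its
 * aliases (the linear address and the mfn-based one) before
 * VCPUOP_initialise is issued above.
 */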
static int xen_pv_kick_ap(unsigned int cpu, struct task_struct *idle)
{
int rc;
rc = common_cpu_up(cpu, idle);
if (rc)
return rc;
xen_setup_runstate_info(cpu);
/* make sure interrupts start blocked */
per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;
rc = cpu_initialize_context(cpu, idle);
if (rc)
return rc;
xen_pmu_init(cpu);
/*
* Why is this a BUG? If the hypercall fails then everything can be
* rolled back, no?
*/
BUG_ON(HYPERVISOR_vcpu_op(VCPUOP_up, xen_vcpu_nr(cpu), NULL));
return 0;
}
static void xen_pv_poll_sync_state(void)
{
HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
}
#ifdef CONFIG_HOTPLUG_CPU
static int xen_pv_cpu_disable(void)
{
unsigned int cpu = smp_processor_id();
if (cpu == 0)
return -EBUSY;
cpu_disable_common();
load_cr3(swapper_pg_dir);
return 0;
}
static void xen_pv_cpu_die(unsigned int cpu)
{
while (HYPERVISOR_vcpu_op(VCPUOP_is_up, xen_vcpu_nr(cpu), NULL)) {
__set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(HZ/10);
}
}
static void xen_pv_cleanup_dead_cpu(unsigned int cpu)
{
xen_smp_intr_free(cpu);
xen_uninit_lock_cpu(cpu);
xen_teardown_timer(cpu);
xen_pmu_finish(cpu);
}
static void __noreturn xen_pv_play_dead(void) /* used only with HOTPLUG_CPU */
{
play_dead_common();
HYPERVISOR_vcpu_op(VCPUOP_down, xen_vcpu_nr(smp_processor_id()), NULL);
xen_cpu_bringup_again((unsigned long)task_pt_regs(current));
BUG();
}
#else /* !CONFIG_HOTPLUG_CPU */
static int xen_pv_cpu_disable(void)
{
return -ENOSYS;
}
static void xen_pv_cpu_die(unsigned int cpu)
{
BUG();
}
static void xen_pv_cleanup_dead_cpu(unsigned int cpu)
{
BUG();
}
static void __noreturn xen_pv_play_dead(void)
{
BUG();
}
#endif
static void stop_self(void *v)
{
int cpu = smp_processor_id();
/* make sure we're not pinning something down */
load_cr3(swapper_pg_dir);
/* should set up a minimal gdt */
set_cpu_online(cpu, false);
HYPERVISOR_vcpu_op(VCPUOP_down, xen_vcpu_nr(cpu), NULL);
BUG();
}
static void xen_pv_stop_other_cpus(int wait)
{
smp_call_function(stop_self, NULL, wait);
}
static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id)
{
irq_work_run();
inc_irq_stat(apic_irq_work_irqs);
return IRQ_HANDLED;
}
static const struct smp_ops xen_smp_ops __initconst = {
.smp_prepare_boot_cpu = xen_pv_smp_prepare_boot_cpu,
.smp_prepare_cpus = xen_pv_smp_prepare_cpus,
.smp_cpus_done = xen_smp_cpus_done,
.kick_ap_alive = xen_pv_kick_ap,
.cpu_die = xen_pv_cpu_die,
.cleanup_dead_cpu = xen_pv_cleanup_dead_cpu,
.poll_sync_state = xen_pv_poll_sync_state,
.cpu_disable = xen_pv_cpu_disable,
.play_dead = xen_pv_play_dead,
.stop_other_cpus = xen_pv_stop_other_cpus,
.smp_send_reschedule = xen_smp_send_reschedule,
.send_call_func_ipi = xen_smp_send_call_function_ipi,
.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi,
};
void __init xen_smp_init(void)
{
smp_ops = xen_smp_ops;
/* Avoid searching for BIOS MP tables */
x86_init.mpparse.find_smp_config = x86_init_noop;
x86_init.mpparse.get_smp_config = _get_smp_config;
}
| linux-master | arch/x86/xen/smp_pv.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/kexec.h>
#include <linux/memblock.h>
#include <linux/virtio_anchor.h>
#include <xen/features.h>
#include <xen/events.h>
#include <xen/hvm.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/memory.h>
#include <asm/apic.h>
#include <asm/cpu.h>
#include <asm/smp.h>
#include <asm/io_apic.h>
#include <asm/reboot.h>
#include <asm/setup.h>
#include <asm/idtentry.h>
#include <asm/hypervisor.h>
#include <asm/e820/api.h>
#include <asm/early_ioremap.h>
#include <asm/xen/cpuid.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/page.h>
#include "xen-ops.h"
#include "mmu.h"
#include "smp.h"
static unsigned long shared_info_pfn;
__ro_after_init bool xen_percpu_upcall;
EXPORT_SYMBOL_GPL(xen_percpu_upcall);
void xen_hvm_init_shared_info(void)
{
struct xen_add_to_physmap xatp;
xatp.domid = DOMID_SELF;
xatp.idx = 0;
xatp.space = XENMAPSPACE_shared_info;
xatp.gpfn = shared_info_pfn;
if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
BUG();
}
static void __init reserve_shared_info(void)
{
u64 pa;
/*
* Search for a free page starting at 4kB physical address.
* Low memory is preferred to avoid an EPT large page split up
* by the mapping.
* Starting below X86_RESERVE_LOW (usually 64kB) is fine as
* the BIOS used for HVM guests is well behaved and won't
* clobber memory other than the first 4kB.
*/
for (pa = PAGE_SIZE;
!e820__mapped_all(pa, pa + PAGE_SIZE, E820_TYPE_RAM) ||
memblock_is_reserved(pa);
pa += PAGE_SIZE)
;
shared_info_pfn = PHYS_PFN(pa);
memblock_reserve(pa, PAGE_SIZE);
HYPERVISOR_shared_info = early_memremap(pa, PAGE_SIZE);
}
static void __init xen_hvm_init_mem_mapping(void)
{
early_memunmap(HYPERVISOR_shared_info, PAGE_SIZE);
HYPERVISOR_shared_info = __va(PFN_PHYS(shared_info_pfn));
/*
* The virtual address of the shared_info page has changed, so
* the vcpu_info pointer for VCPU 0 is now stale.
*
* The prepare_boot_cpu callback will re-initialize it via
* xen_vcpu_setup, but we can't rely on that to be called for
* old Xen versions (xen_have_vector_callback == 0).
*
* It is, in any case, bad to have a stale vcpu_info pointer,
* so reset it now.
*/
xen_vcpu_info_reset(0);
}
static void __init init_hvm_pv_info(void)
{
int major, minor;
uint32_t eax, ebx, ecx, edx, base;
base = xen_cpuid_base();
eax = cpuid_eax(base + 1);
major = eax >> 16;
minor = eax & 0xffff;
printk(KERN_INFO "Xen version %d.%d.\n", major, minor);
xen_domain_type = XEN_HVM_DOMAIN;
/* For PVH, the hypercall page was already set up in xen_prepare_pvh(). */
if (xen_pvh_domain())
pv_info.name = "Xen PVH";
else {
u64 pfn;
uint32_t msr;
pv_info.name = "Xen HVM";
msr = cpuid_ebx(base + 2);
pfn = __pa(hypercall_page);
wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));
}
xen_setup_features();
cpuid(base + 4, &eax, &ebx, &ecx, &edx);
if (eax & XEN_HVM_CPUID_VCPU_ID_PRESENT)
this_cpu_write(xen_vcpu_id, ebx);
else
this_cpu_write(xen_vcpu_id, smp_processor_id());
}
DEFINE_IDTENTRY_SYSVEC(sysvec_xen_hvm_callback)
{
struct pt_regs *old_regs = set_irq_regs(regs);
if (xen_percpu_upcall)
apic_eoi();
inc_irq_stat(irq_hv_callback_count);
xen_evtchn_do_upcall();
set_irq_regs(old_regs);
}
#ifdef CONFIG_KEXEC_CORE
static void xen_hvm_shutdown(void)
{
native_machine_shutdown();
if (kexec_in_progress)
xen_reboot(SHUTDOWN_soft_reset);
}
static void xen_hvm_crash_shutdown(struct pt_regs *regs)
{
native_machine_crash_shutdown(regs);
xen_reboot(SHUTDOWN_soft_reset);
}
#endif
static int xen_cpu_up_prepare_hvm(unsigned int cpu)
{
int rc = 0;
/*
* If a CPU was offlined earlier and offlining timed out then the
* lock mechanism is still initialized. Uninit it unconditionally
* as it's safe to call even if already uninited. Interrupts and
* timer have already been handled in xen_cpu_dead_hvm().
*/
xen_uninit_lock_cpu(cpu);
if (cpu_acpi_id(cpu) != U32_MAX)
per_cpu(xen_vcpu_id, cpu) = cpu_acpi_id(cpu);
else
per_cpu(xen_vcpu_id, cpu) = cpu;
xen_vcpu_setup(cpu);
if (!xen_have_vector_callback)
return 0;
if (xen_percpu_upcall) {
rc = xen_set_upcall_vector(cpu);
if (rc) {
WARN(1, "HVMOP_set_evtchn_upcall_vector"
" for CPU %d failed: %d\n", cpu, rc);
return rc;
}
}
if (xen_feature(XENFEAT_hvm_safe_pvclock))
xen_setup_timer(cpu);
rc = xen_smp_intr_init(cpu);
if (rc) {
WARN(1, "xen_smp_intr_init() for CPU %d failed: %d\n",
cpu, rc);
}
return rc;
}
static int xen_cpu_dead_hvm(unsigned int cpu)
{
xen_smp_intr_free(cpu);
if (xen_have_vector_callback && xen_feature(XENFEAT_hvm_safe_pvclock))
xen_teardown_timer(cpu);
return 0;
}
static void __init xen_hvm_guest_init(void)
{
if (xen_pv_domain())
return;
if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT))
virtio_set_mem_acc_cb(xen_virtio_restricted_mem_acc);
init_hvm_pv_info();
reserve_shared_info();
xen_hvm_init_shared_info();
/*
* xen_vcpu is a pointer to the vcpu_info struct in the shared_info
* page, we use it in the event channel upcall and in some pvclock
* related functions.
*/
xen_vcpu_info_reset(0);
xen_panic_handler_init();
xen_hvm_smp_init();
WARN_ON(xen_cpuhp_setup(xen_cpu_up_prepare_hvm, xen_cpu_dead_hvm));
xen_unplug_emulated_devices();
x86_init.irqs.intr_init = xen_init_IRQ;
xen_hvm_init_time_ops();
xen_hvm_init_mmu_ops();
#ifdef CONFIG_KEXEC_CORE
machine_ops.shutdown = xen_hvm_shutdown;
machine_ops.crash_shutdown = xen_hvm_crash_shutdown;
#endif
}
static __init int xen_parse_nopv(char *arg)
{
pr_notice("\"xen_nopv\" is deprecated, please use \"nopv\" instead\n");
if (xen_cpuid_base())
nopv = true;
return 0;
}
early_param("xen_nopv", xen_parse_nopv);
static __init int xen_parse_no_vector_callback(char *arg)
{
xen_have_vector_callback = false;
return 0;
}
early_param("xen_no_vector_callback", xen_parse_no_vector_callback);
static __init bool xen_x2apic_available(void)
{
return x2apic_supported();
}
static bool __init msi_ext_dest_id(void)
{
return cpuid_eax(xen_cpuid_base() + 4) & XEN_HVM_CPUID_EXT_DEST_ID;
}
static __init void xen_hvm_guest_late_init(void)
{
#ifdef CONFIG_XEN_PVH
/* Test for PVH domain (PVH boot path taken overrides ACPI flags). */
if (!xen_pvh &&
(x86_platform.legacy.rtc || !x86_platform.legacy.no_vga))
return;
/* PVH detected. */
xen_pvh = true;
if (nopv)
panic("\"nopv\" and \"xen_nopv\" parameters are unsupported in PVH guest.");
/* Make sure we don't fall back to (default) ACPI_IRQ_MODEL_PIC. */
if (!nr_ioapics && acpi_irq_model == ACPI_IRQ_MODEL_PIC)
acpi_irq_model = ACPI_IRQ_MODEL_PLATFORM;
machine_ops.emergency_restart = xen_emergency_restart;
pv_info.name = "Xen PVH";
#endif
}
static uint32_t __init xen_platform_hvm(void)
{
uint32_t xen_domain = xen_cpuid_base();
struct x86_hyper_init *h = &x86_hyper_xen_hvm.init;
if (xen_pv_domain())
return 0;
if (xen_pvh_domain() && nopv) {
/* Guest booting via the Xen-PVH boot entry goes here */
pr_info("\"nopv\" parameter is ignored in PVH guest\n");
nopv = false;
} else if (nopv && xen_domain) {
/*
* Guest booting via normal boot entry (like via grub2) goes
* here.
*
* Use the bare-hardware interface functions if nopv is set;
* xen_hvm_guest_late_init is an exception, as we still need it
* to detect PVH and panic there.
*/
h->init_platform = x86_init_noop;
h->x2apic_available = bool_x86_init_noop;
h->init_mem_mapping = x86_init_noop;
h->init_after_bootmem = x86_init_noop;
h->guest_late_init = xen_hvm_guest_late_init;
x86_hyper_xen_hvm.runtime.pin_vcpu = x86_op_int_noop;
}
return xen_domain;
}
struct hypervisor_x86 x86_hyper_xen_hvm __initdata = {
.name = "Xen HVM",
.detect = xen_platform_hvm,
.type = X86_HYPER_XEN_HVM,
.init.init_platform = xen_hvm_guest_init,
.init.x2apic_available = xen_x2apic_available,
.init.init_mem_mapping = xen_hvm_init_mem_mapping,
.init.guest_late_init = xen_hvm_guest_late_init,
.init.msi_ext_dest_id = msi_ext_dest_id,
.runtime.pin_vcpu = xen_pin_vcpu,
.ignore_nopv = true,
};
| linux-master | arch/x86/xen/enlighten_hvm.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/acpi.h>
#include <linux/export.h>
#include <xen/hvc-console.h>
#include <asm/io_apic.h>
#include <asm/hypervisor.h>
#include <asm/e820/api.h>
#include <xen/xen.h>
#include <asm/xen/interface.h>
#include <asm/xen/hypercall.h>
#include <xen/interface/memory.h>
#include "xen-ops.h"
/*
* PVH variables.
*
* The variable xen_pvh needs to live in a data segment since it is used
* after startup_{32|64} is invoked, which will clear the .bss segment.
*/
bool __ro_after_init xen_pvh;
EXPORT_SYMBOL_GPL(xen_pvh);
void __init xen_pvh_init(struct boot_params *boot_params)
{
u32 msr;
u64 pfn;
xen_pvh = 1;
xen_domain_type = XEN_HVM_DOMAIN;
xen_start_flags = pvh_start_info.flags;
msr = cpuid_ebx(xen_cpuid_base() + 2);
pfn = __pa(hypercall_page);
wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));
if (xen_initial_domain())
x86_init.oem.arch_setup = xen_add_preferred_consoles;
x86_init.oem.banner = xen_banner;
xen_efi_init(boot_params);
if (xen_initial_domain()) {
struct xen_platform_op op = {
.cmd = XENPF_get_dom0_console,
};
int ret = HYPERVISOR_platform_op(&op);
if (ret > 0)
xen_init_vga(&op.u.dom0_console,
min(ret * sizeof(char),
sizeof(op.u.dom0_console)),
&boot_params->screen_info);
}
}
void __init mem_map_via_hcall(struct boot_params *boot_params_p)
{
struct xen_memory_map memmap;
int rc;
memmap.nr_entries = ARRAY_SIZE(boot_params_p->e820_table);
set_xen_guest_handle(memmap.buffer, boot_params_p->e820_table);
rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
if (rc) {
xen_raw_printk("XENMEM_memory_map failed (%d)\n", rc);
BUG();
}
boot_params_p->e820_entries = memmap.nr_entries;
}
| linux-master | arch/x86/xen/enlighten_pvh.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Core of Xen paravirt_ops implementation.
*
* This file contains the xen_paravirt_ops structure itself, and the
* implementations for:
* - privileged instructions
* - interrupt flags
* - segment operations
* - booting and setup
*
* Jeremy Fitzhardinge <[email protected]>, XenSource Inc, 2007
*/
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/preempt.h>
#include <linux/hardirq.h>
#include <linux/percpu.h>
#include <linux/delay.h>
#include <linux/start_kernel.h>
#include <linux/sched.h>
#include <linux/kprobes.h>
#include <linux/kstrtox.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/edd.h>
#include <linux/reboot.h>
#include <linux/virtio_anchor.h>
#include <linux/stackprotector.h>
#include <xen/xen.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/version.h>
#include <xen/interface/physdev.h>
#include <xen/interface/vcpu.h>
#include <xen/interface/memory.h>
#include <xen/interface/nmi.h>
#include <xen/interface/xen-mca.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/hvc-console.h>
#include <xen/acpi.h>
#include <asm/paravirt.h>
#include <asm/apic.h>
#include <asm/page.h>
#include <asm/xen/pci.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/cpuid.h>
#include <asm/fixmap.h>
#include <asm/processor.h>
#include <asm/proto.h>
#include <asm/msr-index.h>
#include <asm/traps.h>
#include <asm/setup.h>
#include <asm/desc.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/reboot.h>
#include <asm/hypervisor.h>
#include <asm/mach_traps.h>
#include <asm/mtrr.h>
#include <asm/mwait.h>
#include <asm/pci_x86.h>
#include <asm/cpu.h>
#ifdef CONFIG_X86_IOPL_IOPERM
#include <asm/io_bitmap.h>
#endif
#ifdef CONFIG_ACPI
#include <linux/acpi.h>
#include <asm/acpi.h>
#include <acpi/proc_cap_intel.h>
#include <acpi/processor.h>
#include <xen/interface/platform.h>
#endif
#include "xen-ops.h"
#include "mmu.h"
#include "smp.h"
#include "multicalls.h"
#include "pmu.h"
#include "../kernel/cpu/cpu.h" /* get_cpu_cap() */
void *xen_initial_gdt;
static int xen_cpu_up_prepare_pv(unsigned int cpu);
static int xen_cpu_dead_pv(unsigned int cpu);
struct tls_descs {
struct desc_struct desc[3];
};
DEFINE_PER_CPU(enum xen_lazy_mode, xen_lazy_mode) = XEN_LAZY_NONE;
DEFINE_PER_CPU(unsigned int, xen_lazy_nesting);
enum xen_lazy_mode xen_get_lazy_mode(void)
{
if (in_interrupt())
return XEN_LAZY_NONE;
return this_cpu_read(xen_lazy_mode);
}
/*
* Updating the 3 TLS descriptors in the GDT on every task switch is
* surprisingly expensive so we avoid updating them if they haven't
* changed. Since Xen writes different descriptors than the one
* passed in the update_descriptor hypercall we keep shadow copies to
* compare against.
*/
static DEFINE_PER_CPU(struct tls_descs, shadow_tls_desc);
static __read_mostly bool xen_msr_safe = IS_ENABLED(CONFIG_XEN_PV_MSR_SAFE);
static int __init parse_xen_msr_safe(char *str)
{
if (str)
return kstrtobool(str, &xen_msr_safe);
return -EINVAL;
}
early_param("xen_msr_safe", parse_xen_msr_safe);
/* Get MTRR settings from Xen and put them into mtrr_state. */
static void __init xen_set_mtrr_data(void)
{
#ifdef CONFIG_MTRR
struct xen_platform_op op = {
.cmd = XENPF_read_memtype,
.interface_version = XENPF_INTERFACE_VERSION,
};
unsigned int reg;
unsigned long mask;
uint32_t eax, width;
static struct mtrr_var_range var[MTRR_MAX_VAR_RANGES] __initdata;
/* Get physical address width (only 64-bit cpus supported). */
width = 36;
eax = cpuid_eax(0x80000000);
if ((eax >> 16) == 0x8000 && eax >= 0x80000008) {
eax = cpuid_eax(0x80000008);
width = eax & 0xff;
}
for (reg = 0; reg < MTRR_MAX_VAR_RANGES; reg++) {
op.u.read_memtype.reg = reg;
if (HYPERVISOR_platform_op(&op))
break;
/*
* Only called in dom0, which has all RAM PFNs mapped at
* RAM MFNs, and all PCI space etc. is identity mapped.
* This means we can treat MFN == PFN regarding MTRR settings.
*/
var[reg].base_lo = op.u.read_memtype.type;
var[reg].base_lo |= op.u.read_memtype.mfn << PAGE_SHIFT;
var[reg].base_hi = op.u.read_memtype.mfn >> (32 - PAGE_SHIFT);
mask = ~((op.u.read_memtype.nr_mfns << PAGE_SHIFT) - 1);
mask &= (1UL << width) - 1;
if (mask)
mask |= MTRR_PHYSMASK_V;
var[reg].mask_lo = mask;
var[reg].mask_hi = mask >> 32;
}
	/* Only overwrite MTRR state if any MTRRs could be obtained from Xen. */
if (reg)
mtrr_overwrite_state(var, reg, MTRR_TYPE_UNCACHABLE);
#endif
}
static void __init xen_pv_init_platform(void)
{
/* PV guests can't operate virtio devices without grants. */
if (IS_ENABLED(CONFIG_XEN_VIRTIO))
virtio_set_mem_acc_cb(xen_virtio_restricted_mem_acc);
populate_extra_pte(fix_to_virt(FIX_PARAVIRT_BOOTMAP));
set_fixmap(FIX_PARAVIRT_BOOTMAP, xen_start_info->shared_info);
HYPERVISOR_shared_info = (void *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);
/* xen clock uses per-cpu vcpu_info, need to init it for boot cpu */
xen_vcpu_info_reset(0);
/* pvclock is in shared info area */
xen_init_time_ops();
if (xen_initial_domain())
xen_set_mtrr_data();
else
mtrr_overwrite_state(NULL, 0, MTRR_TYPE_WRBACK);
}
static void __init xen_pv_guest_late_init(void)
{
#ifndef CONFIG_SMP
/* Setup shared vcpu info for non-smp configurations */
xen_setup_vcpu_info_placement();
#endif
}
static __read_mostly unsigned int cpuid_leaf5_ecx_val;
static __read_mostly unsigned int cpuid_leaf5_edx_val;
static void xen_cpuid(unsigned int *ax, unsigned int *bx,
unsigned int *cx, unsigned int *dx)
{
unsigned maskebx = ~0;
/*
* Mask out inconvenient features, to try and disable as many
* unsupported kernel subsystems as possible.
*/
switch (*ax) {
case CPUID_MWAIT_LEAF:
/* Synthesize the values.. */
*ax = 0;
*bx = 0;
*cx = cpuid_leaf5_ecx_val;
*dx = cpuid_leaf5_edx_val;
return;
case 0xb:
/* Suppress extended topology stuff */
maskebx = 0;
break;
}
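	/*
	 * The XEN_EMULATE_PREFIX makes the hypervisor intercept this CPUID
	 * so it can filter the feature bits reported to the guest.
	 */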
asm(XEN_EMULATE_PREFIX "cpuid"
: "=a" (*ax),
"=b" (*bx),
"=c" (*cx),
"=d" (*dx)
: "0" (*ax), "2" (*cx));
*bx &= maskebx;
}
static bool __init xen_check_mwait(void)
{
#ifdef CONFIG_ACPI
struct xen_platform_op op = {
.cmd = XENPF_set_processor_pminfo,
.u.set_pminfo.id = -1,
.u.set_pminfo.type = XEN_PM_PDC,
};
uint32_t buf[3];
unsigned int ax, bx, cx, dx;
unsigned int mwait_mask;
/* We need to determine whether it is OK to expose the MWAIT
* capability to the kernel to harvest deeper than C3 states from ACPI
* _CST using the processor_harvest_xen.c module. For this to work, we
* need to gather the MWAIT_LEAF values (which the cstate.c code
* checks against). The hypervisor won't expose the MWAIT flag because
	 * it would break backwards compatibility; so we gather the information
	 * directly from the hardware and via the hypercall.
*/
if (!xen_initial_domain())
return false;
/*
	 * When running on a Xen version earlier than 4.2, do not expose
	 * MWAIT, to avoid the risk of loading the native ACPI PAD driver.
*/
if (!xen_running_on_version_or_later(4, 2))
return false;
ax = 1;
cx = 0;
native_cpuid(&ax, &bx, &cx, &dx);
mwait_mask = (1 << (X86_FEATURE_EST % 32)) |
(1 << (X86_FEATURE_MWAIT % 32));
if ((cx & mwait_mask) != mwait_mask)
return false;
/* We need to emulate the MWAIT_LEAF and for that we need both
* ecx and edx. The hypercall provides only partial information.
*/
ax = CPUID_MWAIT_LEAF;
bx = 0;
cx = 0;
dx = 0;
native_cpuid(&ax, &bx, &cx, &dx);
/* Ask the Hypervisor whether to clear ACPI_PROC_CAP_C_C2C3_FFH. If so,
* don't expose MWAIT_LEAF and let ACPI pick the IOPORT version of C3.
*/
buf[0] = ACPI_PDC_REVISION_ID;
buf[1] = 1;
buf[2] = (ACPI_PROC_CAP_C_CAPABILITY_SMP | ACPI_PROC_CAP_EST_CAPABILITY_SWSMP);
set_xen_guest_handle(op.u.set_pminfo.pdc, buf);
if ((HYPERVISOR_platform_op(&op) == 0) &&
(buf[2] & (ACPI_PROC_CAP_C_C1_FFH | ACPI_PROC_CAP_C_C2C3_FFH))) {
cpuid_leaf5_ecx_val = cx;
cpuid_leaf5_edx_val = dx;
}
return true;
#else
return false;
#endif
}
static bool __init xen_check_xsave(void)
{
unsigned int cx, xsave_mask;
cx = cpuid_ecx(1);
xsave_mask = (1 << (X86_FEATURE_XSAVE % 32)) |
(1 << (X86_FEATURE_OSXSAVE % 32));
/* Xen will set CR4.OSXSAVE if supported and not disabled by force */
return (cx & xsave_mask) == xsave_mask;
}
static void __init xen_init_capabilities(void)
{
setup_force_cpu_cap(X86_FEATURE_XENPV);
setup_clear_cpu_cap(X86_FEATURE_DCA);
setup_clear_cpu_cap(X86_FEATURE_APERFMPERF);
setup_clear_cpu_cap(X86_FEATURE_MTRR);
setup_clear_cpu_cap(X86_FEATURE_ACC);
setup_clear_cpu_cap(X86_FEATURE_X2APIC);
setup_clear_cpu_cap(X86_FEATURE_SME);
setup_clear_cpu_cap(X86_FEATURE_LKGS);
/*
* Xen PV would need some work to support PCID: CR3 handling as well
* as xen_flush_tlb_others() would need updating.
*/
setup_clear_cpu_cap(X86_FEATURE_PCID);
if (!xen_initial_domain())
setup_clear_cpu_cap(X86_FEATURE_ACPI);
if (xen_check_mwait())
setup_force_cpu_cap(X86_FEATURE_MWAIT);
else
setup_clear_cpu_cap(X86_FEATURE_MWAIT);
if (!xen_check_xsave()) {
setup_clear_cpu_cap(X86_FEATURE_XSAVE);
setup_clear_cpu_cap(X86_FEATURE_OSXSAVE);
}
}
static noinstr void xen_set_debugreg(int reg, unsigned long val)
{
HYPERVISOR_set_debugreg(reg, val);
}
static noinstr unsigned long xen_get_debugreg(int reg)
{
return HYPERVISOR_get_debugreg(reg);
}
static void xen_start_context_switch(struct task_struct *prev)
{
BUG_ON(preemptible());
if (this_cpu_read(xen_lazy_mode) == XEN_LAZY_MMU) {
arch_leave_lazy_mmu_mode();
set_ti_thread_flag(task_thread_info(prev), TIF_LAZY_MMU_UPDATES);
}
enter_lazy(XEN_LAZY_CPU);
}
static void xen_end_context_switch(struct task_struct *next)
{
BUG_ON(preemptible());
xen_mc_flush();
leave_lazy(XEN_LAZY_CPU);
if (test_and_clear_ti_thread_flag(task_thread_info(next), TIF_LAZY_MMU_UPDATES))
arch_enter_lazy_mmu_mode();
}
static unsigned long xen_store_tr(void)
{
return 0;
}
/*
* Set the page permissions for a particular virtual address. If the
* address is a vmalloc mapping (or other non-linear mapping), then
* find the linear mapping of the page and also set its protections to
* match.
*/
static void set_aliased_prot(void *v, pgprot_t prot)
{
int level;
pte_t *ptep;
pte_t pte;
unsigned long pfn;
unsigned char dummy;
void *va;
ptep = lookup_address((unsigned long)v, &level);
BUG_ON(ptep == NULL);
pfn = pte_pfn(*ptep);
pte = pfn_pte(pfn, prot);
/*
* Careful: update_va_mapping() will fail if the virtual address
* we're poking isn't populated in the page tables. We don't
* need to worry about the direct map (that's always in the page
* tables), but we need to be careful about vmap space. In
* particular, the top level page table can lazily propagate
* entries between processes, so if we've switched mms since we
* vmapped the target in the first place, we might not have the
* top-level page table entry populated.
*
* We disable preemption because we want the same mm active when
* we probe the target and when we issue the hypercall. We'll
* have the same nominal mm, but if we're a kernel thread, lazy
* mm dropping could change our pgd.
*
	 * Out of an abundance of caution, this uses copy_from_kernel_nofault()
	 * to fault in the target address just in case there's some obscure case
* in which the target address isn't readable.
*/
preempt_disable();
copy_from_kernel_nofault(&dummy, v, 1);
if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
BUG();
va = __va(PFN_PHYS(pfn));
if (va != v && HYPERVISOR_update_va_mapping((unsigned long)va, pte, 0))
BUG();
preempt_enable();
}
static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
int i;
/*
	 * We need to mark all aliases of the LDT pages RO. We
* don't need to call vm_flush_aliases(), though, since that's
* only responsible for flushing aliases out the TLBs, not the
* page tables, and Xen will flush the TLB for us if needed.
*
* To avoid confusing future readers: none of this is necessary
* to load the LDT. The hypervisor only checks this when the
* LDT is faulted in due to subsequent descriptor access.
*/
for (i = 0; i < entries; i += entries_per_page)
set_aliased_prot(ldt + i, PAGE_KERNEL_RO);
}
static void xen_free_ldt(struct desc_struct *ldt, unsigned entries)
{
const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
int i;
for (i = 0; i < entries; i += entries_per_page)
set_aliased_prot(ldt + i, PAGE_KERNEL);
}
static void xen_set_ldt(const void *addr, unsigned entries)
{
struct mmuext_op *op;
struct multicall_space mcs = xen_mc_entry(sizeof(*op));
trace_xen_cpu_set_ldt(addr, entries);
op = mcs.args;
op->cmd = MMUEXT_SET_LDT;
op->arg1.linear_addr = (unsigned long)addr;
op->arg2.nr_ents = entries;
MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
xen_mc_issue(XEN_LAZY_CPU);
}
static void xen_load_gdt(const struct desc_ptr *dtr)
{
unsigned long va = dtr->address;
unsigned int size = dtr->size + 1;
unsigned long pfn, mfn;
int level;
pte_t *ptep;
void *virt;
/* @size should be at most GDT_SIZE which is smaller than PAGE_SIZE. */
BUG_ON(size > PAGE_SIZE);
BUG_ON(va & ~PAGE_MASK);
/*
* The GDT is per-cpu and is in the percpu data area.
* That can be virtually mapped, so we need to do a
* page-walk to get the underlying MFN for the
* hypercall. The page can also be in the kernel's
* linear range, so we need to RO that mapping too.
*/
ptep = lookup_address(va, &level);
BUG_ON(ptep == NULL);
pfn = pte_pfn(*ptep);
mfn = pfn_to_mfn(pfn);
virt = __va(PFN_PHYS(pfn));
make_lowmem_page_readonly((void *)va);
make_lowmem_page_readonly(virt);
if (HYPERVISOR_set_gdt(&mfn, size / sizeof(struct desc_struct)))
BUG();
}
/*
* load_gdt for early boot, when the gdt is only mapped once
*/
static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
{
unsigned long va = dtr->address;
unsigned int size = dtr->size + 1;
unsigned long pfn, mfn;
pte_t pte;
/* @size should be at most GDT_SIZE which is smaller than PAGE_SIZE. */
BUG_ON(size > PAGE_SIZE);
BUG_ON(va & ~PAGE_MASK);
pfn = virt_to_pfn((void *)va);
mfn = pfn_to_mfn(pfn);
pte = pfn_pte(pfn, PAGE_KERNEL_RO);
if (HYPERVISOR_update_va_mapping((unsigned long)va, pte, 0))
BUG();
if (HYPERVISOR_set_gdt(&mfn, size / sizeof(struct desc_struct)))
BUG();
}
static inline bool desc_equal(const struct desc_struct *d1,
const struct desc_struct *d2)
{
return !memcmp(d1, d2, sizeof(*d1));
}
static void load_TLS_descriptor(struct thread_struct *t,
unsigned int cpu, unsigned int i)
{
struct desc_struct *shadow = &per_cpu(shadow_tls_desc, cpu).desc[i];
struct desc_struct *gdt;
xmaddr_t maddr;
struct multicall_space mc;
if (desc_equal(shadow, &t->tls_array[i]))
return;
*shadow = t->tls_array[i];
gdt = get_cpu_gdt_rw(cpu);
maddr = arbitrary_virt_to_machine(&gdt[GDT_ENTRY_TLS_MIN+i]);
mc = __xen_mc_entry(0);
MULTI_update_descriptor(mc.mc, maddr.maddr, t->tls_array[i]);
}
static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
{
/*
* In lazy mode we need to zero %fs, otherwise we may get an
* exception between the new %fs descriptor being loaded and
* %fs being effectively cleared at __switch_to().
*/
if (xen_get_lazy_mode() == XEN_LAZY_CPU)
loadsegment(fs, 0);
xen_mc_batch();
load_TLS_descriptor(t, cpu, 0);
load_TLS_descriptor(t, cpu, 1);
load_TLS_descriptor(t, cpu, 2);
xen_mc_issue(XEN_LAZY_CPU);
}
static void xen_load_gs_index(unsigned int idx)
{
if (HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, idx))
BUG();
}
static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum,
const void *ptr)
{
xmaddr_t mach_lp = arbitrary_virt_to_machine(&dt[entrynum]);
u64 entry = *(u64 *)ptr;
trace_xen_cpu_write_ldt_entry(dt, entrynum, entry);
preempt_disable();
xen_mc_flush();
if (HYPERVISOR_update_descriptor(mach_lp.maddr, entry))
BUG();
preempt_enable();
}
void noist_exc_debug(struct pt_regs *regs);
DEFINE_IDTENTRY_RAW(xenpv_exc_nmi)
{
/* On Xen PV, NMI doesn't use IST. The C part is the same as native. */
exc_nmi(regs);
}
DEFINE_IDTENTRY_RAW_ERRORCODE(xenpv_exc_double_fault)
{
/* On Xen PV, DF doesn't use IST. The C part is the same as native. */
exc_double_fault(regs, error_code);
}
DEFINE_IDTENTRY_RAW(xenpv_exc_debug)
{
/*
* There's no IST on Xen PV, but we still need to dispatch
* to the correct handler.
*/
if (user_mode(regs))
noist_exc_debug(regs);
else
exc_debug(regs);
}
DEFINE_IDTENTRY_RAW(exc_xen_unknown_trap)
{
/* This should never happen and there is no way to handle it. */
instrumentation_begin();
pr_err("Unknown trap in Xen PV mode.");
BUG();
instrumentation_end();
}
#ifdef CONFIG_X86_MCE
DEFINE_IDTENTRY_RAW(xenpv_exc_machine_check)
{
/*
* There's no IST on Xen PV, but we still need to dispatch
* to the correct handler.
*/
if (user_mode(regs))
noist_exc_machine_check(regs);
else
exc_machine_check(regs);
}
#endif
struct trap_array_entry {
void (*orig)(void);
void (*xen)(void);
bool ist_okay;
};
#define TRAP_ENTRY(func, ist_ok) { \
.orig = asm_##func, \
.xen = xen_asm_##func, \
.ist_okay = ist_ok }
#define TRAP_ENTRY_REDIR(func, ist_ok) { \
.orig = asm_##func, \
.xen = xen_asm_xenpv_##func, \
.ist_okay = ist_ok }
static struct trap_array_entry trap_array[] = {
TRAP_ENTRY_REDIR(exc_debug, true ),
TRAP_ENTRY_REDIR(exc_double_fault, true ),
#ifdef CONFIG_X86_MCE
TRAP_ENTRY_REDIR(exc_machine_check, true ),
#endif
TRAP_ENTRY_REDIR(exc_nmi, true ),
TRAP_ENTRY(exc_int3, false ),
TRAP_ENTRY(exc_overflow, false ),
#ifdef CONFIG_IA32_EMULATION
{ entry_INT80_compat, xen_entry_INT80_compat, false },
#endif
TRAP_ENTRY(exc_page_fault, false ),
TRAP_ENTRY(exc_divide_error, false ),
TRAP_ENTRY(exc_bounds, false ),
TRAP_ENTRY(exc_invalid_op, false ),
TRAP_ENTRY(exc_device_not_available, false ),
TRAP_ENTRY(exc_coproc_segment_overrun, false ),
TRAP_ENTRY(exc_invalid_tss, false ),
TRAP_ENTRY(exc_segment_not_present, false ),
TRAP_ENTRY(exc_stack_segment, false ),
TRAP_ENTRY(exc_general_protection, false ),
TRAP_ENTRY(exc_spurious_interrupt_bug, false ),
TRAP_ENTRY(exc_coprocessor_error, false ),
TRAP_ENTRY(exc_alignment_check, false ),
TRAP_ENTRY(exc_simd_coprocessor_error, false ),
#ifdef CONFIG_X86_CET
TRAP_ENTRY(exc_control_protection, false ),
#endif
};
static bool __ref get_trap_addr(void **addr, unsigned int ist)
{
unsigned int nr;
bool ist_okay = false;
bool found = false;
/*
* Replace trap handler addresses by Xen specific ones.
* Check for known traps using IST and whitelist them.
* The debugger ones are the only ones we care about.
* Xen will handle faults like double_fault, so we should never see
* them. Warn if there's an unexpected IST-using fault handler.
*/
for (nr = 0; nr < ARRAY_SIZE(trap_array); nr++) {
struct trap_array_entry *entry = trap_array + nr;
if (*addr == entry->orig) {
*addr = entry->xen;
ist_okay = entry->ist_okay;
found = true;
break;
}
}
if (nr == ARRAY_SIZE(trap_array) &&
*addr >= (void *)early_idt_handler_array[0] &&
*addr < (void *)early_idt_handler_array[NUM_EXCEPTION_VECTORS]) {
nr = (*addr - (void *)early_idt_handler_array[0]) /
EARLY_IDT_HANDLER_SIZE;
*addr = (void *)xen_early_idt_handler_array[nr];
found = true;
}
if (!found)
*addr = (void *)xen_asm_exc_xen_unknown_trap;
if (WARN_ON(found && ist != 0 && !ist_okay))
return false;
return true;
}
static int cvt_gate_to_trap(int vector, const gate_desc *val,
struct trap_info *info)
{
unsigned long addr;
if (val->bits.type != GATE_TRAP && val->bits.type != GATE_INTERRUPT)
return 0;
info->vector = vector;
addr = gate_offset(val);
if (!get_trap_addr((void **)&addr, val->bits.ist))
return 0;
info->address = addr;
info->cs = gate_segment(val);
info->flags = val->bits.dpl;
/* interrupt gates clear IF */
if (val->bits.type == GATE_INTERRUPT)
info->flags |= 1 << 2;
return 1;
}
/* Locations of each CPU's IDT */
static DEFINE_PER_CPU(struct desc_ptr, idt_desc);
/* Set an IDT entry. If the entry is part of the current IDT, then
also update Xen. */
static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
{
unsigned long p = (unsigned long)&dt[entrynum];
unsigned long start, end;
trace_xen_cpu_write_idt_entry(dt, entrynum, g);
preempt_disable();
start = __this_cpu_read(idt_desc.address);
end = start + __this_cpu_read(idt_desc.size) + 1;
xen_mc_flush();
native_write_idt_entry(dt, entrynum, g);
if (p >= start && (p + 8) <= end) {
struct trap_info info[2];
info[1].address = 0;
if (cvt_gate_to_trap(entrynum, g, &info[0]))
if (HYPERVISOR_set_trap_table(info))
BUG();
}
preempt_enable();
}
static unsigned xen_convert_trap_info(const struct desc_ptr *desc,
struct trap_info *traps, bool full)
{
unsigned in, out, count;
count = (desc->size+1) / sizeof(gate_desc);
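	/* The x86 IDT can hold at most 256 gates. */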
BUG_ON(count > 256);
for (in = out = 0; in < count; in++) {
gate_desc *entry = (gate_desc *)(desc->address) + in;
if (cvt_gate_to_trap(in, entry, &traps[out]) || full)
out++;
}
return out;
}
void xen_copy_trap_info(struct trap_info *traps)
{
const struct desc_ptr *desc = this_cpu_ptr(&idt_desc);
xen_convert_trap_info(desc, traps, true);
}
/* Load a new IDT into Xen. In principle this can be per-CPU, so we
hold a spinlock to protect the static traps[] array (static because
it avoids allocation, and saves stack space). */
static void xen_load_idt(const struct desc_ptr *desc)
{
static DEFINE_SPINLOCK(lock);
static struct trap_info traps[257];
static const struct trap_info zero = { };
unsigned out;
trace_xen_cpu_load_idt(desc);
spin_lock(&lock);
memcpy(this_cpu_ptr(&idt_desc), desc, sizeof(idt_desc));
out = xen_convert_trap_info(desc, traps, false);
traps[out] = zero;
xen_mc_flush();
if (HYPERVISOR_set_trap_table(traps))
BUG();
spin_unlock(&lock);
}
/* Write a GDT descriptor entry. Ignore LDT descriptors, since
they're handled differently. */
static void xen_write_gdt_entry(struct desc_struct *dt, int entry,
const void *desc, int type)
{
trace_xen_cpu_write_gdt_entry(dt, entry, desc, type);
preempt_disable();
switch (type) {
case DESC_LDT:
case DESC_TSS:
/* ignore */
break;
default: {
xmaddr_t maddr = arbitrary_virt_to_machine(&dt[entry]);
xen_mc_flush();
if (HYPERVISOR_update_descriptor(maddr.maddr, *(u64 *)desc))
BUG();
}
}
preempt_enable();
}
/*
* Version of write_gdt_entry for use at early boot-time needed to
* update an entry as simply as possible.
*/
static void __init xen_write_gdt_entry_boot(struct desc_struct *dt, int entry,
const void *desc, int type)
{
trace_xen_cpu_write_gdt_entry(dt, entry, desc, type);
switch (type) {
case DESC_LDT:
case DESC_TSS:
/* ignore */
break;
default: {
xmaddr_t maddr = virt_to_machine(&dt[entry]);
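		/*
		 * If the hypercall is rejected (e.g. the frame isn't
		 * registered with Xen as a GDT page yet), fall back to a
		 * plain write; the page is still mapped writable this
		 * early in boot.
		 */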
if (HYPERVISOR_update_descriptor(maddr.maddr, *(u64 *)desc))
dt[entry] = *(struct desc_struct *)desc;
}
}
}
static void xen_load_sp0(unsigned long sp0)
{
struct multicall_space mcs;
mcs = xen_mc_entry(0);
MULTI_stack_switch(mcs.mc, __KERNEL_DS, sp0);
xen_mc_issue(XEN_LAZY_CPU);
this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0);
}
#ifdef CONFIG_X86_IOPL_IOPERM
static void xen_invalidate_io_bitmap(void)
{
struct physdev_set_iobitmap iobitmap = {
.bitmap = NULL,
.nr_ports = 0,
};
native_tss_invalidate_io_bitmap();
HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap, &iobitmap);
}
static void xen_update_io_bitmap(void)
{
struct physdev_set_iobitmap iobitmap;
struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
native_tss_update_io_bitmap();
iobitmap.bitmap = (uint8_t *)(&tss->x86_tss) +
tss->x86_tss.io_bitmap_base;
if (tss->x86_tss.io_bitmap_base == IO_BITMAP_OFFSET_INVALID)
iobitmap.nr_ports = 0;
else
iobitmap.nr_ports = IO_BITMAP_BITS;
HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap, &iobitmap);
}
#endif
static void xen_io_delay(void)
{
}
static DEFINE_PER_CPU(unsigned long, xen_cr0_value);
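/*
 * Reading %cr0 traps to the hypervisor for a PV guest, so cache the last
 * value written.  A cached value of zero means "not cached yet", which is
 * unambiguous since CR0.PE is always set while the kernel is running.
 */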
static unsigned long xen_read_cr0(void)
{
unsigned long cr0 = this_cpu_read(xen_cr0_value);
if (unlikely(cr0 == 0)) {
cr0 = native_read_cr0();
this_cpu_write(xen_cr0_value, cr0);
}
return cr0;
}
static void xen_write_cr0(unsigned long cr0)
{
struct multicall_space mcs;
this_cpu_write(xen_cr0_value, cr0);
/* Only pay attention to cr0.TS; everything else is
ignored. */
mcs = xen_mc_entry(0);
MULTI_fpu_taskswitch(mcs.mc, (cr0 & X86_CR0_TS) != 0);
xen_mc_issue(XEN_LAZY_CPU);
}
static void xen_write_cr4(unsigned long cr4)
{
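	/* Mask out bits a PV guest isn't permitted to set itself. */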
cr4 &= ~(X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PCE);
native_write_cr4(cr4);
}
static u64 xen_do_read_msr(unsigned int msr, int *err)
{
u64 val = 0; /* Avoid uninitialized value for safe variant. */
if (pmu_msr_read(msr, &val, err))
return val;
if (err)
val = native_read_msr_safe(msr, err);
else
val = native_read_msr(msr);
switch (msr) {
case MSR_IA32_APICBASE:
val &= ~X2APIC_ENABLE;
break;
}
return val;
}
static void set_seg(unsigned int which, unsigned int low, unsigned int high,
int *err)
{
u64 base = ((u64)high << 32) | low;
if (HYPERVISOR_set_segment_base(which, base) == 0)
return;
if (err)
*err = -EIO;
else
WARN(1, "Xen set_segment_base(%u, %llx) failed\n", which, base);
}
/*
* Support write_msr_safe() and write_msr() semantics.
* With err == NULL write_msr() semantics are selected.
* Supplying an err pointer requires err to be pre-initialized with 0.
*/
static void xen_do_write_msr(unsigned int msr, unsigned int low,
unsigned int high, int *err)
{
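	/*
	 * The segment base MSRs can't be written directly by the
	 * unprivileged PV kernel, so route them through the
	 * set_segment_base hypercall.
	 */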
switch (msr) {
case MSR_FS_BASE:
set_seg(SEGBASE_FS, low, high, err);
break;
case MSR_KERNEL_GS_BASE:
set_seg(SEGBASE_GS_USER, low, high, err);
break;
case MSR_GS_BASE:
set_seg(SEGBASE_GS_KERNEL, low, high, err);
break;
case MSR_STAR:
case MSR_CSTAR:
case MSR_LSTAR:
case MSR_SYSCALL_MASK:
case MSR_IA32_SYSENTER_CS:
case MSR_IA32_SYSENTER_ESP:
case MSR_IA32_SYSENTER_EIP:
/* Fast syscall setup is all done in hypercalls, so
these are all ignored. Stub them out here to stop
Xen console noise. */
break;
default:
if (!pmu_msr_write(msr, low, high, err)) {
if (err)
*err = native_write_msr_safe(msr, low, high);
else
native_write_msr(msr, low, high);
}
}
}
static u64 xen_read_msr_safe(unsigned int msr, int *err)
{
return xen_do_read_msr(msr, err);
}
static int xen_write_msr_safe(unsigned int msr, unsigned int low,
unsigned int high)
{
int err = 0;
xen_do_write_msr(msr, low, high, &err);
return err;
}
static u64 xen_read_msr(unsigned int msr)
{
int err;
return xen_do_read_msr(msr, xen_msr_safe ? &err : NULL);
}
static void xen_write_msr(unsigned int msr, unsigned low, unsigned high)
{
int err;
xen_do_write_msr(msr, low, high, xen_msr_safe ? &err : NULL);
}
/* This is called once we have the cpu_possible_mask */
void __init xen_setup_vcpu_info_placement(void)
{
int cpu;
for_each_possible_cpu(cpu) {
/* Set up direct vCPU id mapping for PV guests. */
per_cpu(xen_vcpu_id, cpu) = cpu;
xen_vcpu_setup(cpu);
}
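	/*
	 * vcpu_info is now placed within the per-cpu area for all cpus, so
	 * the faster direct-access variants of the irq and cr2 pvops can be
	 * installed.
	 */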
pv_ops.irq.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct);
pv_ops.irq.irq_disable = __PV_IS_CALLEE_SAVE(xen_irq_disable_direct);
pv_ops.irq.irq_enable = __PV_IS_CALLEE_SAVE(xen_irq_enable_direct);
pv_ops.mmu.read_cr2 = __PV_IS_CALLEE_SAVE(xen_read_cr2_direct);
}
static const struct pv_info xen_info __initconst = {
.extra_user_64bit_cs = FLAT_USER_CS64,
.name = "Xen",
};
static const typeof(pv_ops) xen_cpu_ops __initconst = {
.cpu = {
.cpuid = xen_cpuid,
.set_debugreg = xen_set_debugreg,
.get_debugreg = xen_get_debugreg,
.read_cr0 = xen_read_cr0,
.write_cr0 = xen_write_cr0,
.write_cr4 = xen_write_cr4,
.wbinvd = pv_native_wbinvd,
.read_msr = xen_read_msr,
.write_msr = xen_write_msr,
.read_msr_safe = xen_read_msr_safe,
.write_msr_safe = xen_write_msr_safe,
.read_pmc = xen_read_pmc,
.load_tr_desc = paravirt_nop,
.set_ldt = xen_set_ldt,
.load_gdt = xen_load_gdt,
.load_idt = xen_load_idt,
.load_tls = xen_load_tls,
.load_gs_index = xen_load_gs_index,
.alloc_ldt = xen_alloc_ldt,
.free_ldt = xen_free_ldt,
.store_tr = xen_store_tr,
.write_ldt_entry = xen_write_ldt_entry,
.write_gdt_entry = xen_write_gdt_entry,
.write_idt_entry = xen_write_idt_entry,
.load_sp0 = xen_load_sp0,
#ifdef CONFIG_X86_IOPL_IOPERM
.invalidate_io_bitmap = xen_invalidate_io_bitmap,
.update_io_bitmap = xen_update_io_bitmap,
#endif
.io_delay = xen_io_delay,
.start_context_switch = xen_start_context_switch,
.end_context_switch = xen_end_context_switch,
},
};
static void xen_restart(char *msg)
{
xen_reboot(SHUTDOWN_reboot);
}
static void xen_machine_halt(void)
{
xen_reboot(SHUTDOWN_poweroff);
}
static void xen_machine_power_off(void)
{
do_kernel_power_off();
xen_reboot(SHUTDOWN_poweroff);
}
static void xen_crash_shutdown(struct pt_regs *regs)
{
xen_reboot(SHUTDOWN_crash);
}
static const struct machine_ops xen_machine_ops __initconst = {
.restart = xen_restart,
.halt = xen_machine_halt,
.power_off = xen_machine_power_off,
.shutdown = xen_machine_halt,
.crash_shutdown = xen_crash_shutdown,
.emergency_restart = xen_emergency_restart,
};
static unsigned char xen_get_nmi_reason(void)
{
unsigned char reason = 0;
/* Construct a value which looks like it came from port 0x61. */
if (test_bit(_XEN_NMIREASON_io_error,
&HYPERVISOR_shared_info->arch.nmi_reason))
reason |= NMI_REASON_IOCHK;
if (test_bit(_XEN_NMIREASON_pci_serr,
&HYPERVISOR_shared_info->arch.nmi_reason))
reason |= NMI_REASON_SERR;
return reason;
}
static void __init xen_boot_params_init_edd(void)
{
#if IS_ENABLED(CONFIG_EDD)
struct xen_platform_op op;
struct edd_info *edd_info;
u32 *mbr_signature;
unsigned nr;
int ret;
edd_info = boot_params.eddbuf;
mbr_signature = boot_params.edd_mbr_sig_buffer;
op.cmd = XENPF_firmware_info;
op.u.firmware_info.type = XEN_FW_DISK_INFO;
for (nr = 0; nr < EDDMAXNR; nr++) {
struct edd_info *info = edd_info + nr;
op.u.firmware_info.index = nr;
info->params.length = sizeof(info->params);
set_xen_guest_handle(op.u.firmware_info.u.disk_info.edd_params,
&info->params);
ret = HYPERVISOR_platform_op(&op);
if (ret)
break;
#define C(x) info->x = op.u.firmware_info.u.disk_info.x
C(device);
C(version);
C(interface_support);
C(legacy_max_cylinder);
C(legacy_max_head);
C(legacy_sectors_per_track);
#undef C
}
boot_params.eddbuf_entries = nr;
op.u.firmware_info.type = XEN_FW_DISK_MBR_SIGNATURE;
for (nr = 0; nr < EDD_MBR_SIG_MAX; nr++) {
op.u.firmware_info.index = nr;
ret = HYPERVISOR_platform_op(&op);
if (ret)
break;
mbr_signature[nr] = op.u.firmware_info.u.disk_mbr_signature.mbr_signature;
}
boot_params.edd_mbr_sig_buf_entries = nr;
#endif
}
/*
* Set up the GDT and segment registers for -fstack-protector. Until
* we do this, we have to be careful not to call any stack-protected
* function, which is most of the kernel.
*/
static void __init xen_setup_gdt(int cpu)
{
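	/*
	 * Install the simpler boot-time variants while switching the GDT
	 * and per-cpu base, then restore the regular ops below.
	 */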
pv_ops.cpu.write_gdt_entry = xen_write_gdt_entry_boot;
pv_ops.cpu.load_gdt = xen_load_gdt_boot;
switch_gdt_and_percpu_base(cpu);
pv_ops.cpu.write_gdt_entry = xen_write_gdt_entry;
pv_ops.cpu.load_gdt = xen_load_gdt;
}
static void __init xen_dom0_set_legacy_features(void)
{
x86_platform.legacy.rtc = 1;
}
static void __init xen_domu_set_legacy_features(void)
{
x86_platform.legacy.rtc = 0;
}
extern void early_xen_iret_patch(void);
/* First C function to be called on Xen boot */
asmlinkage __visible void __init xen_start_kernel(struct start_info *si)
{
struct physdev_set_iopl set_iopl;
unsigned long initrd_start = 0;
int rc;
if (!si)
return;
clear_bss();
xen_start_info = si;
__text_gen_insn(&early_xen_iret_patch,
JMP32_INSN_OPCODE, &early_xen_iret_patch, &xen_iret,
JMP32_INSN_SIZE);
xen_domain_type = XEN_PV_DOMAIN;
xen_start_flags = xen_start_info->flags;
xen_setup_features();
/* Install Xen paravirt ops */
pv_info = xen_info;
pv_ops.cpu = xen_cpu_ops.cpu;
xen_init_irq_ops();
/*
* Setup xen_vcpu early because it is needed for
* local_irq_disable(), irqs_disabled(), e.g. in printk().
*
* Don't do the full vcpu_info placement stuff until we have
* the cpu_possible_mask and a non-dummy shared_info.
*/
xen_vcpu_info_reset(0);
x86_platform.get_nmi_reason = xen_get_nmi_reason;
x86_platform.realmode_reserve = x86_init_noop;
x86_platform.realmode_init = x86_init_noop;
x86_init.resources.memory_setup = xen_memory_setup;
x86_init.irqs.intr_mode_select = x86_init_noop;
x86_init.irqs.intr_mode_init = x86_64_probe_apic;
x86_init.oem.arch_setup = xen_arch_setup;
x86_init.oem.banner = xen_banner;
x86_init.hyper.init_platform = xen_pv_init_platform;
x86_init.hyper.guest_late_init = xen_pv_guest_late_init;
/*
* Set up some pagetable state before starting to set any ptes.
*/
xen_setup_machphys_mapping();
xen_init_mmu_ops();
/* Prevent unwanted bits from being set in PTEs. */
__supported_pte_mask &= ~_PAGE_GLOBAL;
__default_kernel_pte_mask &= ~_PAGE_GLOBAL;
/* Get mfn list */
xen_build_dynamic_phys_to_machine();
/* Work out if we support NX */
get_cpu_cap(&boot_cpu_data);
x86_configure_nx();
/*
* Set up kernel GDT and segment registers, mainly so that
* -fstack-protector code can be executed.
*/
xen_setup_gdt(0);
/* Determine virtual and physical address sizes */
get_cpu_address_sizes(&boot_cpu_data);
/* Let's presume PV guests always boot on vCPU with id 0. */
per_cpu(xen_vcpu_id, 0) = 0;
idt_setup_early_handler();
xen_init_capabilities();
/*
* set up the basic apic ops.
*/
xen_init_apic();
machine_ops = xen_machine_ops;
/*
* The only reliable way to retain the initial address of the
* percpu gdt_page is to remember it here, so we can go and
* mark it RW later, when the initial percpu area is freed.
*/
xen_initial_gdt = &per_cpu(gdt_page, 0);
xen_smp_init();
#ifdef CONFIG_ACPI_NUMA
/*
	 * The pages we get from Xen are not related to machine pages, so
* any NUMA information the kernel tries to get from ACPI will
* be meaningless. Prevent it from trying.
*/
disable_srat();
#endif
WARN_ON(xen_cpuhp_setup(xen_cpu_up_prepare_pv, xen_cpu_dead_pv));
local_irq_disable();
early_boot_irqs_disabled = true;
xen_raw_console_write("mapping kernel into physical memory\n");
xen_setup_kernel_pagetable((pgd_t *)xen_start_info->pt_base,
xen_start_info->nr_pages);
xen_reserve_special_pages();
/*
* We used to do this in xen_arch_setup, but that is too late
	 * on AMD, where early_cpu_init() (run before ->arch_setup()) calls
* early_amd_init which pokes 0xcf8 port.
*/
set_iopl.iopl = 1;
rc = HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
if (rc != 0)
xen_raw_printk("physdev_op failed %d\n", rc);
if (xen_start_info->mod_start) {
if (xen_start_info->flags & SIF_MOD_START_PFN)
initrd_start = PFN_PHYS(xen_start_info->mod_start);
else
initrd_start = __pa(xen_start_info->mod_start);
}
/* Poke various useful things into boot_params */
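	/* Loader type 9 is the boot protocol ID assigned to Xen. */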
boot_params.hdr.type_of_loader = (9 << 4) | 0;
boot_params.hdr.ramdisk_image = initrd_start;
boot_params.hdr.ramdisk_size = xen_start_info->mod_len;
boot_params.hdr.cmd_line_ptr = __pa(xen_start_info->cmd_line);
boot_params.hdr.hardware_subarch = X86_SUBARCH_XEN;
if (!xen_initial_domain()) {
if (pci_xen)
x86_init.pci.arch_init = pci_xen_init;
x86_platform.set_legacy_features =
xen_domu_set_legacy_features;
} else {
const struct dom0_vga_console_info *info =
(void *)((char *)xen_start_info +
xen_start_info->console.dom0.info_off);
struct xen_platform_op op = {
.cmd = XENPF_firmware_info,
.interface_version = XENPF_INTERFACE_VERSION,
.u.firmware_info.type = XEN_FW_KBD_SHIFT_FLAGS,
};
x86_platform.set_legacy_features =
xen_dom0_set_legacy_features;
xen_init_vga(info, xen_start_info->console.dom0.info_size,
&boot_params.screen_info);
xen_start_info->console.domU.mfn = 0;
xen_start_info->console.domU.evtchn = 0;
if (HYPERVISOR_platform_op(&op) == 0)
boot_params.kbd_status = op.u.firmware_info.u.kbd_shift_flags;
/* Make sure ACS will be enabled */
pci_request_acs();
xen_acpi_sleep_register();
xen_boot_params_init_edd();
#ifdef CONFIG_ACPI
/*
* Disable selecting "Firmware First mode" for correctable
		 * memory errors, as deciding this is the hypervisor's duty.
*/
acpi_disable_cmcff = 1;
#endif
}
xen_add_preferred_consoles();
#ifdef CONFIG_PCI
/* PCI BIOS service won't work from a PV guest. */
pci_probe &= ~PCI_PROBE_BIOS;
#endif
xen_raw_console_write("about to get started...\n");
/* We need this for printk timestamps */
xen_setup_runstate_info(0);
xen_efi_init(&boot_params);
/* Start the world */
cr4_init_shadow(); /* 32b kernel does this in i386_start_kernel() */
x86_64_start_reservations((char *)__pa_symbol(&boot_params));
}
static int xen_cpu_up_prepare_pv(unsigned int cpu)
{
int rc;
if (per_cpu(xen_vcpu, cpu) == NULL)
return -ENODEV;
xen_setup_timer(cpu);
rc = xen_smp_intr_init(cpu);
if (rc) {
WARN(1, "xen_smp_intr_init() for CPU %d failed: %d\n",
cpu, rc);
return rc;
}
rc = xen_smp_intr_init_pv(cpu);
if (rc) {
WARN(1, "xen_smp_intr_init_pv() for CPU %d failed: %d\n",
cpu, rc);
return rc;
}
return 0;
}
static int xen_cpu_dead_pv(unsigned int cpu)
{
xen_smp_intr_free(cpu);
xen_smp_intr_free_pv(cpu);
xen_teardown_timer(cpu);
return 0;
}
static uint32_t __init xen_platform_pv(void)
{
if (xen_pv_domain())
return xen_cpuid_base();
return 0;
}
const __initconst struct hypervisor_x86 x86_hyper_xen_pv = {
.name = "Xen PV",
.detect = xen_platform_pv,
.type = X86_HYPER_XEN_PV,
.runtime.pin_vcpu = xen_pin_vcpu,
.ignore_nopv = true,
};
| linux-master | arch/x86/xen/enlighten_pv.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2014 Oracle Co., Daniel Kiper
*/
#include <linux/bitops.h>
#include <linux/efi.h>
#include <linux/init.h>
#include <linux/string.h>
#include <xen/xen.h>
#include <xen/xen-ops.h>
#include <xen/interface/platform.h>
#include <asm/page.h>
#include <asm/setup.h>
#include <asm/xen/hypercall.h>
#include "xen-ops.h"
static efi_char16_t vendor[100] __initdata;
static efi_system_table_t efi_systab_xen __initdata = {
.hdr = {
.signature = EFI_SYSTEM_TABLE_SIGNATURE,
.revision = 0, /* Initialized later. */
.headersize = 0, /* Ignored by Linux Kernel. */
.crc32 = 0, /* Ignored by Linux Kernel. */
.reserved = 0
},
.fw_vendor = EFI_INVALID_TABLE_ADDR, /* Initialized later. */
.fw_revision = 0, /* Initialized later. */
.con_in_handle = EFI_INVALID_TABLE_ADDR, /* Not used under Xen. */
.con_in = NULL, /* Not used under Xen. */
.con_out_handle = EFI_INVALID_TABLE_ADDR, /* Not used under Xen. */
.con_out = NULL, /* Not used under Xen. */
.stderr_handle = EFI_INVALID_TABLE_ADDR, /* Not used under Xen. */
.stderr = EFI_INVALID_TABLE_ADDR, /* Not used under Xen. */
.runtime = (efi_runtime_services_t *)EFI_INVALID_TABLE_ADDR,
/* Not used under Xen. */
.boottime = (efi_boot_services_t *)EFI_INVALID_TABLE_ADDR,
/* Not used under Xen. */
.nr_tables = 0, /* Initialized later. */
.tables = EFI_INVALID_TABLE_ADDR /* Initialized later. */
};
static efi_system_table_t __init *xen_efi_probe(void)
{
struct xen_platform_op op = {
.cmd = XENPF_firmware_info,
.u.firmware_info = {
.type = XEN_FW_EFI_INFO,
.index = XEN_FW_EFI_CONFIG_TABLE
}
};
union xenpf_efi_info *info = &op.u.firmware_info.u.efi_info;
if (!xen_initial_domain() || HYPERVISOR_platform_op(&op) < 0)
return NULL;
	/* Here we know that Xen runs on an EFI platform. */
xen_efi_runtime_setup();
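	/* Fill in the system table from a series of XENPF_firmware_info queries. */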
efi_systab_xen.tables = info->cfg.addr;
efi_systab_xen.nr_tables = info->cfg.nent;
op.cmd = XENPF_firmware_info;
op.u.firmware_info.type = XEN_FW_EFI_INFO;
op.u.firmware_info.index = XEN_FW_EFI_VENDOR;
info->vendor.bufsz = sizeof(vendor);
set_xen_guest_handle(info->vendor.name, vendor);
if (HYPERVISOR_platform_op(&op) == 0) {
efi_systab_xen.fw_vendor = __pa_symbol(vendor);
efi_systab_xen.fw_revision = info->vendor.revision;
} else
efi_systab_xen.fw_vendor = __pa_symbol(L"UNKNOWN");
op.cmd = XENPF_firmware_info;
op.u.firmware_info.type = XEN_FW_EFI_INFO;
op.u.firmware_info.index = XEN_FW_EFI_VERSION;
if (HYPERVISOR_platform_op(&op) == 0)
efi_systab_xen.hdr.revision = info->version;
op.cmd = XENPF_firmware_info;
op.u.firmware_info.type = XEN_FW_EFI_INFO;
op.u.firmware_info.index = XEN_FW_EFI_RT_VERSION;
if (HYPERVISOR_platform_op(&op) == 0)
efi.runtime_version = info->version;
return &efi_systab_xen;
}
/*
* Determine whether we're in secure boot mode.
*/
static enum efi_secureboot_mode xen_efi_get_secureboot(void)
{
static efi_guid_t shim_guid = EFI_SHIM_LOCK_GUID;
enum efi_secureboot_mode mode;
efi_status_t status;
u8 moksbstate;
unsigned long size;
mode = efi_get_secureboot_mode(efi.get_variable);
if (mode == efi_secureboot_mode_unknown) {
pr_err("Could not determine UEFI Secure Boot status.\n");
return efi_secureboot_mode_unknown;
}
if (mode != efi_secureboot_mode_enabled)
return mode;
/* See if a user has put the shim into insecure mode. */
size = sizeof(moksbstate);
status = efi.get_variable(L"MokSBStateRT", &shim_guid,
NULL, &size, &moksbstate);
/* If it fails, we don't care why. Default to secure. */
if (status != EFI_SUCCESS)
goto secure_boot_enabled;
if (moksbstate == 1)
return efi_secureboot_mode_disabled;
secure_boot_enabled:
pr_info("UEFI Secure Boot is enabled.\n");
return efi_secureboot_mode_enabled;
}
void __init xen_efi_init(struct boot_params *boot_params)
{
efi_system_table_t *efi_systab_xen;
efi_systab_xen = xen_efi_probe();
if (efi_systab_xen == NULL)
return;
strscpy((char *)&boot_params->efi_info.efi_loader_signature, "Xen",
sizeof(boot_params->efi_info.efi_loader_signature));
boot_params->efi_info.efi_systab = (__u32)__pa(efi_systab_xen);
boot_params->efi_info.efi_systab_hi = (__u32)(__pa(efi_systab_xen) >> 32);
boot_params->secure_boot = xen_efi_get_secureboot();
set_bit(EFI_BOOT, &efi.flags);
set_bit(EFI_PARAVIRT, &efi.flags);
set_bit(EFI_64BIT, &efi.flags);
}
| linux-master | arch/x86/xen/efi.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Machine specific setup for xen
*
* Jeremy Fitzhardinge <[email protected]>, XenSource Inc, 2007
*/
#include <linux/init.h>
#include <linux/iscsi_ibft.h>
#include <linux/sched.h>
#include <linux/kstrtox.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/memblock.h>
#include <linux/cpuidle.h>
#include <linux/cpufreq.h>
#include <linux/memory_hotplug.h>
#include <asm/elf.h>
#include <asm/vdso.h>
#include <asm/e820/api.h>
#include <asm/setup.h>
#include <asm/acpi.h>
#include <asm/numa.h>
#include <asm/idtentry.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/callback.h>
#include <xen/interface/memory.h>
#include <xen/interface/physdev.h>
#include <xen/features.h>
#include <xen/hvc-console.h>
#include "xen-ops.h"
#include "mmu.h"
#define GB(x) ((uint64_t)(x) * 1024 * 1024 * 1024)
/* Amount of extra memory space we add to the e820 ranges */
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
/* Number of pages released from the initial allocation. */
unsigned long xen_released_pages;
/* Memory map would allow PCI passthrough. */
bool xen_pv_pci_possible;
/* E820 map used during setting up memory. */
static struct e820_table xen_e820_table __initdata;
/*
* Buffer used to remap identity mapped pages. We only need the virtual space.
* The physical page behind this address is remapped as needed to different
* buffer pages.
*/
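/*
 * Three longs of the buffer page hold the header fields below; the
 * remaining REMAP_SIZE slots hold the mfns to be remapped.
 */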
#define REMAP_SIZE (P2M_PER_PAGE - 3)
static struct {
unsigned long next_area_mfn;
unsigned long target_pfn;
unsigned long size;
unsigned long mfns[REMAP_SIZE];
} xen_remap_buf __initdata __aligned(PAGE_SIZE);
static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;
/*
* The maximum amount of extra memory compared to the base size. The
* main scaling factor is the size of struct page. At extreme ratios
* of base:extra, all the base memory can be filled with page
* structures for the extra memory, leaving no space for anything
* else.
*
* 10x seems like a reasonable balance between scaling flexibility and
* leaving a practically usable system.
*/
#define EXTRA_MEM_RATIO (10)
static bool xen_512gb_limit __initdata = IS_ENABLED(CONFIG_XEN_512GB);
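/*
 * Parse "xen_512gb_limit" from the Xen command line: the bare option
 * enables the limit, while "xen_512gb_limit=<bool>" sets it explicitly.
 */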
static void __init xen_parse_512gb(void)
{
bool val = false;
char *arg;
arg = strstr(xen_start_info->cmd_line, "xen_512gb_limit");
if (!arg)
return;
arg = strstr(xen_start_info->cmd_line, "xen_512gb_limit=");
if (!arg)
val = true;
else if (kstrtobool(arg + strlen("xen_512gb_limit="), &val))
return;
xen_512gb_limit = val;
}
static void __init xen_add_extra_mem(unsigned long start_pfn,
unsigned long n_pfns)
{
int i;
/*
	 * No need to check for zero size: it should happen rarely and will only
	 * write a new entry that is regarded as unused due to its zero size.
*/
for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
/* Add new region. */
if (xen_extra_mem[i].n_pfns == 0) {
xen_extra_mem[i].start_pfn = start_pfn;
xen_extra_mem[i].n_pfns = n_pfns;
break;
}
/* Append to existing region. */
if (xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns ==
start_pfn) {
xen_extra_mem[i].n_pfns += n_pfns;
break;
}
}
if (i == XEN_EXTRA_MEM_MAX_REGIONS)
printk(KERN_WARNING "Warning: not enough extra memory regions\n");
memblock_reserve(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));
}
static void __init xen_del_extra_mem(unsigned long start_pfn,
unsigned long n_pfns)
{
int i;
unsigned long start_r, size_r;
for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
start_r = xen_extra_mem[i].start_pfn;
size_r = xen_extra_mem[i].n_pfns;
/* Start of region. */
if (start_r == start_pfn) {
BUG_ON(n_pfns > size_r);
xen_extra_mem[i].start_pfn += n_pfns;
xen_extra_mem[i].n_pfns -= n_pfns;
break;
}
/* End of region. */
if (start_r + size_r == start_pfn + n_pfns) {
BUG_ON(n_pfns > size_r);
xen_extra_mem[i].n_pfns -= n_pfns;
break;
}
/* Mid of region. */
if (start_pfn > start_r && start_pfn < start_r + size_r) {
BUG_ON(start_pfn + n_pfns > start_r + size_r);
xen_extra_mem[i].n_pfns = start_pfn - start_r;
/* Calling memblock_reserve() again is okay. */
xen_add_extra_mem(start_pfn + n_pfns, start_r + size_r -
(start_pfn + n_pfns));
break;
}
}
memblock_phys_free(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));
}
/*
* Called during boot before the p2m list can take entries beyond the
* hypervisor supplied p2m list. Entries in extra mem are to be regarded as
* invalid.
*/
unsigned long __ref xen_chk_extra_mem(unsigned long pfn)
{
int i;
for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
if (pfn >= xen_extra_mem[i].start_pfn &&
pfn < xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns)
return INVALID_P2M_ENTRY;
}
return IDENTITY_FRAME(pfn);
}
/*
* Mark all pfns of extra mem as invalid in p2m list.
*/
void __init xen_inv_extra_mem(void)
{
unsigned long pfn, pfn_s, pfn_e;
int i;
for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
if (!xen_extra_mem[i].n_pfns)
continue;
pfn_s = xen_extra_mem[i].start_pfn;
pfn_e = pfn_s + xen_extra_mem[i].n_pfns;
for (pfn = pfn_s; pfn < pfn_e; pfn++)
set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
}
}
/*
* Finds the next RAM pfn available in the E820 map after min_pfn.
* This function updates min_pfn with the pfn found and returns
* the size of that range or zero if not found.
*/
static unsigned long __init xen_find_pfn_range(unsigned long *min_pfn)
{
const struct e820_entry *entry = xen_e820_table.entries;
unsigned int i;
unsigned long done = 0;
for (i = 0; i < xen_e820_table.nr_entries; i++, entry++) {
unsigned long s_pfn;
unsigned long e_pfn;
if (entry->type != E820_TYPE_RAM)
continue;
e_pfn = PFN_DOWN(entry->addr + entry->size);
/* We only care about E820 after this */
if (e_pfn <= *min_pfn)
continue;
s_pfn = PFN_UP(entry->addr);
/* If min_pfn falls within the E820 entry, we want to start
* at the min_pfn PFN.
*/
if (s_pfn <= *min_pfn) {
done = e_pfn - *min_pfn;
} else {
done = e_pfn - s_pfn;
*min_pfn = s_pfn;
}
break;
}
return done;
}
static int __init xen_free_mfn(unsigned long mfn)
{
struct xen_memory_reservation reservation = {
.address_bits = 0,
.extent_order = 0,
.domid = DOMID_SELF
};
set_xen_guest_handle(reservation.extent_start, &mfn);
reservation.nr_extents = 1;
return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
}
/*
* This releases a chunk of memory and then does the identity map. It's used
* as a fallback if the remapping fails.
*/
static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
unsigned long end_pfn, unsigned long nr_pages)
{
unsigned long pfn, end;
int ret;
WARN_ON(start_pfn > end_pfn);
/* Release pages first. */
end = min(end_pfn, nr_pages);
for (pfn = start_pfn; pfn < end; pfn++) {
unsigned long mfn = pfn_to_mfn(pfn);
/* Make sure pfn exists to start with */
if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
continue;
ret = xen_free_mfn(mfn);
WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);
if (ret == 1) {
xen_released_pages++;
if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY))
break;
} else
break;
}
set_phys_range_identity(start_pfn, end_pfn);
}
/*
* Helper function to update the p2m and m2p tables and kernel mapping.
*/
static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn)
{
struct mmu_update update = {
.ptr = ((uint64_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE,
.val = pfn
};
/* Update p2m */
if (!set_phys_to_machine(pfn, mfn)) {
WARN(1, "Failed to set p2m mapping for pfn=%ld mfn=%ld\n",
pfn, mfn);
BUG();
}
/* Update m2p */
if (HYPERVISOR_mmu_update(&update, 1, NULL, DOMID_SELF) < 0) {
WARN(1, "Failed to set m2p mapping for mfn=%ld pfn=%ld\n",
mfn, pfn);
BUG();
}
if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
mfn_pte(mfn, PAGE_KERNEL), 0)) {
WARN(1, "Failed to update kernel mapping for mfn=%ld pfn=%ld\n",
mfn, pfn);
BUG();
}
}
/*
* This function updates the p2m and m2p tables with an identity map from
* start_pfn to start_pfn+size and prepares remapping the underlying RAM of the
* original allocation at remap_pfn. The information needed for remapping is
* saved in the memory itself to avoid the need for allocating buffers. The
* complete remap information is contained in a list of MFNs each containing
* up to REMAP_SIZE MFNs and the start target PFN for doing the remap.
* This enables us to preserve the original mfn sequence while doing the
* remapping at a time when the memory management is capable of allocating
* virtual and physical memory in arbitrary amounts, see 'xen_remap_memory' and
* its callers.
*/
static void __init xen_do_set_identity_and_remap_chunk(
unsigned long start_pfn, unsigned long size, unsigned long remap_pfn)
{
unsigned long buf = (unsigned long)&xen_remap_buf;
unsigned long mfn_save, mfn;
unsigned long ident_pfn_iter, remap_pfn_iter;
unsigned long ident_end_pfn = start_pfn + size;
unsigned long left = size;
unsigned int i, chunk;
WARN_ON(size == 0);
mfn_save = virt_to_mfn((void *)buf);
for (ident_pfn_iter = start_pfn, remap_pfn_iter = remap_pfn;
ident_pfn_iter < ident_end_pfn;
ident_pfn_iter += REMAP_SIZE, remap_pfn_iter += REMAP_SIZE) {
chunk = (left < REMAP_SIZE) ? left : REMAP_SIZE;
/* Map first pfn to xen_remap_buf */
mfn = pfn_to_mfn(ident_pfn_iter);
set_pte_mfn(buf, mfn, PAGE_KERNEL);
/* Save mapping information in page */
xen_remap_buf.next_area_mfn = xen_remap_mfn;
xen_remap_buf.target_pfn = remap_pfn_iter;
xen_remap_buf.size = chunk;
for (i = 0; i < chunk; i++)
xen_remap_buf.mfns[i] = pfn_to_mfn(ident_pfn_iter + i);
/* Put remap buf into list. */
xen_remap_mfn = mfn;
/* Set identity map */
set_phys_range_identity(ident_pfn_iter, ident_pfn_iter + chunk);
left -= chunk;
}
/* Restore old xen_remap_buf mapping */
set_pte_mfn(buf, mfn_save, PAGE_KERNEL);
}
/*
* This function takes a contiguous pfn range that needs to be identity mapped
* and:
*
* 1) Finds a new range of pfns to use to remap based on E820 and remap_pfn.
* 2) Calls the do_ function to actually do the mapping/remapping work.
*
* The goal is to not allocate additional memory but to remap the existing
* pages. In the case of an error the underlying memory is simply released back
* to Xen and not remapped.
*/
static unsigned long __init xen_set_identity_and_remap_chunk(
unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
unsigned long remap_pfn)
{
unsigned long pfn;
unsigned long i = 0;
unsigned long n = end_pfn - start_pfn;
if (remap_pfn == 0)
remap_pfn = nr_pages;
while (i < n) {
unsigned long cur_pfn = start_pfn + i;
unsigned long left = n - i;
unsigned long size = left;
unsigned long remap_range_size;
/* Do not remap pages beyond the current allocation */
if (cur_pfn >= nr_pages) {
/* Identity map remaining pages */
set_phys_range_identity(cur_pfn, cur_pfn + size);
break;
}
if (cur_pfn + size > nr_pages)
size = nr_pages - cur_pfn;
remap_range_size = xen_find_pfn_range(&remap_pfn);
if (!remap_range_size) {
pr_warn("Unable to find available pfn range, not remapping identity pages\n");
xen_set_identity_and_release_chunk(cur_pfn,
cur_pfn + left, nr_pages);
break;
}
/* Adjust size to fit in current e820 RAM region */
if (size > remap_range_size)
size = remap_range_size;
xen_do_set_identity_and_remap_chunk(cur_pfn, size, remap_pfn);
/* Update variables to reflect new mappings. */
i += size;
remap_pfn += size;
}
/*
* If the PFNs are currently mapped, their VA mappings need to be
* zapped.
*/
for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
(void)HYPERVISOR_update_va_mapping(
(unsigned long)__va(pfn << PAGE_SHIFT),
native_make_pte(0), 0);
return remap_pfn;
}
static unsigned long __init xen_count_remap_pages(
unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
unsigned long remap_pages)
{
if (start_pfn >= nr_pages)
return remap_pages;
return remap_pages + min(end_pfn, nr_pages) - start_pfn;
}
static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages,
unsigned long (*func)(unsigned long start_pfn, unsigned long end_pfn,
unsigned long nr_pages, unsigned long last_val))
{
phys_addr_t start = 0;
unsigned long ret_val = 0;
const struct e820_entry *entry = xen_e820_table.entries;
int i;
/*
* Combine non-RAM regions and gaps until a RAM region (or the
* end of the map) is reached, then call the provided function
* to perform its duty on the non-RAM region.
*
* The combined non-RAM regions are rounded to a whole number
* of pages so any partial pages are accessible via the 1:1
* mapping. This is needed for some BIOSes that put (for
* example) the DMI tables in a reserved region that begins on
* a non-page boundary.
*/
for (i = 0; i < xen_e820_table.nr_entries; i++, entry++) {
phys_addr_t end = entry->addr + entry->size;
if (entry->type == E820_TYPE_RAM || i == xen_e820_table.nr_entries - 1) {
unsigned long start_pfn = PFN_DOWN(start);
unsigned long end_pfn = PFN_UP(end);
if (entry->type == E820_TYPE_RAM)
end_pfn = PFN_UP(entry->addr);
if (start_pfn < end_pfn)
ret_val = func(start_pfn, end_pfn, nr_pages,
ret_val);
start = end;
}
}
return ret_val;
}
/*
* Remap the memory prepared in xen_do_set_identity_and_remap_chunk().
* The remap information (which mfn remap to which pfn) is contained in the
* to be remapped memory itself in a linked list anchored at xen_remap_mfn.
 * This scheme allows remapping the different chunks in arbitrary order while
 * keeping the resulting mapping independent of that order.
*/
void __init xen_remap_memory(void)
{
unsigned long buf = (unsigned long)&xen_remap_buf;
unsigned long mfn_save, pfn;
unsigned long remapped = 0;
unsigned int i;
unsigned long pfn_s = ~0UL;
unsigned long len = 0;
mfn_save = virt_to_mfn((void *)buf);
while (xen_remap_mfn != INVALID_P2M_ENTRY) {
/* Map the remap information */
set_pte_mfn(buf, xen_remap_mfn, PAGE_KERNEL);
BUG_ON(xen_remap_mfn != xen_remap_buf.mfns[0]);
pfn = xen_remap_buf.target_pfn;
for (i = 0; i < xen_remap_buf.size; i++) {
xen_update_mem_tables(pfn, xen_remap_buf.mfns[i]);
remapped++;
pfn++;
}
if (pfn_s == ~0UL || pfn == pfn_s) {
pfn_s = xen_remap_buf.target_pfn;
len += xen_remap_buf.size;
} else if (pfn_s + len == xen_remap_buf.target_pfn) {
len += xen_remap_buf.size;
} else {
xen_del_extra_mem(pfn_s, len);
pfn_s = xen_remap_buf.target_pfn;
len = xen_remap_buf.size;
}
xen_remap_mfn = xen_remap_buf.next_area_mfn;
}
if (pfn_s != ~0UL && len)
xen_del_extra_mem(pfn_s, len);
set_pte_mfn(buf, mfn_save, PAGE_KERNEL);
pr_info("Remapped %ld page(s)\n", remapped);
}
static unsigned long __init xen_get_pages_limit(void)
{
unsigned long limit;
limit = MAXMEM / PAGE_SIZE;
if (!xen_initial_domain() && xen_512gb_limit)
limit = GB(512) / PAGE_SIZE;
return limit;
}
static unsigned long __init xen_get_max_pages(void)
{
unsigned long max_pages, limit;
domid_t domid = DOMID_SELF;
long ret;
limit = xen_get_pages_limit();
max_pages = limit;
/*
* For the initial domain we use the maximum reservation as
* the maximum page.
*
* For guest domains the current maximum reservation reflects
* the current maximum rather than the static maximum. In this
* case the e820 map provided to us will cover the static
* maximum region.
*/
if (xen_initial_domain()) {
ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
if (ret > 0)
max_pages = ret;
}
return min(max_pages, limit);
}
static void __init xen_align_and_add_e820_region(phys_addr_t start,
phys_addr_t size, int type)
{
phys_addr_t end = start + size;
/* Align RAM regions to page boundaries. */
if (type == E820_TYPE_RAM) {
start = PAGE_ALIGN(start);
end &= ~((phys_addr_t)PAGE_SIZE - 1);
#ifdef CONFIG_MEMORY_HOTPLUG
/*
* Don't allow adding memory not in E820 map while booting the
* system. Once the balloon driver is up it will remove that
* restriction again.
*/
max_mem_size = end;
#endif
}
e820__range_add(start, end - start, type);
}
static void __init xen_ignore_unusable(void)
{
struct e820_entry *entry = xen_e820_table.entries;
unsigned int i;
for (i = 0; i < xen_e820_table.nr_entries; i++, entry++) {
if (entry->type == E820_TYPE_UNUSABLE)
entry->type = E820_TYPE_RAM;
}
}
bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size)
{
struct e820_entry *entry;
unsigned mapcnt;
phys_addr_t end;
if (!size)
return false;
end = start + size;
entry = xen_e820_table.entries;
for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++) {
if (entry->type == E820_TYPE_RAM && entry->addr <= start &&
(entry->addr + entry->size) >= end)
return false;
entry++;
}
return true;
}
/*
* Find a free area in physical memory not yet reserved and compliant with
* E820 map.
 * Used to relocate pre-allocated areas like the initrd or the p2m list
 * which conflict with the E820 map to be used.
* In case no area is found, return 0. Otherwise return the physical address
* of the area which is already reserved for convenience.
*/
phys_addr_t __init xen_find_free_area(phys_addr_t size)
{
unsigned mapcnt;
phys_addr_t addr, start;
struct e820_entry *entry = xen_e820_table.entries;
for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++, entry++) {
if (entry->type != E820_TYPE_RAM || entry->size < size)
continue;
start = entry->addr;
for (addr = start; addr < start + size; addr += PAGE_SIZE) {
if (!memblock_is_reserved(addr))
continue;
start = addr + PAGE_SIZE;
if (start + size > entry->addr + entry->size)
break;
}
if (addr >= start + size) {
memblock_reserve(start, size);
return start;
}
}
return 0;
}
/*
* Like memcpy, but with physical addresses for dest and src.
*/
static void __init xen_phys_memcpy(phys_addr_t dest, phys_addr_t src,
phys_addr_t n)
{
phys_addr_t dest_off, src_off, dest_len, src_len, len;
void *from, *to;
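	/*
	 * Copy in chunks no larger than the early fixmap window
	 * (NR_FIX_BTMAPS pages) that early_memremap() can map at a time.
	 */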
while (n) {
dest_off = dest & ~PAGE_MASK;
src_off = src & ~PAGE_MASK;
dest_len = n;
if (dest_len > (NR_FIX_BTMAPS << PAGE_SHIFT) - dest_off)
dest_len = (NR_FIX_BTMAPS << PAGE_SHIFT) - dest_off;
src_len = n;
if (src_len > (NR_FIX_BTMAPS << PAGE_SHIFT) - src_off)
src_len = (NR_FIX_BTMAPS << PAGE_SHIFT) - src_off;
len = min(dest_len, src_len);
to = early_memremap(dest - dest_off, dest_len + dest_off);
from = early_memremap(src - src_off, src_len + src_off);
memcpy(to, from, len);
early_memunmap(to, dest_len + dest_off);
early_memunmap(from, src_len + src_off);
n -= len;
dest += len;
src += len;
}
}
/*
* Reserve Xen mfn_list.
*/
static void __init xen_reserve_xen_mfnlist(void)
{
phys_addr_t start, size;
if (xen_start_info->mfn_list >= __START_KERNEL_map) {
start = __pa(xen_start_info->mfn_list);
size = PFN_ALIGN(xen_start_info->nr_pages *
sizeof(unsigned long));
} else {
start = PFN_PHYS(xen_start_info->first_p2m_pfn);
size = PFN_PHYS(xen_start_info->nr_p2m_frames);
}
memblock_reserve(start, size);
if (!xen_is_e820_reserved(start, size))
return;
xen_relocate_p2m();
memblock_phys_free(start, size);
}
/**
* xen_memory_setup - Hook for machine specific memory setup.
**/
char * __init xen_memory_setup(void)
{
unsigned long max_pfn, pfn_s, n_pfns;
phys_addr_t mem_end, addr, size, chunk_size;
u32 type;
int rc;
struct xen_memory_map memmap;
unsigned long max_pages;
unsigned long extra_pages = 0;
int i;
int op;
xen_parse_512gb();
max_pfn = xen_get_pages_limit();
max_pfn = min(max_pfn, xen_start_info->nr_pages);
mem_end = PFN_PHYS(max_pfn);
memmap.nr_entries = ARRAY_SIZE(xen_e820_table.entries);
set_xen_guest_handle(memmap.buffer, xen_e820_table.entries);
#if defined(CONFIG_MEMORY_HOTPLUG) && defined(CONFIG_XEN_BALLOON)
xen_saved_max_mem_size = max_mem_size;
#endif
op = xen_initial_domain() ?
XENMEM_machine_memory_map :
XENMEM_memory_map;
rc = HYPERVISOR_memory_op(op, &memmap);
if (rc == -ENOSYS) {
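		/*
		 * The memory map hypercall isn't implemented for this guest:
		 * synthesize a single RAM region covering the initial
		 * allocation instead.
		 */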
BUG_ON(xen_initial_domain());
memmap.nr_entries = 1;
xen_e820_table.entries[0].addr = 0ULL;
xen_e820_table.entries[0].size = mem_end;
/* 8MB slack (to balance backend allocations). */
xen_e820_table.entries[0].size += 8ULL << 20;
xen_e820_table.entries[0].type = E820_TYPE_RAM;
rc = 0;
}
BUG_ON(rc);
BUG_ON(memmap.nr_entries == 0);
xen_e820_table.nr_entries = memmap.nr_entries;
if (xen_initial_domain()) {
/*
* Xen won't allow a 1:1 mapping to be created to UNUSABLE
* regions, so if we're using the machine memory map leave the
* region as RAM as it is in the pseudo-physical map.
*
* UNUSABLE regions in domUs are not handled and will need
* a patch in the future.
*/
xen_ignore_unusable();
#ifdef CONFIG_ISCSI_IBFT_FIND
/* Reserve 0.5 MiB to 1 MiB region so iBFT can be found */
xen_e820_table.entries[xen_e820_table.nr_entries].addr = IBFT_START;
xen_e820_table.entries[xen_e820_table.nr_entries].size = IBFT_END - IBFT_START;
xen_e820_table.entries[xen_e820_table.nr_entries].type = E820_TYPE_RESERVED;
xen_e820_table.nr_entries++;
#endif
}
/* Make sure the Xen-supplied memory map is well-ordered. */
e820__update_table(&xen_e820_table);
max_pages = xen_get_max_pages();
/* How many extra pages do we need due to remapping? */
max_pages += xen_foreach_remap_area(max_pfn, xen_count_remap_pages);
if (max_pages > max_pfn)
extra_pages += max_pages - max_pfn;
/*
 * Clamp the amount of extra memory to an EXTRA_MEM_RATIO
 * factor of the base size.
*
* Make sure we have no memory above max_pages, as this area
* isn't handled by the p2m management.
*/
extra_pages = min3(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
extra_pages, max_pages - max_pfn);
i = 0;
addr = xen_e820_table.entries[0].addr;
size = xen_e820_table.entries[0].size;
while (i < xen_e820_table.nr_entries) {
bool discard = false;
chunk_size = size;
type = xen_e820_table.entries[i].type;
if (type == E820_TYPE_RESERVED)
xen_pv_pci_possible = true;
if (type == E820_TYPE_RAM) {
if (addr < mem_end) {
chunk_size = min(size, mem_end - addr);
} else if (extra_pages) {
chunk_size = min(size, PFN_PHYS(extra_pages));
pfn_s = PFN_UP(addr);
n_pfns = PFN_DOWN(addr + chunk_size) - pfn_s;
extra_pages -= n_pfns;
xen_add_extra_mem(pfn_s, n_pfns);
xen_max_p2m_pfn = pfn_s + n_pfns;
} else
discard = true;
}
if (!discard)
xen_align_and_add_e820_region(addr, chunk_size, type);
addr += chunk_size;
size -= chunk_size;
if (size == 0) {
i++;
if (i < xen_e820_table.nr_entries) {
addr = xen_e820_table.entries[i].addr;
size = xen_e820_table.entries[i].size;
}
}
}
/*
* Set the rest as identity mapped, in case PCI BARs are
* located here.
*/
set_phys_range_identity(addr / PAGE_SIZE, ~0ul);
/*
* In domU, the ISA region is normal, usable memory, but we
* reserve ISA memory anyway because too many things poke
* about in there.
*/
e820__range_add(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_TYPE_RESERVED);
e820__update_table(e820_table);
/*
* Check whether the kernel itself conflicts with the target E820 map.
* Failing now is better than running into weird problems later due
* to relocating (and even reusing) pages with kernel text or data.
*/
if (xen_is_e820_reserved(__pa_symbol(_text),
__pa_symbol(__bss_stop) - __pa_symbol(_text))) {
xen_raw_console_write("Xen hypervisor allocated kernel memory conflicts with E820 map\n");
BUG();
}
/*
* Check for a conflict of the hypervisor supplied page tables with
* the target E820 map.
*/
xen_pt_check_e820();
xen_reserve_xen_mfnlist();
/* Check for a conflict of the initrd with the target E820 map. */
if (xen_is_e820_reserved(boot_params.hdr.ramdisk_image,
boot_params.hdr.ramdisk_size)) {
phys_addr_t new_area, start, size;
new_area = xen_find_free_area(boot_params.hdr.ramdisk_size);
if (!new_area) {
xen_raw_console_write("Can't find new memory area for initrd needed due to E820 map conflict\n");
BUG();
}
start = boot_params.hdr.ramdisk_image;
size = boot_params.hdr.ramdisk_size;
xen_phys_memcpy(new_area, start, size);
pr_info("initrd moved from [mem %#010llx-%#010llx] to [mem %#010llx-%#010llx]\n",
start, start + size, new_area, new_area + size);
memblock_phys_free(start, size);
boot_params.hdr.ramdisk_image = new_area;
boot_params.ext_ramdisk_image = new_area >> 32;
}
/*
* Set identity map on non-RAM pages and prepare remapping the
* underlying RAM.
*/
xen_foreach_remap_area(max_pfn, xen_set_identity_and_remap_chunk);
pr_info("Released %ld page(s)\n", xen_released_pages);
return "Xen";
}
static int register_callback(unsigned type, const void *func)
{
struct callback_register callback = {
.type = type,
.address = XEN_CALLBACK(__KERNEL_CS, func),
.flags = CALLBACKF_mask_events,
};
return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
}
void xen_enable_sysenter(void)
{
if (cpu_feature_enabled(X86_FEATURE_SYSENTER32) &&
register_callback(CALLBACKTYPE_sysenter, xen_entry_SYSENTER_compat))
setup_clear_cpu_cap(X86_FEATURE_SYSENTER32);
}
void xen_enable_syscall(void)
{
int ret;
ret = register_callback(CALLBACKTYPE_syscall, xen_entry_SYSCALL_64);
if (ret != 0) {
printk(KERN_ERR "Failed to set syscall callback: %d\n", ret);
/* Pretty fatal; 64-bit userspace has no other
mechanism for syscalls. */
}
if (cpu_feature_enabled(X86_FEATURE_SYSCALL32) &&
register_callback(CALLBACKTYPE_syscall32, xen_entry_SYSCALL_compat))
setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
}
static void __init xen_pvmmu_arch_setup(void)
{
HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);
if (register_callback(CALLBACKTYPE_event,
xen_asm_exc_xen_hypervisor_callback) ||
register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
BUG();
xen_enable_sysenter();
xen_enable_syscall();
}
/* This function is not called for HVM domains */
void __init xen_arch_setup(void)
{
xen_panic_handler_init();
xen_pvmmu_arch_setup();
#ifdef CONFIG_ACPI
if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
disable_acpi();
}
#endif
memcpy(boot_command_line, xen_start_info->cmd_line,
MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ?
COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);
/* Set up idle, making sure it calls safe_halt() pvop */
disable_cpuidle();
disable_cpufreq();
WARN_ON(xen_set_default_idle());
#ifdef CONFIG_NUMA
numa_off = 1;
#endif
}
| linux-master | arch/x86/xen/setup.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>
#include <asm/fixmap.h>
#include "xen-ops.h"
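/*
 * MFNs are not stable across a save/restore cycle, so the xenstore and
 * console ring references are converted to PFNs before suspending;
 * xen_pv_post_suspend() converts them back if the suspend is cancelled.
 */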
void xen_pv_pre_suspend(void)
{
xen_mm_pin_all();
xen_start_info->store_mfn = mfn_to_pfn(xen_start_info->store_mfn);
xen_start_info->console.domU.mfn =
mfn_to_pfn(xen_start_info->console.domU.mfn);
BUG_ON(!irqs_disabled());
HYPERVISOR_shared_info = &xen_dummy_shared_info;
if (HYPERVISOR_update_va_mapping(fix_to_virt(FIX_PARAVIRT_BOOTMAP),
__pte_ma(0), 0))
BUG();
}
void xen_pv_post_suspend(int suspend_cancelled)
{
xen_build_mfn_list_list();
set_fixmap(FIX_PARAVIRT_BOOTMAP, xen_start_info->shared_info);
HYPERVISOR_shared_info = (void *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);
xen_setup_mfn_list_list();
if (suspend_cancelled) {
xen_start_info->store_mfn =
pfn_to_mfn(xen_start_info->store_mfn);
xen_start_info->console.domU.mfn =
pfn_to_mfn(xen_start_info->console.domU.mfn);
} else {
#ifdef CONFIG_SMP
BUG_ON(xen_cpu_initialized_map == NULL);
cpumask_copy(xen_cpu_initialized_map, cpu_online_mask);
#endif
xen_vcpu_restore();
}
xen_mm_unpin_all();
}
| linux-master | arch/x86/xen/suspend_pv.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/******************************************************************************
* grant_table.c
* x86 specific part
*
* Granting foreign access to our memory reservation.
*
* Copyright (c) 2005-2006, Christopher Clark
* Copyright (c) 2004-2005, K A Fraser
* Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
* VA Linux Systems Japan. Split out x86 specific part.
*/
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <xen/interface/xen.h>
#include <xen/page.h>
#include <xen/grant_table.h>
#include <xen/xen.h>
static struct gnttab_vm_area {
struct vm_struct *area;
pte_t **ptes;
int idx;
} gnttab_shared_vm_area, gnttab_status_vm_area;
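/*
 * Point the PTEs of the pre-allocated VM area at the grant frames
 * supplied by the hypervisor.
 */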
int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes,
unsigned long max_nr_gframes,
void **__shared)
{
void *shared = *__shared;
unsigned long addr;
unsigned long i;
if (shared == NULL)
*__shared = shared = gnttab_shared_vm_area.area->addr;
addr = (unsigned long)shared;
for (i = 0; i < nr_gframes; i++) {
set_pte_at(&init_mm, addr, gnttab_shared_vm_area.ptes[i],
mfn_pte(frames[i], PAGE_KERNEL));
addr += PAGE_SIZE;
}
return 0;
}
int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes,
unsigned long max_nr_gframes,
grant_status_t **__shared)
{
grant_status_t *shared = *__shared;
unsigned long addr;
unsigned long i;
if (shared == NULL)
*__shared = shared = gnttab_status_vm_area.area->addr;
addr = (unsigned long)shared;
for (i = 0; i < nr_gframes; i++) {
set_pte_at(&init_mm, addr, gnttab_status_vm_area.ptes[i],
mfn_pte(frames[i], PAGE_KERNEL));
addr += PAGE_SIZE;
}
return 0;
}
void arch_gnttab_unmap(void *shared, unsigned long nr_gframes)
{
pte_t **ptes;
unsigned long addr;
unsigned long i;
if (shared == gnttab_status_vm_area.area->addr)
ptes = gnttab_status_vm_area.ptes;
else
ptes = gnttab_shared_vm_area.ptes;
addr = (unsigned long)shared;
for (i = 0; i < nr_gframes; i++) {
set_pte_at(&init_mm, addr, ptes[i], __pte(0));
addr += PAGE_SIZE;
}
}
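/* apply_to_page_range() callback: record the PTE for each page of the area. */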
static int gnttab_apply(pte_t *pte, unsigned long addr, void *data)
{
struct gnttab_vm_area *area = data;
area->ptes[area->idx++] = pte;
return 0;
}
static int arch_gnttab_valloc(struct gnttab_vm_area *area, unsigned nr_frames)
{
area->ptes = kmalloc_array(nr_frames, sizeof(*area->ptes), GFP_KERNEL);
if (area->ptes == NULL)
return -ENOMEM;
area->area = get_vm_area(PAGE_SIZE * nr_frames, VM_IOREMAP);
if (!area->area)
goto out_free_ptes;
if (apply_to_page_range(&init_mm, (unsigned long)area->area->addr,
PAGE_SIZE * nr_frames, gnttab_apply, area))
goto out_free_vm_area;
return 0;
out_free_vm_area:
free_vm_area(area->area);
out_free_ptes:
kfree(area->ptes);
return -ENOMEM;
}
static void arch_gnttab_vfree(struct gnttab_vm_area *area)
{
free_vm_area(area->area);
kfree(area->ptes);
}
int arch_gnttab_init(unsigned long nr_shared, unsigned long nr_status)
{
int ret;
if (!xen_pv_domain())
return 0;
ret = arch_gnttab_valloc(&gnttab_shared_vm_area, nr_shared);
if (ret < 0)
return ret;
/*
* Always allocate the space for the status frames in case
* we're migrated to a host with V2 support.
*/
ret = arch_gnttab_valloc(&gnttab_status_vm_area, nr_status);
if (ret < 0)
goto err;
return 0;
err:
arch_gnttab_vfree(&gnttab_shared_vm_area);
return -ENOMEM;
}
#ifdef CONFIG_XEN_PVH
#include <xen/events.h>
#include <xen/xen-ops.h>
static int __init xen_pvh_gnttab_setup(void)
{
if (!xen_pvh_domain())
return -ENODEV;
xen_auto_xlat_grant_frames.count = gnttab_max_grant_frames();
return xen_xlate_map_ballooned_pages(&xen_auto_xlat_grant_frames.pfn,
&xen_auto_xlat_grant_frames.vaddr,
xen_auto_xlat_grant_frames.count);
}
/* Call it _before_ __gnttab_init as we need to initialize the
* xen_auto_xlat_grant_frames first. */
core_initcall(xen_pvh_gnttab_setup);
#endif
| linux-master | arch/x86/xen/grant-table.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Xen leaves the responsibility for maintaining p2m mappings to the
* guests themselves, but it must also access and update the p2m array
* during suspend/resume when all the pages are reallocated.
*
* The logical flat p2m table is mapped to a linear kernel memory area.
* For accesses by Xen a three-level tree linked via mfns only is set up to
* allow the address space to be sparse.
*
* Xen
* |
* p2m_top_mfn
* / \
* p2m_mid_mfn p2m_mid_mfn
* / /
* p2m p2m p2m ...
*
* The p2m_mid_mfn pages are mapped by p2m_top_mfn_p.
*
* The p2m_top_mfn level is limited to 1 page, so the maximum representable
* pseudo-physical address space is:
* P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE pages
*
 * P2M_PER_PAGE depends on the architecture, as an mfn is always
 * unsigned long (8 bytes on 64-bit, 4 bytes on 32-bit), leading to
* 512 and 1024 entries respectively.
*
* In short, these structures contain the Machine Frame Number (MFN) of the PFN.
*
 * However, not all entries are filled with MFNs. Specifically, for all other
 * leaf entries, and for top or middle entries for which there is no backing
 * page, we assume they are "missing". So (for example)
* pfn_to_mfn(0x90909090)=INVALID_P2M_ENTRY.
* We have a dedicated page p2m_missing with all entries being
* INVALID_P2M_ENTRY. This page may be referenced multiple times in the p2m
* list/tree in case there are multiple areas with P2M_PER_PAGE invalid pfns.
*
* We also have the possibility of setting 1-1 mappings on certain regions, so
* that:
* pfn_to_mfn(0xc0000)=0xc0000
*
 * The benefit of this is that for non-RAM regions (think PCI BARs or
 * ACPI spaces) we can create mappings easily, because we get the PFN
 * value to match the MFN.
*
* For this to work efficiently we have one new page p2m_identity. All entries
 * in p2m_identity are set to INVALID_P2M_ENTRY type (the Xen toolstack only
 * recognizes that and real MFNs, no other special values).
*
* On lookup we spot that the entry points to p2m_identity and return the
* identity value instead of dereferencing and returning INVALID_P2M_ENTRY.
* If the entry points to an allocated page, we just proceed as before and
* return the PFN. If the PFN has IDENTITY_FRAME_BIT set we unmask that in
* appropriate functions (pfn_to_mfn).
*
* The reason for having the IDENTITY_FRAME_BIT instead of just returning the
* PFN is that we could find ourselves where pfn_to_mfn(pfn)==pfn for a
 * non-identity pfn. To protect ourselves against that, we elect to set
 * (and get) the IDENTITY_FRAME_BIT on all identity mapped PFNs.
*/
#include <linux/init.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/cache.h>
#include <asm/setup.h>
#include <linux/uaccess.h>
#include <asm/xen/page.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#include <xen/balloon.h>
#include <xen/grant_table.h>
#include "multicalls.h"
#include "xen-ops.h"
#define P2M_MID_PER_PAGE (PAGE_SIZE / sizeof(unsigned long *))
#define P2M_TOP_PER_PAGE (PAGE_SIZE / sizeof(unsigned long **))
#define MAX_P2M_PFN (P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE)
#define PMDS_PER_MID_PAGE (P2M_MID_PER_PAGE / PTRS_PER_PTE)
unsigned long *xen_p2m_addr __read_mostly;
EXPORT_SYMBOL_GPL(xen_p2m_addr);
unsigned long xen_p2m_size __read_mostly;
EXPORT_SYMBOL_GPL(xen_p2m_size);
unsigned long xen_max_p2m_pfn __read_mostly;
EXPORT_SYMBOL_GPL(xen_max_p2m_pfn);
#ifdef CONFIG_XEN_MEMORY_HOTPLUG_LIMIT
#define P2M_LIMIT CONFIG_XEN_MEMORY_HOTPLUG_LIMIT
#else
#define P2M_LIMIT 0
#endif
static DEFINE_SPINLOCK(p2m_update_lock);
static unsigned long *p2m_mid_missing_mfn;
static unsigned long *p2m_top_mfn;
static unsigned long **p2m_top_mfn_p;
static unsigned long *p2m_missing;
static unsigned long *p2m_identity;
static pte_t *p2m_missing_pte;
static pte_t *p2m_identity_pte;
/*
* Hint at last populated PFN.
*
* Used to set HYPERVISOR_shared_info->arch.max_pfn so the toolstack
* can avoid scanning the whole P2M (which may be sized to account for
* hotplugged memory).
*/
static unsigned long xen_p2m_last_pfn;
static inline unsigned p2m_top_index(unsigned long pfn)
{
BUG_ON(pfn >= MAX_P2M_PFN);
return pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE);
}
static inline unsigned p2m_mid_index(unsigned long pfn)
{
return (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;
}
static void p2m_top_mfn_init(unsigned long *top)
{
unsigned i;
for (i = 0; i < P2M_TOP_PER_PAGE; i++)
top[i] = virt_to_mfn(p2m_mid_missing_mfn);
}
static void p2m_top_mfn_p_init(unsigned long **top)
{
unsigned i;
for (i = 0; i < P2M_TOP_PER_PAGE; i++)
top[i] = p2m_mid_missing_mfn;
}
static void p2m_mid_mfn_init(unsigned long *mid, unsigned long *leaf)
{
unsigned i;
for (i = 0; i < P2M_MID_PER_PAGE; i++)
mid[i] = virt_to_mfn(leaf);
}
static void p2m_init(unsigned long *p2m)
{
unsigned i;
for (i = 0; i < P2M_PER_PAGE; i++)
p2m[i] = INVALID_P2M_ENTRY;
}
static void p2m_init_identity(unsigned long *p2m, unsigned long pfn)
{
unsigned i;
for (i = 0; i < P2M_PER_PAGE; i++)
p2m[i] = IDENTITY_FRAME(pfn + i);
}
static void * __ref alloc_p2m_page(void)
{
if (unlikely(!slab_is_available())) {
void *ptr = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
if (!ptr)
panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
__func__, PAGE_SIZE, PAGE_SIZE);
return ptr;
}
return (void *)__get_free_page(GFP_KERNEL);
}
static void __ref free_p2m_page(void *p)
{
if (unlikely(!slab_is_available())) {
memblock_free(p, PAGE_SIZE);
return;
}
free_page((unsigned long)p);
}
/*
* Build the parallel p2m_top_mfn and p2m_mid_mfn structures
*
* This is called both at boot time, and after resuming from suspend:
 * - At boot time we're called rather early, and must use memblock
 *   (via alloc_p2m_page()) to allocate memory.
*
* - After resume we're called from within stop_machine, but the mfn
* tree should already be completely allocated.
*/
void __ref xen_build_mfn_list_list(void)
{
unsigned long pfn, mfn;
pte_t *ptep;
unsigned int level, topidx, mididx;
unsigned long *mid_mfn_p;
if (xen_start_info->flags & SIF_VIRT_P2M_4TOOLS)
return;
/* Pre-initialize p2m_top_mfn to be completely missing */
if (p2m_top_mfn == NULL) {
p2m_mid_missing_mfn = alloc_p2m_page();
p2m_mid_mfn_init(p2m_mid_missing_mfn, p2m_missing);
p2m_top_mfn_p = alloc_p2m_page();
p2m_top_mfn_p_init(p2m_top_mfn_p);
p2m_top_mfn = alloc_p2m_page();
p2m_top_mfn_init(p2m_top_mfn);
} else {
/* Reinitialise, mfn's all change after migration */
p2m_mid_mfn_init(p2m_mid_missing_mfn, p2m_missing);
}
for (pfn = 0; pfn < xen_max_p2m_pfn && pfn < MAX_P2M_PFN;
pfn += P2M_PER_PAGE) {
topidx = p2m_top_index(pfn);
mididx = p2m_mid_index(pfn);
mid_mfn_p = p2m_top_mfn_p[topidx];
ptep = lookup_address((unsigned long)(xen_p2m_addr + pfn),
&level);
BUG_ON(!ptep || level != PG_LEVEL_4K);
mfn = pte_mfn(*ptep);
ptep = (pte_t *)((unsigned long)ptep & ~(PAGE_SIZE - 1));
/* Don't bother allocating any mfn mid levels if
* they're just missing, just update the stored mfn,
* since all could have changed over a migrate.
*/
if (ptep == p2m_missing_pte || ptep == p2m_identity_pte) {
BUG_ON(mididx);
BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
p2m_top_mfn[topidx] = virt_to_mfn(p2m_mid_missing_mfn);
pfn += (P2M_MID_PER_PAGE - 1) * P2M_PER_PAGE;
continue;
}
if (mid_mfn_p == p2m_mid_missing_mfn) {
mid_mfn_p = alloc_p2m_page();
p2m_mid_mfn_init(mid_mfn_p, p2m_missing);
p2m_top_mfn_p[topidx] = mid_mfn_p;
}
p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
mid_mfn_p[mididx] = mfn;
}
}
void xen_setup_mfn_list_list(void)
{
BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
if (xen_start_info->flags & SIF_VIRT_P2M_4TOOLS)
HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list = ~0UL;
else
HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
virt_to_mfn(p2m_top_mfn);
HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn;
HYPERVISOR_shared_info->arch.p2m_generation = 0;
HYPERVISOR_shared_info->arch.p2m_vaddr = (unsigned long)xen_p2m_addr;
HYPERVISOR_shared_info->arch.p2m_cr3 =
xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));
}
/* Set up p2m_top to point to the domain-builder provided p2m pages */
void __init xen_build_dynamic_phys_to_machine(void)
{
unsigned long pfn;
xen_p2m_addr = (unsigned long *)xen_start_info->mfn_list;
xen_p2m_size = ALIGN(xen_start_info->nr_pages, P2M_PER_PAGE);
for (pfn = xen_start_info->nr_pages; pfn < xen_p2m_size; pfn++)
xen_p2m_addr[pfn] = INVALID_P2M_ENTRY;
xen_max_p2m_pfn = xen_p2m_size;
}
#define P2M_TYPE_IDENTITY 0
#define P2M_TYPE_MISSING 1
#define P2M_TYPE_PFN 2
#define P2M_TYPE_UNKNOWN 3
static int xen_p2m_elem_type(unsigned long pfn)
{
unsigned long mfn;
if (pfn >= xen_p2m_size)
return P2M_TYPE_IDENTITY;
mfn = xen_p2m_addr[pfn];
if (mfn == INVALID_P2M_ENTRY)
return P2M_TYPE_MISSING;
if (mfn & IDENTITY_FRAME_BIT)
return P2M_TYPE_IDENTITY;
return P2M_TYPE_PFN;
}
static void __init xen_rebuild_p2m_list(unsigned long *p2m)
{
unsigned int i, chunk;
unsigned long pfn;
unsigned long *mfns;
pte_t *ptep;
pmd_t *pmdp;
int type;
p2m_missing = alloc_p2m_page();
p2m_init(p2m_missing);
p2m_identity = alloc_p2m_page();
p2m_init(p2m_identity);
p2m_missing_pte = alloc_p2m_page();
paravirt_alloc_pte(&init_mm, __pa(p2m_missing_pte) >> PAGE_SHIFT);
p2m_identity_pte = alloc_p2m_page();
paravirt_alloc_pte(&init_mm, __pa(p2m_identity_pte) >> PAGE_SHIFT);
for (i = 0; i < PTRS_PER_PTE; i++) {
set_pte(p2m_missing_pte + i,
pfn_pte(PFN_DOWN(__pa(p2m_missing)), PAGE_KERNEL_RO));
set_pte(p2m_identity_pte + i,
pfn_pte(PFN_DOWN(__pa(p2m_identity)), PAGE_KERNEL_RO));
}
for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += chunk) {
/*
* Try to map missing/identity PMDs or p2m-pages if possible.
* We have to respect the structure of the mfn_list_list
* which will be built just afterwards.
* Chunk size to test is one p2m page if we are in the middle
* of a mfn_list_list mid page and the complete mid page area
* if we are at index 0 of the mid page. Please note that a
* mid page might cover more than one PMD, e.g. on 32 bit PAE
* kernels.
*/
chunk = (pfn & (P2M_PER_PAGE * P2M_MID_PER_PAGE - 1)) ?
P2M_PER_PAGE : P2M_PER_PAGE * P2M_MID_PER_PAGE;
type = xen_p2m_elem_type(pfn);
i = 0;
if (type != P2M_TYPE_PFN)
for (i = 1; i < chunk; i++)
if (xen_p2m_elem_type(pfn + i) != type)
break;
if (i < chunk)
/* Reset to minimal chunk size. */
chunk = P2M_PER_PAGE;
if (type == P2M_TYPE_PFN || i < chunk) {
/* Use initial p2m page contents. */
mfns = alloc_p2m_page();
copy_page(mfns, xen_p2m_addr + pfn);
ptep = populate_extra_pte((unsigned long)(p2m + pfn));
set_pte(ptep,
pfn_pte(PFN_DOWN(__pa(mfns)), PAGE_KERNEL));
continue;
}
if (chunk == P2M_PER_PAGE) {
/* Map complete missing or identity p2m-page. */
mfns = (type == P2M_TYPE_MISSING) ?
p2m_missing : p2m_identity;
ptep = populate_extra_pte((unsigned long)(p2m + pfn));
set_pte(ptep,
pfn_pte(PFN_DOWN(__pa(mfns)), PAGE_KERNEL_RO));
continue;
}
/* Complete missing or identity PMD(s) can be mapped. */
ptep = (type == P2M_TYPE_MISSING) ?
p2m_missing_pte : p2m_identity_pte;
for (i = 0; i < PMDS_PER_MID_PAGE; i++) {
pmdp = populate_extra_pmd(
(unsigned long)(p2m + pfn) + i * PMD_SIZE);
set_pmd(pmdp, __pmd(__pa(ptep) | _KERNPG_TABLE));
}
}
}
void __init xen_vmalloc_p2m_tree(void)
{
static struct vm_struct vm;
unsigned long p2m_limit;
xen_p2m_last_pfn = xen_max_p2m_pfn;
p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE;
vm.flags = VM_ALLOC;
vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit),
PMD_SIZE * PMDS_PER_MID_PAGE);
vm_area_register_early(&vm, PMD_SIZE * PMDS_PER_MID_PAGE);
pr_notice("p2m virtual area at %p, size is %lx\n", vm.addr, vm.size);
xen_max_p2m_pfn = vm.size / sizeof(unsigned long);
xen_rebuild_p2m_list(vm.addr);
xen_p2m_addr = vm.addr;
xen_p2m_size = xen_max_p2m_pfn;
xen_inv_extra_mem();
}
unsigned long get_phys_to_machine(unsigned long pfn)
{
pte_t *ptep;
unsigned int level;
if (unlikely(pfn >= xen_p2m_size)) {
if (pfn < xen_max_p2m_pfn)
return xen_chk_extra_mem(pfn);
return IDENTITY_FRAME(pfn);
}
ptep = lookup_address((unsigned long)(xen_p2m_addr + pfn), &level);
BUG_ON(!ptep || level != PG_LEVEL_4K);
/*
* The INVALID_P2M_ENTRY is filled in both p2m_*identity
* and in p2m_*missing, so returning the INVALID_P2M_ENTRY
* would be wrong.
*/
if (pte_pfn(*ptep) == PFN_DOWN(__pa(p2m_identity)))
return IDENTITY_FRAME(pfn);
return xen_p2m_addr[pfn];
}
EXPORT_SYMBOL_GPL(get_phys_to_machine);
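/*
 * Illustrative sketch (not part of the upstream file): the three
 * possible outcomes of the lookup above, as seen by a caller. The
 * function name is hypothetical.
 */
#if 0	/* example only */
static void example_p2m_lookup(unsigned long pfn)
{
	unsigned long mfn = get_phys_to_machine(pfn);

	if (mfn == INVALID_P2M_ENTRY)
		;	/* missing: no machine frame, e.g. ballooned out */
	else if (mfn & IDENTITY_FRAME_BIT)
		;	/* identity: pfn == (mfn & ~IDENTITY_FRAME_BIT) */
	else
		;	/* regular translation to a machine frame */
}
#endif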
/*
* Allocate new pmd(s). It is checked whether the old pmd is still in place.
* If not, nothing is changed. This is okay as the only reason for allocating
 * a new pmd is to replace p2m_missing_pte or p2m_identity_pte by an
 * individual pmd.
*/
static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *pte_pg)
{
pte_t *ptechk;
pte_t *pte_newpg[PMDS_PER_MID_PAGE];
pmd_t *pmdp;
unsigned int level;
unsigned long flags;
unsigned long vaddr;
int i;
/* Do all allocations first to bail out in error case. */
for (i = 0; i < PMDS_PER_MID_PAGE; i++) {
pte_newpg[i] = alloc_p2m_page();
if (!pte_newpg[i]) {
for (i--; i >= 0; i--)
free_p2m_page(pte_newpg[i]);
return NULL;
}
}
vaddr = addr & ~(PMD_SIZE * PMDS_PER_MID_PAGE - 1);
for (i = 0; i < PMDS_PER_MID_PAGE; i++) {
copy_page(pte_newpg[i], pte_pg);
paravirt_alloc_pte(&init_mm, __pa(pte_newpg[i]) >> PAGE_SHIFT);
pmdp = lookup_pmd_address(vaddr);
BUG_ON(!pmdp);
spin_lock_irqsave(&p2m_update_lock, flags);
ptechk = lookup_address(vaddr, &level);
if (ptechk == pte_pg) {
HYPERVISOR_shared_info->arch.p2m_generation++;
wmb(); /* Tools are synchronizing via p2m_generation. */
set_pmd(pmdp,
__pmd(__pa(pte_newpg[i]) | _KERNPG_TABLE));
wmb(); /* Tools are synchronizing via p2m_generation. */
HYPERVISOR_shared_info->arch.p2m_generation++;
pte_newpg[i] = NULL;
}
spin_unlock_irqrestore(&p2m_update_lock, flags);
if (pte_newpg[i]) {
paravirt_release_pte(__pa(pte_newpg[i]) >> PAGE_SHIFT);
free_p2m_page(pte_newpg[i]);
}
vaddr += PMD_SIZE;
}
return lookup_address(addr, &level);
}
/*
* Fully allocate the p2m structure for a given pfn. We need to check
* that both the top and mid levels are allocated, and make sure the
* parallel mfn tree is kept in sync. We may race with other cpus, so
* the new pages are installed with cmpxchg; if we lose the race then
* simply free the page we allocated and use the one that's there.
*/
int xen_alloc_p2m_entry(unsigned long pfn)
{
unsigned topidx;
unsigned long *top_mfn_p, *mid_mfn;
pte_t *ptep, *pte_pg;
unsigned int level;
unsigned long flags;
unsigned long addr = (unsigned long)(xen_p2m_addr + pfn);
unsigned long p2m_pfn;
ptep = lookup_address(addr, &level);
BUG_ON(!ptep || level != PG_LEVEL_4K);
pte_pg = (pte_t *)((unsigned long)ptep & ~(PAGE_SIZE - 1));
if (pte_pg == p2m_missing_pte || pte_pg == p2m_identity_pte) {
/* PMD level is missing, allocate a new one */
ptep = alloc_p2m_pmd(addr, pte_pg);
if (!ptep)
return -ENOMEM;
}
if (p2m_top_mfn && pfn < MAX_P2M_PFN) {
topidx = p2m_top_index(pfn);
top_mfn_p = &p2m_top_mfn[topidx];
mid_mfn = READ_ONCE(p2m_top_mfn_p[topidx]);
BUG_ON(virt_to_mfn(mid_mfn) != *top_mfn_p);
if (mid_mfn == p2m_mid_missing_mfn) {
/* Separately check the mid mfn level */
unsigned long missing_mfn;
unsigned long mid_mfn_mfn;
unsigned long old_mfn;
mid_mfn = alloc_p2m_page();
if (!mid_mfn)
return -ENOMEM;
p2m_mid_mfn_init(mid_mfn, p2m_missing);
missing_mfn = virt_to_mfn(p2m_mid_missing_mfn);
mid_mfn_mfn = virt_to_mfn(mid_mfn);
old_mfn = cmpxchg(top_mfn_p, missing_mfn, mid_mfn_mfn);
if (old_mfn != missing_mfn) {
free_p2m_page(mid_mfn);
mid_mfn = mfn_to_virt(old_mfn);
} else {
p2m_top_mfn_p[topidx] = mid_mfn;
}
}
} else {
mid_mfn = NULL;
}
p2m_pfn = pte_pfn(READ_ONCE(*ptep));
if (p2m_pfn == PFN_DOWN(__pa(p2m_identity)) ||
p2m_pfn == PFN_DOWN(__pa(p2m_missing))) {
/* p2m leaf page is missing */
unsigned long *p2m;
p2m = alloc_p2m_page();
if (!p2m)
return -ENOMEM;
if (p2m_pfn == PFN_DOWN(__pa(p2m_missing)))
p2m_init(p2m);
else
p2m_init_identity(p2m, pfn & ~(P2M_PER_PAGE - 1));
spin_lock_irqsave(&p2m_update_lock, flags);
if (pte_pfn(*ptep) == p2m_pfn) {
HYPERVISOR_shared_info->arch.p2m_generation++;
wmb(); /* Tools are synchronizing via p2m_generation. */
set_pte(ptep,
pfn_pte(PFN_DOWN(__pa(p2m)), PAGE_KERNEL));
wmb(); /* Tools are synchronizing via p2m_generation. */
HYPERVISOR_shared_info->arch.p2m_generation++;
if (mid_mfn)
mid_mfn[p2m_mid_index(pfn)] = virt_to_mfn(p2m);
p2m = NULL;
}
spin_unlock_irqrestore(&p2m_update_lock, flags);
if (p2m)
free_p2m_page(p2m);
}
/* Expanded the p2m? */
if (pfn >= xen_p2m_last_pfn) {
xen_p2m_last_pfn = ALIGN(pfn + 1, P2M_PER_PAGE);
HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn;
}
return 0;
}
EXPORT_SYMBOL(xen_alloc_p2m_entry);
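/*
 * Illustrative sketch (not part of the upstream file): the lockless
 * install pattern used above, in isolation. Allocate first, publish
 * with cmpxchg(), and free our copy if another CPU won the race. The
 * function name is hypothetical.
 */
#if 0	/* example only */
static unsigned long *example_install_mid(unsigned long *top_mfn_p)
{
	unsigned long *mid = alloc_p2m_page();
	unsigned long missing_mfn = virt_to_mfn(p2m_mid_missing_mfn);
	unsigned long old_mfn;

	p2m_mid_mfn_init(mid, p2m_missing);
	old_mfn = cmpxchg(top_mfn_p, missing_mfn, virt_to_mfn(mid));
	if (old_mfn != missing_mfn) {
		free_p2m_page(mid);		/* lost the race */
		mid = mfn_to_virt(old_mfn);	/* use the winner's page */
	}
	return mid;
}
#endif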
unsigned long __init set_phys_range_identity(unsigned long pfn_s,
unsigned long pfn_e)
{
unsigned long pfn;
if (unlikely(pfn_s >= xen_p2m_size))
return 0;
if (pfn_s > pfn_e)
return 0;
if (pfn_e > xen_p2m_size)
pfn_e = xen_p2m_size;
for (pfn = pfn_s; pfn < pfn_e; pfn++)
xen_p2m_addr[pfn] = IDENTITY_FRAME(pfn);
return pfn - pfn_s;
}
bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
pte_t *ptep;
unsigned int level;
/* Only invalid entries allowed above the highest p2m covered frame. */
if (unlikely(pfn >= xen_p2m_size))
return mfn == INVALID_P2M_ENTRY;
/*
* The interface requires atomic updates on p2m elements.
* xen_safe_write_ulong() is using an atomic store via asm().
*/
if (likely(!xen_safe_write_ulong(xen_p2m_addr + pfn, mfn)))
return true;
ptep = lookup_address((unsigned long)(xen_p2m_addr + pfn), &level);
BUG_ON(!ptep || level != PG_LEVEL_4K);
if (pte_pfn(*ptep) == PFN_DOWN(__pa(p2m_missing)))
return mfn == INVALID_P2M_ENTRY;
if (pte_pfn(*ptep) == PFN_DOWN(__pa(p2m_identity)))
return mfn == IDENTITY_FRAME(pfn);
return false;
}
bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
int ret;
ret = xen_alloc_p2m_entry(pfn);
if (ret < 0)
return false;
return __set_phys_to_machine(pfn, mfn);
}
return true;
}
int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
struct gnttab_map_grant_ref *kmap_ops,
struct page **pages, unsigned int count)
{
int i, ret = 0;
pte_t *pte;
if (xen_feature(XENFEAT_auto_translated_physmap))
return 0;
if (kmap_ops) {
ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
kmap_ops, count);
if (ret)
goto out;
}
for (i = 0; i < count; i++) {
unsigned long mfn, pfn;
struct gnttab_unmap_grant_ref unmap[2];
int rc;
/* Do not add to override if the map failed. */
if (map_ops[i].status != GNTST_okay ||
(kmap_ops && kmap_ops[i].status != GNTST_okay))
continue;
if (map_ops[i].flags & GNTMAP_contains_pte) {
pte = (pte_t *)(mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
(map_ops[i].host_addr & ~PAGE_MASK));
mfn = pte_mfn(*pte);
} else {
mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
}
pfn = page_to_pfn(pages[i]);
WARN(pfn_to_mfn(pfn) != INVALID_P2M_ENTRY, "page must be ballooned");
if (likely(set_phys_to_machine(pfn, FOREIGN_FRAME(mfn))))
continue;
/*
* Signal an error for this slot. This in turn requires
* immediate unmapping.
*/
map_ops[i].status = GNTST_general_error;
		unmap[0].host_addr = map_ops[i].host_addr;
unmap[0].handle = map_ops[i].handle;
map_ops[i].handle = INVALID_GRANT_HANDLE;
if (map_ops[i].flags & GNTMAP_device_map)
unmap[0].dev_bus_addr = map_ops[i].dev_bus_addr;
else
unmap[0].dev_bus_addr = 0;
if (kmap_ops) {
kmap_ops[i].status = GNTST_general_error;
			unmap[1].host_addr = kmap_ops[i].host_addr;
unmap[1].handle = kmap_ops[i].handle;
kmap_ops[i].handle = INVALID_GRANT_HANDLE;
if (kmap_ops[i].flags & GNTMAP_device_map)
unmap[1].dev_bus_addr = kmap_ops[i].dev_bus_addr;
else
unmap[1].dev_bus_addr = 0;
}
/*
* Pre-populate both status fields, to be recognizable in
* the log message below.
*/
unmap[0].status = 1;
unmap[1].status = 1;
rc = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
unmap, 1 + !!kmap_ops);
if (rc || unmap[0].status != GNTST_okay ||
unmap[1].status != GNTST_okay)
pr_err_once("gnttab unmap failed: rc=%d st0=%d st1=%d\n",
rc, unmap[0].status, unmap[1].status);
}
out:
return ret;
}
int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
struct gnttab_unmap_grant_ref *kunmap_ops,
struct page **pages, unsigned int count)
{
int i, ret = 0;
if (xen_feature(XENFEAT_auto_translated_physmap))
return 0;
for (i = 0; i < count; i++) {
unsigned long mfn = __pfn_to_mfn(page_to_pfn(pages[i]));
unsigned long pfn = page_to_pfn(pages[i]);
if (mfn != INVALID_P2M_ENTRY && (mfn & FOREIGN_FRAME_BIT))
set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
else
ret = -EINVAL;
}
if (kunmap_ops)
ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
kunmap_ops, count) ?: ret;
return ret;
}
#ifdef CONFIG_XEN_DEBUG_FS
#include <linux/debugfs.h>
#include "debugfs.h"
static int p2m_dump_show(struct seq_file *m, void *v)
{
static const char * const type_name[] = {
[P2M_TYPE_IDENTITY] = "identity",
[P2M_TYPE_MISSING] = "missing",
[P2M_TYPE_PFN] = "pfn",
[P2M_TYPE_UNKNOWN] = "abnormal"};
unsigned long pfn, first_pfn;
int type, prev_type;
prev_type = xen_p2m_elem_type(0);
first_pfn = 0;
for (pfn = 0; pfn < xen_p2m_size; pfn++) {
type = xen_p2m_elem_type(pfn);
if (type != prev_type) {
seq_printf(m, " [0x%lx->0x%lx] %s\n", first_pfn, pfn,
type_name[prev_type]);
prev_type = type;
first_pfn = pfn;
}
}
seq_printf(m, " [0x%lx->0x%lx] %s\n", first_pfn, pfn,
type_name[prev_type]);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(p2m_dump);
static struct dentry *d_mmu_debug;
static int __init xen_p2m_debugfs(void)
{
struct dentry *d_xen = xen_init_debugfs();
d_mmu_debug = debugfs_create_dir("mmu", d_xen);
debugfs_create_file("p2m", 0600, d_mmu_debug, NULL, &p2m_dump_fops);
return 0;
}
fs_initcall(xen_p2m_debugfs);
#endif /* CONFIG_XEN_DEBUG_FS */
| linux-master | arch/x86/xen/p2m.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Split spinlock implementation out into its own file, so it can be
* compiled in a FTRACE-compatible way.
*/
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <asm/paravirt.h>
#include <asm/qspinlock.h>
#include <xen/events.h>
#include "xen-ops.h"
static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
static DEFINE_PER_CPU(char *, irq_name);
static DEFINE_PER_CPU(atomic_t, xen_qlock_wait_nest);
static bool xen_pvspin = true;
static void xen_qlock_kick(int cpu)
{
int irq = per_cpu(lock_kicker_irq, cpu);
/* Don't kick if the target's kicker interrupt is not initialized. */
if (irq == -1)
return;
xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
}
/*
* Halt the current CPU & release it back to the host
*/
static void xen_qlock_wait(u8 *byte, u8 val)
{
int irq = __this_cpu_read(lock_kicker_irq);
atomic_t *nest_cnt = this_cpu_ptr(&xen_qlock_wait_nest);
/* If kicker interrupts not initialized yet, just spin */
if (irq == -1 || in_nmi())
return;
/* Detect reentry. */
atomic_inc(nest_cnt);
/* If irq pending already and no nested call clear it. */
if (atomic_read(nest_cnt) == 1 && xen_test_irq_pending(irq)) {
xen_clear_irq_pending(irq);
} else if (READ_ONCE(*byte) == val) {
/* Block until irq becomes pending (or a spurious wakeup) */
xen_poll_irq(irq);
}
atomic_dec(nest_cnt);
}
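/*
 * The kicker IRQ is kept disabled and is only sampled via
 * xen_test_irq_pending()/xen_poll_irq(), so this handler must never run.
 */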
static irqreturn_t dummy_handler(int irq, void *dev_id)
{
BUG();
return IRQ_HANDLED;
}
void xen_init_lock_cpu(int cpu)
{
int irq;
char *name;
if (!xen_pvspin)
return;
WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n",
cpu, per_cpu(lock_kicker_irq, cpu));
name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
per_cpu(irq_name, cpu) = name;
irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
cpu,
dummy_handler,
IRQF_PERCPU|IRQF_NOBALANCING,
name,
NULL);
if (irq >= 0) {
disable_irq(irq); /* make sure it's never delivered */
per_cpu(lock_kicker_irq, cpu) = irq;
}
printk("cpu %d spinlock event irq %d\n", cpu, irq);
}
void xen_uninit_lock_cpu(int cpu)
{
int irq;
if (!xen_pvspin)
return;
kfree(per_cpu(irq_name, cpu));
per_cpu(irq_name, cpu) = NULL;
/*
* When booting the kernel with 'mitigations=auto,nosmt', the secondary
* CPUs are not activated, and lock_kicker_irq is not initialized.
*/
irq = per_cpu(lock_kicker_irq, cpu);
if (irq == -1)
return;
unbind_from_irqhandler(irq, NULL);
per_cpu(lock_kicker_irq, cpu) = -1;
}
PV_CALLEE_SAVE_REGS_THUNK(xen_vcpu_stolen);
/*
 * Our init of PV spinlocks is split into two init functions due to us
* using paravirt patching and jump labels patching and having to do
* all of this before SMP code is invoked.
*
* The paravirt patching needs to be done _before_ the alternative asm code
* is started, otherwise we would not patch the core kernel code.
*/
void __init xen_init_spinlocks(void)
{
/* Don't need to use pvqspinlock code if there is only 1 vCPU. */
if (num_possible_cpus() == 1 || nopvspin)
xen_pvspin = false;
if (!xen_pvspin) {
printk(KERN_DEBUG "xen: PV spinlocks disabled\n");
static_branch_disable(&virt_spin_lock_key);
return;
}
printk(KERN_DEBUG "xen: PV spinlocks enabled\n");
__pv_init_lock_hash();
pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
pv_ops.lock.queued_spin_unlock =
PV_CALLEE_SAVE(__pv_queued_spin_unlock);
pv_ops.lock.wait = xen_qlock_wait;
pv_ops.lock.kick = xen_qlock_kick;
pv_ops.lock.vcpu_is_preempted = PV_CALLEE_SAVE(xen_vcpu_stolen);
}
static __init int xen_parse_nopvspin(char *arg)
{
pr_notice("\"xen_nopvspin\" is deprecated, please use \"nopvspin\" instead\n");
xen_pvspin = false;
return 0;
}
early_param("xen_nopvspin", xen_parse_nopvspin);
| linux-master | arch/x86/xen/spinlock.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Xen time implementation.
*
* This is implemented in terms of a clocksource driver which uses
* the hypervisor clock as a nanosecond timebase, and a clockevent
* driver which uses the hypervisor's timer mechanism.
*
* Jeremy Fitzhardinge <[email protected]>, XenSource Inc, 2007
*/
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/pvclock_gtod.h>
#include <linux/timekeeper_internal.h>
#include <asm/pvclock.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/cpuid.h>
#include <xen/events.h>
#include <xen/features.h>
#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>
#include "xen-ops.h"
/* Minimum amount of time until next clock event fires */
#define TIMER_SLOP 100000
static u64 xen_sched_clock_offset __read_mostly;
/* Get the TSC speed from Xen */
static unsigned long xen_tsc_khz(void)
{
struct pvclock_vcpu_time_info *info =
&HYPERVISOR_shared_info->vcpu_info[0].time;
setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
return pvclock_tsc_khz(info);
}
static u64 xen_clocksource_read(void)
{
struct pvclock_vcpu_time_info *src;
u64 ret;
preempt_disable_notrace();
src = &__this_cpu_read(xen_vcpu)->time;
ret = pvclock_clocksource_read(src);
preempt_enable_notrace();
return ret;
}
static u64 xen_clocksource_get_cycles(struct clocksource *cs)
{
return xen_clocksource_read();
}
static noinstr u64 xen_sched_clock(void)
{
struct pvclock_vcpu_time_info *src;
u64 ret;
src = &__this_cpu_read(xen_vcpu)->time;
ret = pvclock_clocksource_read_nowd(src);
ret -= xen_sched_clock_offset;
return ret;
}
static void xen_read_wallclock(struct timespec64 *ts)
{
struct shared_info *s = HYPERVISOR_shared_info;
struct pvclock_wall_clock *wall_clock = &(s->wc);
struct pvclock_vcpu_time_info *vcpu_time;
vcpu_time = &get_cpu_var(xen_vcpu)->time;
pvclock_read_wallclock(wall_clock, vcpu_time, ts);
put_cpu_var(xen_vcpu);
}
static void xen_get_wallclock(struct timespec64 *now)
{
xen_read_wallclock(now);
}
static int xen_set_wallclock(const struct timespec64 *now)
{
return -ENODEV;
}
static int xen_pvclock_gtod_notify(struct notifier_block *nb,
unsigned long was_set, void *priv)
{
/* Protected by the calling core code serialization */
static struct timespec64 next_sync;
struct xen_platform_op op;
struct timespec64 now;
struct timekeeper *tk = priv;
static bool settime64_supported = true;
int ret;
now.tv_sec = tk->xtime_sec;
now.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
/*
* We only take the expensive HV call when the clock was set
* or when the 11 minutes RTC synchronization time elapsed.
*/
if (!was_set && timespec64_compare(&now, &next_sync) < 0)
return NOTIFY_OK;
again:
if (settime64_supported) {
op.cmd = XENPF_settime64;
op.u.settime64.mbz = 0;
op.u.settime64.secs = now.tv_sec;
op.u.settime64.nsecs = now.tv_nsec;
op.u.settime64.system_time = xen_clocksource_read();
} else {
op.cmd = XENPF_settime32;
op.u.settime32.secs = now.tv_sec;
op.u.settime32.nsecs = now.tv_nsec;
op.u.settime32.system_time = xen_clocksource_read();
}
ret = HYPERVISOR_platform_op(&op);
if (ret == -ENOSYS && settime64_supported) {
settime64_supported = false;
goto again;
}
if (ret < 0)
return NOTIFY_BAD;
/*
* Move the next drift compensation time 11 minutes
* ahead. That's emulating the sync_cmos_clock() update for
* the hardware RTC.
*/
next_sync = now;
next_sync.tv_sec += 11 * 60;
return NOTIFY_OK;
}
static struct notifier_block xen_pvclock_gtod_notifier = {
.notifier_call = xen_pvclock_gtod_notify,
};
static int xen_cs_enable(struct clocksource *cs)
{
vclocks_set_used(VDSO_CLOCKMODE_PVCLOCK);
return 0;
}
static struct clocksource xen_clocksource __read_mostly = {
.name = "xen",
.rating = 400,
.read = xen_clocksource_get_cycles,
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
.enable = xen_cs_enable,
};
/*
Xen clockevent implementation
Xen has two clockevent implementations:
The old timer_op one works with all released versions of Xen prior
to version 3.0.4. This version of the hypervisor provides a
	single-shot timer with nanosecond resolution.  However, a 100Hz tick,
	which is delivered while the vcpu is running, shares the same event
	channel.  We don't care about or use this tick, but it will cause the
	core time code to think the timer fired too soon, and will end up
	resetting it each time.  It could be filtered, but doing so has
	complications when the ktime clocksource is not yet the xen
	clocksource (i.e., at boot time).
The new vcpu_op-based timer interface allows the tick timer period
to be changed or turned off. The tick timer is not useful as a
periodic timer because events are only delivered to running vcpus.
The one-shot timer can report when a timeout is in the past, so
set_next_event is capable of returning -ETIME when appropriate.
This interface is used when available.
*/
/*
Get a hypervisor absolute time. In theory we could maintain an
offset between the kernel's time and the hypervisor's time, and
apply that to a kernel's absolute timeout. Unfortunately the
hypervisor and kernel times can drift even if the kernel is using
the Xen clocksource, because ntp can warp the kernel's clocksource.
*/
static s64 get_abs_timeout(unsigned long delta)
{
return xen_clocksource_read() + delta;
}
static int xen_timerop_shutdown(struct clock_event_device *evt)
{
/* cancel timeout */
HYPERVISOR_set_timer_op(0);
return 0;
}
static int xen_timerop_set_next_event(unsigned long delta,
struct clock_event_device *evt)
{
WARN_ON(!clockevent_state_oneshot(evt));
if (HYPERVISOR_set_timer_op(get_abs_timeout(delta)) < 0)
BUG();
/* We may have missed the deadline, but there's no real way of
knowing for sure. If the event was in the past, then we'll
get an immediate interrupt. */
return 0;
}
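/*
 * mult = 1, shift = 0: deltas are already in nanoseconds, so the
 * clockevents core applies no scaling when converting them.
 */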
static struct clock_event_device xen_timerop_clockevent __ro_after_init = {
.name = "xen",
.features = CLOCK_EVT_FEAT_ONESHOT,
.max_delta_ns = 0xffffffff,
.max_delta_ticks = 0xffffffff,
.min_delta_ns = TIMER_SLOP,
.min_delta_ticks = TIMER_SLOP,
.mult = 1,
.shift = 0,
.rating = 500,
.set_state_shutdown = xen_timerop_shutdown,
.set_next_event = xen_timerop_set_next_event,
};
static int xen_vcpuop_shutdown(struct clock_event_device *evt)
{
int cpu = smp_processor_id();
if (HYPERVISOR_vcpu_op(VCPUOP_stop_singleshot_timer, xen_vcpu_nr(cpu),
NULL) ||
HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, xen_vcpu_nr(cpu),
NULL))
BUG();
return 0;
}
static int xen_vcpuop_set_oneshot(struct clock_event_device *evt)
{
int cpu = smp_processor_id();
if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, xen_vcpu_nr(cpu),
NULL))
BUG();
return 0;
}
static int xen_vcpuop_set_next_event(unsigned long delta,
struct clock_event_device *evt)
{
int cpu = smp_processor_id();
struct vcpu_set_singleshot_timer single;
int ret;
WARN_ON(!clockevent_state_oneshot(evt));
single.timeout_abs_ns = get_abs_timeout(delta);
/* Get an event anyway, even if the timeout is already expired */
single.flags = 0;
ret = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, xen_vcpu_nr(cpu),
&single);
BUG_ON(ret != 0);
return ret;
}
static struct clock_event_device xen_vcpuop_clockevent __ro_after_init = {
.name = "xen",
.features = CLOCK_EVT_FEAT_ONESHOT,
.max_delta_ns = 0xffffffff,
.max_delta_ticks = 0xffffffff,
.min_delta_ns = TIMER_SLOP,
.min_delta_ticks = TIMER_SLOP,
.mult = 1,
.shift = 0,
.rating = 500,
.set_state_shutdown = xen_vcpuop_shutdown,
.set_state_oneshot = xen_vcpuop_set_oneshot,
.set_next_event = xen_vcpuop_set_next_event,
};
static const struct clock_event_device *xen_clockevent =
&xen_timerop_clockevent;
struct xen_clock_event_device {
struct clock_event_device evt;
char name[16];
};
static DEFINE_PER_CPU(struct xen_clock_event_device, xen_clock_events) = { .evt.irq = -1 };
static irqreturn_t xen_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evt = this_cpu_ptr(&xen_clock_events.evt);
irqreturn_t ret;
ret = IRQ_NONE;
if (evt->event_handler) {
evt->event_handler(evt);
ret = IRQ_HANDLED;
}
return ret;
}
void xen_teardown_timer(int cpu)
{
struct clock_event_device *evt;
evt = &per_cpu(xen_clock_events, cpu).evt;
if (evt->irq >= 0) {
unbind_from_irqhandler(evt->irq, NULL);
evt->irq = -1;
}
}
void xen_setup_timer(int cpu)
{
struct xen_clock_event_device *xevt = &per_cpu(xen_clock_events, cpu);
struct clock_event_device *evt = &xevt->evt;
int irq;
WARN(evt->irq >= 0, "IRQ%d for CPU%d is already allocated\n", evt->irq, cpu);
if (evt->irq >= 0)
xen_teardown_timer(cpu);
printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu);
snprintf(xevt->name, sizeof(xevt->name), "timer%d", cpu);
irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER|
IRQF_FORCE_RESUME|IRQF_EARLY_RESUME,
xevt->name, NULL);
(void)xen_set_irq_priority(irq, XEN_IRQ_PRIORITY_MAX);
memcpy(evt, xen_clockevent, sizeof(*evt));
evt->cpumask = cpumask_of(cpu);
evt->irq = irq;
}
void xen_setup_cpu_clockevents(void)
{
clockevents_register_device(this_cpu_ptr(&xen_clock_events.evt));
}
void xen_timer_resume(void)
{
int cpu;
if (xen_clockevent != &xen_vcpuop_clockevent)
return;
for_each_online_cpu(cpu) {
if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer,
xen_vcpu_nr(cpu), NULL))
BUG();
}
}
static struct pvclock_vsyscall_time_info *xen_clock __read_mostly;
static u64 xen_clock_value_saved;
void xen_save_time_memory_area(void)
{
struct vcpu_register_time_memory_area t;
int ret;
xen_clock_value_saved = xen_clocksource_read() - xen_sched_clock_offset;
if (!xen_clock)
return;
t.addr.v = NULL;
ret = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_time_memory_area, 0, &t);
if (ret != 0)
pr_notice("Cannot save secondary vcpu_time_info (err %d)",
ret);
else
clear_page(xen_clock);
}
void xen_restore_time_memory_area(void)
{
struct vcpu_register_time_memory_area t;
int ret;
if (!xen_clock)
goto out;
t.addr.v = &xen_clock->pvti;
ret = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_time_memory_area, 0, &t);
/*
* We don't disable VDSO_CLOCKMODE_PVCLOCK entirely if it fails to
* register the secondary time info with Xen or if we migrated to a
 * host without the necessary flags. In both of these cases what
 * happens is that processes either see a zeroed-out pvti or see the
 * PVCLOCK_TSC_STABLE_BIT flag cleared. Userspace checks the latter
 * and, if it is 0, discards the data in pvti and falls back to a
 * system call for a reliable timestamp.
*/
if (ret != 0)
pr_notice("Cannot restore secondary vcpu_time_info (err %d)",
ret);
out:
/* Need pvclock_resume() before using xen_clocksource_read(). */
pvclock_resume();
xen_sched_clock_offset = xen_clocksource_read() - xen_clock_value_saved;
}
static void xen_setup_vsyscall_time_info(void)
{
struct vcpu_register_time_memory_area t;
struct pvclock_vsyscall_time_info *ti;
int ret;
ti = (struct pvclock_vsyscall_time_info *)get_zeroed_page(GFP_KERNEL);
if (!ti)
return;
t.addr.v = &ti->pvti;
ret = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_time_memory_area, 0, &t);
if (ret) {
pr_notice("xen: VDSO_CLOCKMODE_PVCLOCK not supported (err %d)\n", ret);
free_page((unsigned long)ti);
return;
}
/*
 * If the primary time info had this bit set, the secondary one should
 * too, since it is the same data in both, just in different memory
 * regions. But we still check it in case the hypervisor is buggy.
*/
if (!(ti->pvti.flags & PVCLOCK_TSC_STABLE_BIT)) {
t.addr.v = NULL;
ret = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_time_memory_area,
0, &t);
if (!ret)
free_page((unsigned long)ti);
pr_notice("xen: VDSO_CLOCKMODE_PVCLOCK not supported (tsc unstable)\n");
return;
}
xen_clock = ti;
pvclock_set_pvti_cpu0_va(xen_clock);
xen_clocksource.vdso_clock_mode = VDSO_CLOCKMODE_PVCLOCK;
}
/*
* Check if it is possible to safely use the tsc as a clocksource. This is
* only true if the hypervisor notifies the guest that its tsc is invariant,
* the tsc is stable, and the tsc instruction will never be emulated.
*/
static int __init xen_tsc_safe_clocksource(void)
{
u32 eax, ebx, ecx, edx;
if (!(boot_cpu_has(X86_FEATURE_CONSTANT_TSC)))
return 0;
if (!(boot_cpu_has(X86_FEATURE_NONSTOP_TSC)))
return 0;
if (check_tsc_unstable())
return 0;
/* Leaf 4, sub-leaf 0 (0x40000x03) */
cpuid_count(xen_cpuid_base() + 3, 0, &eax, &ebx, &ecx, &edx);
return ebx == XEN_CPUID_TSC_MODE_NEVER_EMULATE;
}
static void __init xen_time_init(void)
{
struct pvclock_vcpu_time_info *pvti;
int cpu = smp_processor_id();
struct timespec64 tp;
/*
* As Dom0 is never moved, no penalty on using TSC there.
*
* If it is possible for the guest to determine that the tsc is a safe
* clocksource, then set xen_clocksource rating below that of the tsc
* so that the system prefers tsc instead.
*/
if (xen_initial_domain())
xen_clocksource.rating = 275;
else if (xen_tsc_safe_clocksource())
xen_clocksource.rating = 299;
clocksource_register_hz(&xen_clocksource, NSEC_PER_SEC);
if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, xen_vcpu_nr(cpu),
NULL) == 0) {
/* Successfully turned off 100Hz tick, so we have the
vcpuop-based timer interface */
printk(KERN_DEBUG "Xen: using vcpuop timer interface\n");
xen_clockevent = &xen_vcpuop_clockevent;
}
/* Set initial system time with full resolution */
xen_read_wallclock(&tp);
do_settimeofday64(&tp);
setup_force_cpu_cap(X86_FEATURE_TSC);
/*
 * We check the primary time info ahead of time to see if this
 * bit is supported, hence speeding up the Xen clocksource.
*/
pvti = &__this_cpu_read(xen_vcpu)->time;
if (pvti->flags & PVCLOCK_TSC_STABLE_BIT) {
pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT);
xen_setup_vsyscall_time_info();
}
xen_setup_runstate_info(cpu);
xen_setup_timer(cpu);
xen_setup_cpu_clockevents();
xen_time_setup_guest();
if (xen_initial_domain())
pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier);
}
static void __init xen_init_time_common(void)
{
xen_sched_clock_offset = xen_clocksource_read();
static_call_update(pv_steal_clock, xen_steal_clock);
paravirt_set_sched_clock(xen_sched_clock);
x86_platform.calibrate_tsc = xen_tsc_khz;
x86_platform.get_wallclock = xen_get_wallclock;
}
void __init xen_init_time_ops(void)
{
xen_init_time_common();
x86_init.timers.timer_init = xen_time_init;
x86_init.timers.setup_percpu_clockev = x86_init_noop;
x86_cpuinit.setup_percpu_clockev = x86_init_noop;
/* Dom0 uses the native method to set the hardware RTC. */
if (!xen_initial_domain())
x86_platform.set_wallclock = xen_set_wallclock;
}
#ifdef CONFIG_XEN_PVHVM
static void xen_hvm_setup_cpu_clockevents(void)
{
int cpu = smp_processor_id();
xen_setup_runstate_info(cpu);
/*
* xen_setup_timer(cpu) - snprintf is bad in atomic context. Hence
 * doing it in xen_hvm_cpu_notify (which gets called by smp_init during
* early bootup and also during CPU hotplug events).
*/
xen_setup_cpu_clockevents();
}
void __init xen_hvm_init_time_ops(void)
{
static bool hvm_time_initialized;
if (hvm_time_initialized)
return;
/*
 * The vector callback is needed, otherwise we cannot receive interrupts
* on cpu > 0 and at this point we don't know how many cpus are
* available.
*/
if (!xen_have_vector_callback)
return;
if (!xen_feature(XENFEAT_hvm_safe_pvclock)) {
pr_info_once("Xen doesn't support pvclock on HVM, disable pv timer");
return;
}
/*
* Only MAX_VIRT_CPUS 'vcpu_info' are embedded inside 'shared_info'.
 * The __this_cpu_read(xen_vcpu) is still NULL when a Xen HVM guest
 * boots on a vcpu >= MAX_VIRT_CPUS (e.g., kexec). Accessing
 * __this_cpu_read(xen_vcpu) via xen_clocksource_read() would panic.
*
* The xen_hvm_init_time_ops() should be called again later after
* __this_cpu_read(xen_vcpu) is available.
*/
if (!__this_cpu_read(xen_vcpu)) {
pr_info("Delay xen_init_time_common() as kernel is running on vcpu=%d\n",
xen_vcpu_nr(0));
return;
}
xen_init_time_common();
x86_init.timers.setup_percpu_clockev = xen_time_init;
x86_cpuinit.setup_percpu_clockev = xen_hvm_setup_cpu_clockevents;
x86_platform.set_wallclock = xen_set_wallclock;
hvm_time_initialized = true;
}
#endif
/* Kernel parameter to specify Xen timer slop */
static int __init parse_xen_timer_slop(char *ptr)
{
unsigned long slop = memparse(ptr, NULL);
xen_timerop_clockevent.min_delta_ns = slop;
xen_timerop_clockevent.min_delta_ticks = slop;
xen_vcpuop_clockevent.min_delta_ns = slop;
xen_vcpuop_clockevent.min_delta_ticks = slop;
return 0;
}
early_param("xen_timer_slop", parse_xen_timer_slop);
| linux-master | arch/x86/xen/time.c |
// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
* platform-pci-unplug.c
*
* Xen platform PCI device driver
* Copyright (c) 2010, Citrix
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/io.h>
#include <linux/export.h>
#include <xen/xen.h>
#include <xen/platform_pci.h>
#include "xen-ops.h"
#define XEN_PLATFORM_ERR_MAGIC -1
#define XEN_PLATFORM_ERR_PROTOCOL -2
#define XEN_PLATFORM_ERR_BLACKLIST -3
/* store the value of xen_emul_unplug after the unplug is done */
static int xen_platform_pci_unplug;
static int xen_emul_unplug;
static int check_platform_magic(void)
{
short magic;
char protocol;
magic = inw(XEN_IOPORT_MAGIC);
if (magic != XEN_IOPORT_MAGIC_VAL) {
pr_err("Xen Platform PCI: unrecognised magic value\n");
return XEN_PLATFORM_ERR_MAGIC;
}
protocol = inb(XEN_IOPORT_PROTOVER);
pr_debug("Xen Platform PCI: I/O protocol version %d\n",
protocol);
switch (protocol) {
case 1:
outw(XEN_IOPORT_LINUX_PRODNUM, XEN_IOPORT_PRODNUM);
outl(XEN_IOPORT_LINUX_DRVVER, XEN_IOPORT_DRVVER);
if (inw(XEN_IOPORT_MAGIC) != XEN_IOPORT_MAGIC_VAL) {
pr_err("Xen Platform: blacklisted by host\n");
return XEN_PLATFORM_ERR_BLACKLIST;
}
break;
default:
pr_warn("Xen Platform PCI: unknown I/O protocol version\n");
return XEN_PLATFORM_ERR_PROTOCOL;
}
return 0;
}
bool xen_has_pv_devices(void)
{
if (!xen_domain())
return false;
/* PV and PVH domains always have them. */
if (xen_pv_domain() || xen_pvh_domain())
return true;
	/* The user has xen_platform_pci=0 set in the guest config, as
	 * the driver did not modify the value. */
if (xen_platform_pci_unplug == 0)
return false;
if (xen_platform_pci_unplug & XEN_UNPLUG_NEVER)
return false;
if (xen_platform_pci_unplug & XEN_UNPLUG_ALL)
return true;
/* This is an odd one - we are going to run legacy
* and PV drivers at the same time. */
if (xen_platform_pci_unplug & XEN_UNPLUG_UNNECESSARY)
return true;
/* And the caller has to follow with xen_pv_{disk,nic}_devices
* to be certain which driver can load. */
return false;
}
EXPORT_SYMBOL_GPL(xen_has_pv_devices);
static bool __xen_has_pv_device(int state)
{
/* HVM domains might or might not */
if (xen_hvm_domain() && (xen_platform_pci_unplug & state))
return true;
return xen_has_pv_devices();
}
bool xen_has_pv_nic_devices(void)
{
return __xen_has_pv_device(XEN_UNPLUG_ALL_NICS | XEN_UNPLUG_ALL);
}
EXPORT_SYMBOL_GPL(xen_has_pv_nic_devices);
bool xen_has_pv_disk_devices(void)
{
return __xen_has_pv_device(XEN_UNPLUG_ALL_IDE_DISKS |
XEN_UNPLUG_AUX_IDE_DISKS | XEN_UNPLUG_ALL);
}
EXPORT_SYMBOL_GPL(xen_has_pv_disk_devices);
/*
* This one is odd - it determines whether you want to run PV _and_
* legacy (IDE) drivers together. This combination is only possible
* under HVM.
*/
bool xen_has_pv_and_legacy_disk_devices(void)
{
if (!xen_domain())
return false;
/* N.B. This is only ever used in HVM mode */
if (xen_pv_domain())
return false;
if (xen_platform_pci_unplug & XEN_UNPLUG_UNNECESSARY)
return true;
return false;
}
EXPORT_SYMBOL_GPL(xen_has_pv_and_legacy_disk_devices);
void xen_unplug_emulated_devices(void)
{
int r;
/* PVH guests don't have emulated devices. */
if (xen_pvh_domain())
return;
/* user explicitly requested no unplug */
if (xen_emul_unplug & XEN_UNPLUG_NEVER)
return;
/* check the version of the xen platform PCI device */
r = check_platform_magic();
	/* If the version matches, enable the Xen platform PCI driver.
* Also enable the Xen platform PCI driver if the host does
* not support the unplug protocol (XEN_PLATFORM_ERR_MAGIC)
* but the user told us that unplugging is unnecessary. */
if (r && !(r == XEN_PLATFORM_ERR_MAGIC &&
(xen_emul_unplug & XEN_UNPLUG_UNNECESSARY)))
return;
/* Set the default value of xen_emul_unplug depending on whether or
* not the Xen PV frontends and the Xen platform PCI driver have
* been compiled for this kernel (modules or built-in are both OK). */
if (!xen_emul_unplug) {
if (xen_must_unplug_nics()) {
pr_info("Netfront and the Xen platform PCI driver have "
"been compiled for this kernel: unplug emulated NICs.\n");
xen_emul_unplug |= XEN_UNPLUG_ALL_NICS;
}
if (xen_must_unplug_disks()) {
pr_info("Blkfront and the Xen platform PCI driver have "
"been compiled for this kernel: unplug emulated disks.\n"
"You might have to change the root device\n"
"from /dev/hd[a-d] to /dev/xvd[a-d]\n"
"in your root= kernel command line option\n");
xen_emul_unplug |= XEN_UNPLUG_ALL_IDE_DISKS;
}
}
/* Now unplug the emulated devices */
if (!(xen_emul_unplug & XEN_UNPLUG_UNNECESSARY))
outw(xen_emul_unplug, XEN_IOPORT_UNPLUG);
xen_platform_pci_unplug = xen_emul_unplug;
}
static int __init parse_xen_emul_unplug(char *arg)
{
char *p, *q;
int l;
for (p = arg; p; p = q) {
q = strchr(p, ',');
if (q) {
l = q - p;
q++;
} else {
l = strlen(p);
}
if (!strncmp(p, "all", l))
xen_emul_unplug |= XEN_UNPLUG_ALL;
else if (!strncmp(p, "ide-disks", l))
xen_emul_unplug |= XEN_UNPLUG_ALL_IDE_DISKS;
else if (!strncmp(p, "aux-ide-disks", l))
xen_emul_unplug |= XEN_UNPLUG_AUX_IDE_DISKS;
else if (!strncmp(p, "nics", l))
xen_emul_unplug |= XEN_UNPLUG_ALL_NICS;
else if (!strncmp(p, "unnecessary", l))
xen_emul_unplug |= XEN_UNPLUG_UNNECESSARY;
else if (!strncmp(p, "never", l))
xen_emul_unplug |= XEN_UNPLUG_NEVER;
else
pr_warn("unrecognised option '%s' "
"in parameter 'xen_emul_unplug'\n", p);
}
return 0;
}
early_param("xen_emul_unplug", parse_xen_emul_unplug);
| linux-master | arch/x86/xen/platform-pci-unplug.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/crash_dump.h>
#include <xen/interface/xen.h>
#include <xen/hvm.h>
#include "mmu.h"
#ifdef CONFIG_PROC_VMCORE
/*
* The kdump kernel has to check whether a pfn of the crashed kernel
* was a ballooned page. vmcore uses this function to decide
* whether to access a pfn of the crashed kernel.
* Returns "false" if the pfn is not backed by a RAM page; the caller
* may then handle the pfn specially.
*/
static bool xen_vmcore_pfn_is_ram(struct vmcore_cb *cb, unsigned long pfn)
{
struct xen_hvm_get_mem_type a = {
.domid = DOMID_SELF,
.pfn = pfn,
};
if (HYPERVISOR_hvm_op(HVMOP_get_mem_type, &a)) {
pr_warn_once("Unexpected HVMOP_get_mem_type failure\n");
return true;
}
return a.mem_type != HVMMEM_mmio_dm;
}
static struct vmcore_cb xen_vmcore_cb = {
.pfn_is_ram = xen_vmcore_pfn_is_ram,
};
#endif
static void xen_hvm_exit_mmap(struct mm_struct *mm)
{
struct xen_hvm_pagetable_dying a;
int rc;
a.domid = DOMID_SELF;
a.gpa = __pa(mm->pgd);
rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
WARN_ON_ONCE(rc < 0);
}
static int is_pagetable_dying_supported(void)
{
struct xen_hvm_pagetable_dying a;
int rc = 0;
a.domid = DOMID_SELF;
a.gpa = 0x00;
rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
if (rc < 0) {
printk(KERN_DEBUG "HVMOP_pagetable_dying not supported\n");
return 0;
}
return 1;
}
void __init xen_hvm_init_mmu_ops(void)
{
if (is_pagetable_dying_supported())
pv_ops.mmu.exit_mmap = xen_hvm_exit_mmap;
#ifdef CONFIG_PROC_VMCORE
register_vmcore_cb(&xen_vmcore_cb);
#endif
}
| linux-master | arch/x86/xen/mmu_hvm.c |
// SPDX-License-Identifier: GPL-2.0
#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
#include <linux/memblock.h>
#endif
#include <linux/console.h>
#include <linux/cpu.h>
#include <linux/kexec.h>
#include <linux/slab.h>
#include <linux/panic_notifier.h>
#include <xen/xen.h>
#include <xen/features.h>
#include <xen/interface/sched.h>
#include <xen/interface/version.h>
#include <xen/page.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#include <asm/cpu.h>
#include <asm/e820/api.h>
#include <asm/setup.h>
#include "xen-ops.h"
#include "smp.h"
#include "pmu.h"
EXPORT_SYMBOL_GPL(hypercall_page);
/*
* Pointer to the xen_vcpu_info structure or
* &HYPERVISOR_shared_info->vcpu_info[cpu]. See xen_hvm_init_shared_info
* and xen_vcpu_setup for details. By default it points to share_info->vcpu_info
* but during boot it is switched to point to xen_vcpu_info.
* The pointer is used in xen_evtchn_do_upcall to acknowledge pending events.
*/
DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
/* Linux <-> Xen vCPU id mapping */
DEFINE_PER_CPU(uint32_t, xen_vcpu_id);
EXPORT_PER_CPU_SYMBOL(xen_vcpu_id);
unsigned long *machine_to_phys_mapping = (void *)MACH2PHYS_VIRT_START;
EXPORT_SYMBOL(machine_to_phys_mapping);
unsigned long machine_to_phys_nr;
EXPORT_SYMBOL(machine_to_phys_nr);
struct start_info *xen_start_info;
EXPORT_SYMBOL_GPL(xen_start_info);
struct shared_info xen_dummy_shared_info;
__read_mostly bool xen_have_vector_callback = true;
EXPORT_SYMBOL_GPL(xen_have_vector_callback);
/*
* NB: These need to live in .data or the like because they're used by
* xen_prepare_pvh() which runs before clearing the bss.
*/
enum xen_domain_type __ro_after_init xen_domain_type = XEN_NATIVE;
EXPORT_SYMBOL_GPL(xen_domain_type);
uint32_t __ro_after_init xen_start_flags;
EXPORT_SYMBOL(xen_start_flags);
/*
* Point at some empty memory to start with. We map the real shared_info
* page as soon as fixmap is up and running.
*/
struct shared_info *HYPERVISOR_shared_info = &xen_dummy_shared_info;
static int xen_cpu_up_online(unsigned int cpu)
{
xen_init_lock_cpu(cpu);
return 0;
}
int xen_cpuhp_setup(int (*cpu_up_prepare_cb)(unsigned int),
int (*cpu_dead_cb)(unsigned int))
{
int rc;
rc = cpuhp_setup_state_nocalls(CPUHP_XEN_PREPARE,
"x86/xen/guest:prepare",
cpu_up_prepare_cb, cpu_dead_cb);
if (rc >= 0) {
rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
"x86/xen/guest:online",
xen_cpu_up_online, NULL);
if (rc < 0)
cpuhp_remove_state_nocalls(CPUHP_XEN_PREPARE);
}
return rc >= 0 ? 0 : rc;
}
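/*
* Illustrative sketch (not part of the original file): a caller wires
* its CPU hotplug callbacks through xen_cpuhp_setup(). The names
* example_cpu_up_prepare() and example_cpu_dead() are hypothetical.
*
*	static int example_cpu_up_prepare(unsigned int cpu)
*	{
*		(allocate per-cpu resources here)
*		return 0;
*	}
*
*	static int example_cpu_dead(unsigned int cpu)
*	{
*		(release per-cpu resources here)
*		return 0;
*	}
*
*	rc = xen_cpuhp_setup(example_cpu_up_prepare, example_cpu_dead);
*/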
static void xen_vcpu_setup_restore(int cpu)
{
/* Any per_cpu(xen_vcpu) is stale, so reset it */
xen_vcpu_info_reset(cpu);
/*
* For PVH and PVHVM, set up online VCPUs only. The rest will
* be handled by hotplug.
*/
if (xen_pv_domain() ||
(xen_hvm_domain() && cpu_online(cpu)))
xen_vcpu_setup(cpu);
}
/*
* On restore, set the vcpu placement up again.
* If it fails, then we're in a bad state, since
* we can't back out from using it...
*/
void xen_vcpu_restore(void)
{
int cpu;
for_each_possible_cpu(cpu) {
bool other_cpu = (cpu != smp_processor_id());
bool is_up;
if (xen_vcpu_nr(cpu) == XEN_VCPU_ID_INVALID)
continue;
/* Only Xen 4.5 and higher support this. */
is_up = HYPERVISOR_vcpu_op(VCPUOP_is_up,
xen_vcpu_nr(cpu), NULL) > 0;
if (other_cpu && is_up &&
HYPERVISOR_vcpu_op(VCPUOP_down, xen_vcpu_nr(cpu), NULL))
BUG();
if (xen_pv_domain() || xen_feature(XENFEAT_hvm_safe_pvclock))
xen_setup_runstate_info(cpu);
xen_vcpu_setup_restore(cpu);
if (other_cpu && is_up &&
HYPERVISOR_vcpu_op(VCPUOP_up, xen_vcpu_nr(cpu), NULL))
BUG();
}
}
void xen_vcpu_info_reset(int cpu)
{
if (xen_vcpu_nr(cpu) < MAX_VIRT_CPUS) {
per_cpu(xen_vcpu, cpu) =
&HYPERVISOR_shared_info->vcpu_info[xen_vcpu_nr(cpu)];
} else {
/* Set to NULL so that if somebody accesses it we get an OOPS */
per_cpu(xen_vcpu, cpu) = NULL;
}
}
void xen_vcpu_setup(int cpu)
{
struct vcpu_register_vcpu_info info;
int err;
struct vcpu_info *vcpup;
BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
/*
* This path is called on PVHVM at bootup (xen_hvm_smp_prepare_boot_cpu)
* and at restore (xen_vcpu_restore). Also called for hotplugged
* VCPUs (cpu_init -> xen_hvm_cpu_prepare_hvm).
* However, the hypercall can only be done once (see below) so if a VCPU
* is offlined and comes back online then let's not redo the hypercall.
*
* For PV it is called during restore (xen_vcpu_restore) and bootup
* (xen_setup_vcpu_info_placement). The hotplug mechanism does not
* use this function.
*/
if (xen_hvm_domain()) {
if (per_cpu(xen_vcpu, cpu) == &per_cpu(xen_vcpu_info, cpu))
return;
}
vcpup = &per_cpu(xen_vcpu_info, cpu);
info.mfn = arbitrary_virt_to_mfn(vcpup);
info.offset = offset_in_page(vcpup);
/*
* N.B. This hypercall can _only_ be called once per CPU.
* Subsequent calls will error out with -EINVAL, because the
* hypervisor has no unregister variant and this hypercall does
* not allow over-writing info.mfn and
* info.offset.
*/
err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, xen_vcpu_nr(cpu),
&info);
if (err)
panic("register_vcpu_info failed: cpu=%d err=%d\n", cpu, err);
per_cpu(xen_vcpu, cpu) = vcpup;
}
void __init xen_banner(void)
{
unsigned version = HYPERVISOR_xen_version(XENVER_version, NULL);
struct xen_extraversion extra;
HYPERVISOR_xen_version(XENVER_extraversion, &extra);
pr_info("Booting kernel on %s\n", pv_info.name);
pr_info("Xen version: %u.%u%s%s\n",
version >> 16, version & 0xffff, extra.extraversion,
xen_feature(XENFEAT_mmu_pt_update_preserve_ad)
? " (preserve-AD)" : "");
}
/* Check if running on Xen version (major, minor) or later */
bool xen_running_on_version_or_later(unsigned int major, unsigned int minor)
{
unsigned int version;
if (!xen_domain())
return false;
version = HYPERVISOR_xen_version(XENVER_version, NULL);
if ((((version >> 16) == major) && ((version & 0xffff) >= minor)) ||
((version >> 16) > major))
return true;
return false;
}
void __init xen_add_preferred_consoles(void)
{
add_preferred_console("xenboot", 0, NULL);
if (!boot_params.screen_info.orig_video_isVGA)
add_preferred_console("tty", 0, NULL);
add_preferred_console("hvc", 0, NULL);
if (boot_params.screen_info.orig_video_isVGA)
add_preferred_console("tty", 0, NULL);
}
void xen_reboot(int reason)
{
struct sched_shutdown r = { .reason = reason };
int cpu;
for_each_online_cpu(cpu)
xen_pmu_finish(cpu);
if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
BUG();
}
static int reboot_reason = SHUTDOWN_reboot;
static bool xen_legacy_crash;
void xen_emergency_restart(void)
{
xen_reboot(reboot_reason);
}
static int
xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
{
if (!kexec_crash_loaded()) {
if (xen_legacy_crash)
xen_reboot(SHUTDOWN_crash);
reboot_reason = SHUTDOWN_crash;
/*
* If panic_timeout==0 then we are supposed to wait forever.
* However, to preserve the original dom0 behavior we have to drop
* into the hypervisor. (domU behavior is controlled by its
* config file)
*/
if (panic_timeout == 0)
panic_timeout = -1;
}
return NOTIFY_DONE;
}
static int __init parse_xen_legacy_crash(char *arg)
{
xen_legacy_crash = true;
return 0;
}
early_param("xen_legacy_crash", parse_xen_legacy_crash);
static struct notifier_block xen_panic_block = {
.notifier_call = xen_panic_event,
.priority = INT_MIN
};
int xen_panic_handler_init(void)
{
atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block);
return 0;
}
void xen_pin_vcpu(int cpu)
{
static bool disable_pinning;
struct sched_pin_override pin_override;
int ret;
if (disable_pinning)
return;
pin_override.pcpu = cpu;
ret = HYPERVISOR_sched_op(SCHEDOP_pin_override, &pin_override);
/* Ignore errors when removing override. */
if (cpu < 0)
return;
switch (ret) {
case -ENOSYS:
pr_warn("Unable to pin on physical cpu %d. In case of problems consider vcpu pinning.\n",
cpu);
disable_pinning = true;
break;
case -EPERM:
WARN(1, "Trying to pin vcpu without having privilege to do so\n");
disable_pinning = true;
break;
case -EINVAL:
case -EBUSY:
pr_warn("Physical cpu %d not available for pinning. Check Xen cpu configuration.\n",
cpu);
break;
case 0:
break;
default:
WARN(1, "rc %d while trying to pin vcpu\n", ret);
disable_pinning = true;
}
}
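/*
* Usage sketch (illustrative, not part of the original file): a
* non-negative argument asks Xen to pin the current vCPU to that
* physical CPU, while a negative one removes the override; errors
* from the removal are deliberately ignored above.
*
*	xen_pin_vcpu(3);	(pin to physical CPU 3)
*	xen_pin_vcpu(-1);	(drop the pinning override)
*/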
#ifdef CONFIG_HOTPLUG_CPU
void xen_arch_register_cpu(int num)
{
arch_register_cpu(num);
}
EXPORT_SYMBOL(xen_arch_register_cpu);
void xen_arch_unregister_cpu(int num)
{
arch_unregister_cpu(num);
}
EXPORT_SYMBOL(xen_arch_unregister_cpu);
#endif
| linux-master | arch/x86/xen/enlighten.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/thread_info.h>
#include <asm/smp.h>
#include <xen/events.h>
#include "xen-ops.h"
#include "smp.h"
static void __init xen_hvm_smp_prepare_boot_cpu(void)
{
BUG_ON(smp_processor_id() != 0);
native_smp_prepare_boot_cpu();
/*
* Set up vcpu_info for the boot CPU. Secondary CPUs get their vcpu_info
* in xen_cpu_up_prepare_hvm().
*/
xen_vcpu_setup(0);
/*
* Called again in case the kernel boots on vcpu >= MAX_VIRT_CPUS.
* Refer to comments in xen_hvm_init_time_ops().
*/
xen_hvm_init_time_ops();
/*
* The alternative logic (which patches the unlock/lock) runs before
* the SMP bootup code is activated. Hence we need to set this up
* before the core kernel is patched. Otherwise we will have only
* modules patched but not core code.
*/
xen_init_spinlocks();
}
static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
{
int cpu;
native_smp_prepare_cpus(max_cpus);
if (xen_have_vector_callback) {
WARN_ON(xen_smp_intr_init(0));
xen_init_lock_cpu(0);
}
for_each_possible_cpu(cpu) {
if (cpu == 0)
continue;
/* Set default vcpu_id to make sure that we don't use cpu-0's */
per_cpu(xen_vcpu_id, cpu) = XEN_VCPU_ID_INVALID;
}
}
#ifdef CONFIG_HOTPLUG_CPU
static void xen_hvm_cleanup_dead_cpu(unsigned int cpu)
{
if (xen_have_vector_callback) {
xen_smp_intr_free(cpu);
xen_uninit_lock_cpu(cpu);
xen_teardown_timer(cpu);
}
}
#else
static void xen_hvm_cleanup_dead_cpu(unsigned int cpu)
{
BUG();
}
#endif
void __init xen_hvm_smp_init(void)
{
smp_ops.smp_prepare_boot_cpu = xen_hvm_smp_prepare_boot_cpu;
smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
smp_ops.smp_cpus_done = xen_smp_cpus_done;
smp_ops.cleanup_dead_cpu = xen_hvm_cleanup_dead_cpu;
if (!xen_have_vector_callback) {
#ifdef CONFIG_PARAVIRT_SPINLOCKS
nopvspin = true;
#endif
return;
}
smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
}
| linux-master | arch/x86/xen/smp_hvm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Xen mmu operations
*
* This file contains the various mmu fetch and update operations.
* The most important job they must perform is the mapping between the
* domain's pfn and the overall machine mfns.
*
* Xen allows guests to directly update the pagetable, in a controlled
* fashion. In other words, the guest modifies the same pagetable
* that the CPU actually uses, which eliminates the overhead of having
* a separate shadow pagetable.
*
* In order to allow this, it falls on the guest domain to map its
* notion of a "physical" pfn - which is just a domain-local linear
* address - into a real "machine address" which the CPU's MMU can
* use.
*
* A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
* inserted directly into the pagetable. When creating a new
* pte/pmd/pgd, it converts the passed pfn into an mfn. Conversely,
* when reading the content back with __(pgd|pmd|pte)_val, it converts
* the mfn back into a pfn.
*
* The other constraint is that all pages which make up a pagetable
* must be mapped read-only in the guest. This prevents uncontrolled
* guest updates to the pagetable. Xen strictly enforces this, and
* will disallow any pagetable update which will end up mapping a
* pagetable page RW, and will disallow using any writable page as a
* pagetable.
*
* Naively, when loading %cr3 with the base of a new pagetable, Xen
* would need to validate the whole pagetable before going on.
* Naturally, this is quite slow. The solution is to "pin" a
* pagetable, which enforces all the constraints on the pagetable even
* when it is not actively in use. This means that Xen can be assured
* that it is still valid when you do load it into %cr3, and doesn't
* need to revalidate it.
*
* Jeremy Fitzhardinge <[email protected]>, XenSource Inc, 2007
*/
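/*
* Illustrative sketch (not part of the original file): the pfn<->mfn
* translation described above amounts to rewriting the frame number
* inside a pte while preserving the flag bits, roughly:
*
*	flags = val & PTE_FLAGS_MASK;
*	mfn = __pfn_to_mfn((val & PTE_PFN_MASK) >> PAGE_SHIFT);
*	val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
*
* pte_pfn_to_mfn() and pte_mfn_to_pfn() below implement this, plus
* the handling of missing translations.
*/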
#include <linux/sched/mm.h>
#include <linux/debugfs.h>
#include <linux/bug.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/seq_file.h>
#include <linux/crash_dump.h>
#include <linux/pgtable.h>
#ifdef CONFIG_KEXEC_CORE
#include <linux/kexec.h>
#endif
#include <trace/events/xen.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>
#include <asm/paravirt.h>
#include <asm/e820/api.h>
#include <asm/linkage.h>
#include <asm/page.h>
#include <asm/init.h>
#include <asm/memtype.h>
#include <asm/smp.h>
#include <asm/tlb.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/version.h>
#include <xen/interface/memory.h>
#include <xen/hvc-console.h>
#include <xen/swiotlb-xen.h>
#include "multicalls.h"
#include "mmu.h"
#include "debugfs.h"
/*
* Prototypes for functions called via PV_CALLEE_SAVE_REGS_THUNK() in order
* to avoid warnings with "-Wmissing-prototypes".
*/
pteval_t xen_pte_val(pte_t pte);
pgdval_t xen_pgd_val(pgd_t pgd);
pmdval_t xen_pmd_val(pmd_t pmd);
pudval_t xen_pud_val(pud_t pud);
p4dval_t xen_p4d_val(p4d_t p4d);
pte_t xen_make_pte(pteval_t pte);
pgd_t xen_make_pgd(pgdval_t pgd);
pmd_t xen_make_pmd(pmdval_t pmd);
pud_t xen_make_pud(pudval_t pud);
p4d_t xen_make_p4d(p4dval_t p4d);
pte_t xen_make_pte_init(pteval_t pte);
#ifdef CONFIG_X86_VSYSCALL_EMULATION
/* l3 pud for userspace vsyscall mapping */
static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
#endif
/*
* Protects atomic reservation decrease/increase against concurrent increases.
* Also protects non-atomic updates of current_pages and balloon lists.
*/
static DEFINE_SPINLOCK(xen_reservation_lock);
/*
* Note about cr3 (pagetable base) values:
*
* xen_cr3 contains the current logical cr3 value; it contains the
* last set cr3. This may not be the current effective cr3, because
* its update may still be lazily deferred. However, a vcpu looking
* at its own cr3 can use this value knowing that everything will
* be self-consistent.
*
* xen_current_cr3 contains the actual vcpu cr3; it is set once the
* hypercall to set the vcpu cr3 is complete (so it may be a little
* out of date, but it will never be set early). If one vcpu is
* looking at another vcpu's cr3 value, it should use this variable.
*/
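/*
* Illustrative sketch (not part of the original file): per the rules
* above, code inspecting another vcpu's pagetable base must compare
* against xen_current_cr3, as xen_drop_mm_ref() does further down:
*
*	if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
*		(that cpu may still reference this pagetable)
*/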
DEFINE_PER_CPU(unsigned long, xen_cr3); /* cr3 stored as physaddr */
DEFINE_PER_CPU(unsigned long, xen_current_cr3); /* actual vcpu cr3 */
static phys_addr_t xen_pt_base, xen_pt_size __initdata;
static DEFINE_STATIC_KEY_FALSE(xen_struct_pages_ready);
/*
* Just beyond the highest usermode address. STACK_TOP_MAX has a
* redzone above it, so round it up to a PGD boundary.
*/
#define USER_LIMIT ((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)
void make_lowmem_page_readonly(void *vaddr)
{
pte_t *pte, ptev;
unsigned long address = (unsigned long)vaddr;
unsigned int level;
pte = lookup_address(address, &level);
if (pte == NULL)
return; /* vaddr missing */
ptev = pte_wrprotect(*pte);
if (HYPERVISOR_update_va_mapping(address, ptev, 0))
BUG();
}
void make_lowmem_page_readwrite(void *vaddr)
{
pte_t *pte, ptev;
unsigned long address = (unsigned long)vaddr;
unsigned int level;
pte = lookup_address(address, &level);
if (pte == NULL)
return; /* vaddr missing */
ptev = pte_mkwrite_novma(*pte);
if (HYPERVISOR_update_va_mapping(address, ptev, 0))
BUG();
}
/*
* During early boot all page table pages are pinned, but we do not have struct
* pages, so return true until struct pages are ready.
*/
static bool xen_page_pinned(void *ptr)
{
if (static_branch_likely(&xen_struct_pages_ready)) {
struct page *page = virt_to_page(ptr);
return PagePinned(page);
}
return true;
}
static void xen_extend_mmu_update(const struct mmu_update *update)
{
struct multicall_space mcs;
struct mmu_update *u;
mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));
if (mcs.mc != NULL) {
mcs.mc->args[1]++;
} else {
mcs = __xen_mc_entry(sizeof(*u));
MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
}
u = mcs.args;
*u = *update;
}
static void xen_extend_mmuext_op(const struct mmuext_op *op)
{
struct multicall_space mcs;
struct mmuext_op *u;
mcs = xen_mc_extend_args(__HYPERVISOR_mmuext_op, sizeof(*u));
if (mcs.mc != NULL) {
mcs.mc->args[1]++;
} else {
mcs = __xen_mc_entry(sizeof(*u));
MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
}
u = mcs.args;
*u = *op;
}
static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
{
struct mmu_update u;
preempt_disable();
xen_mc_batch();
/* ptr may be ioremapped for 64-bit pagetable setup */
u.ptr = arbitrary_virt_to_machine(ptr).maddr;
u.val = pmd_val_ma(val);
xen_extend_mmu_update(&u);
xen_mc_issue(XEN_LAZY_MMU);
preempt_enable();
}
static void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
trace_xen_mmu_set_pmd(ptr, val);
/* If page is not pinned, we can just update the entry
directly */
if (!xen_page_pinned(ptr)) {
*ptr = val;
return;
}
xen_set_pmd_hyper(ptr, val);
}
/*
* Associate a virtual page frame with a given physical page frame
* and protection flags for that frame.
*/
void __init set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
{
if (HYPERVISOR_update_va_mapping(vaddr, mfn_pte(mfn, flags),
UVMF_INVLPG))
BUG();
}
static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
{
struct mmu_update u;
if (xen_get_lazy_mode() != XEN_LAZY_MMU)
return false;
xen_mc_batch();
u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
u.val = pte_val_ma(pteval);
xen_extend_mmu_update(&u);
xen_mc_issue(XEN_LAZY_MMU);
return true;
}
static inline void __xen_set_pte(pte_t *ptep, pte_t pteval)
{
if (!xen_batched_set_pte(ptep, pteval)) {
/*
* Could call native_set_pte() here and trap and
* emulate the PTE write, but a hypercall is much cheaper.
*/
struct mmu_update u;
u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
u.val = pte_val_ma(pteval);
HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);
}
}
static void xen_set_pte(pte_t *ptep, pte_t pteval)
{
trace_xen_mmu_set_pte(ptep, pteval);
__xen_set_pte(ptep, pteval);
}
pte_t xen_ptep_modify_prot_start(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
/* Just return the pte as-is. We preserve the bits on commit */
trace_xen_mmu_ptep_modify_prot_start(vma->vm_mm, addr, ptep, *ptep);
return *ptep;
}
void xen_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
pte_t *ptep, pte_t pte)
{
struct mmu_update u;
trace_xen_mmu_ptep_modify_prot_commit(vma->vm_mm, addr, ptep, pte);
xen_mc_batch();
u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
u.val = pte_val_ma(pte);
xen_extend_mmu_update(&u);
xen_mc_issue(XEN_LAZY_MMU);
}
/* Assume pteval_t is equivalent to all the other *val_t types. */
static pteval_t pte_mfn_to_pfn(pteval_t val)
{
if (val & _PAGE_PRESENT) {
unsigned long mfn = (val & XEN_PTE_MFN_MASK) >> PAGE_SHIFT;
unsigned long pfn = mfn_to_pfn(mfn);
pteval_t flags = val & PTE_FLAGS_MASK;
if (unlikely(pfn == ~0))
val = flags & ~_PAGE_PRESENT;
else
val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
}
return val;
}
static pteval_t pte_pfn_to_mfn(pteval_t val)
{
if (val & _PAGE_PRESENT) {
unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
pteval_t flags = val & PTE_FLAGS_MASK;
unsigned long mfn;
mfn = __pfn_to_mfn(pfn);
/*
* If there's no mfn for the pfn, then just create an
* empty non-present pte. Unfortunately this loses
* information about the original pfn, so
* pte_mfn_to_pfn is asymmetric.
*/
if (unlikely(mfn == INVALID_P2M_ENTRY)) {
mfn = 0;
flags = 0;
} else
mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT);
val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
}
return val;
}
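/*
* Worked example (illustrative, not part of the original file) of the
* asymmetry noted above: a present pte for a pfn with no mfn (e.g. a
* ballooned-out page, INVALID_P2M_ENTRY) is converted by
* pte_pfn_to_mfn() to the empty value 0; feeding that back through
* pte_mfn_to_pfn() leaves it 0 rather than recovering the original
* pfn, so the round trip is lossy for such pages.
*/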
__visible pteval_t xen_pte_val(pte_t pte)
{
pteval_t pteval = pte.pte;
return pte_mfn_to_pfn(pteval);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
__visible pgdval_t xen_pgd_val(pgd_t pgd)
{
return pte_mfn_to_pfn(pgd.pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);
__visible pte_t xen_make_pte(pteval_t pte)
{
pte = pte_pfn_to_mfn(pte);
return native_make_pte(pte);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);
__visible pgd_t xen_make_pgd(pgdval_t pgd)
{
pgd = pte_pfn_to_mfn(pgd);
return native_make_pgd(pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);
__visible pmdval_t xen_pmd_val(pmd_t pmd)
{
return pte_mfn_to_pfn(pmd.pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);
static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
{
struct mmu_update u;
preempt_disable();
xen_mc_batch();
/* ptr may be ioremapped for 64-bit pagetable setup */
u.ptr = arbitrary_virt_to_machine(ptr).maddr;
u.val = pud_val_ma(val);
xen_extend_mmu_update(&u);
xen_mc_issue(XEN_LAZY_MMU);
preempt_enable();
}
static void xen_set_pud(pud_t *ptr, pud_t val)
{
trace_xen_mmu_set_pud(ptr, val);
/* If page is not pinned, we can just update the entry
directly */
if (!xen_page_pinned(ptr)) {
*ptr = val;
return;
}
xen_set_pud_hyper(ptr, val);
}
__visible pmd_t xen_make_pmd(pmdval_t pmd)
{
pmd = pte_pfn_to_mfn(pmd);
return native_make_pmd(pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
__visible pudval_t xen_pud_val(pud_t pud)
{
return pte_mfn_to_pfn(pud.pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);
__visible pud_t xen_make_pud(pudval_t pud)
{
pud = pte_pfn_to_mfn(pud);
return native_make_pud(pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);
static pgd_t *xen_get_user_pgd(pgd_t *pgd)
{
pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
unsigned offset = pgd - pgd_page;
pgd_t *user_ptr = NULL;
if (offset < pgd_index(USER_LIMIT)) {
struct page *page = virt_to_page(pgd_page);
user_ptr = (pgd_t *)page->private;
if (user_ptr)
user_ptr += offset;
}
return user_ptr;
}
static void __xen_set_p4d_hyper(p4d_t *ptr, p4d_t val)
{
struct mmu_update u;
u.ptr = virt_to_machine(ptr).maddr;
u.val = p4d_val_ma(val);
xen_extend_mmu_update(&u);
}
/*
* Raw hypercall-based set_p4d, intended for in early boot before
* there's a page structure. This implies:
* 1. The only existing pagetable is the kernel's
* 2. It is always pinned
* 3. It has no user pagetable attached to it
*/
static void __init xen_set_p4d_hyper(p4d_t *ptr, p4d_t val)
{
preempt_disable();
xen_mc_batch();
__xen_set_p4d_hyper(ptr, val);
xen_mc_issue(XEN_LAZY_MMU);
preempt_enable();
}
static void xen_set_p4d(p4d_t *ptr, p4d_t val)
{
pgd_t *user_ptr = xen_get_user_pgd((pgd_t *)ptr);
pgd_t pgd_val;
trace_xen_mmu_set_p4d(ptr, (p4d_t *)user_ptr, val);
/* If page is not pinned, we can just update the entry
directly */
if (!xen_page_pinned(ptr)) {
*ptr = val;
if (user_ptr) {
WARN_ON(xen_page_pinned(user_ptr));
pgd_val.pgd = p4d_val_ma(val);
*user_ptr = pgd_val;
}
return;
}
/* If it's pinned, then we can at least batch the kernel and
user updates together. */
xen_mc_batch();
__xen_set_p4d_hyper(ptr, val);
if (user_ptr)
__xen_set_p4d_hyper((p4d_t *)user_ptr, val);
xen_mc_issue(XEN_LAZY_MMU);
}
#if CONFIG_PGTABLE_LEVELS >= 5
__visible p4dval_t xen_p4d_val(p4d_t p4d)
{
return pte_mfn_to_pfn(p4d.p4d);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_p4d_val);
__visible p4d_t xen_make_p4d(p4dval_t p4d)
{
p4d = pte_pfn_to_mfn(p4d);
return native_make_p4d(p4d);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_p4d);
#endif /* CONFIG_PGTABLE_LEVELS >= 5 */
static void xen_pmd_walk(struct mm_struct *mm, pmd_t *pmd,
void (*func)(struct mm_struct *mm, struct page *,
enum pt_level),
bool last, unsigned long limit)
{
int i, nr;
nr = last ? pmd_index(limit) + 1 : PTRS_PER_PMD;
for (i = 0; i < nr; i++) {
if (!pmd_none(pmd[i]))
(*func)(mm, pmd_page(pmd[i]), PT_PTE);
}
}
static void xen_pud_walk(struct mm_struct *mm, pud_t *pud,
void (*func)(struct mm_struct *mm, struct page *,
enum pt_level),
bool last, unsigned long limit)
{
int i, nr;
nr = last ? pud_index(limit) + 1 : PTRS_PER_PUD;
for (i = 0; i < nr; i++) {
pmd_t *pmd;
if (pud_none(pud[i]))
continue;
pmd = pmd_offset(&pud[i], 0);
if (PTRS_PER_PMD > 1)
(*func)(mm, virt_to_page(pmd), PT_PMD);
xen_pmd_walk(mm, pmd, func, last && i == nr - 1, limit);
}
}
static void xen_p4d_walk(struct mm_struct *mm, p4d_t *p4d,
void (*func)(struct mm_struct *mm, struct page *,
enum pt_level),
bool last, unsigned long limit)
{
pud_t *pud;
if (p4d_none(*p4d))
return;
pud = pud_offset(p4d, 0);
if (PTRS_PER_PUD > 1)
(*func)(mm, virt_to_page(pud), PT_PUD);
xen_pud_walk(mm, pud, func, last, limit);
}
/*
* (Yet another) pagetable walker. This one is intended for pinning a
* pagetable. This means that it walks a pagetable and calls the
* callback function on each page it finds making up the page table,
* at every level. It walks the entire pagetable, but it only bothers
* pinning pte pages which are below limit. In the normal case this
* will be STACK_TOP_MAX, but at boot we need to pin up to
* FIXADDR_TOP.
*
* We must skip the Xen hole in the middle of the address space, just after
* the big x86-64 virtual hole.
*/
static void __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
void (*func)(struct mm_struct *mm, struct page *,
enum pt_level),
unsigned long limit)
{
int i, nr;
unsigned hole_low = 0, hole_high = 0;
/* The limit is the last byte to be touched */
limit--;
BUG_ON(limit >= FIXADDR_TOP);
/*
* 64-bit has a great big hole in the middle of the address
* space, which contains the Xen mappings.
*/
hole_low = pgd_index(GUARD_HOLE_BASE_ADDR);
hole_high = pgd_index(GUARD_HOLE_END_ADDR);
nr = pgd_index(limit) + 1;
for (i = 0; i < nr; i++) {
p4d_t *p4d;
if (i >= hole_low && i < hole_high)
continue;
if (pgd_none(pgd[i]))
continue;
p4d = p4d_offset(&pgd[i], 0);
xen_p4d_walk(mm, p4d, func, i == nr - 1, limit);
}
/* Do the top level last, so that the callbacks can use it as
a cue to do final things like tlb flushes. */
(*func)(mm, virt_to_page(pgd), PT_PGD);
}
static void xen_pgd_walk(struct mm_struct *mm,
void (*func)(struct mm_struct *mm, struct page *,
enum pt_level),
unsigned long limit)
{
__xen_pgd_walk(mm, mm->pgd, func, limit);
}
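/*
* Usage sketch (illustrative, not part of the original file): the
* walker applies a callback to every page making up a pagetable, as
* xen_after_bootmem() does below when marking init_mm pinned:
*
*	static void example_cb(struct mm_struct *mm, struct page *page,
*			       enum pt_level level)
*	{
*		(inspect or flag the pagetable page here)
*	}
*
*	xen_pgd_walk(&init_mm, example_cb, FIXADDR_TOP);
*/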
/* If we're using split pte locks, then take the page's lock and
return a pointer to it. Otherwise return NULL. */
static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
{
spinlock_t *ptl = NULL;
#if USE_SPLIT_PTE_PTLOCKS
ptl = ptlock_ptr(page_ptdesc(page));
spin_lock_nest_lock(ptl, &mm->page_table_lock);
#endif
return ptl;
}
static void xen_pte_unlock(void *v)
{
spinlock_t *ptl = v;
spin_unlock(ptl);
}
static void xen_do_pin(unsigned level, unsigned long pfn)
{
struct mmuext_op op;
op.cmd = level;
op.arg1.mfn = pfn_to_mfn(pfn);
xen_extend_mmuext_op(&op);
}
static void xen_pin_page(struct mm_struct *mm, struct page *page,
enum pt_level level)
{
unsigned pgfl = TestSetPagePinned(page);
if (!pgfl) {
void *pt = lowmem_page_address(page);
unsigned long pfn = page_to_pfn(page);
struct multicall_space mcs = __xen_mc_entry(0);
spinlock_t *ptl;
/*
* We need to hold the pagetable lock between the time
* we make the pagetable RO and when we actually pin
* it. If we don't, then other users may come in and
* attempt to update the pagetable by writing it,
* which will fail because the memory is RO but not
* pinned, so Xen won't do the trap'n'emulate.
*
* If we're using split pte locks, we can't hold the
* entire pagetable's worth of locks during the
* traverse, because we may wrap the preempt count (8
* bits). The solution is to mark RO and pin each PTE
* page while holding the lock. This means the number
* of locks we end up holding is never more than a
* batch size (~32 entries, at present).
*
* If we're not using split pte locks, we needn't pin
* the PTE pages independently, because we're
* protected by the overall pagetable lock.
*/
ptl = NULL;
if (level == PT_PTE)
ptl = xen_pte_lock(page, mm);
MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
pfn_pte(pfn, PAGE_KERNEL_RO),
level == PT_PGD ? UVMF_TLB_FLUSH : 0);
if (ptl) {
xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);
/* Queue a deferred unlock for when this batch
is completed. */
xen_mc_callback(xen_pte_unlock, ptl);
}
}
}
/* This is called just after a mm has been created, but it has not
been used yet. We need to make sure that its pagetable is all
read-only, and can be pinned. */
static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
{
pgd_t *user_pgd = xen_get_user_pgd(pgd);
trace_xen_mmu_pgd_pin(mm, pgd);
xen_mc_batch();
__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT);
xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));
if (user_pgd) {
xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
xen_do_pin(MMUEXT_PIN_L4_TABLE,
PFN_DOWN(__pa(user_pgd)));
}
xen_mc_issue(0);
}
static void xen_pgd_pin(struct mm_struct *mm)
{
__xen_pgd_pin(mm, mm->pgd);
}
/*
* On save, we need to pin all pagetables to make sure they get their
* mfns turned into pfns. Search the list for any unpinned pgds and pin
* them (unpinned pgds are not currently in use, probably because the
* process is under construction or destruction).
*
* Expected to be called in stop_machine() ("equivalent to taking
* every spinlock in the system"), so the locking doesn't really
* matter all that much.
*/
void xen_mm_pin_all(void)
{
struct page *page;
spin_lock(&pgd_lock);
list_for_each_entry(page, &pgd_list, lru) {
if (!PagePinned(page)) {
__xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
SetPageSavePinned(page);
}
}
spin_unlock(&pgd_lock);
}
static void __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
enum pt_level level)
{
SetPagePinned(page);
}
/*
* The init_mm pagetable is really pinned as soon as it's created, but
* that's before we have page structures to store the bits. So do all
* the book-keeping now once struct pages for allocated pages are
* initialized. This happens only after memblock_free_all() is called.
*/
static void __init xen_after_bootmem(void)
{
static_branch_enable(&xen_struct_pages_ready);
#ifdef CONFIG_X86_VSYSCALL_EMULATION
SetPagePinned(virt_to_page(level3_user_vsyscall));
#endif
xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
}
static void xen_unpin_page(struct mm_struct *mm, struct page *page,
enum pt_level level)
{
unsigned pgfl = TestClearPagePinned(page);
if (pgfl) {
void *pt = lowmem_page_address(page);
unsigned long pfn = page_to_pfn(page);
spinlock_t *ptl = NULL;
struct multicall_space mcs;
/*
* Do the converse to pin_page. If we're using split
* pte locks, we must be holding the lock while
* the pte page is unpinned but still RO to prevent
* concurrent updates from seeing it in this
* partially-pinned state.
*/
if (level == PT_PTE) {
ptl = xen_pte_lock(page, mm);
if (ptl)
xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
}
mcs = __xen_mc_entry(0);
MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
pfn_pte(pfn, PAGE_KERNEL),
level == PT_PGD ? UVMF_TLB_FLUSH : 0);
if (ptl) {
/* unlock when batch completed */
xen_mc_callback(xen_pte_unlock, ptl);
}
}
}
/* Release a pagetable's pages back as normal RW */
static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
{
pgd_t *user_pgd = xen_get_user_pgd(pgd);
trace_xen_mmu_pgd_unpin(mm, pgd);
xen_mc_batch();
xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
if (user_pgd) {
xen_do_pin(MMUEXT_UNPIN_TABLE,
PFN_DOWN(__pa(user_pgd)));
xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
}
__xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);
xen_mc_issue(0);
}
static void xen_pgd_unpin(struct mm_struct *mm)
{
__xen_pgd_unpin(mm, mm->pgd);
}
/*
* On resume, undo any pinning done at save, so that the rest of the
* kernel doesn't see any unexpected pinned pagetables.
*/
void xen_mm_unpin_all(void)
{
struct page *page;
spin_lock(&pgd_lock);
list_for_each_entry(page, &pgd_list, lru) {
if (PageSavePinned(page)) {
BUG_ON(!PagePinned(page));
__xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
ClearPageSavePinned(page);
}
}
spin_unlock(&pgd_lock);
}
static void xen_enter_mmap(struct mm_struct *mm)
{
spin_lock(&mm->page_table_lock);
xen_pgd_pin(mm);
spin_unlock(&mm->page_table_lock);
}
static void drop_mm_ref_this_cpu(void *info)
{
struct mm_struct *mm = info;
if (this_cpu_read(cpu_tlbstate.loaded_mm) == mm)
leave_mm(smp_processor_id());
/*
* If this cpu still has a stale cr3 reference, then make sure
* it has been flushed.
*/
if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd))
xen_mc_flush();
}
#ifdef CONFIG_SMP
/*
* Another cpu may still have its %cr3 pointing at the pagetable, so
* we need to repoint it somewhere else before we can unpin it.
*/
static void xen_drop_mm_ref(struct mm_struct *mm)
{
cpumask_var_t mask;
unsigned cpu;
drop_mm_ref_this_cpu(mm);
/* Get the "official" set of cpus referring to our pagetable. */
if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
for_each_online_cpu(cpu) {
if (per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
continue;
smp_call_function_single(cpu, drop_mm_ref_this_cpu, mm, 1);
}
return;
}
/*
* It's possible that a vcpu may have a stale reference to our
* cr3, because it's in lazy mode and hasn't yet flushed
* its set of pending hypercalls. In this case, we can
* look at its actual current cr3 value, and force it to flush
* if needed.
*/
cpumask_clear(mask);
for_each_online_cpu(cpu) {
if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
cpumask_set_cpu(cpu, mask);
}
smp_call_function_many(mask, drop_mm_ref_this_cpu, mm, 1);
free_cpumask_var(mask);
}
#else
static void xen_drop_mm_ref(struct mm_struct *mm)
{
drop_mm_ref_this_cpu(mm);
}
#endif
/*
* While a process runs, Xen pins its pagetables, which means that the
* hypervisor forces them to be read-only and controls all updates
* to them. This means that all pagetable updates have to go via the
* hypervisor, which is moderately expensive.
*
* Since we're pulling the pagetable down, we switch to use init_mm,
* unpin the old process pagetable and mark it all read-write, which
* allows further operations on it to be simple memory accesses.
*
* The only subtle point is that another CPU may still be using the
* pagetable because of lazy tlb flushing. This means we need to
* switch all CPUs off this pagetable before we can unpin it.
*/
static void xen_exit_mmap(struct mm_struct *mm)
{
get_cpu(); /* make sure we don't move around */
xen_drop_mm_ref(mm);
put_cpu();
spin_lock(&mm->page_table_lock);
/* pgd may not be pinned in the error exit path of execve */
if (xen_page_pinned(mm->pgd))
xen_pgd_unpin(mm);
spin_unlock(&mm->page_table_lock);
}
static void xen_post_allocator_init(void);
static void __init pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
{
struct mmuext_op op;
op.cmd = cmd;
op.arg1.mfn = pfn_to_mfn(pfn);
if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
BUG();
}
static void __init xen_cleanhighmap(unsigned long vaddr,
unsigned long vaddr_end)
{
unsigned long kernel_end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
pmd_t *pmd = level2_kernel_pgt + pmd_index(vaddr);
/* NOTE: The loop is more greedy than the cleanup_highmap variant.
* We include the PMD passed in on _both_ boundaries. */
for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PTRS_PER_PMD));
pmd++, vaddr += PMD_SIZE) {
if (pmd_none(*pmd))
continue;
if (vaddr < (unsigned long) _text || vaddr > kernel_end)
set_pmd(pmd, __pmd(0));
}
/* In case we did something silly, we should crash in this function
* instead of somewhere later, which would be confusing. */
xen_mc_flush();
}
/*
* Make a page range writeable and free it.
*/
static void __init xen_free_ro_pages(unsigned long paddr, unsigned long size)
{
void *vaddr = __va(paddr);
void *vaddr_end = vaddr + size;
for (; vaddr < vaddr_end; vaddr += PAGE_SIZE)
make_lowmem_page_readwrite(vaddr);
memblock_phys_free(paddr, size);
}
static void __init xen_cleanmfnmap_free_pgtbl(void *pgtbl, bool unpin)
{
unsigned long pa = __pa(pgtbl) & PHYSICAL_PAGE_MASK;
if (unpin)
pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(pa));
ClearPagePinned(virt_to_page(__va(pa)));
xen_free_ro_pages(pa, PAGE_SIZE);
}
static void __init xen_cleanmfnmap_pmd(pmd_t *pmd, bool unpin)
{
unsigned long pa;
pte_t *pte_tbl;
int i;
if (pmd_large(*pmd)) {
pa = pmd_val(*pmd) & PHYSICAL_PAGE_MASK;
xen_free_ro_pages(pa, PMD_SIZE);
return;
}
pte_tbl = pte_offset_kernel(pmd, 0);
for (i = 0; i < PTRS_PER_PTE; i++) {
if (pte_none(pte_tbl[i]))
continue;
pa = pte_pfn(pte_tbl[i]) << PAGE_SHIFT;
xen_free_ro_pages(pa, PAGE_SIZE);
}
set_pmd(pmd, __pmd(0));
xen_cleanmfnmap_free_pgtbl(pte_tbl, unpin);
}
static void __init xen_cleanmfnmap_pud(pud_t *pud, bool unpin)
{
unsigned long pa;
pmd_t *pmd_tbl;
int i;
if (pud_large(*pud)) {
pa = pud_val(*pud) & PHYSICAL_PAGE_MASK;
xen_free_ro_pages(pa, PUD_SIZE);
return;
}
pmd_tbl = pmd_offset(pud, 0);
for (i = 0; i < PTRS_PER_PMD; i++) {
if (pmd_none(pmd_tbl[i]))
continue;
xen_cleanmfnmap_pmd(pmd_tbl + i, unpin);
}
set_pud(pud, __pud(0));
xen_cleanmfnmap_free_pgtbl(pmd_tbl, unpin);
}
static void __init xen_cleanmfnmap_p4d(p4d_t *p4d, bool unpin)
{
unsigned long pa;
pud_t *pud_tbl;
int i;
if (p4d_large(*p4d)) {
pa = p4d_val(*p4d) & PHYSICAL_PAGE_MASK;
xen_free_ro_pages(pa, P4D_SIZE);
return;
}
pud_tbl = pud_offset(p4d, 0);
for (i = 0; i < PTRS_PER_PUD; i++) {
if (pud_none(pud_tbl[i]))
continue;
xen_cleanmfnmap_pud(pud_tbl + i, unpin);
}
set_p4d(p4d, __p4d(0));
xen_cleanmfnmap_free_pgtbl(pud_tbl, unpin);
}
/*
* Since it is well isolated we can (and since it is perhaps large we should)
* also free the page tables mapping the initial P->M table.
*/
static void __init xen_cleanmfnmap(unsigned long vaddr)
{
pgd_t *pgd;
p4d_t *p4d;
bool unpin;
unpin = (vaddr == 2 * PGDIR_SIZE);
vaddr &= PMD_MASK;
pgd = pgd_offset_k(vaddr);
p4d = p4d_offset(pgd, 0);
if (!p4d_none(*p4d))
xen_cleanmfnmap_p4d(p4d, unpin);
}
static void __init xen_pagetable_p2m_free(void)
{
unsigned long size;
unsigned long addr;
size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
/* No memory or already called. */
if ((unsigned long)xen_p2m_addr == xen_start_info->mfn_list)
return;
/* using __ka address and sticking INVALID_P2M_ENTRY! */
memset((void *)xen_start_info->mfn_list, 0xff, size);
addr = xen_start_info->mfn_list;
/*
* We could be in __ka space.
* We round up to the PMD, which means that if anybody at this stage is
* using the __ka address of xen_start_info or
* xen_start_info->shared_info they are going to crash. Fortunately
* we have already revectored in xen_setup_kernel_pagetable.
*/
size = roundup(size, PMD_SIZE);
if (addr >= __START_KERNEL_map) {
xen_cleanhighmap(addr, addr + size);
size = PAGE_ALIGN(xen_start_info->nr_pages *
sizeof(unsigned long));
memblock_free((void *)addr, size);
} else {
xen_cleanmfnmap(addr);
}
}
static void __init xen_pagetable_cleanhighmap(void)
{
unsigned long size;
unsigned long addr;
/* At this stage, cleanup_highmap has already cleaned __ka space
* from _brk_limit way up to the max_pfn_mapped (which is the end of
* the ramdisk). We continue on, erasing PMD entries that point to page
* tables - do note that they are accessible at this stage via __va.
* As Xen is aligning the memory end to a 4MB boundary, for good
* measure we also round up to PMD_SIZE * 2 - which means that if
* anybody is using a __ka address for the initial boot-stack - and tries
* to use it - they are going to crash. The xen_start_info has been
* taken care of already in xen_setup_kernel_pagetable. */
addr = xen_start_info->pt_base;
size = xen_start_info->nr_pt_frames * PAGE_SIZE;
xen_cleanhighmap(addr, roundup(addr + size, PMD_SIZE * 2));
xen_start_info->pt_base = (unsigned long)__va(__pa(xen_start_info->pt_base));
}
static void __init xen_pagetable_p2m_setup(void)
{
xen_vmalloc_p2m_tree();
xen_pagetable_p2m_free();
xen_pagetable_cleanhighmap();
/* And revector! Bye bye old array */
xen_start_info->mfn_list = (unsigned long)xen_p2m_addr;
}
static void __init xen_pagetable_init(void)
{
/*
* The majority of further PTE writes is to pagetables already
* announced as such to Xen. Hence it is more efficient to use
* hypercalls for these updates.
*/
pv_ops.mmu.set_pte = __xen_set_pte;
paging_init();
xen_post_allocator_init();
xen_pagetable_p2m_setup();
/* Allocate and initialize top and mid mfn levels for p2m structure */
xen_build_mfn_list_list();
/* Remap memory freed due to conflicts with E820 map */
xen_remap_memory();
xen_setup_mfn_list_list();
}
static noinstr void xen_write_cr2(unsigned long cr2)
{
this_cpu_read(xen_vcpu)->arch.cr2 = cr2;
}
static noinline void xen_flush_tlb(void)
{
struct mmuext_op *op;
struct multicall_space mcs;
preempt_disable();
mcs = xen_mc_entry(sizeof(*op));
op = mcs.args;
op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
xen_mc_issue(XEN_LAZY_MMU);
preempt_enable();
}
static void xen_flush_tlb_one_user(unsigned long addr)
{
struct mmuext_op *op;
struct multicall_space mcs;
trace_xen_mmu_flush_tlb_one_user(addr);
preempt_disable();
mcs = xen_mc_entry(sizeof(*op));
op = mcs.args;
op->cmd = MMUEXT_INVLPG_LOCAL;
op->arg1.linear_addr = addr & PAGE_MASK;
MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
xen_mc_issue(XEN_LAZY_MMU);
preempt_enable();
}
static void xen_flush_tlb_multi(const struct cpumask *cpus,
const struct flush_tlb_info *info)
{
struct {
struct mmuext_op op;
DECLARE_BITMAP(mask, NR_CPUS);
} *args;
struct multicall_space mcs;
const size_t mc_entry_size = sizeof(args->op) +
sizeof(args->mask[0]) * BITS_TO_LONGS(num_possible_cpus());
trace_xen_mmu_flush_tlb_multi(cpus, info->mm, info->start, info->end);
if (cpumask_empty(cpus))
return; /* nothing to do */
mcs = xen_mc_entry(mc_entry_size);
args = mcs.args;
args->op.arg2.vcpumask = to_cpumask(args->mask);
/* Remove any offline CPUs */
cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
if (info->end != TLB_FLUSH_ALL &&
(info->end - info->start) <= PAGE_SIZE) {
args->op.cmd = MMUEXT_INVLPG_MULTI;
args->op.arg1.linear_addr = info->start;
}
MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
xen_mc_issue(XEN_LAZY_MMU);
}
static unsigned long xen_read_cr3(void)
{
return this_cpu_read(xen_cr3);
}
static void set_current_cr3(void *v)
{
this_cpu_write(xen_current_cr3, (unsigned long)v);
}
static void __xen_write_cr3(bool kernel, unsigned long cr3)
{
struct mmuext_op op;
unsigned long mfn;
trace_xen_mmu_write_cr3(kernel, cr3);
if (cr3)
mfn = pfn_to_mfn(PFN_DOWN(cr3));
else
mfn = 0;
WARN_ON(mfn == 0 && kernel);
op.cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
op.arg1.mfn = mfn;
xen_extend_mmuext_op(&op);
if (kernel) {
this_cpu_write(xen_cr3, cr3);
/* Update xen_current_cr3 once the batch has actually
been submitted. */
xen_mc_callback(set_current_cr3, (void *)cr3);
}
}
static void xen_write_cr3(unsigned long cr3)
{
pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
BUG_ON(preemptible());
xen_mc_batch(); /* disables interrupts */
/* Update while interrupts are disabled, so it's atomic with
respect to IPIs */
this_cpu_write(xen_cr3, cr3);
__xen_write_cr3(true, cr3);
if (user_pgd)
__xen_write_cr3(false, __pa(user_pgd));
else
__xen_write_cr3(false, 0);
xen_mc_issue(XEN_LAZY_CPU); /* interrupts restored */
}
/*
* At the start of the day - when Xen launches a guest, it has already
* built pagetables for the guest. We diligently look over them
* in xen_setup_kernel_pagetable and graft them as appropriate into the
* init_top_pgt and its friends. Then when we are happy we load
* the new init_top_pgt - and continue on.
*
* The generic code starts (start_kernel) and 'init_mem_mapping' sets
* up the rest of the pagetables. When it has completed it loads the cr3.
* N.B. that baremetal would start at 'start_kernel' (and the early
* #PF handler would create bootstrap pagetables) - so we are running
* with the same assumptions as what to do when write_cr3 is executed
* at this point.
*
* Since there are no user-page tables at all, we have two variants
* of xen_write_cr3 - the early bootup (this one), and the late one
* (xen_write_cr3). The reason we have to do that is that in 64-bit
* the Linux kernel and user-space are both in ring 3 while the
* hypervisor is in ring 0.
*/
static void __init xen_write_cr3_init(unsigned long cr3)
{
BUG_ON(preemptible());
xen_mc_batch(); /* disables interrupts */
/* Update while interrupts are disabled, so it's atomic with
respect to IPIs */
this_cpu_write(xen_cr3, cr3);
__xen_write_cr3(true, cr3);
xen_mc_issue(XEN_LAZY_CPU); /* interrupts restored */
}
static int xen_pgd_alloc(struct mm_struct *mm)
{
pgd_t *pgd = mm->pgd;
struct page *page = virt_to_page(pgd);
pgd_t *user_pgd;
int ret = -ENOMEM;
BUG_ON(PagePinned(virt_to_page(pgd)));
BUG_ON(page->private != 0);
user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
page->private = (unsigned long)user_pgd;
if (user_pgd != NULL) {
#ifdef CONFIG_X86_VSYSCALL_EMULATION
user_pgd[pgd_index(VSYSCALL_ADDR)] =
__pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
#endif
ret = 0;
}
BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
return ret;
}
static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
pgd_t *user_pgd = xen_get_user_pgd(pgd);
if (user_pgd)
free_page((unsigned long)user_pgd);
}
/*
* Init-time set_pte while constructing initial pagetables, which
* doesn't allow RO page table pages to be remapped RW.
*
* If there is no MFN for this PFN then this page is initially
* ballooned out so clear the PTE (as in decrease_reservation() in
* drivers/xen/balloon.c).
*
* Many of these PTE updates are done on unpinned and writable pages
* and doing a hypercall for these is unnecessary and expensive. At
* this point it is rarely possible to tell if a page is pinned, so
* mostly write the PTE directly and rely on Xen trapping and
* emulating any updates as necessary.
*/
static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
{
if (unlikely(is_early_ioremap_ptep(ptep)))
__xen_set_pte(ptep, pte);
else
native_set_pte(ptep, pte);
}
__visible pte_t xen_make_pte_init(pteval_t pte)
{
unsigned long pfn;
/*
* Pages belonging to the initial p2m list mapped outside the default
* address range must be mapped read-only. This region contains the
* page tables for mapping the p2m list, too, and page tables MUST be
* mapped read-only.
*/
pfn = (pte & PTE_PFN_MASK) >> PAGE_SHIFT;
if (xen_start_info->mfn_list < __START_KERNEL_map &&
pfn >= xen_start_info->first_p2m_pfn &&
pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames)
pte &= ~_PAGE_RW;
pte = pte_pfn_to_mfn(pte);
return native_make_pte(pte);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_init);
/* Early in boot, while setting up the initial pagetable, assume
everything is pinned. */
static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
{
#ifdef CONFIG_FLATMEM
BUG_ON(mem_map); /* should only be used early */
#endif
make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
}
/* Used for pmd and pud */
static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
{
#ifdef CONFIG_FLATMEM
BUG_ON(mem_map); /* should only be used early */
#endif
make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
}
/* Early release_pte assumes that all pts are pinned, since there's
only init_mm and anything attached to that is pinned. */
static void __init xen_release_pte_init(unsigned long pfn)
{
pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
}
static void __init xen_release_pmd_init(unsigned long pfn)
{
make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
}
static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
{
struct multicall_space mcs;
struct mmuext_op *op;
mcs = __xen_mc_entry(sizeof(*op));
op = mcs.args;
op->cmd = cmd;
op->arg1.mfn = pfn_to_mfn(pfn);
MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
}
static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot)
{
struct multicall_space mcs;
unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT);
mcs = __xen_mc_entry(0);
MULTI_update_va_mapping(mcs.mc, (unsigned long)addr,
pfn_pte(pfn, prot), 0);
}
/* This needs to make sure the new pte page is pinned iff it's being
attached to a pinned pagetable. */
static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
unsigned level)
{
bool pinned = xen_page_pinned(mm->pgd);
trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned);
if (pinned) {
struct page *page = pfn_to_page(pfn);
pinned = false;
if (static_branch_likely(&xen_struct_pages_ready)) {
pinned = PagePinned(page);
SetPagePinned(page);
}
xen_mc_batch();
__set_pfn_prot(pfn, PAGE_KERNEL_RO);
if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS && !pinned)
__pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
xen_mc_issue(XEN_LAZY_MMU);
}
}
static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
xen_alloc_ptpage(mm, pfn, PT_PTE);
}
static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
xen_alloc_ptpage(mm, pfn, PT_PMD);
}
/* This should never happen until we're OK to use struct page */
static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
{
struct page *page = pfn_to_page(pfn);
bool pinned = PagePinned(page);
trace_xen_mmu_release_ptpage(pfn, level, pinned);
if (pinned) {
xen_mc_batch();
if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
__pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
__set_pfn_prot(pfn, PAGE_KERNEL);
xen_mc_issue(XEN_LAZY_MMU);
ClearPagePinned(page);
}
}
static void xen_release_pte(unsigned long pfn)
{
xen_release_ptpage(pfn, PT_PTE);
}
static void xen_release_pmd(unsigned long pfn)
{
xen_release_ptpage(pfn, PT_PMD);
}
static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
xen_alloc_ptpage(mm, pfn, PT_PUD);
}
static void xen_release_pud(unsigned long pfn)
{
xen_release_ptpage(pfn, PT_PUD);
}
/*
* Like __va(), but returns the address in the kernel mapping (which is
* all we have until the physical memory mapping has been set up).
*/
static void * __init __ka(phys_addr_t paddr)
{
return (void *)(paddr + __START_KERNEL_map);
}
/* Convert a machine address to physical address */
static unsigned long __init m2p(phys_addr_t maddr)
{
phys_addr_t paddr;
maddr &= XEN_PTE_MFN_MASK;
paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;
return paddr;
}
/* Convert a machine address to kernel virtual */
static void * __init m2v(phys_addr_t maddr)
{
return __ka(m2p(maddr));
}
/* Set the page permissions on identity-mapped pages */
static void __init set_page_prot_flags(void *addr, pgprot_t prot,
unsigned long flags)
{
unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
pte_t pte = pfn_pte(pfn, prot);
if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags))
BUG();
}
static void __init set_page_prot(void *addr, pgprot_t prot)
{
return set_page_prot_flags(addr, prot, UVMF_NONE);
}
void __init xen_setup_machphys_mapping(void)
{
struct xen_machphys_mapping mapping;
if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
machine_to_phys_mapping = (unsigned long *)mapping.v_start;
machine_to_phys_nr = mapping.max_mfn + 1;
} else {
machine_to_phys_nr = MACH2PHYS_NR_ENTRIES;
}
}
static void __init convert_pfn_mfn(void *v)
{
pte_t *pte = v;
int i;
/* All levels are converted the same way, so just treat them
as ptes. */
for (i = 0; i < PTRS_PER_PTE; i++)
pte[i] = xen_make_pte(pte[i].pte);
}
static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
unsigned long addr)
{
if (*pt_base == PFN_DOWN(__pa(addr))) {
set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
clear_page((void *)addr);
(*pt_base)++;
}
if (*pt_end == PFN_DOWN(__pa(addr))) {
set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
clear_page((void *)addr);
(*pt_end)--;
}
}
/*
* Set up the initial kernel pagetable.
*
* We can construct this by grafting the Xen provided pagetable into
* head_64.S's preconstructed pagetables. We copy the Xen L2's into
* level2_ident_pgt, and level2_kernel_pgt. This means that only the
* kernel has a physical mapping to start with - but that's enough to
* get __va working. We need to fill in the rest of the physical
* mapping once some sort of allocator has been set up.
*/
void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
{
pud_t *l3;
pmd_t *l2;
unsigned long addr[3];
unsigned long pt_base, pt_end;
unsigned i;
/* max_pfn_mapped is the last pfn mapped in the initial memory
* mappings. Considering that on Xen after the kernel mappings we
* have the mappings of some pages that don't exist in pfn space, we
* set max_pfn_mapped to the last real pfn mapped. */
if (xen_start_info->mfn_list < __START_KERNEL_map)
max_pfn_mapped = xen_start_info->first_p2m_pfn;
else
max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
pt_base = PFN_DOWN(__pa(xen_start_info->pt_base));
pt_end = pt_base + xen_start_info->nr_pt_frames;
/* Zap identity mapping */
init_top_pgt[0] = __pgd(0);
/* Pre-constructed entries are in pfn, so convert to mfn */
/* L4[273] -> level3_ident_pgt */
/* L4[511] -> level3_kernel_pgt */
convert_pfn_mfn(init_top_pgt);
/* L3_i[0] -> level2_ident_pgt */
convert_pfn_mfn(level3_ident_pgt);
/* L3_k[510] -> level2_kernel_pgt */
/* L3_k[511] -> level2_fixmap_pgt */
convert_pfn_mfn(level3_kernel_pgt);
/* L3_k[511][508-FIXMAP_PMD_NUM ... 507] -> level1_fixmap_pgt */
convert_pfn_mfn(level2_fixmap_pgt);
/* We get [511][511] and have Xen's version of level2_kernel_pgt */
l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
addr[0] = (unsigned long)pgd;
addr[1] = (unsigned long)l3;
addr[2] = (unsigned long)l2;
/* Graft it onto L4[273][0]. Note that we are creating an aliasing problem:
* Both L4[273][0] and L4[511][510] have entries that point to the same
* L2 (PMD) tables. Meaning that if you modify it in __va space
* it will be also modified in the __ka space! (But if you just
* modify the PMD table to point to other PTE's or none, then you
* are OK - which is what cleanup_highmap does) */
copy_page(level2_ident_pgt, l2);
/* Graft it onto L4[511][510] */
copy_page(level2_kernel_pgt, l2);
/*
* Zap execute permission from the ident map. Due to the sharing of
* L1 entries we need to do this in the L2.
*/
if (__supported_pte_mask & _PAGE_NX) {
for (i = 0; i < PTRS_PER_PMD; ++i) {
if (pmd_none(level2_ident_pgt[i]))
continue;
level2_ident_pgt[i] = pmd_set_flags(level2_ident_pgt[i], _PAGE_NX);
}
}
/* Copy the initial P->M table mappings if necessary. */
i = pgd_index(xen_start_info->mfn_list);
if (i && i < pgd_index(__START_KERNEL_map))
init_top_pgt[i] = ((pgd_t *)xen_start_info->pt_base)[i];
/* Make pagetable pieces RO */
set_page_prot(init_top_pgt, PAGE_KERNEL_RO);
set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
for (i = 0; i < FIXMAP_PMD_NUM; i++) {
set_page_prot(level1_fixmap_pgt + i * PTRS_PER_PTE,
PAGE_KERNEL_RO);
}
/* Pin down new L4 */
pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
PFN_DOWN(__pa_symbol(init_top_pgt)));
/* Unpin Xen-provided one */
pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
#ifdef CONFIG_X86_VSYSCALL_EMULATION
/* Pin user vsyscall L3 */
set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE,
PFN_DOWN(__pa_symbol(level3_user_vsyscall)));
#endif
/*
* At this stage there can be no user pgd, and no page structure to
* attach it to, so make sure we just set kernel pgd.
*/
xen_mc_batch();
__xen_write_cr3(true, __pa(init_top_pgt));
xen_mc_issue(XEN_LAZY_CPU);
	/* We can't easily rip out the L3 and L2 pages, as the Xen pagetables
	 * are laid out this way for the initial domain: [L4], [L1], [L2],
	 * [L3], [L1], [L1] ... For guests started by the toolstack they are
	 * in the order [L4], [L3], [L2], [L1], [L1], ... So for dom0 we can
	 * only rip out the [L4] (pgd), but for guests we shave off three
	 * pages.
	 */
for (i = 0; i < ARRAY_SIZE(addr); i++)
check_pt_base(&pt_base, &pt_end, addr[i]);
	/* Record the (up to three pages smaller) Xen pagetable we still use */
xen_pt_base = PFN_PHYS(pt_base);
xen_pt_size = (pt_end - pt_base) * PAGE_SIZE;
memblock_reserve(xen_pt_base, xen_pt_size);
/* Revector the xen_start_info */
xen_start_info = (struct start_info *)__va(__pa(xen_start_info));
}
/*
* Read a value from a physical address.
*/
static unsigned long __init xen_read_phys_ulong(phys_addr_t addr)
{
unsigned long *vaddr;
unsigned long val;
vaddr = early_memremap_ro(addr, sizeof(val));
val = *vaddr;
early_memunmap(vaddr, sizeof(val));
return val;
}
/*
* Translate a virtual address to a physical one without relying on mapped
* page tables. Don't rely on big pages being aligned in (guest) physical
* space!
*/
static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
{
phys_addr_t pa;
pgd_t pgd;
pud_t pud;
pmd_t pmd;
pte_t pte;
pa = read_cr3_pa();
pgd = native_make_pgd(xen_read_phys_ulong(pa + pgd_index(vaddr) *
sizeof(pgd)));
if (!pgd_present(pgd))
return 0;
pa = pgd_val(pgd) & PTE_PFN_MASK;
pud = native_make_pud(xen_read_phys_ulong(pa + pud_index(vaddr) *
sizeof(pud)));
if (!pud_present(pud))
return 0;
pa = pud_val(pud) & PTE_PFN_MASK;
if (pud_large(pud))
return pa + (vaddr & ~PUD_MASK);
pmd = native_make_pmd(xen_read_phys_ulong(pa + pmd_index(vaddr) *
sizeof(pmd)));
if (!pmd_present(pmd))
return 0;
pa = pmd_val(pmd) & PTE_PFN_MASK;
if (pmd_large(pmd))
return pa + (vaddr & ~PMD_MASK);
pte = native_make_pte(xen_read_phys_ulong(pa + pte_index(vaddr) *
sizeof(pte)));
if (!pte_present(pte))
return 0;
pa = pte_pfn(pte) << PAGE_SHIFT;
return pa | (vaddr & ~PAGE_MASK);
}
/*
* Find a new area for the hypervisor supplied p2m list and relocate the p2m to
* this area.
*/
void __init xen_relocate_p2m(void)
{
phys_addr_t size, new_area, pt_phys, pmd_phys, pud_phys;
unsigned long p2m_pfn, p2m_pfn_end, n_frames, pfn, pfn_end;
int n_pte, n_pt, n_pmd, n_pud, idx_pte, idx_pt, idx_pmd, idx_pud;
pte_t *pt;
pmd_t *pmd;
pud_t *pud;
pgd_t *pgd;
unsigned long *new_p2m;
size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
n_pte = roundup(size, PAGE_SIZE) >> PAGE_SHIFT;
n_pt = roundup(size, PMD_SIZE) >> PMD_SHIFT;
n_pmd = roundup(size, PUD_SIZE) >> PUD_SHIFT;
n_pud = roundup(size, P4D_SIZE) >> P4D_SHIFT;
n_frames = n_pte + n_pt + n_pmd + n_pud;
new_area = xen_find_free_area(PFN_PHYS(n_frames));
if (!new_area) {
xen_raw_console_write("Can't find new memory area for p2m needed due to E820 map conflict\n");
BUG();
}
/*
 * Set up the page tables for addressing the new p2m list.
 * We have asked the hypervisor to map the p2m list at the user address
 * PUD_SIZE. It may have done so, or it may have used a kernel space
 * address depending on the Xen version.
 * To avoid any possible virtual address collision, just use
 * 2 * PGDIR_SIZE for the new area, which is where the loop below maps it.
*/
pud_phys = new_area;
pmd_phys = pud_phys + PFN_PHYS(n_pud);
pt_phys = pmd_phys + PFN_PHYS(n_pmd);
p2m_pfn = PFN_DOWN(pt_phys) + n_pt;
pgd = __va(read_cr3_pa());
new_p2m = (unsigned long *)(2 * PGDIR_SIZE);
for (idx_pud = 0; idx_pud < n_pud; idx_pud++) {
pud = early_memremap(pud_phys, PAGE_SIZE);
clear_page(pud);
for (idx_pmd = 0; idx_pmd < min(n_pmd, PTRS_PER_PUD);
idx_pmd++) {
pmd = early_memremap(pmd_phys, PAGE_SIZE);
clear_page(pmd);
for (idx_pt = 0; idx_pt < min(n_pt, PTRS_PER_PMD);
idx_pt++) {
pt = early_memremap(pt_phys, PAGE_SIZE);
clear_page(pt);
for (idx_pte = 0;
idx_pte < min(n_pte, PTRS_PER_PTE);
idx_pte++) {
pt[idx_pte] = pfn_pte(p2m_pfn,
PAGE_KERNEL);
p2m_pfn++;
}
n_pte -= PTRS_PER_PTE;
early_memunmap(pt, PAGE_SIZE);
make_lowmem_page_readonly(__va(pt_phys));
pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE,
PFN_DOWN(pt_phys));
pmd[idx_pt] = __pmd(_PAGE_TABLE | pt_phys);
pt_phys += PAGE_SIZE;
}
n_pt -= PTRS_PER_PMD;
early_memunmap(pmd, PAGE_SIZE);
make_lowmem_page_readonly(__va(pmd_phys));
pin_pagetable_pfn(MMUEXT_PIN_L2_TABLE,
PFN_DOWN(pmd_phys));
pud[idx_pmd] = __pud(_PAGE_TABLE | pmd_phys);
pmd_phys += PAGE_SIZE;
}
n_pmd -= PTRS_PER_PUD;
early_memunmap(pud, PAGE_SIZE);
make_lowmem_page_readonly(__va(pud_phys));
pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(pud_phys));
set_pgd(pgd + 2 + idx_pud, __pgd(_PAGE_TABLE | pud_phys));
pud_phys += PAGE_SIZE;
}
/* Now copy the old p2m info to the new area. */
memcpy(new_p2m, xen_p2m_addr, size);
xen_p2m_addr = new_p2m;
/* Release the old p2m list and set new list info. */
p2m_pfn = PFN_DOWN(xen_early_virt_to_phys(xen_start_info->mfn_list));
BUG_ON(!p2m_pfn);
p2m_pfn_end = p2m_pfn + PFN_DOWN(size);
if (xen_start_info->mfn_list < __START_KERNEL_map) {
pfn = xen_start_info->first_p2m_pfn;
pfn_end = xen_start_info->first_p2m_pfn +
xen_start_info->nr_p2m_frames;
set_pgd(pgd + 1, __pgd(0));
} else {
pfn = p2m_pfn;
pfn_end = p2m_pfn_end;
}
memblock_phys_free(PFN_PHYS(pfn), PAGE_SIZE * (pfn_end - pfn));
while (pfn < pfn_end) {
if (pfn == p2m_pfn) {
pfn = p2m_pfn_end;
continue;
}
make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
pfn++;
}
xen_start_info->mfn_list = (unsigned long)xen_p2m_addr;
xen_start_info->first_p2m_pfn = PFN_DOWN(new_area);
xen_start_info->nr_p2m_frames = n_frames;
}
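/*
 * Worked sizing example for the relocation above (illustrative, assuming
 * 4-level paging where P4D_SIZE == PGDIR_SIZE == 512 GiB): for a domain
 * with nr_pages = 1048576 (4 GiB of RAM),
 *
 *   size     = 1048576 * sizeof(unsigned long)   = 8 MiB of p2m data
 *   n_pte    = 8 MiB >> PAGE_SHIFT (12)          = 2048 data frames
 *   n_pt     = roundup(8 MiB, PMD_SIZE) >> 21    = 4 page tables
 *   n_pmd    = roundup(8 MiB, PUD_SIZE) >> 30    = 1 pmd page
 *   n_pud    = roundup(8 MiB, P4D_SIZE) >> 39    = 1 pud page
 *   n_frames = 2048 + 4 + 1 + 1                  = 2054 frames (~8.02 MiB)
 *
 * so the area found by xen_find_free_area() only slightly exceeds the
 * raw p2m list itself.
 */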
void __init xen_reserve_special_pages(void)
{
phys_addr_t paddr;
memblock_reserve(__pa(xen_start_info), PAGE_SIZE);
if (xen_start_info->store_mfn) {
paddr = PFN_PHYS(mfn_to_pfn(xen_start_info->store_mfn));
memblock_reserve(paddr, PAGE_SIZE);
}
if (!xen_initial_domain()) {
paddr = PFN_PHYS(mfn_to_pfn(xen_start_info->console.domU.mfn));
memblock_reserve(paddr, PAGE_SIZE);
}
}
void __init xen_pt_check_e820(void)
{
if (xen_is_e820_reserved(xen_pt_base, xen_pt_size)) {
xen_raw_console_write("Xen hypervisor allocated page table memory conflicts with E820 map\n");
BUG();
}
}
static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;
static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
{
pte_t pte;
unsigned long vaddr;
phys >>= PAGE_SHIFT;
switch (idx) {
case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
#ifdef CONFIG_X86_VSYSCALL_EMULATION
case VSYSCALL_PAGE:
#endif
/* All local page mappings */
pte = pfn_pte(phys, prot);
break;
#ifdef CONFIG_X86_LOCAL_APIC
case FIX_APIC_BASE: /* maps dummy local APIC */
pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
break;
#endif
#ifdef CONFIG_X86_IO_APIC
case FIX_IO_APIC_BASE_0 ... FIX_IO_APIC_BASE_END:
/*
* We just don't map the IO APIC - all access is via
* hypercalls. Keep the address in the pte for reference.
*/
pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
break;
#endif
case FIX_PARAVIRT_BOOTMAP:
/* This is an MFN, but it isn't an IO mapping from the
IO domain */
pte = mfn_pte(phys, prot);
break;
default:
/* By default, set_fixmap is used for hardware mappings */
pte = mfn_pte(phys, prot);
break;
}
vaddr = __fix_to_virt(idx);
if (HYPERVISOR_update_va_mapping(vaddr, pte, UVMF_INVLPG))
BUG();
#ifdef CONFIG_X86_VSYSCALL_EMULATION
/* Replicate changes to map the vsyscall page into the user
pagetable vsyscall mapping. */
if (idx == VSYSCALL_PAGE)
set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
#endif
}
static void xen_enter_lazy_mmu(void)
{
enter_lazy(XEN_LAZY_MMU);
}
static void xen_flush_lazy_mmu(void)
{
preempt_disable();
if (xen_get_lazy_mode() == XEN_LAZY_MMU) {
arch_leave_lazy_mmu_mode();
arch_enter_lazy_mmu_mode();
}
preempt_enable();
}
static void __init xen_post_allocator_init(void)
{
pv_ops.mmu.set_pte = xen_set_pte;
pv_ops.mmu.set_pmd = xen_set_pmd;
pv_ops.mmu.set_pud = xen_set_pud;
pv_ops.mmu.set_p4d = xen_set_p4d;
/* This will work as long as patching hasn't happened yet
(which it hasn't) */
pv_ops.mmu.alloc_pte = xen_alloc_pte;
pv_ops.mmu.alloc_pmd = xen_alloc_pmd;
pv_ops.mmu.release_pte = xen_release_pte;
pv_ops.mmu.release_pmd = xen_release_pmd;
pv_ops.mmu.alloc_pud = xen_alloc_pud;
pv_ops.mmu.release_pud = xen_release_pud;
pv_ops.mmu.make_pte = PV_CALLEE_SAVE(xen_make_pte);
pv_ops.mmu.write_cr3 = &xen_write_cr3;
}
static void xen_leave_lazy_mmu(void)
{
preempt_disable();
xen_mc_flush();
leave_lazy(XEN_LAZY_MMU);
preempt_enable();
}
static const typeof(pv_ops) xen_mmu_ops __initconst = {
.mmu = {
.read_cr2 = __PV_IS_CALLEE_SAVE(xen_read_cr2),
.write_cr2 = xen_write_cr2,
.read_cr3 = xen_read_cr3,
.write_cr3 = xen_write_cr3_init,
.flush_tlb_user = xen_flush_tlb,
.flush_tlb_kernel = xen_flush_tlb,
.flush_tlb_one_user = xen_flush_tlb_one_user,
.flush_tlb_multi = xen_flush_tlb_multi,
.tlb_remove_table = tlb_remove_table,
.pgd_alloc = xen_pgd_alloc,
.pgd_free = xen_pgd_free,
.alloc_pte = xen_alloc_pte_init,
.release_pte = xen_release_pte_init,
.alloc_pmd = xen_alloc_pmd_init,
.release_pmd = xen_release_pmd_init,
.set_pte = xen_set_pte_init,
.set_pmd = xen_set_pmd_hyper,
.ptep_modify_prot_start = xen_ptep_modify_prot_start,
.ptep_modify_prot_commit = xen_ptep_modify_prot_commit,
.pte_val = PV_CALLEE_SAVE(xen_pte_val),
.pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
.make_pte = PV_CALLEE_SAVE(xen_make_pte_init),
.make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
.set_pud = xen_set_pud_hyper,
.make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
.pmd_val = PV_CALLEE_SAVE(xen_pmd_val),
.pud_val = PV_CALLEE_SAVE(xen_pud_val),
.make_pud = PV_CALLEE_SAVE(xen_make_pud),
.set_p4d = xen_set_p4d_hyper,
.alloc_pud = xen_alloc_pmd_init,
.release_pud = xen_release_pmd_init,
#if CONFIG_PGTABLE_LEVELS >= 5
.p4d_val = PV_CALLEE_SAVE(xen_p4d_val),
.make_p4d = PV_CALLEE_SAVE(xen_make_p4d),
#endif
.enter_mmap = xen_enter_mmap,
.exit_mmap = xen_exit_mmap,
.lazy_mode = {
.enter = xen_enter_lazy_mmu,
.leave = xen_leave_lazy_mmu,
.flush = xen_flush_lazy_mmu,
},
.set_fixmap = xen_set_fixmap,
},
};
void __init xen_init_mmu_ops(void)
{
x86_init.paging.pagetable_init = xen_pagetable_init;
x86_init.hyper.init_after_bootmem = xen_after_bootmem;
pv_ops.mmu = xen_mmu_ops.mmu;
memset(dummy_mapping, 0xff, PAGE_SIZE);
}
/* Protected by xen_reservation_lock. */
#define MAX_CONTIG_ORDER 9 /* 2MB */
static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
#define VOID_PTE (mfn_pte(0, __pgprot(0)))
static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
unsigned long *in_frames,
unsigned long *out_frames)
{
int i;
struct multicall_space mcs;
xen_mc_batch();
for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
mcs = __xen_mc_entry(0);
if (in_frames)
in_frames[i] = virt_to_mfn((void *)vaddr);
MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
__set_phys_to_machine(virt_to_pfn((void *)vaddr), INVALID_P2M_ENTRY);
if (out_frames)
out_frames[i] = virt_to_pfn((void *)vaddr);
}
xen_mc_issue(0);
}
/*
* Update the pfn-to-mfn mappings for a virtual address range, either to
* point to an array of mfns, or contiguously from a single starting
* mfn.
*/
static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
unsigned long *mfns,
unsigned long first_mfn)
{
unsigned i, limit;
unsigned long mfn;
xen_mc_batch();
limit = 1u << order;
for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
struct multicall_space mcs;
unsigned flags;
mcs = __xen_mc_entry(0);
if (mfns)
mfn = mfns[i];
else
mfn = first_mfn + i;
if (i < (limit - 1))
flags = 0;
else {
if (order == 0)
flags = UVMF_INVLPG | UVMF_ALL;
else
flags = UVMF_TLB_FLUSH | UVMF_ALL;
}
MULTI_update_va_mapping(mcs.mc, vaddr,
mfn_pte(mfn, PAGE_KERNEL), flags);
set_phys_to_machine(virt_to_pfn((void *)vaddr), mfn);
}
xen_mc_issue(0);
}
/*
* Perform the hypercall to exchange a region of our pfns to point to
* memory with the required contiguous alignment. Takes the pfns as
* input, and populates mfns as output.
*
* Returns a success code indicating whether the hypervisor was able to
* satisfy the request or not.
*/
static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
unsigned long *pfns_in,
unsigned long extents_out,
unsigned int order_out,
unsigned long *mfns_out,
unsigned int address_bits)
{
long rc;
int success;
struct xen_memory_exchange exchange = {
.in = {
.nr_extents = extents_in,
.extent_order = order_in,
.extent_start = pfns_in,
.domid = DOMID_SELF
},
.out = {
.nr_extents = extents_out,
.extent_order = order_out,
.extent_start = mfns_out,
.address_bits = address_bits,
.domid = DOMID_SELF
}
};
BUG_ON(extents_in << order_in != extents_out << order_out);
rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
success = (exchange.nr_exchanged == extents_in);
BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
BUG_ON(success && (rc != 0));
return success;
}
int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
unsigned int address_bits,
dma_addr_t *dma_handle)
{
unsigned long *in_frames = discontig_frames, out_frame;
unsigned long flags;
int success;
unsigned long vstart = (unsigned long)phys_to_virt(pstart);
if (unlikely(order > MAX_CONTIG_ORDER))
return -ENOMEM;
memset((void *) vstart, 0, PAGE_SIZE << order);
spin_lock_irqsave(&xen_reservation_lock, flags);
/* 1. Zap current PTEs, remembering MFNs. */
xen_zap_pfn_range(vstart, order, in_frames, NULL);
/* 2. Get a new contiguous memory extent. */
out_frame = virt_to_pfn((void *)vstart);
success = xen_exchange_memory(1UL << order, 0, in_frames,
1, order, &out_frame,
address_bits);
/* 3. Map the new extent in place of old pages. */
if (success)
xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
else
xen_remap_exchanged_ptes(vstart, order, in_frames, 0);
spin_unlock_irqrestore(&xen_reservation_lock, flags);
*dma_handle = virt_to_machine(vstart).maddr;
return success ? 0 : -ENOMEM;
}
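/*
 * Minimal usage sketch for the interface above (illustrative only, in
 * the style of drivers/xen/swiotlb-xen.c; the function name and error
 * policy are made up): make an order-2 (16 KiB) kernel allocation
 * machine-contiguous and addressable with 32 bits for DMA.
 */
static int __maybe_unused example_make_dma_contiguous(void)
{
	unsigned int order = 2;
	unsigned long va = __get_free_pages(GFP_KERNEL, order);
	dma_addr_t dma_handle;
	int rc;

	if (!va)
		return -ENOMEM;

	rc = xen_create_contiguous_region(virt_to_phys((void *)va), order,
					  32 /* address_bits */, &dma_handle);
	if (rc)
		free_pages(va, order);	/* exchange failed, nothing remapped */

	return rc;
}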
void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
{
unsigned long *out_frames = discontig_frames, in_frame;
unsigned long flags;
int success;
unsigned long vstart;
if (unlikely(order > MAX_CONTIG_ORDER))
return;
vstart = (unsigned long)phys_to_virt(pstart);
memset((void *) vstart, 0, PAGE_SIZE << order);
spin_lock_irqsave(&xen_reservation_lock, flags);
/* 1. Find start MFN of contiguous extent. */
in_frame = virt_to_mfn((void *)vstart);
/* 2. Zap current PTEs. */
xen_zap_pfn_range(vstart, order, NULL, out_frames);
/* 3. Do the exchange for non-contiguous MFNs. */
success = xen_exchange_memory(1, order, &in_frame, 1UL << order,
0, out_frames, 0);
/* 4. Map new pages in place of old pages. */
if (success)
xen_remap_exchanged_ptes(vstart, order, out_frames, 0);
else
xen_remap_exchanged_ptes(vstart, order, NULL, in_frame);
spin_unlock_irqrestore(&xen_reservation_lock, flags);
}
static noinline void xen_flush_tlb_all(void)
{
struct mmuext_op *op;
struct multicall_space mcs;
preempt_disable();
mcs = xen_mc_entry(sizeof(*op));
op = mcs.args;
op->cmd = MMUEXT_TLB_FLUSH_ALL;
MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
xen_mc_issue(XEN_LAZY_MMU);
preempt_enable();
}
#define REMAP_BATCH_SIZE 16
struct remap_data {
xen_pfn_t *pfn;
bool contiguous;
bool no_translate;
pgprot_t prot;
struct mmu_update *mmu_update;
};
static int remap_area_pfn_pte_fn(pte_t *ptep, unsigned long addr, void *data)
{
struct remap_data *rmd = data;
pte_t pte = pte_mkspecial(mfn_pte(*rmd->pfn, rmd->prot));
/*
	 * If we have a contiguous range, just update the pfn itself;
	 * otherwise advance the pointer to the next pfn.
*/
if (rmd->contiguous)
(*rmd->pfn)++;
else
rmd->pfn++;
rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
rmd->mmu_update->ptr |= rmd->no_translate ?
MMU_PT_UPDATE_NO_TRANSLATE :
MMU_NORMAL_PT_UPDATE;
rmd->mmu_update->val = pte_val_ma(pte);
rmd->mmu_update++;
return 0;
}
int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
unsigned int domid, bool no_translate)
{
int err = 0;
struct remap_data rmd;
struct mmu_update mmu_update[REMAP_BATCH_SIZE];
unsigned long range;
int mapped = 0;
BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
rmd.pfn = pfn;
rmd.prot = prot;
/*
	 * We use err_ptr to indicate whether we are doing a contiguous
	 * mapping or a discontiguous mapping.
*/
rmd.contiguous = !err_ptr;
rmd.no_translate = no_translate;
while (nr) {
int index = 0;
int done = 0;
int batch = min(REMAP_BATCH_SIZE, nr);
int batch_left = batch;
range = (unsigned long)batch << PAGE_SHIFT;
rmd.mmu_update = mmu_update;
err = apply_to_page_range(vma->vm_mm, addr, range,
remap_area_pfn_pte_fn, &rmd);
if (err)
goto out;
/*
* We record the error for each page that gives an error, but
* continue mapping until the whole set is done
*/
do {
int i;
err = HYPERVISOR_mmu_update(&mmu_update[index],
batch_left, &done, domid);
/*
			 * @err_ptr may be the same buffer as @pfn, so
			 * only clear it after each chunk of @pfn is
			 * used.
*/
if (err_ptr) {
for (i = index; i < index + done; i++)
err_ptr[i] = 0;
}
if (err < 0) {
if (!err_ptr)
goto out;
err_ptr[i] = err;
done++; /* Skip failed frame. */
} else
mapped += done;
batch_left -= done;
index += done;
} while (batch_left);
nr -= batch;
addr += range;
if (err_ptr)
err_ptr += batch;
cond_resched();
}
out:
xen_flush_tlb_all();
return err < 0 ? err : mapped;
}
EXPORT_SYMBOL_GPL(xen_remap_pfn);
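/*
 * Hedged caller sketch for xen_remap_pfn() (illustrative, loosely
 * modelled on drivers/xen/privcmd.c; the function name and fixed-size
 * error array are made up): map @nr foreign gfns from domain @domid
 * into a VM_IO | VM_PFNMAP vma, collecting per-frame error codes.
 */
static int __maybe_unused
example_map_foreign_frames(struct vm_area_struct *vma, unsigned long addr,
			   xen_pfn_t *gfns, int nr, unsigned int domid)
{
	int errs[REMAP_BATCH_SIZE];

	if (nr > REMAP_BATCH_SIZE)
		return -EINVAL;

	/* A non-NULL err_ptr selects the discontiguous (per-frame) mode. */
	return xen_remap_pfn(vma, addr, gfns, nr, errs, vma->vm_page_prot,
			     domid, false);
}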
#ifdef CONFIG_KEXEC_CORE
phys_addr_t paddr_vmcoreinfo_note(void)
{
if (xen_pv_domain())
return virt_to_machine(vmcoreinfo_note).maddr;
else
return __pa(vmcoreinfo_note);
}
#endif /* CONFIG_KEXEC_CORE */
| linux-master | arch/x86/xen/mmu_pv.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <xen/xen.h>
#include <xen/hvm.h>
#include <xen/features.h>
#include <xen/interface/features.h>
#include <xen/events.h>
#include "xen-ops.h"
void xen_hvm_post_suspend(int suspend_cancelled)
{
if (!suspend_cancelled) {
xen_hvm_init_shared_info();
xen_vcpu_restore();
}
if (xen_percpu_upcall) {
unsigned int cpu;
for_each_online_cpu(cpu)
BUG_ON(xen_set_upcall_vector(cpu));
} else {
xen_setup_callback_vector();
}
xen_unplug_emulated_devices();
}
| linux-master | arch/x86/xen/suspend_hvm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Xen hypercall batching.
*
* Xen allows multiple hypercalls to be issued at once, using the
* multicall interface. This allows the cost of trapping into the
* hypervisor to be amortized over several calls.
*
* This file implements a simple interface for multicalls. There's a
* per-cpu buffer of outstanding multicalls. When you want to queue a
* multicall for issuing, you can allocate a multicall slot for the
 * call and its arguments, along with scratch space that the arguments
 * can point to (for passing pointers to structures,
* etc). When the multicall is actually issued, all the space for the
* commands and allocated memory is freed for reuse.
*
* Multicalls are flushed whenever any of the buffers get full, or
* when explicitly requested. There's no way to get per-multicall
* return results back. It will BUG if any of the multicalls fail.
*
* Jeremy Fitzhardinge <[email protected]>, XenSource Inc, 2007
*/
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/debugfs.h>
#include <asm/xen/hypercall.h>
#include "multicalls.h"
#include "debugfs.h"
#define MC_BATCH 32
#define MC_DEBUG 0
#define MC_ARGS (MC_BATCH * 16)
struct mc_buffer {
unsigned mcidx, argidx, cbidx;
struct multicall_entry entries[MC_BATCH];
#if MC_DEBUG
struct multicall_entry debug[MC_BATCH];
void *caller[MC_BATCH];
#endif
unsigned char args[MC_ARGS];
struct callback {
void (*fn)(void *);
void *data;
} callbacks[MC_BATCH];
};
static DEFINE_PER_CPU(struct mc_buffer, mc_buffer);
DEFINE_PER_CPU(unsigned long, xen_mc_irq_flags);
void xen_mc_flush(void)
{
struct mc_buffer *b = this_cpu_ptr(&mc_buffer);
struct multicall_entry *mc;
int ret = 0;
unsigned long flags;
int i;
BUG_ON(preemptible());
/* Disable interrupts in case someone comes in and queues
something in the middle */
local_irq_save(flags);
trace_xen_mc_flush(b->mcidx, b->argidx, b->cbidx);
#if MC_DEBUG
memcpy(b->debug, b->entries,
b->mcidx * sizeof(struct multicall_entry));
#endif
switch (b->mcidx) {
case 0:
/* no-op */
BUG_ON(b->argidx != 0);
break;
case 1:
/* Singleton multicall - bypass multicall machinery
and just do the call directly. */
mc = &b->entries[0];
mc->result = xen_single_call(mc->op, mc->args[0], mc->args[1],
mc->args[2], mc->args[3],
mc->args[4]);
ret = mc->result < 0;
break;
default:
if (HYPERVISOR_multicall(b->entries, b->mcidx) != 0)
BUG();
for (i = 0; i < b->mcidx; i++)
if (b->entries[i].result < 0)
ret++;
}
if (WARN_ON(ret)) {
pr_err("%d of %d multicall(s) failed: cpu %d\n",
ret, b->mcidx, smp_processor_id());
for (i = 0; i < b->mcidx; i++) {
if (b->entries[i].result < 0) {
#if MC_DEBUG
pr_err(" call %2d: op=%lu arg=[%lx] result=%ld\t%pS\n",
i + 1,
b->debug[i].op,
b->debug[i].args[0],
b->entries[i].result,
b->caller[i]);
#else
pr_err(" call %2d: op=%lu arg=[%lx] result=%ld\n",
i + 1,
b->entries[i].op,
b->entries[i].args[0],
b->entries[i].result);
#endif
}
}
}
b->mcidx = 0;
b->argidx = 0;
for (i = 0; i < b->cbidx; i++) {
struct callback *cb = &b->callbacks[i];
(*cb->fn)(cb->data);
}
b->cbidx = 0;
local_irq_restore(flags);
}
struct multicall_space __xen_mc_entry(size_t args)
{
struct mc_buffer *b = this_cpu_ptr(&mc_buffer);
struct multicall_space ret;
unsigned argidx = roundup(b->argidx, sizeof(u64));
trace_xen_mc_entry_alloc(args);
BUG_ON(preemptible());
BUG_ON(b->argidx >= MC_ARGS);
if (unlikely(b->mcidx == MC_BATCH ||
(argidx + args) >= MC_ARGS)) {
trace_xen_mc_flush_reason((b->mcidx == MC_BATCH) ?
XEN_MC_FL_BATCH : XEN_MC_FL_ARGS);
xen_mc_flush();
argidx = roundup(b->argidx, sizeof(u64));
}
ret.mc = &b->entries[b->mcidx];
#if MC_DEBUG
b->caller[b->mcidx] = __builtin_return_address(0);
#endif
b->mcidx++;
ret.args = &b->args[argidx];
b->argidx = argidx + args;
BUG_ON(b->argidx >= MC_ARGS);
return ret;
}
struct multicall_space xen_mc_extend_args(unsigned long op, size_t size)
{
struct mc_buffer *b = this_cpu_ptr(&mc_buffer);
struct multicall_space ret = { NULL, NULL };
BUG_ON(preemptible());
BUG_ON(b->argidx >= MC_ARGS);
if (unlikely(b->mcidx == 0 ||
b->entries[b->mcidx - 1].op != op)) {
trace_xen_mc_extend_args(op, size, XEN_MC_XE_BAD_OP);
goto out;
}
if (unlikely((b->argidx + size) >= MC_ARGS)) {
trace_xen_mc_extend_args(op, size, XEN_MC_XE_NO_SPACE);
goto out;
}
ret.mc = &b->entries[b->mcidx - 1];
ret.args = &b->args[b->argidx];
b->argidx += size;
BUG_ON(b->argidx >= MC_ARGS);
trace_xen_mc_extend_args(op, size, XEN_MC_XE_OK);
out:
return ret;
}
void xen_mc_callback(void (*fn)(void *), void *data)
{
struct mc_buffer *b = this_cpu_ptr(&mc_buffer);
struct callback *cb;
if (b->cbidx == MC_BATCH) {
trace_xen_mc_flush_reason(XEN_MC_FL_CALLBACK);
xen_mc_flush();
}
trace_xen_mc_callback(fn, data);
cb = &b->callbacks[b->cbidx++];
cb->fn = fn;
cb->data = data;
}
| linux-master | arch/x86/xen/multicalls.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <xen/events.h>
#include <xen/hvc-console.h>
#include "xen-ops.h"
#include "smp.h"
static DEFINE_PER_CPU(struct xen_common_irq, xen_resched_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfunc_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfuncsingle_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_debug_irq) = { .irq = -1 };
static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
/*
 * Reschedule callback.
*/
static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
{
inc_irq_stat(irq_resched_count);
scheduler_ipi();
return IRQ_HANDLED;
}
void xen_smp_intr_free(unsigned int cpu)
{
kfree(per_cpu(xen_resched_irq, cpu).name);
per_cpu(xen_resched_irq, cpu).name = NULL;
if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
per_cpu(xen_resched_irq, cpu).irq = -1;
}
kfree(per_cpu(xen_callfunc_irq, cpu).name);
per_cpu(xen_callfunc_irq, cpu).name = NULL;
if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) {
unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL);
per_cpu(xen_callfunc_irq, cpu).irq = -1;
}
kfree(per_cpu(xen_debug_irq, cpu).name);
per_cpu(xen_debug_irq, cpu).name = NULL;
if (per_cpu(xen_debug_irq, cpu).irq >= 0) {
unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu).irq, NULL);
per_cpu(xen_debug_irq, cpu).irq = -1;
}
kfree(per_cpu(xen_callfuncsingle_irq, cpu).name);
per_cpu(xen_callfuncsingle_irq, cpu).name = NULL;
if (per_cpu(xen_callfuncsingle_irq, cpu).irq >= 0) {
unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu).irq,
NULL);
per_cpu(xen_callfuncsingle_irq, cpu).irq = -1;
}
}
int xen_smp_intr_init(unsigned int cpu)
{
int rc;
char *resched_name, *callfunc_name, *debug_name;
resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
per_cpu(xen_resched_irq, cpu).name = resched_name;
rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
cpu,
xen_reschedule_interrupt,
IRQF_PERCPU|IRQF_NOBALANCING,
resched_name,
NULL);
if (rc < 0)
goto fail;
per_cpu(xen_resched_irq, cpu).irq = rc;
callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;
rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
cpu,
xen_call_function_interrupt,
IRQF_PERCPU|IRQF_NOBALANCING,
callfunc_name,
NULL);
if (rc < 0)
goto fail;
per_cpu(xen_callfunc_irq, cpu).irq = rc;
if (!xen_fifo_events) {
debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
per_cpu(xen_debug_irq, cpu).name = debug_name;
rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu,
xen_debug_interrupt,
IRQF_PERCPU | IRQF_NOBALANCING,
debug_name, NULL);
if (rc < 0)
goto fail;
per_cpu(xen_debug_irq, cpu).irq = rc;
}
callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;
rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
cpu,
xen_call_function_single_interrupt,
IRQF_PERCPU|IRQF_NOBALANCING,
callfunc_name,
NULL);
if (rc < 0)
goto fail;
per_cpu(xen_callfuncsingle_irq, cpu).irq = rc;
return 0;
fail:
xen_smp_intr_free(cpu);
return rc;
}
void __init xen_smp_cpus_done(unsigned int max_cpus)
{
if (xen_hvm_domain())
native_smp_cpus_done(max_cpus);
else
calculate_max_logical_packages();
}
void xen_smp_send_reschedule(int cpu)
{
xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
}
static void __xen_send_IPI_mask(const struct cpumask *mask,
int vector)
{
unsigned cpu;
for_each_cpu_and(cpu, mask, cpu_online_mask)
xen_send_IPI_one(cpu, vector);
}
void xen_smp_send_call_function_ipi(const struct cpumask *mask)
{
int cpu;
__xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
/* Make sure other vcpus get a chance to run if they need to. */
for_each_cpu(cpu, mask) {
if (xen_vcpu_stolen(cpu)) {
HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
break;
}
}
}
void xen_smp_send_call_function_single_ipi(int cpu)
{
__xen_send_IPI_mask(cpumask_of(cpu),
XEN_CALL_FUNCTION_SINGLE_VECTOR);
}
static inline int xen_map_vector(int vector)
{
int xen_vector;
switch (vector) {
case RESCHEDULE_VECTOR:
xen_vector = XEN_RESCHEDULE_VECTOR;
break;
case CALL_FUNCTION_VECTOR:
xen_vector = XEN_CALL_FUNCTION_VECTOR;
break;
case CALL_FUNCTION_SINGLE_VECTOR:
xen_vector = XEN_CALL_FUNCTION_SINGLE_VECTOR;
break;
case IRQ_WORK_VECTOR:
xen_vector = XEN_IRQ_WORK_VECTOR;
break;
#ifdef CONFIG_X86_64
case NMI_VECTOR:
case APIC_DM_NMI: /* Some use that instead of NMI_VECTOR */
xen_vector = XEN_NMI_VECTOR;
break;
#endif
default:
xen_vector = -1;
printk(KERN_ERR "xen: vector 0x%x is not implemented\n",
vector);
}
return xen_vector;
}
void xen_send_IPI_mask(const struct cpumask *mask,
int vector)
{
int xen_vector = xen_map_vector(vector);
if (xen_vector >= 0)
__xen_send_IPI_mask(mask, xen_vector);
}
void xen_send_IPI_all(int vector)
{
int xen_vector = xen_map_vector(vector);
if (xen_vector >= 0)
__xen_send_IPI_mask(cpu_online_mask, xen_vector);
}
void xen_send_IPI_self(int vector)
{
int xen_vector = xen_map_vector(vector);
if (xen_vector >= 0)
xen_send_IPI_one(smp_processor_id(), xen_vector);
}
void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
int vector)
{
unsigned cpu;
unsigned int this_cpu = smp_processor_id();
int xen_vector = xen_map_vector(vector);
if (!(num_online_cpus() > 1) || (xen_vector < 0))
return;
for_each_cpu_and(cpu, mask, cpu_online_mask) {
if (this_cpu == cpu)
continue;
xen_send_IPI_one(cpu, xen_vector);
}
}
void xen_send_IPI_allbutself(int vector)
{
xen_send_IPI_mask_allbutself(cpu_online_mask, vector);
}
static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
generic_smp_call_function_interrupt();
inc_irq_stat(irq_call_count);
return IRQ_HANDLED;
}
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
{
generic_smp_call_function_single_interrupt();
inc_irq_stat(irq_call_count);
return IRQ_HANDLED;
}
| linux-master | arch/x86/xen/smp.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/pfn.h>
#include <asm/xen/page.h>
#include <asm/xen/hypercall.h>
#include <xen/interface/memory.h>
#include "multicalls.h"
#include "mmu.h"
unsigned long arbitrary_virt_to_mfn(void *vaddr)
{
xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);
return PFN_DOWN(maddr.maddr);
}
xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
unsigned long address = (unsigned long)vaddr;
unsigned int level;
pte_t *pte;
unsigned offset;
/*
	 * if the address is in the linear-mapped vaddr range, we can just use
* the (quick) virt_to_machine() p2m lookup
*/
if (virt_addr_valid(vaddr))
return virt_to_machine(vaddr);
/* otherwise we have to do a (slower) full page-table walk */
pte = lookup_address(address, &level);
BUG_ON(pte == NULL);
offset = address & ~PAGE_MASK;
return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
}
EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);
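/*
 * Illustrative sketch of the two paths above (assumes linux/slab.h and
 * linux/vmalloc.h; the function name is made up): a linear-map address
 * takes the quick virt_to_machine() p2m route, while a vmalloc'd
 * address is only reachable via the page-table walk.
 */
static void __maybe_unused example_lookup_paths(void)
{
	void *low = kmalloc(64, GFP_KERNEL);	/* linear-mapped */
	void *high = vmalloc(PAGE_SIZE);	/* not linear-mapped */

	if (low && high) {
		xmaddr_t m1 = arbitrary_virt_to_machine(low);	/* fast path */
		xmaddr_t m2 = arbitrary_virt_to_machine(high);	/* pt walk */

		(void)m1;
		(void)m2;
	}
	kfree(low);
	vfree(high);
}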
/* Returns 0 on success */
int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
int nr, struct page **pages)
{
if (xen_feature(XENFEAT_auto_translated_physmap))
return xen_xlate_unmap_gfn_range(vma, nr, pages);
if (!pages)
return 0;
return -EINVAL;
}
EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range);
| linux-master | arch/x86/xen/mmu.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/thread_info.h>
#include <asm/x86_init.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/xen/hypercall.h>
#include <xen/xen.h>
#include <xen/interface/physdev.h>
#include "xen-ops.h"
#include "pmu.h"
#include "smp.h"
static unsigned int xen_io_apic_read(unsigned apic, unsigned reg)
{
struct physdev_apic apic_op;
int ret;
apic_op.apic_physbase = mpc_ioapic_addr(apic);
apic_op.reg = reg;
ret = HYPERVISOR_physdev_op(PHYSDEVOP_apic_read, &apic_op);
if (!ret)
return apic_op.value;
	/* fall back to returning emulated IO_APIC values */
if (reg == 0x1)
return 0x00170020;
else if (reg == 0x0)
return apic << 24;
return 0xfd;
}
static u32 xen_set_apic_id(unsigned int x)
{
WARN_ON(1);
return x;
}
static unsigned int xen_get_apic_id(unsigned long x)
{
return ((x)>>24) & 0xFFu;
}
static u32 xen_apic_read(u32 reg)
{
struct xen_platform_op op = {
.cmd = XENPF_get_cpuinfo,
.interface_version = XENPF_INTERFACE_VERSION,
.u.pcpu_info.xen_cpuid = 0,
};
int ret;
	/* Shouldn't need this, as the APIC is turned off for PV and we only
	 * get called on the boot processor. But just in case. */
if (!xen_initial_domain() || smp_processor_id())
return 0;
if (reg == APIC_LVR)
return 0x14;
if (reg != APIC_ID)
return 0;
ret = HYPERVISOR_platform_op(&op);
if (ret)
op.u.pcpu_info.apic_id = BAD_APICID;
return op.u.pcpu_info.apic_id << 24;
}
static void xen_apic_write(u32 reg, u32 val)
{
if (reg == APIC_LVTPC) {
(void)pmu_apic_update(reg);
return;
}
	/* Warn to see if there are any stray references */
WARN(1,"register: %x, value: %x\n", reg, val);
}
static void xen_apic_eoi(void)
{
WARN_ON_ONCE(1);
}
static u64 xen_apic_icr_read(void)
{
return 0;
}
static void xen_apic_icr_write(u32 low, u32 id)
{
	/* Warn to see if there are any stray references */
WARN_ON(1);
}
static int xen_apic_probe_pv(void)
{
if (xen_pv_domain())
return 1;
return 0;
}
static int xen_madt_oem_check(char *oem_id, char *oem_table_id)
{
return xen_pv_domain();
}
static int xen_phys_pkg_id(int initial_apic_id, int index_msb)
{
return initial_apic_id >> index_msb;
}
static int xen_cpu_present_to_apicid(int cpu)
{
if (cpu_present(cpu))
return cpu_data(cpu).apicid;
else
return BAD_APICID;
}
static struct apic xen_pv_apic __ro_after_init = {
.name = "Xen PV",
.probe = xen_apic_probe_pv,
.acpi_madt_oem_check = xen_madt_oem_check,
/* .delivery_mode and .dest_mode_logical not used by XENPV */
.disable_esr = 0,
.cpu_present_to_apicid = xen_cpu_present_to_apicid,
.phys_pkg_id = xen_phys_pkg_id, /* detect_ht */
.max_apic_id = UINT_MAX,
.get_apic_id = xen_get_apic_id,
.set_apic_id = xen_set_apic_id,
.calc_dest_apicid = apic_flat_calc_apicid,
#ifdef CONFIG_SMP
.send_IPI_mask = xen_send_IPI_mask,
.send_IPI_mask_allbutself = xen_send_IPI_mask_allbutself,
.send_IPI_allbutself = xen_send_IPI_allbutself,
.send_IPI_all = xen_send_IPI_all,
.send_IPI_self = xen_send_IPI_self,
#endif
.read = xen_apic_read,
.write = xen_apic_write,
.eoi = xen_apic_eoi,
.icr_read = xen_apic_icr_read,
.icr_write = xen_apic_icr_write,
};
apic_driver(xen_pv_apic);
void __init xen_init_apic(void)
{
x86_apic_ops.io_apic_read = xen_io_apic_read;
}
| linux-master | arch/x86/xen/apic.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/screen_info.h>
#include <linux/init.h>
#include <asm/bootparam.h>
#include <asm/setup.h>
#include <xen/interface/xen.h>
#include "xen-ops.h"
void __init xen_init_vga(const struct dom0_vga_console_info *info, size_t size,
struct screen_info *screen_info)
{
/* This is drawn from a dump from vgacon:startup in
* standard Linux. */
screen_info->orig_video_mode = 3;
screen_info->orig_video_isVGA = 1;
screen_info->orig_video_lines = 25;
screen_info->orig_video_cols = 80;
screen_info->orig_video_ega_bx = 3;
screen_info->orig_video_points = 16;
screen_info->orig_y = screen_info->orig_video_lines - 1;
switch (info->video_type) {
case XEN_VGATYPE_TEXT_MODE_3:
if (size < offsetof(struct dom0_vga_console_info, u.text_mode_3)
+ sizeof(info->u.text_mode_3))
break;
screen_info->orig_video_lines = info->u.text_mode_3.rows;
screen_info->orig_video_cols = info->u.text_mode_3.columns;
screen_info->orig_x = info->u.text_mode_3.cursor_x;
screen_info->orig_y = info->u.text_mode_3.cursor_y;
screen_info->orig_video_points =
info->u.text_mode_3.font_height;
break;
case XEN_VGATYPE_EFI_LFB:
case XEN_VGATYPE_VESA_LFB:
if (size < offsetof(struct dom0_vga_console_info,
u.vesa_lfb.gbl_caps))
break;
screen_info->orig_video_isVGA = VIDEO_TYPE_VLFB;
screen_info->lfb_width = info->u.vesa_lfb.width;
screen_info->lfb_height = info->u.vesa_lfb.height;
screen_info->lfb_depth = info->u.vesa_lfb.bits_per_pixel;
screen_info->lfb_base = info->u.vesa_lfb.lfb_base;
screen_info->lfb_size = info->u.vesa_lfb.lfb_size;
screen_info->lfb_linelength = info->u.vesa_lfb.bytes_per_line;
screen_info->red_size = info->u.vesa_lfb.red_size;
screen_info->red_pos = info->u.vesa_lfb.red_pos;
screen_info->green_size = info->u.vesa_lfb.green_size;
screen_info->green_pos = info->u.vesa_lfb.green_pos;
screen_info->blue_size = info->u.vesa_lfb.blue_size;
screen_info->blue_pos = info->u.vesa_lfb.blue_pos;
screen_info->rsvd_size = info->u.vesa_lfb.rsvd_size;
screen_info->rsvd_pos = info->u.vesa_lfb.rsvd_pos;
if (size >= offsetof(struct dom0_vga_console_info,
u.vesa_lfb.ext_lfb_base)
+ sizeof(info->u.vesa_lfb.ext_lfb_base)
&& info->u.vesa_lfb.ext_lfb_base) {
screen_info->ext_lfb_base = info->u.vesa_lfb.ext_lfb_base;
screen_info->capabilities |= VIDEO_CAPABILITY_64BIT_BASE;
}
if (info->video_type == XEN_VGATYPE_EFI_LFB) {
screen_info->orig_video_isVGA = VIDEO_TYPE_EFI;
break;
}
if (size >= offsetof(struct dom0_vga_console_info,
u.vesa_lfb.mode_attrs)
+ sizeof(info->u.vesa_lfb.mode_attrs))
screen_info->vesa_attributes = info->u.vesa_lfb.mode_attrs;
break;
}
}
| linux-master | arch/x86/xen/vga.c |
/* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 only,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License version 2 for more details (a copy is included
* in the LICENSE file that accompanied this code).
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see http://www.gnu.org/licenses
*
* Please visit http://www.xyratex.com/contact if you need additional
* information or have any questions.
*
* GPL HEADER END
*/
/*
* Copyright 2012 Xyratex Technology Limited
*
* Wrappers for kernel crypto shash api to pclmulqdq crc32 implementation.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/crc32.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <asm/cpufeatures.h>
#include <asm/cpu_device_id.h>
#include <asm/simd.h>
#define CHKSUM_BLOCK_SIZE 1
#define CHKSUM_DIGEST_SIZE 4
#define PCLMUL_MIN_LEN 64L /* minimum size of buffer
* for crc32_pclmul_le_16 */
#define SCALE_F 16L /* size of xmm register */
#define SCALE_F_MASK (SCALE_F - 1)
u32 crc32_pclmul_le_16(unsigned char const *buffer, size_t len, u32 crc32);
static u32 __attribute__((pure))
crc32_pclmul_le(u32 crc, unsigned char const *p, size_t len)
{
unsigned int iquotient;
unsigned int iremainder;
unsigned int prealign;
if (len < PCLMUL_MIN_LEN + SCALE_F_MASK || !crypto_simd_usable())
return crc32_le(crc, p, len);
if ((long)p & SCALE_F_MASK) {
		/* align p to a 16-byte boundary */
prealign = SCALE_F - ((long)p & SCALE_F_MASK);
crc = crc32_le(crc, p, prealign);
len -= prealign;
p = (unsigned char *)(((unsigned long)p + SCALE_F_MASK) &
~SCALE_F_MASK);
}
iquotient = len & (~SCALE_F_MASK);
iremainder = len & SCALE_F_MASK;
kernel_fpu_begin();
crc = crc32_pclmul_le_16(p, iquotient, crc);
kernel_fpu_end();
if (iremainder)
crc = crc32_le(crc, p + iquotient, iremainder);
return crc;
}
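/*
 * Worked example of the split above (illustrative): with SCALE_F = 16,
 * a buffer at p = ...0x09 with len = 200 passes the minimum-length
 * check (200 >= PCLMUL_MIN_LEN + SCALE_F_MASK = 79) and is handled as:
 *
 *   prealign   = 16 - (0x09 & 15) = 7 bytes   (generic crc32_le)
 *   len        = 200 - 7          = 193 bytes, p now 16-byte aligned
 *   iquotient  = 193 & ~15        = 192 bytes (crc32_pclmul_le_16, FPU)
 *   iremainder = 193 & 15         = 1 byte    (generic crc32_le)
 */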
static int crc32_pclmul_cra_init(struct crypto_tfm *tfm)
{
u32 *key = crypto_tfm_ctx(tfm);
*key = 0;
return 0;
}
static int crc32_pclmul_setkey(struct crypto_shash *hash, const u8 *key,
unsigned int keylen)
{
u32 *mctx = crypto_shash_ctx(hash);
if (keylen != sizeof(u32))
return -EINVAL;
*mctx = le32_to_cpup((__le32 *)key);
return 0;
}
static int crc32_pclmul_init(struct shash_desc *desc)
{
u32 *mctx = crypto_shash_ctx(desc->tfm);
u32 *crcp = shash_desc_ctx(desc);
*crcp = *mctx;
return 0;
}
static int crc32_pclmul_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
u32 *crcp = shash_desc_ctx(desc);
*crcp = crc32_pclmul_le(*crcp, data, len);
return 0;
}
/* No final XOR 0xFFFFFFFF, like crc32_le */
static int __crc32_pclmul_finup(u32 *crcp, const u8 *data, unsigned int len,
u8 *out)
{
*(__le32 *)out = cpu_to_le32(crc32_pclmul_le(*crcp, data, len));
return 0;
}
static int crc32_pclmul_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
return __crc32_pclmul_finup(shash_desc_ctx(desc), data, len, out);
}
static int crc32_pclmul_final(struct shash_desc *desc, u8 *out)
{
u32 *crcp = shash_desc_ctx(desc);
*(__le32 *)out = cpu_to_le32p(crcp);
return 0;
}
static int crc32_pclmul_digest(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
return __crc32_pclmul_finup(crypto_shash_ctx(desc->tfm), data, len,
out);
}
static struct shash_alg alg = {
.setkey = crc32_pclmul_setkey,
.init = crc32_pclmul_init,
.update = crc32_pclmul_update,
.final = crc32_pclmul_final,
.finup = crc32_pclmul_finup,
.digest = crc32_pclmul_digest,
.descsize = sizeof(u32),
.digestsize = CHKSUM_DIGEST_SIZE,
.base = {
.cra_name = "crc32",
.cra_driver_name = "crc32-pclmul",
.cra_priority = 200,
.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_ctxsize = sizeof(u32),
.cra_module = THIS_MODULE,
.cra_init = crc32_pclmul_cra_init,
}
};
static const struct x86_cpu_id crc32pclmul_cpu_id[] = {
X86_MATCH_FEATURE(X86_FEATURE_PCLMULQDQ, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, crc32pclmul_cpu_id);
static int __init crc32_pclmul_mod_init(void)
{
if (!x86_match_cpu(crc32pclmul_cpu_id)) {
pr_info("PCLMULQDQ-NI instructions are not detected.\n");
return -ENODEV;
}
return crypto_register_shash(&alg);
}
static void __exit crc32_pclmul_mod_fini(void)
{
crypto_unregister_shash(&alg);
}
module_init(crc32_pclmul_mod_init);
module_exit(crc32_pclmul_mod_fini);
MODULE_AUTHOR("Alexander Boyko <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("crc32");
MODULE_ALIAS_CRYPTO("crc32-pclmul");
| linux-master | arch/x86/crypto/crc32-pclmul_glue.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright (C) 2020 Jason A. Donenfeld <[email protected]>. All Rights Reserved.
* Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
*/
#include <crypto/curve25519.h>
#include <crypto/internal/kpp.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
static __always_inline u64 eq_mask(u64 a, u64 b)
{
u64 x = a ^ b;
u64 minus_x = ~x + (u64)1U;
u64 x_or_minus_x = x | minus_x;
u64 xnx = x_or_minus_x >> (u32)63U;
return xnx - (u64)1U;
}
static __always_inline u64 gte_mask(u64 a, u64 b)
{
u64 x = a;
u64 y = b;
u64 x_xor_y = x ^ y;
u64 x_sub_y = x - y;
u64 x_sub_y_xor_y = x_sub_y ^ y;
u64 q = x_xor_y | x_sub_y_xor_y;
u64 x_xor_q = x ^ q;
u64 x_xor_q_ = x_xor_q >> (u32)63U;
return x_xor_q_ - (u64)1U;
}
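/*
 * Illustrative use of the masks above (the helper name is made up):
 * because eq_mask()/gte_mask() return all-ones or all-zeros rather
 * than 1/0, they can gate data directly in a branch-free select,
 * avoiding any secret-dependent branch.
 */
static __always_inline u64 ct_select(u64 a, u64 b, u64 x, u64 y)
{
	u64 mask = eq_mask(a, b);		/* ~0 if a == b, else 0 */

	return (x & mask) | (y & ~mask);	/* a == b ? x : y */
}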
/* Computes the addition of four-element f1 with value in f2
* and returns the carry (if any) */
static inline u64 add_scalar(u64 *out, const u64 *f1, u64 f2)
{
u64 carry_r;
asm volatile(
/* Clear registers to propagate the carry bit */
" xor %%r8d, %%r8d;"
" xor %%r9d, %%r9d;"
" xor %%r10d, %%r10d;"
" xor %%r11d, %%r11d;"
" xor %k1, %k1;"
/* Begin addition chain */
" addq 0(%3), %0;"
" movq %0, 0(%2);"
" adcxq 8(%3), %%r8;"
" movq %%r8, 8(%2);"
" adcxq 16(%3), %%r9;"
" movq %%r9, 16(%2);"
" adcxq 24(%3), %%r10;"
" movq %%r10, 24(%2);"
/* Return the carry bit in a register */
" adcx %%r11, %1;"
: "+&r"(f2), "=&r"(carry_r)
: "r"(out), "r"(f1)
: "%r8", "%r9", "%r10", "%r11", "memory", "cc");
return carry_r;
}
/* Computes the field addition of two field elements */
static inline void fadd(u64 *out, const u64 *f1, const u64 *f2)
{
asm volatile(
/* Compute the raw addition of f1 + f2 */
" movq 0(%0), %%r8;"
" addq 0(%2), %%r8;"
" movq 8(%0), %%r9;"
" adcxq 8(%2), %%r9;"
" movq 16(%0), %%r10;"
" adcxq 16(%2), %%r10;"
" movq 24(%0), %%r11;"
" adcxq 24(%2), %%r11;"
/* Wrap the result back into the field */
/* Step 1: Compute carry*38 */
" mov $0, %%rax;"
" mov $38, %0;"
" cmovc %0, %%rax;"
/* Step 2: Add carry*38 to the original sum */
" xor %%ecx, %%ecx;"
" add %%rax, %%r8;"
" adcx %%rcx, %%r9;"
" movq %%r9, 8(%1);"
" adcx %%rcx, %%r10;"
" movq %%r10, 16(%1);"
" adcx %%rcx, %%r11;"
" movq %%r11, 24(%1);"
/* Step 3: Fold the carry bit back in; guaranteed not to carry at this point */
" mov $0, %%rax;"
" cmovc %0, %%rax;"
" add %%rax, %%r8;"
" movq %%r8, 0(%1);"
: "+&r"(f2)
: "r"(out), "r"(f1)
: "%rax", "%rcx", "%r8", "%r9", "%r10", "%r11", "memory", "cc");
}
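/*
 * Why "carry * 38" in the wrap-around steps above (worked identity,
 * for reference): the field prime is p = 2^255 - 19, and a four-limb
 * value is held mod 2^256. Since
 *
 *   2^256 = 2 * (2^255 - 19) + 38   ==>   2^256 == 38 (mod p)
 *
 * a carry c out of bit 256 folds back in as c * 38, and any carry out
 * of that addition folds in the same way (step 3), which can be shown
 * never to carry again.
 */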
/* Computes the field subtraction of two field elements */
static inline void fsub(u64 *out, const u64 *f1, const u64 *f2)
{
asm volatile(
/* Compute the raw subtraction of f1-f2 */
" movq 0(%1), %%r8;"
" subq 0(%2), %%r8;"
" movq 8(%1), %%r9;"
" sbbq 8(%2), %%r9;"
" movq 16(%1), %%r10;"
" sbbq 16(%2), %%r10;"
" movq 24(%1), %%r11;"
" sbbq 24(%2), %%r11;"
/* Wrap the result back into the field */
/* Step 1: Compute carry*38 */
" mov $0, %%rax;"
" mov $38, %%rcx;"
" cmovc %%rcx, %%rax;"
/* Step 2: Subtract carry*38 from the original difference */
" sub %%rax, %%r8;"
" sbb $0, %%r9;"
" sbb $0, %%r10;"
" sbb $0, %%r11;"
/* Step 3: Fold the carry bit back in; guaranteed not to carry at this point */
" mov $0, %%rax;"
" cmovc %%rcx, %%rax;"
" sub %%rax, %%r8;"
/* Store the result */
" movq %%r8, 0(%0);"
" movq %%r9, 8(%0);"
" movq %%r10, 16(%0);"
" movq %%r11, 24(%0);"
:
: "r"(out), "r"(f1), "r"(f2)
: "%rax", "%rcx", "%r8", "%r9", "%r10", "%r11", "memory", "cc");
}
/* Computes a field multiplication: out <- f1 * f2
* Uses the 8-element buffer tmp for intermediate results */
static inline void fmul(u64 *out, const u64 *f1, const u64 *f2, u64 *tmp)
{
asm volatile(
/* Compute the raw multiplication: tmp <- src1 * src2 */
/* Compute src1[0] * src2 */
" movq 0(%0), %%rdx;"
" mulxq 0(%1), %%r8, %%r9;"
" xor %%r10d, %%r10d;"
" movq %%r8, 0(%2);"
" mulxq 8(%1), %%r10, %%r11;"
" adox %%r9, %%r10;"
" movq %%r10, 8(%2);"
" mulxq 16(%1), %%rbx, %%r13;"
" adox %%r11, %%rbx;"
" mulxq 24(%1), %%r14, %%rdx;"
" adox %%r13, %%r14;"
" mov $0, %%rax;"
" adox %%rdx, %%rax;"
/* Compute src1[1] * src2 */
" movq 8(%0), %%rdx;"
" mulxq 0(%1), %%r8, %%r9;"
" xor %%r10d, %%r10d;"
" adcxq 8(%2), %%r8;"
" movq %%r8, 8(%2);"
" mulxq 8(%1), %%r10, %%r11;"
" adox %%r9, %%r10;"
" adcx %%rbx, %%r10;"
" movq %%r10, 16(%2);"
" mulxq 16(%1), %%rbx, %%r13;"
" adox %%r11, %%rbx;"
" adcx %%r14, %%rbx;"
" mov $0, %%r8;"
" mulxq 24(%1), %%r14, %%rdx;"
" adox %%r13, %%r14;"
" adcx %%rax, %%r14;"
" mov $0, %%rax;"
" adox %%rdx, %%rax;"
" adcx %%r8, %%rax;"
/* Compute src1[2] * src2 */
" movq 16(%0), %%rdx;"
" mulxq 0(%1), %%r8, %%r9;"
" xor %%r10d, %%r10d;"
" adcxq 16(%2), %%r8;"
" movq %%r8, 16(%2);"
" mulxq 8(%1), %%r10, %%r11;"
" adox %%r9, %%r10;"
" adcx %%rbx, %%r10;"
" movq %%r10, 24(%2);"
" mulxq 16(%1), %%rbx, %%r13;"
" adox %%r11, %%rbx;"
" adcx %%r14, %%rbx;"
" mov $0, %%r8;"
" mulxq 24(%1), %%r14, %%rdx;"
" adox %%r13, %%r14;"
" adcx %%rax, %%r14;"
" mov $0, %%rax;"
" adox %%rdx, %%rax;"
" adcx %%r8, %%rax;"
/* Compute src1[3] * src2 */
" movq 24(%0), %%rdx;"
" mulxq 0(%1), %%r8, %%r9;"
" xor %%r10d, %%r10d;"
" adcxq 24(%2), %%r8;"
" movq %%r8, 24(%2);"
" mulxq 8(%1), %%r10, %%r11;"
" adox %%r9, %%r10;"
" adcx %%rbx, %%r10;"
" movq %%r10, 32(%2);"
" mulxq 16(%1), %%rbx, %%r13;"
" adox %%r11, %%rbx;"
" adcx %%r14, %%rbx;"
" movq %%rbx, 40(%2);"
" mov $0, %%r8;"
" mulxq 24(%1), %%r14, %%rdx;"
" adox %%r13, %%r14;"
" adcx %%rax, %%r14;"
" movq %%r14, 48(%2);"
" mov $0, %%rax;"
" adox %%rdx, %%rax;"
" adcx %%r8, %%rax;"
" movq %%rax, 56(%2);"
/* Line up pointers */
" mov %2, %0;"
" mov %3, %2;"
/* Wrap the result back into the field */
/* Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo */
" mov $38, %%rdx;"
" mulxq 32(%0), %%r8, %%r13;"
" xor %k1, %k1;"
" adoxq 0(%0), %%r8;"
" mulxq 40(%0), %%r9, %%rbx;"
" adcx %%r13, %%r9;"
" adoxq 8(%0), %%r9;"
" mulxq 48(%0), %%r10, %%r13;"
" adcx %%rbx, %%r10;"
" adoxq 16(%0), %%r10;"
" mulxq 56(%0), %%r11, %%rax;"
" adcx %%r13, %%r11;"
" adoxq 24(%0), %%r11;"
" adcx %1, %%rax;"
" adox %1, %%rax;"
" imul %%rdx, %%rax;"
/* Step 2: Fold the carry back into dst */
" add %%rax, %%r8;"
" adcx %1, %%r9;"
" movq %%r9, 8(%2);"
" adcx %1, %%r10;"
" movq %%r10, 16(%2);"
" adcx %1, %%r11;"
" movq %%r11, 24(%2);"
/* Step 3: Fold the carry bit back in; guaranteed not to carry at this point */
" mov $0, %%rax;"
" cmovc %%rdx, %%rax;"
" add %%rax, %%r8;"
" movq %%r8, 0(%2);"
: "+&r"(f1), "+&r"(f2), "+&r"(tmp)
: "r"(out)
: "%rax", "%rbx", "%rdx", "%r8", "%r9", "%r10", "%r11", "%r13",
"%r14", "memory", "cc");
}
/* Computes two field multiplications:
* out[0] <- f1[0] * f2[0]
* out[1] <- f1[1] * f2[1]
* Uses the 16-element buffer tmp for intermediate results: */
static inline void fmul2(u64 *out, const u64 *f1, const u64 *f2, u64 *tmp)
{
asm volatile(
/* Compute the raw multiplication tmp[0] <- f1[0] * f2[0] */
/* Compute src1[0] * src2 */
" movq 0(%0), %%rdx;"
" mulxq 0(%1), %%r8, %%r9;"
" xor %%r10d, %%r10d;"
" movq %%r8, 0(%2);"
" mulxq 8(%1), %%r10, %%r11;"
" adox %%r9, %%r10;"
" movq %%r10, 8(%2);"
" mulxq 16(%1), %%rbx, %%r13;"
" adox %%r11, %%rbx;"
" mulxq 24(%1), %%r14, %%rdx;"
" adox %%r13, %%r14;"
" mov $0, %%rax;"
" adox %%rdx, %%rax;"
/* Compute src1[1] * src2 */
" movq 8(%0), %%rdx;"
" mulxq 0(%1), %%r8, %%r9;"
" xor %%r10d, %%r10d;"
" adcxq 8(%2), %%r8;"
" movq %%r8, 8(%2);"
" mulxq 8(%1), %%r10, %%r11;"
" adox %%r9, %%r10;"
" adcx %%rbx, %%r10;"
" movq %%r10, 16(%2);"
" mulxq 16(%1), %%rbx, %%r13;"
" adox %%r11, %%rbx;"
" adcx %%r14, %%rbx;"
" mov $0, %%r8;"
" mulxq 24(%1), %%r14, %%rdx;"
" adox %%r13, %%r14;"
" adcx %%rax, %%r14;"
" mov $0, %%rax;"
" adox %%rdx, %%rax;"
" adcx %%r8, %%rax;"
/* Compute src1[2] * src2 */
" movq 16(%0), %%rdx;"
" mulxq 0(%1), %%r8, %%r9;"
" xor %%r10d, %%r10d;"
" adcxq 16(%2), %%r8;"
" movq %%r8, 16(%2);"
" mulxq 8(%1), %%r10, %%r11;"
" adox %%r9, %%r10;"
" adcx %%rbx, %%r10;"
" movq %%r10, 24(%2);"
" mulxq 16(%1), %%rbx, %%r13;"
" adox %%r11, %%rbx;"
" adcx %%r14, %%rbx;"
" mov $0, %%r8;"
" mulxq 24(%1), %%r14, %%rdx;"
" adox %%r13, %%r14;"
" adcx %%rax, %%r14;"
" mov $0, %%rax;"
" adox %%rdx, %%rax;"
" adcx %%r8, %%rax;"
/* Compute src1[3] * src2 */
" movq 24(%0), %%rdx;"
" mulxq 0(%1), %%r8, %%r9;"
" xor %%r10d, %%r10d;"
" adcxq 24(%2), %%r8;"
" movq %%r8, 24(%2);"
" mulxq 8(%1), %%r10, %%r11;"
" adox %%r9, %%r10;"
" adcx %%rbx, %%r10;"
" movq %%r10, 32(%2);"
" mulxq 16(%1), %%rbx, %%r13;"
" adox %%r11, %%rbx;"
" adcx %%r14, %%rbx;"
" movq %%rbx, 40(%2);"
" mov $0, %%r8;"
" mulxq 24(%1), %%r14, %%rdx;"
" adox %%r13, %%r14;"
" adcx %%rax, %%r14;"
" movq %%r14, 48(%2);"
" mov $0, %%rax;"
" adox %%rdx, %%rax;"
" adcx %%r8, %%rax;"
" movq %%rax, 56(%2);"
/* Compute the raw multiplication tmp[1] <- f1[1] * f2[1] */
/* Compute src1[0] * src2 */
" movq 32(%0), %%rdx;"
" mulxq 32(%1), %%r8, %%r9;"
" xor %%r10d, %%r10d;"
" movq %%r8, 64(%2);"
" mulxq 40(%1), %%r10, %%r11;"
" adox %%r9, %%r10;"
" movq %%r10, 72(%2);"
" mulxq 48(%1), %%rbx, %%r13;"
" adox %%r11, %%rbx;"
" mulxq 56(%1), %%r14, %%rdx;"
" adox %%r13, %%r14;"
" mov $0, %%rax;"
" adox %%rdx, %%rax;"
/* Compute src1[1] * src2 */
" movq 40(%0), %%rdx;"
" mulxq 32(%1), %%r8, %%r9;"
" xor %%r10d, %%r10d;"
" adcxq 72(%2), %%r8;"
" movq %%r8, 72(%2);"
" mulxq 40(%1), %%r10, %%r11;"
" adox %%r9, %%r10;"
" adcx %%rbx, %%r10;"
" movq %%r10, 80(%2);"
" mulxq 48(%1), %%rbx, %%r13;"
" adox %%r11, %%rbx;"
" adcx %%r14, %%rbx;"
" mov $0, %%r8;"
" mulxq 56(%1), %%r14, %%rdx;"
" adox %%r13, %%r14;"
" adcx %%rax, %%r14;"
" mov $0, %%rax;"
" adox %%rdx, %%rax;"
" adcx %%r8, %%rax;"
/* Compute src1[2] * src2 */
" movq 48(%0), %%rdx;"
" mulxq 32(%1), %%r8, %%r9;"
" xor %%r10d, %%r10d;"
" adcxq 80(%2), %%r8;"
" movq %%r8, 80(%2);"
" mulxq 40(%1), %%r10, %%r11;"
" adox %%r9, %%r10;"
" adcx %%rbx, %%r10;"
" movq %%r10, 88(%2);"
" mulxq 48(%1), %%rbx, %%r13;"
" adox %%r11, %%rbx;"
" adcx %%r14, %%rbx;"
" mov $0, %%r8;"
" mulxq 56(%1), %%r14, %%rdx;"
" adox %%r13, %%r14;"
" adcx %%rax, %%r14;"
" mov $0, %%rax;"
" adox %%rdx, %%rax;"
" adcx %%r8, %%rax;"
/* Compute src1[3] * src2 */
" movq 56(%0), %%rdx;"
" mulxq 32(%1), %%r8, %%r9;"
" xor %%r10d, %%r10d;"
" adcxq 88(%2), %%r8;"
" movq %%r8, 88(%2);"
" mulxq 40(%1), %%r10, %%r11;"
" adox %%r9, %%r10;"
" adcx %%rbx, %%r10;"
" movq %%r10, 96(%2);"
" mulxq 48(%1), %%rbx, %%r13;"
" adox %%r11, %%rbx;"
" adcx %%r14, %%rbx;"
" movq %%rbx, 104(%2);"
" mov $0, %%r8;"
" mulxq 56(%1), %%r14, %%rdx;"
" adox %%r13, %%r14;"
" adcx %%rax, %%r14;"
" movq %%r14, 112(%2);"
" mov $0, %%rax;"
" adox %%rdx, %%rax;"
" adcx %%r8, %%rax;"
" movq %%rax, 120(%2);"
/* Line up pointers */
" mov %2, %0;"
" mov %3, %2;"
/* Wrap the results back into the field */
/* Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo */
" mov $38, %%rdx;"
" mulxq 32(%0), %%r8, %%r13;"
" xor %k1, %k1;"
" adoxq 0(%0), %%r8;"
" mulxq 40(%0), %%r9, %%rbx;"
" adcx %%r13, %%r9;"
" adoxq 8(%0), %%r9;"
" mulxq 48(%0), %%r10, %%r13;"
" adcx %%rbx, %%r10;"
" adoxq 16(%0), %%r10;"
" mulxq 56(%0), %%r11, %%rax;"
" adcx %%r13, %%r11;"
" adoxq 24(%0), %%r11;"
" adcx %1, %%rax;"
" adox %1, %%rax;"
" imul %%rdx, %%rax;"
/* Step 2: Fold the carry back into dst */
" add %%rax, %%r8;"
" adcx %1, %%r9;"
" movq %%r9, 8(%2);"
" adcx %1, %%r10;"
" movq %%r10, 16(%2);"
" adcx %1, %%r11;"
" movq %%r11, 24(%2);"
/* Step 3: Fold the carry bit back in; guaranteed not to carry at this point */
" mov $0, %%rax;"
" cmovc %%rdx, %%rax;"
" add %%rax, %%r8;"
" movq %%r8, 0(%2);"
/* Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo */
" mov $38, %%rdx;"
" mulxq 96(%0), %%r8, %%r13;"
" xor %k1, %k1;"
" adoxq 64(%0), %%r8;"
" mulxq 104(%0), %%r9, %%rbx;"
" adcx %%r13, %%r9;"
" adoxq 72(%0), %%r9;"
" mulxq 112(%0), %%r10, %%r13;"
" adcx %%rbx, %%r10;"
" adoxq 80(%0), %%r10;"
" mulxq 120(%0), %%r11, %%rax;"
" adcx %%r13, %%r11;"
" adoxq 88(%0), %%r11;"
" adcx %1, %%rax;"
" adox %1, %%rax;"
" imul %%rdx, %%rax;"
/* Step 2: Fold the carry back into dst */
" add %%rax, %%r8;"
" adcx %1, %%r9;"
" movq %%r9, 40(%2);"
" adcx %1, %%r10;"
" movq %%r10, 48(%2);"
" adcx %1, %%r11;"
" movq %%r11, 56(%2);"
/* Step 3: Fold the carry bit back in; guaranteed not to carry at this point */
" mov $0, %%rax;"
" cmovc %%rdx, %%rax;"
" add %%rax, %%r8;"
" movq %%r8, 32(%2);"
: "+&r"(f1), "+&r"(f2), "+&r"(tmp)
: "r"(out)
: "%rax", "%rbx", "%rdx", "%r8", "%r9", "%r10", "%r11", "%r13",
"%r14", "memory", "cc");
}
/* Computes the field multiplication of four-element f1 with value in f2
* Requires f2 to be smaller than 2^17 */
static inline void fmul_scalar(u64 *out, const u64 *f1, u64 f2)
{
register u64 f2_r asm("rdx") = f2;
asm volatile(
/* Compute the raw multiplication of f1*f2 */
" mulxq 0(%2), %%r8, %%rcx;" /* f1[0]*f2 */
" mulxq 8(%2), %%r9, %%rbx;" /* f1[1]*f2 */
" add %%rcx, %%r9;"
" mov $0, %%rcx;"
" mulxq 16(%2), %%r10, %%r13;" /* f1[2]*f2 */
" adcx %%rbx, %%r10;"
" mulxq 24(%2), %%r11, %%rax;" /* f1[3]*f2 */
" adcx %%r13, %%r11;"
" adcx %%rcx, %%rax;"
/* Wrap the result back into the field */
/* Step 1: Compute carry*38 */
" mov $38, %%rdx;"
" imul %%rdx, %%rax;"
/* Step 2: Fold the carry back into dst */
" add %%rax, %%r8;"
" adcx %%rcx, %%r9;"
" movq %%r9, 8(%1);"
" adcx %%rcx, %%r10;"
" movq %%r10, 16(%1);"
" adcx %%rcx, %%r11;"
" movq %%r11, 24(%1);"
/* Step 3: Fold the carry bit back in; guaranteed not to carry at this point */
" mov $0, %%rax;"
" cmovc %%rdx, %%rax;"
" add %%rax, %%r8;"
" movq %%r8, 0(%1);"
: "+&r"(f2_r)
: "r"(out), "r"(f1)
: "%rax", "%rbx", "%rcx", "%r8", "%r9", "%r10", "%r11", "%r13",
"memory", "cc");
}
/* Conditionally swaps the contents of p1 and p2 in constant time (when bit is set) */
static inline void cswap2(u64 bit, const u64 *p1, const u64 *p2)
{
asm volatile(
/* Transfer bit into CF flag */
" add $18446744073709551615, %0;"
/* cswap p1[0], p2[0] */
" movq 0(%1), %%r8;"
" movq 0(%2), %%r9;"
" mov %%r8, %%r10;"
" cmovc %%r9, %%r8;"
" cmovc %%r10, %%r9;"
" movq %%r8, 0(%1);"
" movq %%r9, 0(%2);"
/* cswap p1[1], p2[1] */
" movq 8(%1), %%r8;"
" movq 8(%2), %%r9;"
" mov %%r8, %%r10;"
" cmovc %%r9, %%r8;"
" cmovc %%r10, %%r9;"
" movq %%r8, 8(%1);"
" movq %%r9, 8(%2);"
/* cswap p1[2], p2[2] */
" movq 16(%1), %%r8;"
" movq 16(%2), %%r9;"
" mov %%r8, %%r10;"
" cmovc %%r9, %%r8;"
" cmovc %%r10, %%r9;"
" movq %%r8, 16(%1);"
" movq %%r9, 16(%2);"
/* cswap p1[3], p2[3] */
" movq 24(%1), %%r8;"
" movq 24(%2), %%r9;"
" mov %%r8, %%r10;"
" cmovc %%r9, %%r8;"
" cmovc %%r10, %%r9;"
" movq %%r8, 24(%1);"
" movq %%r9, 24(%2);"
/* cswap p1[4], p2[4] */
" movq 32(%1), %%r8;"
" movq 32(%2), %%r9;"
" mov %%r8, %%r10;"
" cmovc %%r9, %%r8;"
" cmovc %%r10, %%r9;"
" movq %%r8, 32(%1);"
" movq %%r9, 32(%2);"
/* cswap p1[5], p2[5] */
" movq 40(%1), %%r8;"
" movq 40(%2), %%r9;"
" mov %%r8, %%r10;"
" cmovc %%r9, %%r8;"
" cmovc %%r10, %%r9;"
" movq %%r8, 40(%1);"
" movq %%r9, 40(%2);"
/* cswap p1[6], p2[6] */
" movq 48(%1), %%r8;"
" movq 48(%2), %%r9;"
" mov %%r8, %%r10;"
" cmovc %%r9, %%r8;"
" cmovc %%r10, %%r9;"
" movq %%r8, 48(%1);"
" movq %%r9, 48(%2);"
/* cswap p1[7], p2[7] */
" movq 56(%1), %%r8;"
" movq 56(%2), %%r9;"
" mov %%r8, %%r10;"
" cmovc %%r9, %%r8;"
" cmovc %%r10, %%r9;"
" movq %%r8, 56(%1);"
" movq %%r9, 56(%2);"
: "+&r"(bit)
: "r"(p1), "r"(p2)
: "%r8", "%r9", "%r10", "memory", "cc");
}
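/* Note on the constant-time behaviour above: adding 2^64 - 1 to "bit"
 * sets the carry flag exactly when bit == 1 (0 + 0xffff... does not
 * wrap, 1 + 0xffff... does), and every limb pair is then exchanged
 * through a pair of cmovc instructions.  The instruction and
 * memory-access sequence is therefore identical for bit == 0 and
 * bit == 1.
 */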
/* Computes the square of a field element: out <- f * f
* Uses the 8-element buffer tmp for intermediate results */
static inline void fsqr(u64 *out, const u64 *f, u64 *tmp)
{
asm volatile(
/* Compute the raw multiplication: tmp <- f * f */
/* Step 1: Compute all partial products */
" movq 0(%0), %%rdx;" /* f[0] */
" mulxq 8(%0), %%r8, %%r14;"
" xor %%r15d, %%r15d;" /* f[1]*f[0] */
" mulxq 16(%0), %%r9, %%r10;"
" adcx %%r14, %%r9;" /* f[2]*f[0] */
" mulxq 24(%0), %%rax, %%rcx;"
" adcx %%rax, %%r10;" /* f[3]*f[0] */
" movq 24(%0), %%rdx;" /* f[3] */
" mulxq 8(%0), %%r11, %%rbx;"
" adcx %%rcx, %%r11;" /* f[1]*f[3] */
" mulxq 16(%0), %%rax, %%r13;"
" adcx %%rax, %%rbx;" /* f[2]*f[3] */
" movq 8(%0), %%rdx;"
" adcx %%r15, %%r13;" /* f1 */
" mulxq 16(%0), %%rax, %%rcx;"
" mov $0, %%r14;" /* f[2]*f[1] */
/* Step 2: Compute two parallel carry chains */
" xor %%r15d, %%r15d;"
" adox %%rax, %%r10;"
" adcx %%r8, %%r8;"
" adox %%rcx, %%r11;"
" adcx %%r9, %%r9;"
" adox %%r15, %%rbx;"
" adcx %%r10, %%r10;"
" adox %%r15, %%r13;"
" adcx %%r11, %%r11;"
" adox %%r15, %%r14;"
" adcx %%rbx, %%rbx;"
" adcx %%r13, %%r13;"
" adcx %%r14, %%r14;"
/* Step 3: Compute intermediate squares */
" movq 0(%0), %%rdx;"
" mulx %%rdx, %%rax, %%rcx;" /* f[0]^2 */
" movq %%rax, 0(%1);"
" add %%rcx, %%r8;"
" movq %%r8, 8(%1);"
" movq 8(%0), %%rdx;"
" mulx %%rdx, %%rax, %%rcx;" /* f[1]^2 */
" adcx %%rax, %%r9;"
" movq %%r9, 16(%1);"
" adcx %%rcx, %%r10;"
" movq %%r10, 24(%1);"
" movq 16(%0), %%rdx;"
" mulx %%rdx, %%rax, %%rcx;" /* f[2]^2 */
" adcx %%rax, %%r11;"
" movq %%r11, 32(%1);"
" adcx %%rcx, %%rbx;"
" movq %%rbx, 40(%1);"
" movq 24(%0), %%rdx;"
" mulx %%rdx, %%rax, %%rcx;" /* f[3]^2 */
" adcx %%rax, %%r13;"
" movq %%r13, 48(%1);"
" adcx %%rcx, %%r14;"
" movq %%r14, 56(%1);"
/* Line up pointers */
" mov %1, %0;"
" mov %2, %1;"
/* Wrap the result back into the field */
/* Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo */
" mov $38, %%rdx;"
" mulxq 32(%0), %%r8, %%r13;"
" xor %%ecx, %%ecx;"
" adoxq 0(%0), %%r8;"
" mulxq 40(%0), %%r9, %%rbx;"
" adcx %%r13, %%r9;"
" adoxq 8(%0), %%r9;"
" mulxq 48(%0), %%r10, %%r13;"
" adcx %%rbx, %%r10;"
" adoxq 16(%0), %%r10;"
" mulxq 56(%0), %%r11, %%rax;"
" adcx %%r13, %%r11;"
" adoxq 24(%0), %%r11;"
" adcx %%rcx, %%rax;"
" adox %%rcx, %%rax;"
" imul %%rdx, %%rax;"
/* Step 2: Fold the carry back into dst */
" add %%rax, %%r8;"
" adcx %%rcx, %%r9;"
" movq %%r9, 8(%1);"
" adcx %%rcx, %%r10;"
" movq %%r10, 16(%1);"
" adcx %%rcx, %%r11;"
" movq %%r11, 24(%1);"
/* Step 3: Fold the carry bit back in; guaranteed not to carry at this point */
" mov $0, %%rax;"
" cmovc %%rdx, %%rax;"
" add %%rax, %%r8;"
" movq %%r8, 0(%1);"
: "+&r"(f), "+&r"(tmp)
: "r"(out)
: "%rax", "%rbx", "%rcx", "%rdx", "%r8", "%r9", "%r10", "%r11",
"%r13", "%r14", "%r15", "memory", "cc");
}
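/* Squaring note: the six off-diagonal products f[i]*f[j] (i < j) are
 * computed once and doubled by adding each register to itself in a
 * second carry chain, after which the four diagonal squares f[i]^2 are
 * merged in.  This needs noticeably fewer mulx instructions than a
 * general fmul(out, f, f, tmp).
 */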
/* Computes two field squarings:
* out[0] <- f[0] * f[0]
* out[1] <- f[1] * f[1]
* Uses the 16-element buffer tmp for intermediate results */
static inline void fsqr2(u64 *out, const u64 *f, u64 *tmp)
{
asm volatile(
/* Step 1: Compute all partial products */
" movq 0(%0), %%rdx;" /* f[0] */
" mulxq 8(%0), %%r8, %%r14;"
" xor %%r15d, %%r15d;" /* f[1]*f[0] */
" mulxq 16(%0), %%r9, %%r10;"
" adcx %%r14, %%r9;" /* f[2]*f[0] */
" mulxq 24(%0), %%rax, %%rcx;"
" adcx %%rax, %%r10;" /* f[3]*f[0] */
" movq 24(%0), %%rdx;" /* f[3] */
" mulxq 8(%0), %%r11, %%rbx;"
" adcx %%rcx, %%r11;" /* f[1]*f[3] */
" mulxq 16(%0), %%rax, %%r13;"
" adcx %%rax, %%rbx;" /* f[2]*f[3] */
" movq 8(%0), %%rdx;"
" adcx %%r15, %%r13;" /* f1 */
" mulxq 16(%0), %%rax, %%rcx;"
" mov $0, %%r14;" /* f[2]*f[1] */
/* Step 2: Compute two parallel carry chains */
" xor %%r15d, %%r15d;"
" adox %%rax, %%r10;"
" adcx %%r8, %%r8;"
" adox %%rcx, %%r11;"
" adcx %%r9, %%r9;"
" adox %%r15, %%rbx;"
" adcx %%r10, %%r10;"
" adox %%r15, %%r13;"
" adcx %%r11, %%r11;"
" adox %%r15, %%r14;"
" adcx %%rbx, %%rbx;"
" adcx %%r13, %%r13;"
" adcx %%r14, %%r14;"
/* Step 3: Compute intermediate squares */
" movq 0(%0), %%rdx;"
" mulx %%rdx, %%rax, %%rcx;" /* f[0]^2 */
" movq %%rax, 0(%1);"
" add %%rcx, %%r8;"
" movq %%r8, 8(%1);"
" movq 8(%0), %%rdx;"
" mulx %%rdx, %%rax, %%rcx;" /* f[1]^2 */
" adcx %%rax, %%r9;"
" movq %%r9, 16(%1);"
" adcx %%rcx, %%r10;"
" movq %%r10, 24(%1);"
" movq 16(%0), %%rdx;"
" mulx %%rdx, %%rax, %%rcx;" /* f[2]^2 */
" adcx %%rax, %%r11;"
" movq %%r11, 32(%1);"
" adcx %%rcx, %%rbx;"
" movq %%rbx, 40(%1);"
" movq 24(%0), %%rdx;"
" mulx %%rdx, %%rax, %%rcx;" /* f[3]^2 */
" adcx %%rax, %%r13;"
" movq %%r13, 48(%1);"
" adcx %%rcx, %%r14;"
" movq %%r14, 56(%1);"
/* Step 1: Compute all partial products */
" movq 32(%0), %%rdx;" /* f[0] */
" mulxq 40(%0), %%r8, %%r14;"
" xor %%r15d, %%r15d;" /* f[1]*f[0] */
" mulxq 48(%0), %%r9, %%r10;"
" adcx %%r14, %%r9;" /* f[2]*f[0] */
" mulxq 56(%0), %%rax, %%rcx;"
" adcx %%rax, %%r10;" /* f[3]*f[0] */
" movq 56(%0), %%rdx;" /* f[3] */
" mulxq 40(%0), %%r11, %%rbx;"
" adcx %%rcx, %%r11;" /* f[1]*f[3] */
" mulxq 48(%0), %%rax, %%r13;"
" adcx %%rax, %%rbx;" /* f[2]*f[3] */
" movq 40(%0), %%rdx;"
" adcx %%r15, %%r13;" /* f1 */
" mulxq 48(%0), %%rax, %%rcx;"
" mov $0, %%r14;" /* f[2]*f[1] */
/* Step 2: Compute two parallel carry chains */
" xor %%r15d, %%r15d;"
" adox %%rax, %%r10;"
" adcx %%r8, %%r8;"
" adox %%rcx, %%r11;"
" adcx %%r9, %%r9;"
" adox %%r15, %%rbx;"
" adcx %%r10, %%r10;"
" adox %%r15, %%r13;"
" adcx %%r11, %%r11;"
" adox %%r15, %%r14;"
" adcx %%rbx, %%rbx;"
" adcx %%r13, %%r13;"
" adcx %%r14, %%r14;"
/* Step 3: Compute intermediate squares */
" movq 32(%0), %%rdx;"
" mulx %%rdx, %%rax, %%rcx;" /* f[0]^2 */
" movq %%rax, 64(%1);"
" add %%rcx, %%r8;"
" movq %%r8, 72(%1);"
" movq 40(%0), %%rdx;"
" mulx %%rdx, %%rax, %%rcx;" /* f[1]^2 */
" adcx %%rax, %%r9;"
" movq %%r9, 80(%1);"
" adcx %%rcx, %%r10;"
" movq %%r10, 88(%1);"
" movq 48(%0), %%rdx;"
" mulx %%rdx, %%rax, %%rcx;" /* f[2]^2 */
" adcx %%rax, %%r11;"
" movq %%r11, 96(%1);"
" adcx %%rcx, %%rbx;"
" movq %%rbx, 104(%1);"
" movq 56(%0), %%rdx;"
" mulx %%rdx, %%rax, %%rcx;" /* f[3]^2 */
" adcx %%rax, %%r13;"
" movq %%r13, 112(%1);"
" adcx %%rcx, %%r14;"
" movq %%r14, 120(%1);"
/* Line up pointers */
" mov %1, %0;"
" mov %2, %1;"
/* Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo */
" mov $38, %%rdx;"
" mulxq 32(%0), %%r8, %%r13;"
" xor %%ecx, %%ecx;"
" adoxq 0(%0), %%r8;"
" mulxq 40(%0), %%r9, %%rbx;"
" adcx %%r13, %%r9;"
" adoxq 8(%0), %%r9;"
" mulxq 48(%0), %%r10, %%r13;"
" adcx %%rbx, %%r10;"
" adoxq 16(%0), %%r10;"
" mulxq 56(%0), %%r11, %%rax;"
" adcx %%r13, %%r11;"
" adoxq 24(%0), %%r11;"
" adcx %%rcx, %%rax;"
" adox %%rcx, %%rax;"
" imul %%rdx, %%rax;"
/* Step 2: Fold the carry back into dst */
" add %%rax, %%r8;"
" adcx %%rcx, %%r9;"
" movq %%r9, 8(%1);"
" adcx %%rcx, %%r10;"
" movq %%r10, 16(%1);"
" adcx %%rcx, %%r11;"
" movq %%r11, 24(%1);"
/* Step 3: Fold the carry bit back in; guaranteed not to carry at this point */
" mov $0, %%rax;"
" cmovc %%rdx, %%rax;"
" add %%rax, %%r8;"
" movq %%r8, 0(%1);"
/* Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo */
" mov $38, %%rdx;"
" mulxq 96(%0), %%r8, %%r13;"
" xor %%ecx, %%ecx;"
" adoxq 64(%0), %%r8;"
" mulxq 104(%0), %%r9, %%rbx;"
" adcx %%r13, %%r9;"
" adoxq 72(%0), %%r9;"
" mulxq 112(%0), %%r10, %%r13;"
" adcx %%rbx, %%r10;"
" adoxq 80(%0), %%r10;"
" mulxq 120(%0), %%r11, %%rax;"
" adcx %%r13, %%r11;"
" adoxq 88(%0), %%r11;"
" adcx %%rcx, %%rax;"
" adox %%rcx, %%rax;"
" imul %%rdx, %%rax;"
/* Step 2: Fold the carry back into dst */
" add %%rax, %%r8;"
" adcx %%rcx, %%r9;"
" movq %%r9, 40(%1);"
" adcx %%rcx, %%r10;"
" movq %%r10, 48(%1);"
" adcx %%rcx, %%r11;"
" movq %%r11, 56(%1);"
/* Step 3: Fold the carry bit back in; guaranteed not to carry at this point */
" mov $0, %%rax;"
" cmovc %%rdx, %%rax;"
" add %%rax, %%r8;"
" movq %%r8, 32(%1);"
: "+&r"(f), "+&r"(tmp)
: "r"(out)
: "%rax", "%rbx", "%rcx", "%rdx", "%r8", "%r9", "%r10", "%r11",
"%r13", "%r14", "%r15", "memory", "cc");
}
static void point_add_and_double(u64 *q, u64 *p01_tmp1, u64 *tmp2)
{
u64 *nq = p01_tmp1;
u64 *nq_p1 = p01_tmp1 + (u32)8U;
u64 *tmp1 = p01_tmp1 + (u32)16U;
u64 *x1 = q;
u64 *x2 = nq;
u64 *z2 = nq + (u32)4U;
u64 *z3 = nq_p1 + (u32)4U;
u64 *a = tmp1;
u64 *b = tmp1 + (u32)4U;
u64 *ab = tmp1;
u64 *dc = tmp1 + (u32)8U;
u64 *x3;
u64 *z31;
u64 *d0;
u64 *c0;
u64 *a1;
u64 *b1;
u64 *d;
u64 *c;
u64 *ab1;
u64 *dc1;
fadd(a, x2, z2);
fsub(b, x2, z2);
x3 = nq_p1;
z31 = nq_p1 + (u32)4U;
d0 = dc;
c0 = dc + (u32)4U;
fadd(c0, x3, z31);
fsub(d0, x3, z31);
fmul2(dc, dc, ab, tmp2);
fadd(x3, d0, c0);
fsub(z31, d0, c0);
a1 = tmp1;
b1 = tmp1 + (u32)4U;
d = tmp1 + (u32)8U;
c = tmp1 + (u32)12U;
ab1 = tmp1;
dc1 = tmp1 + (u32)8U;
fsqr2(dc1, ab1, tmp2);
fsqr2(nq_p1, nq_p1, tmp2);
a1[0U] = c[0U];
a1[1U] = c[1U];
a1[2U] = c[2U];
a1[3U] = c[3U];
fsub(c, d, c);
fmul_scalar(b1, c, (u64)121665U);
fadd(b1, b1, d);
fmul2(nq, dc1, ab1, tmp2);
fmul(z3, z3, x1, tmp2);
}
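/* The function above is one Montgomery-ladder step: given the running
 * pair (nq, nq_p1) = (n*Q, (n+1)*Q) in projective (X:Z) form and the
 * base x-coordinate x1, it produces (2n*Q, (2n+1)*Q) via the usual
 * differential addition-and-doubling formulas.  The scalar 121665 is
 * (A - 2) / 4 for the curve coefficient A = 486662.
 */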
static void point_double(u64 *nq, u64 *tmp1, u64 *tmp2)
{
u64 *x2 = nq;
u64 *z2 = nq + (u32)4U;
u64 *a = tmp1;
u64 *b = tmp1 + (u32)4U;
u64 *d = tmp1 + (u32)8U;
u64 *c = tmp1 + (u32)12U;
u64 *ab = tmp1;
u64 *dc = tmp1 + (u32)8U;
fadd(a, x2, z2);
fsub(b, x2, z2);
fsqr2(dc, ab, tmp2);
a[0U] = c[0U];
a[1U] = c[1U];
a[2U] = c[2U];
a[3U] = c[3U];
fsub(c, d, c);
fmul_scalar(b, c, (u64)121665U);
fadd(b, b, d);
fmul2(nq, dc, ab, tmp2);
}
static void montgomery_ladder(u64 *out, const u8 *key, u64 *init1)
{
u64 tmp2[16U] = { 0U };
u64 p01_tmp1_swap[33U] = { 0U };
u64 *p0 = p01_tmp1_swap;
u64 *p01 = p01_tmp1_swap;
u64 *p03 = p01;
u64 *p11 = p01 + (u32)8U;
u64 *x0;
u64 *z0;
u64 *p01_tmp1;
u64 *p01_tmp11;
u64 *nq10;
u64 *nq_p11;
u64 *swap1;
u64 sw0;
u64 *nq1;
u64 *tmp1;
memcpy(p11, init1, (u32)8U * sizeof(init1[0U]));
x0 = p03;
z0 = p03 + (u32)4U;
x0[0U] = (u64)1U;
x0[1U] = (u64)0U;
x0[2U] = (u64)0U;
x0[3U] = (u64)0U;
z0[0U] = (u64)0U;
z0[1U] = (u64)0U;
z0[2U] = (u64)0U;
z0[3U] = (u64)0U;
p01_tmp1 = p01_tmp1_swap;
p01_tmp11 = p01_tmp1_swap;
nq10 = p01_tmp1_swap;
nq_p11 = p01_tmp1_swap + (u32)8U;
swap1 = p01_tmp1_swap + (u32)32U;
cswap2((u64)1U, nq10, nq_p11);
point_add_and_double(init1, p01_tmp11, tmp2);
swap1[0U] = (u64)1U;
{
u32 i;
for (i = (u32)0U; i < (u32)251U; i = i + (u32)1U) {
u64 *p01_tmp12 = p01_tmp1_swap;
u64 *swap2 = p01_tmp1_swap + (u32)32U;
u64 *nq2 = p01_tmp12;
u64 *nq_p12 = p01_tmp12 + (u32)8U;
u64 bit = (u64)(key[((u32)253U - i) / (u32)8U] >> ((u32)253U - i) % (u32)8U & (u8)1U);
u64 sw = swap2[0U] ^ bit;
cswap2(sw, nq2, nq_p12);
point_add_and_double(init1, p01_tmp12, tmp2);
swap2[0U] = bit;
}
}
sw0 = swap1[0U];
cswap2(sw0, nq10, nq_p11);
nq1 = p01_tmp1;
tmp1 = p01_tmp1 + (u32)16U;
point_double(nq1, tmp1, tmp2);
point_double(nq1, tmp1, tmp2);
point_double(nq1, tmp1, tmp2);
memcpy(out, p0, (u32)8U * sizeof(p0[0U]));
memzero_explicit(tmp2, sizeof(tmp2));
memzero_explicit(p01_tmp1_swap, sizeof(p01_tmp1_swap));
}
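/* Ladder structure, for reference: the unconditional cswap2/step before
 * the loop consumes scalar bit 254 (always set after the usual X25519
 * clamping that callers are expected to apply), the loop walks bits 253
 * down to 3, and the three trailing point_double calls account for the
 * three cleared low bits.  p01_tmp1_swap[32] carries the deferred
 * conditional-swap state between iterations.
 */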
static void fsquare_times(u64 *o, const u64 *inp, u64 *tmp, u32 n1)
{
u32 i;
fsqr(o, inp, tmp);
for (i = (u32)0U; i < n1 - (u32)1U; i = i + (u32)1U)
fsqr(o, o, tmp);
}
static void finv(u64 *o, const u64 *i, u64 *tmp)
{
u64 t1[16U] = { 0U };
u64 *a0 = t1;
u64 *b = t1 + (u32)4U;
u64 *c = t1 + (u32)8U;
u64 *t00 = t1 + (u32)12U;
u64 *tmp1 = tmp;
u64 *a;
u64 *t0;
fsquare_times(a0, i, tmp1, (u32)1U);
fsquare_times(t00, a0, tmp1, (u32)2U);
fmul(b, t00, i, tmp);
fmul(a0, b, a0, tmp);
fsquare_times(t00, a0, tmp1, (u32)1U);
fmul(b, t00, b, tmp);
fsquare_times(t00, b, tmp1, (u32)5U);
fmul(b, t00, b, tmp);
fsquare_times(t00, b, tmp1, (u32)10U);
fmul(c, t00, b, tmp);
fsquare_times(t00, c, tmp1, (u32)20U);
fmul(t00, t00, c, tmp);
fsquare_times(t00, t00, tmp1, (u32)10U);
fmul(b, t00, b, tmp);
fsquare_times(t00, b, tmp1, (u32)50U);
fmul(c, t00, b, tmp);
fsquare_times(t00, c, tmp1, (u32)100U);
fmul(t00, t00, c, tmp);
fsquare_times(t00, t00, tmp1, (u32)50U);
fmul(t00, t00, b, tmp);
fsquare_times(t00, t00, tmp1, (u32)5U);
a = t1;
t0 = t1 + (u32)12U;
fmul(o, t0, a, tmp);
}
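/* The inversion above is Fermat's little theorem, o = i^(p - 2) mod p
 * with p = 2^255 - 19, evaluated with the standard Curve25519 addition
 * chain: 254 squarings and 11 multiplications in total.
 */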
static void store_felem(u64 *b, u64 *f)
{
u64 f30 = f[3U];
u64 top_bit0 = f30 >> (u32)63U;
u64 f31;
u64 top_bit;
u64 f0;
u64 f1;
u64 f2;
u64 f3;
u64 m0;
u64 m1;
u64 m2;
u64 m3;
u64 mask;
u64 f0_;
u64 f1_;
u64 f2_;
u64 f3_;
u64 o0;
u64 o1;
u64 o2;
u64 o3;
f[3U] = f30 & (u64)0x7fffffffffffffffU;
add_scalar(f, f, (u64)19U * top_bit0);
f31 = f[3U];
top_bit = f31 >> (u32)63U;
f[3U] = f31 & (u64)0x7fffffffffffffffU;
add_scalar(f, f, (u64)19U * top_bit);
f0 = f[0U];
f1 = f[1U];
f2 = f[2U];
f3 = f[3U];
m0 = gte_mask(f0, (u64)0xffffffffffffffedU);
m1 = eq_mask(f1, (u64)0xffffffffffffffffU);
m2 = eq_mask(f2, (u64)0xffffffffffffffffU);
m3 = eq_mask(f3, (u64)0x7fffffffffffffffU);
mask = ((m0 & m1) & m2) & m3;
f0_ = f0 - (mask & (u64)0xffffffffffffffedU);
f1_ = f1 - (mask & (u64)0xffffffffffffffffU);
f2_ = f2 - (mask & (u64)0xffffffffffffffffU);
f3_ = f3 - (mask & (u64)0x7fffffffffffffffU);
o0 = f0_;
o1 = f1_;
o2 = f2_;
o3 = f3_;
b[0U] = o0;
b[1U] = o1;
b[2U] = o2;
b[3U] = o3;
}
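/* Canonicalization sketch for the function above: folding the top bit
 * twice (f = (f mod 2^255) + 19 * top_bit) brings f below 2^255, and
 * the final masked subtraction removes one copy of p exactly when
 * f >= p, with gte_mask()/eq_mask() building an all-ones or all-zeros
 * mask so that no branch depends on the secret value.
 */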
static void encode_point(u8 *o, const u64 *i)
{
const u64 *x = i;
const u64 *z = i + (u32)4U;
u64 tmp[4U] = { 0U };
u64 tmp_w[16U] = { 0U };
finv(tmp, z, tmp_w);
fmul(tmp, tmp, x, tmp_w);
store_felem((u64 *)o, tmp);
}
static void curve25519_ever64(u8 *out, const u8 *priv, const u8 *pub)
{
u64 init1[8U] = { 0U };
u64 tmp[4U] = { 0U };
u64 tmp3;
u64 *x;
u64 *z;
{
u32 i;
for (i = (u32)0U; i < (u32)4U; i = i + (u32)1U) {
u64 *os = tmp;
const u8 *bj = pub + i * (u32)8U;
u64 u = *(u64 *)bj;
u64 r = u;
u64 x0 = r;
os[i] = x0;
}
}
tmp3 = tmp[3U];
tmp[3U] = tmp3 & (u64)0x7fffffffffffffffU;
x = init1;
z = init1 + (u32)4U;
z[0U] = (u64)1U;
z[1U] = (u64)0U;
z[2U] = (u64)0U;
z[3U] = (u64)0U;
x[0U] = tmp[0U];
x[1U] = tmp[1U];
x[2U] = tmp[2U];
x[3U] = tmp[3U];
montgomery_ladder(init1, priv, init1);
encode_point(out, init1);
}
/* The constants below were generated using this sage script:
*
* #!/usr/bin/env sage
* import sys
* from sage.all import *
* def limbs(n):
* n = int(n)
* l = ((n >> 0) % 2^64, (n >> 64) % 2^64, (n >> 128) % 2^64, (n >> 192) % 2^64)
* return "0x%016xULL, 0x%016xULL, 0x%016xULL, 0x%016xULL" % l
* ec = EllipticCurve(GF(2^255 - 19), [0, 486662, 0, 1, 0])
* p_minus_s = (ec.lift_x(9) - ec.lift_x(1))[0]
* print("static const u64 p_minus_s[] = { %s };\n" % limbs(p_minus_s))
* print("static const u64 table_ladder[] = {")
* p = ec.lift_x(9)
* for i in range(252):
* l = (p[0] + p[2]) / (p[0] - p[2])
* print(("\t%s" + ("," if i != 251 else "")) % limbs(l))
* p = p * 2
* print("};")
*
*/
static const u64 p_minus_s[] = { 0x816b1e0137d48290ULL, 0x440f6a51eb4d1207ULL, 0x52385f46dca2b71dULL, 0x215132111d8354cbULL };
static const u64 table_ladder[] = {
0xfffffffffffffff3ULL, 0xffffffffffffffffULL, 0xffffffffffffffffULL, 0x5fffffffffffffffULL,
0x6b8220f416aafe96ULL, 0x82ebeb2b4f566a34ULL, 0xd5a9a5b075a5950fULL, 0x5142b2cf4b2488f4ULL,
0x6aaebc750069680cULL, 0x89cf7820a0f99c41ULL, 0x2a58d9183b56d0f4ULL, 0x4b5aca80e36011a4ULL,
0x329132348c29745dULL, 0xf4a2e616e1642fd7ULL, 0x1e45bb03ff67bc34ULL, 0x306912d0f42a9b4aULL,
0xff886507e6af7154ULL, 0x04f50e13dfeec82fULL, 0xaa512fe82abab5ceULL, 0x174e251a68d5f222ULL,
0xcf96700d82028898ULL, 0x1743e3370a2c02c5ULL, 0x379eec98b4e86eaaULL, 0x0c59888a51e0482eULL,
0xfbcbf1d699b5d189ULL, 0xacaef0d58e9fdc84ULL, 0xc1c20d06231f7614ULL, 0x2938218da274f972ULL,
0xf6af49beff1d7f18ULL, 0xcc541c22387ac9c2ULL, 0x96fcc9ef4015c56bULL, 0x69c1627c690913a9ULL,
0x7a86fd2f4733db0eULL, 0xfdb8c4f29e087de9ULL, 0x095e4b1a8ea2a229ULL, 0x1ad7a7c829b37a79ULL,
0x342d89cad17ea0c0ULL, 0x67bedda6cced2051ULL, 0x19ca31bf2bb42f74ULL, 0x3df7b4c84980acbbULL,
0xa8c6444dc80ad883ULL, 0xb91e440366e3ab85ULL, 0xc215cda00164f6d8ULL, 0x3d867c6ef247e668ULL,
0xc7dd582bcc3e658cULL, 0xfd2c4748ee0e5528ULL, 0xa0fd9b95cc9f4f71ULL, 0x7529d871b0675ddfULL,
0xb8f568b42d3cbd78ULL, 0x1233011b91f3da82ULL, 0x2dce6ccd4a7c3b62ULL, 0x75e7fc8e9e498603ULL,
0x2f4f13f1fcd0b6ecULL, 0xf1a8ca1f29ff7a45ULL, 0xc249c1a72981e29bULL, 0x6ebe0dbb8c83b56aULL,
0x7114fa8d170bb222ULL, 0x65a2dcd5bf93935fULL, 0xbdc41f68b59c979aULL, 0x2f0eef79a2ce9289ULL,
0x42ecbf0c083c37ceULL, 0x2930bc09ec496322ULL, 0xf294b0c19cfeac0dULL, 0x3780aa4bedfabb80ULL,
0x56c17d3e7cead929ULL, 0xe7cb4beb2e5722c5ULL, 0x0ce931732dbfe15aULL, 0x41b883c7621052f8ULL,
0xdbf75ca0c3d25350ULL, 0x2936be086eb1e351ULL, 0xc936e03cb4a9b212ULL, 0x1d45bf82322225aaULL,
0xe81ab1036a024cc5ULL, 0xe212201c304c9a72ULL, 0xc5d73fba6832b1fcULL, 0x20ffdb5a4d839581ULL,
0xa283d367be5d0fadULL, 0x6c2b25ca8b164475ULL, 0x9d4935467caaf22eULL, 0x5166408eee85ff49ULL,
0x3c67baa2fab4e361ULL, 0xb3e433c67ef35cefULL, 0x5259729241159b1cULL, 0x6a621892d5b0ab33ULL,
0x20b74a387555cdcbULL, 0x532aa10e1208923fULL, 0xeaa17b7762281dd1ULL, 0x61ab3443f05c44bfULL,
0x257a6c422324def8ULL, 0x131c6c1017e3cf7fULL, 0x23758739f630a257ULL, 0x295a407a01a78580ULL,
0xf8c443246d5da8d9ULL, 0x19d775450c52fa5dULL, 0x2afcfc92731bf83dULL, 0x7d10c8e81b2b4700ULL,
0xc8e0271f70baa20bULL, 0x993748867ca63957ULL, 0x5412efb3cb7ed4bbULL, 0x3196d36173e62975ULL,
0xde5bcad141c7dffcULL, 0x47cc8cd2b395c848ULL, 0xa34cd942e11af3cbULL, 0x0256dbf2d04ecec2ULL,
0x875ab7e94b0e667fULL, 0xcad4dd83c0850d10ULL, 0x47f12e8f4e72c79fULL, 0x5f1a87bb8c85b19bULL,
0x7ae9d0b6437f51b8ULL, 0x12c7ce5518879065ULL, 0x2ade09fe5cf77aeeULL, 0x23a05a2f7d2c5627ULL,
0x5908e128f17c169aULL, 0xf77498dd8ad0852dULL, 0x74b4c4ceab102f64ULL, 0x183abadd10139845ULL,
0xb165ba8daa92aaacULL, 0xd5c5ef9599386705ULL, 0xbe2f8f0cf8fc40d1ULL, 0x2701e635ee204514ULL,
0x629fa80020156514ULL, 0xf223868764a8c1ceULL, 0x5b894fff0b3f060eULL, 0x60d9944cf708a3faULL,
0xaeea001a1c7a201fULL, 0xebf16a633ee2ce63ULL, 0x6f7709594c7a07e1ULL, 0x79b958150d0208cbULL,
0x24b55e5301d410e7ULL, 0xe3a34edff3fdc84dULL, 0xd88768e4904032d8ULL, 0x131384427b3aaeecULL,
0x8405e51286234f14ULL, 0x14dc4739adb4c529ULL, 0xb8a2b5b250634ffdULL, 0x2fe2a94ad8a7ff93ULL,
0xec5c57efe843faddULL, 0x2843ce40f0bb9918ULL, 0xa4b561d6cf3d6305ULL, 0x743629bde8fb777eULL,
0x343edd46bbaf738fULL, 0xed981828b101a651ULL, 0xa401760b882c797aULL, 0x1fc223e28dc88730ULL,
0x48604e91fc0fba0eULL, 0xb637f78f052c6fa4ULL, 0x91ccac3d09e9239cULL, 0x23f7eed4437a687cULL,
0x5173b1118d9bd800ULL, 0x29d641b63189d4a7ULL, 0xfdbf177988bbc586ULL, 0x2959894fcad81df5ULL,
0xaebc8ef3b4bbc899ULL, 0x4148995ab26992b9ULL, 0x24e20b0134f92cfbULL, 0x40d158894a05dee8ULL,
0x46b00b1185af76f6ULL, 0x26bac77873187a79ULL, 0x3dc0bf95ab8fff5fULL, 0x2a608bd8945524d7ULL,
0x26449588bd446302ULL, 0x7c4bc21c0388439cULL, 0x8e98a4f383bd11b2ULL, 0x26218d7bc9d876b9ULL,
0xe3081542997c178aULL, 0x3c2d29a86fb6606fULL, 0x5c217736fa279374ULL, 0x7dde05734afeb1faULL,
0x3bf10e3906d42babULL, 0xe4f7803e1980649cULL, 0xe6053bf89595bf7aULL, 0x394faf38da245530ULL,
0x7a8efb58896928f4ULL, 0xfbc778e9cc6a113cULL, 0x72670ce330af596fULL, 0x48f222a81d3d6cf7ULL,
0xf01fce410d72caa7ULL, 0x5a20ecc7213b5595ULL, 0x7bc21165c1fa1483ULL, 0x07f89ae31da8a741ULL,
0x05d2c2b4c6830ff9ULL, 0xd43e330fc6316293ULL, 0xa5a5590a96d3a904ULL, 0x705edb91a65333b6ULL,
0x048ee15e0bb9a5f7ULL, 0x3240cfca9e0aaf5dULL, 0x8f4b71ceedc4a40bULL, 0x621c0da3de544a6dULL,
0x92872836a08c4091ULL, 0xce8375b010c91445ULL, 0x8a72eb524f276394ULL, 0x2667fcfa7ec83635ULL,
0x7f4c173345e8752aULL, 0x061b47feee7079a5ULL, 0x25dd9afa9f86ff34ULL, 0x3780cef5425dc89cULL,
0x1a46035a513bb4e9ULL, 0x3e1ef379ac575adaULL, 0xc78c5f1c5fa24b50ULL, 0x321a967634fd9f22ULL,
0x946707b8826e27faULL, 0x3dca84d64c506fd0ULL, 0xc189218075e91436ULL, 0x6d9284169b3b8484ULL,
0x3a67e840383f2ddfULL, 0x33eec9a30c4f9b75ULL, 0x3ec7c86fa783ef47ULL, 0x26ec449fbac9fbc4ULL,
0x5c0f38cba09b9e7dULL, 0x81168cc762a3478cULL, 0x3e23b0d306fc121cULL, 0x5a238aa0a5efdcddULL,
0x1ba26121c4ea43ffULL, 0x36f8c77f7c8832b5ULL, 0x88fbea0b0adcf99aULL, 0x5ca9938ec25bebf9ULL,
0xd5436a5e51fccda0ULL, 0x1dbc4797c2cd893bULL, 0x19346a65d3224a08ULL, 0x0f5034e49b9af466ULL,
0xf23c3967a1e0b96eULL, 0xe58b08fa867a4d88ULL, 0xfb2fabc6a7341679ULL, 0x2a75381eb6026946ULL,
0xc80a3be4c19420acULL, 0x66b1f6c681f2b6dcULL, 0x7cf7036761e93388ULL, 0x25abbbd8a660a4c4ULL,
0x91ea12ba14fd5198ULL, 0x684950fc4a3cffa9ULL, 0xf826842130f5ad28ULL, 0x3ea988f75301a441ULL,
0xc978109a695f8c6fULL, 0x1746eb4a0530c3f3ULL, 0x444d6d77b4459995ULL, 0x75952b8c054e5cc7ULL,
0xa3703f7915f4d6aaULL, 0x66c346202f2647d8ULL, 0xd01469df811d644bULL, 0x77fea47d81a5d71fULL,
0xc5e9529ef57ca381ULL, 0x6eeeb4b9ce2f881aULL, 0xb6e91a28e8009bd6ULL, 0x4b80be3e9afc3fecULL,
0x7e3773c526aed2c5ULL, 0x1b4afcb453c9a49dULL, 0xa920bdd7baffb24dULL, 0x7c54699f122d400eULL,
0xef46c8e14fa94bc8ULL, 0xe0b074ce2952ed5eULL, 0xbea450e1dbd885d5ULL, 0x61b68649320f712cULL,
0x8a485f7309ccbdd1ULL, 0xbd06320d7d4d1a2dULL, 0x25232973322dbef4ULL, 0x445dc4758c17f770ULL,
0xdb0434177cc8933cULL, 0xed6fe82175ea059fULL, 0x1efebefdc053db34ULL, 0x4adbe867c65daf99ULL,
0x3acd71a2a90609dfULL, 0xe5e991856dd04050ULL, 0x1ec69b688157c23cULL, 0x697427f6885cfe4dULL,
0xd7be7b9b65e1a851ULL, 0xa03d28d522c536ddULL, 0x28399d658fd2b645ULL, 0x49e5b7e17c2641e1ULL,
0x6f8c3a98700457a4ULL, 0x5078f0a25ebb6778ULL, 0xd13c3ccbc382960fULL, 0x2e003258a7df84b1ULL,
0x8ad1f39be6296a1cULL, 0xc1eeaa652a5fbfb2ULL, 0x33ee0673fd26f3cbULL, 0x59256173a69d2cccULL,
0x41ea07aa4e18fc41ULL, 0xd9fc19527c87a51eULL, 0xbdaacb805831ca6fULL, 0x445b652dc916694fULL,
0xce92a3a7f2172315ULL, 0x1edc282de11b9964ULL, 0xa1823aafe04c314aULL, 0x790a2d94437cf586ULL,
0x71c447fb93f6e009ULL, 0x8922a56722845276ULL, 0xbf70903b204f5169ULL, 0x2f7a89891ba319feULL,
0x02a08eb577e2140cULL, 0xed9a4ed4427bdcf4ULL, 0x5253ec44e4323cd1ULL, 0x3e88363c14e9355bULL,
0xaa66c14277110b8cULL, 0x1ae0391610a23390ULL, 0x2030bd12c93fc2a2ULL, 0x3ee141579555c7abULL,
0x9214de3a6d6e7d41ULL, 0x3ccdd88607f17efeULL, 0x674f1288f8e11217ULL, 0x5682250f329f93d0ULL,
0x6cf00b136d2e396eULL, 0x6e4cf86f1014debfULL, 0x5930b1b5bfcc4e83ULL, 0x047069b48aba16b6ULL,
0x0d4ce4ab69b20793ULL, 0xb24db91a97d0fb9eULL, 0xcdfa50f54e00d01dULL, 0x221b1085368bddb5ULL,
0xe7e59468b1e3d8d2ULL, 0x53c56563bd122f93ULL, 0xeee8a903e0663f09ULL, 0x61efa662cbbe3d42ULL,
0x2cf8ddddde6eab2aULL, 0x9bf80ad51435f231ULL, 0x5deadacec9f04973ULL, 0x29275b5d41d29b27ULL,
0xcfde0f0895ebf14fULL, 0xb9aab96b054905a7ULL, 0xcae80dd9a1c420fdULL, 0x0a63bf2f1673bbc7ULL,
0x092f6e11958fbc8cULL, 0x672a81e804822fadULL, 0xcac8351560d52517ULL, 0x6f3f7722c8f192f8ULL,
0xf8ba90ccc2e894b7ULL, 0x2c7557a438ff9f0dULL, 0x894d1d855ae52359ULL, 0x68e122157b743d69ULL,
0xd87e5570cfb919f3ULL, 0x3f2cdecd95798db9ULL, 0x2121154710c0a2ceULL, 0x3c66a115246dc5b2ULL,
0xcbedc562294ecb72ULL, 0xba7143c36a280b16ULL, 0x9610c2efd4078b67ULL, 0x6144735d946a4b1eULL,
0x536f111ed75b3350ULL, 0x0211db8c2041d81bULL, 0xf93cb1000e10413cULL, 0x149dfd3c039e8876ULL,
0xd479dde46b63155bULL, 0xb66e15e93c837976ULL, 0xdafde43b1f13e038ULL, 0x5fafda1a2e4b0b35ULL,
0x3600bbdf17197581ULL, 0x3972050bbe3cd2c2ULL, 0x5938906dbdd5be86ULL, 0x34fce5e43f9b860fULL,
0x75a8a4cd42d14d02ULL, 0x828dabc53441df65ULL, 0x33dcabedd2e131d3ULL, 0x3ebad76fb814d25fULL,
0xd4906f566f70e10fULL, 0x5d12f7aa51690f5aULL, 0x45adb16e76cefcf2ULL, 0x01f768aead232999ULL,
0x2b6cc77b6248febdULL, 0x3cd30628ec3aaffdULL, 0xce1c0b80d4ef486aULL, 0x4c3bff2ea6f66c23ULL,
0x3f2ec4094aeaeb5fULL, 0x61b19b286e372ca7ULL, 0x5eefa966de2a701dULL, 0x23b20565de55e3efULL,
0xe301ca5279d58557ULL, 0x07b2d4ce27c2874fULL, 0xa532cd8a9dcf1d67ULL, 0x2a52fee23f2bff56ULL,
0x8624efb37cd8663dULL, 0xbbc7ac20ffbd7594ULL, 0x57b85e9c82d37445ULL, 0x7b3052cb86a6ec66ULL,
0x3482f0ad2525e91eULL, 0x2cb68043d28edca0ULL, 0xaf4f6d052e1b003aULL, 0x185f8c2529781b0aULL,
0xaa41de5bd80ce0d6ULL, 0x9407b2416853e9d6ULL, 0x563ec36e357f4c3aULL, 0x4cc4b8dd0e297bceULL,
0xa2fc1a52ffb8730eULL, 0x1811f16e67058e37ULL, 0x10f9a366cddf4ee1ULL, 0x72f4a0c4a0b9f099ULL,
0x8c16c06f663f4ea7ULL, 0x693b3af74e970fbaULL, 0x2102e7f1d69ec345ULL, 0x0ba53cbc968a8089ULL,
0xca3d9dc7fea15537ULL, 0x4c6824bb51536493ULL, 0xb9886314844006b1ULL, 0x40d2a72ab454cc60ULL,
0x5936a1b712570975ULL, 0x91b9d648debda657ULL, 0x3344094bb64330eaULL, 0x006ba10d12ee51d0ULL,
0x19228468f5de5d58ULL, 0x0eb12f4c38cc05b0ULL, 0xa1039f9dd5601990ULL, 0x4502d4ce4fff0e0bULL,
0xeb2054106837c189ULL, 0xd0f6544c6dd3b93cULL, 0x40727064c416d74fULL, 0x6e15c6114b502ef0ULL,
0x4df2a398cfb1a76bULL, 0x11256c7419f2f6b1ULL, 0x4a497962066e6043ULL, 0x705b3aab41355b44ULL,
0x365ef536d797b1d8ULL, 0x00076bd622ddf0dbULL, 0x3bbf33b0e0575a88ULL, 0x3777aa05c8e4ca4dULL,
0x392745c85578db5fULL, 0x6fda4149dbae5ae2ULL, 0xb1f0b00b8adc9867ULL, 0x09963437d36f1da3ULL,
0x7e824e90a5dc3853ULL, 0xccb5f6641f135cbdULL, 0x6736d86c87ce8fccULL, 0x625f3ce26604249fULL,
0xaf8ac8059502f63fULL, 0x0c05e70a2e351469ULL, 0x35292e9c764b6305ULL, 0x1a394360c7e23ac3ULL,
0xd5c6d53251183264ULL, 0x62065abd43c2b74fULL, 0xb5fbf5d03b973f9bULL, 0x13a3da3661206e5eULL,
0xc6bd5837725d94e5ULL, 0x18e30912205016c5ULL, 0x2088ce1570033c68ULL, 0x7fba1f495c837987ULL,
0x5a8c7423f2f9079dULL, 0x1735157b34023fc5ULL, 0xe4f9b49ad2fab351ULL, 0x6691ff72c878e33cULL,
0x122c2adedc5eff3eULL, 0xf8dd4bf1d8956cf4ULL, 0xeb86205d9e9e5bdaULL, 0x049b92b9d975c743ULL,
0xa5379730b0f6c05aULL, 0x72a0ffacc6f3a553ULL, 0xb0032c34b20dcd6dULL, 0x470e9dbc88d5164aULL,
0xb19cf10ca237c047ULL, 0xb65466711f6c81a2ULL, 0xb3321bd16dd80b43ULL, 0x48c14f600c5fbe8eULL,
0x66451c264aa6c803ULL, 0xb66e3904a4fa7da6ULL, 0xd45f19b0b3128395ULL, 0x31602627c3c9bc10ULL,
0x3120dc4832e4e10dULL, 0xeb20c46756c717f7ULL, 0x00f52e3f67280294ULL, 0x566d4fc14730c509ULL,
0x7e3a5d40fd837206ULL, 0xc1e926dc7159547aULL, 0x216730fba68d6095ULL, 0x22e8c3843f69cea7ULL,
0x33d074e8930e4b2bULL, 0xb6e4350e84d15816ULL, 0x5534c26ad6ba2365ULL, 0x7773c12f89f1f3f3ULL,
0x8cba404da57962aaULL, 0x5b9897a81999ce56ULL, 0x508e862f121692fcULL, 0x3a81907fa093c291ULL,
0x0dded0ff4725a510ULL, 0x10d8cc10673fc503ULL, 0x5b9d151c9f1f4e89ULL, 0x32a5c1d5cb09a44cULL,
0x1e0aa442b90541fbULL, 0x5f85eb7cc1b485dbULL, 0xbee595ce8a9df2e5ULL, 0x25e496c722422236ULL,
0x5edf3c46cd0fe5b9ULL, 0x34e75a7ed2a43388ULL, 0xe488de11d761e352ULL, 0x0e878a01a085545cULL,
0xba493c77e021bb04ULL, 0x2b4d1843c7df899aULL, 0x9ea37a487ae80d67ULL, 0x67a9958011e41794ULL,
0x4b58051a6697b065ULL, 0x47e33f7d8d6ba6d4ULL, 0xbb4da8d483ca46c1ULL, 0x68becaa181c2db0dULL,
0x8d8980e90b989aa5ULL, 0xf95eb14a2c93c99bULL, 0x51c6c7c4796e73a2ULL, 0x6e228363b5efb569ULL,
0xc6bbc0b02dd624c8ULL, 0x777eb47dec8170eeULL, 0x3cde15a004cfafa9ULL, 0x1dc6bc087160bf9bULL,
0x2e07e043eec34002ULL, 0x18e9fc677a68dc7fULL, 0xd8da03188bd15b9aULL, 0x48fbc3bb00568253ULL,
0x57547d4cfb654ce1ULL, 0xd3565b82a058e2adULL, 0xf63eaf0bbf154478ULL, 0x47531ef114dfbb18ULL,
0xe1ec630a4278c587ULL, 0x5507d546ca8e83f3ULL, 0x85e135c63adc0c2bULL, 0x0aa7efa85682844eULL,
0x72691ba8b3e1f615ULL, 0x32b4e9701fbe3ffaULL, 0x97b6d92e39bb7868ULL, 0x2cfe53dea02e39e8ULL,
0x687392cd85cd52b0ULL, 0x27ff66c910e29831ULL, 0x97134556a9832d06ULL, 0x269bb0360a84f8a0ULL,
0x706e55457643f85cULL, 0x3734a48c9b597d1bULL, 0x7aee91e8c6efa472ULL, 0x5cd6abc198a9d9e0ULL,
0x0e04de06cb3ce41aULL, 0xd8c6eb893402e138ULL, 0x904659bb686e3772ULL, 0x7215c371746ba8c8ULL,
0xfd12a97eeae4a2d9ULL, 0x9514b7516394f2c5ULL, 0x266fd5809208f294ULL, 0x5c847085619a26b9ULL,
0x52985410fed694eaULL, 0x3c905b934a2ed254ULL, 0x10bb47692d3be467ULL, 0x063b3d2d69e5e9e1ULL,
0x472726eedda57debULL, 0xefb6c4ae10f41891ULL, 0x2b1641917b307614ULL, 0x117c554fc4f45b7cULL,
0xc07cf3118f9d8812ULL, 0x01dbd82050017939ULL, 0xd7e803f4171b2827ULL, 0x1015e87487d225eaULL,
0xc58de3fed23acc4dULL, 0x50db91c294a7be2dULL, 0x0b94d43d1c9cf457ULL, 0x6b1640fa6e37524aULL,
0x692f346c5fda0d09ULL, 0x200b1c59fa4d3151ULL, 0xb8c46f760777a296ULL, 0x4b38395f3ffdfbcfULL,
0x18d25e00be54d671ULL, 0x60d50582bec8aba6ULL, 0x87ad8f263b78b982ULL, 0x50fdf64e9cda0432ULL,
0x90f567aac578dcf0ULL, 0xef1e9b0ef2a3133bULL, 0x0eebba9242d9de71ULL, 0x15473c9bf03101c7ULL,
0x7c77e8ae56b78095ULL, 0xb678e7666e6f078eULL, 0x2da0b9615348ba1fULL, 0x7cf931c1ff733f0bULL,
0x26b357f50a0a366cULL, 0xe9708cf42b87d732ULL, 0xc13aeea5f91cb2c0ULL, 0x35d90c991143bb4cULL,
0x47c1c404a9a0d9dcULL, 0x659e58451972d251ULL, 0x3875a8c473b38c31ULL, 0x1fbd9ed379561f24ULL,
0x11fabc6fd41ec28dULL, 0x7ef8dfe3cd2a2dcaULL, 0x72e73b5d8c404595ULL, 0x6135fa4954b72f27ULL,
0xccfc32a2de24b69cULL, 0x3f55698c1f095d88ULL, 0xbe3350ed5ac3f929ULL, 0x5e9bf806ca477eebULL,
0xe9ce8fb63c309f68ULL, 0x5376f63565e1f9f4ULL, 0xd1afcfb35a6393f1ULL, 0x6632a1ede5623506ULL,
0x0b7d6c390c2ded4cULL, 0x56cb3281df04cb1fULL, 0x66305a1249ecc3c7ULL, 0x5d588b60a38ca72aULL,
0xa6ecbf78e8e5f42dULL, 0x86eeb44b3c8a3eecULL, 0xec219c48fbd21604ULL, 0x1aaf1af517c36731ULL,
0xc306a2836769bde7ULL, 0x208280622b1e2adbULL, 0x8027f51ffbff94a6ULL, 0x76cfa1ce1124f26bULL,
0x18eb00562422abb6ULL, 0xf377c4d58f8c29c3ULL, 0x4dbbc207f531561aULL, 0x0253b7f082128a27ULL,
0x3d1f091cb62c17e0ULL, 0x4860e1abd64628a9ULL, 0x52d17436309d4253ULL, 0x356f97e13efae576ULL,
0xd351e11aa150535bULL, 0x3e6b45bb1dd878ccULL, 0x0c776128bed92c98ULL, 0x1d34ae93032885b8ULL,
0x4ba0488ca85ba4c3ULL, 0x985348c33c9ce6ceULL, 0x66124c6f97bda770ULL, 0x0f81a0290654124aULL,
0x9ed09ca6569b86fdULL, 0x811009fd18af9a2dULL, 0xff08d03f93d8c20aULL, 0x52a148199faef26bULL,
0x3e03f9dc2d8d1b73ULL, 0x4205801873961a70ULL, 0xc0d987f041a35970ULL, 0x07aa1f15a1c0d549ULL,
0xdfd46ce08cd27224ULL, 0x6d0a024f934e4239ULL, 0x808a7a6399897b59ULL, 0x0a4556e9e13d95a2ULL,
0xd21a991fe9c13045ULL, 0x9b0e8548fe7751b8ULL, 0x5da643cb4bf30035ULL, 0x77db28d63940f721ULL,
0xfc5eeb614adc9011ULL, 0x5229419ae8c411ebULL, 0x9ec3e7787d1dcf74ULL, 0x340d053e216e4cb5ULL,
0xcac7af39b48df2b4ULL, 0xc0faec2871a10a94ULL, 0x140a69245ca575edULL, 0x0cf1c37134273a4cULL,
0xc8ee306ac224b8a5ULL, 0x57eaee7ccb4930b0ULL, 0xa1e806bdaacbe74fULL, 0x7d9a62742eeb657dULL,
0x9eb6b6ef546c4830ULL, 0x885cca1fddb36e2eULL, 0xe6b9f383ef0d7105ULL, 0x58654fef9d2e0412ULL,
0xa905c4ffbe0e8e26ULL, 0x942de5df9b31816eULL, 0x497d723f802e88e1ULL, 0x30684dea602f408dULL,
0x21e5a278a3e6cb34ULL, 0xaefb6e6f5b151dc4ULL, 0xb30b8e049d77ca15ULL, 0x28c3c9cf53b98981ULL,
0x287fb721556cdd2aULL, 0x0d317ca897022274ULL, 0x7468c7423a543258ULL, 0x4a7f11464eb5642fULL,
0xa237a4774d193aa6ULL, 0xd865986ea92129a1ULL, 0x24c515ecf87c1a88ULL, 0x604003575f39f5ebULL,
0x47b9f189570a9b27ULL, 0x2b98cede465e4b78ULL, 0x026df551dbb85c20ULL, 0x74fcd91047e21901ULL,
0x13e2a90a23c1bfa3ULL, 0x0cb0074e478519f6ULL, 0x5ff1cbbe3af6cf44ULL, 0x67fe5438be812dbeULL,
0xd13cf64fa40f05b0ULL, 0x054dfb2f32283787ULL, 0x4173915b7f0d2aeaULL, 0x482f144f1f610d4eULL,
0xf6210201b47f8234ULL, 0x5d0ae1929e70b990ULL, 0xdcd7f455b049567cULL, 0x7e93d0f1f0916f01ULL,
0xdd79cbf18a7db4faULL, 0xbe8391bf6f74c62fULL, 0x027145d14b8291bdULL, 0x585a73ea2cbf1705ULL,
0x485ca03e928a0db2ULL, 0x10fc01a5742857e7ULL, 0x2f482edbd6d551a7ULL, 0x0f0433b5048fdb8aULL,
0x60da2e8dd7dc6247ULL, 0x88b4c9d38cd4819aULL, 0x13033ac001f66697ULL, 0x273b24fe3b367d75ULL,
0xc6e8f66a31b3b9d4ULL, 0x281514a494df49d5ULL, 0xd1726fdfc8b23da7ULL, 0x4b3ae7d103dee548ULL,
0xc6256e19ce4b9d7eULL, 0xff5c5cf186e3c61cULL, 0xacc63ca34b8ec145ULL, 0x74621888fee66574ULL,
0x956f409645290a1eULL, 0xef0bf8e3263a962eULL, 0xed6a50eb5ec2647bULL, 0x0694283a9dca7502ULL,
0x769b963643a2dcd1ULL, 0x42b7c8ea09fc5353ULL, 0x4f002aee13397eabULL, 0x63005e2c19b7d63aULL,
0xca6736da63023beaULL, 0x966c7f6db12a99b7ULL, 0xace09390c537c5e1ULL, 0x0b696063a1aa89eeULL,
0xebb03e97288c56e5ULL, 0x432a9f9f938c8be8ULL, 0xa6a5a93d5b717f71ULL, 0x1a5fb4c3e18f9d97ULL,
0x1c94e7ad1c60cdceULL, 0xee202a43fc02c4a0ULL, 0x8dafe4d867c46a20ULL, 0x0a10263c8ac27b58ULL,
0xd0dea9dfe4432a4aULL, 0x856af87bbe9277c5ULL, 0xce8472acc212c71aULL, 0x6f151b6d9bbb1e91ULL,
0x26776c527ceed56aULL, 0x7d211cb7fbf8faecULL, 0x37ae66a6fd4609ccULL, 0x1f81b702d2770c42ULL,
0x2fb0b057eac58392ULL, 0xe1dd89fe29744e9dULL, 0xc964f8eb17beb4f8ULL, 0x29571073c9a2d41eULL,
0xa948a18981c0e254ULL, 0x2df6369b65b22830ULL, 0xa33eb2d75fcfd3c6ULL, 0x078cd6ec4199a01fULL,
0x4a584a41ad900d2fULL, 0x32142b78e2c74c52ULL, 0x68c4e8338431c978ULL, 0x7f69ea9008689fc2ULL,
0x52f2c81e46a38265ULL, 0xfd78072d04a832fdULL, 0x8cd7d5fa25359e94ULL, 0x4de71b7454cc29d2ULL,
0x42eb60ad1eda6ac9ULL, 0x0aad37dfdbc09c3aULL, 0x81004b71e33cc191ULL, 0x44e6be345122803cULL,
0x03fe8388ba1920dbULL, 0xf5d57c32150db008ULL, 0x49c8c4281af60c29ULL, 0x21edb518de701aeeULL,
0x7fb63e418f06dc99ULL, 0xa4460d99c166d7b8ULL, 0x24dd5248ce520a83ULL, 0x5ec3ad712b928358ULL,
0x15022a5fbd17930fULL, 0xa4f64a77d82570e3ULL, 0x12bc8d6915783712ULL, 0x498194c0fc620abbULL,
0x38a2d9d255686c82ULL, 0x785c6bd9193e21f0ULL, 0xe4d5c81ab24a5484ULL, 0x56307860b2e20989ULL,
0x429d55f78b4d74c4ULL, 0x22f1834643350131ULL, 0x1e60c24598c71fffULL, 0x59f2f014979983efULL,
0x46a47d56eb494a44ULL, 0x3e22a854d636a18eULL, 0xb346e15274491c3bULL, 0x2ceafd4e5390cde7ULL,
0xba8a8538be0d6675ULL, 0x4b9074bb50818e23ULL, 0xcbdab89085d304c3ULL, 0x61a24fe0e56192c4ULL,
0xcb7615e6db525bcbULL, 0xdd7d8c35a567e4caULL, 0xe6b4153acafcdd69ULL, 0x2d668e097f3c9766ULL,
0xa57e7e265ce55ef0ULL, 0x5d9f4e527cd4b967ULL, 0xfbc83606492fd1e5ULL, 0x090d52beb7c3f7aeULL,
0x09b9515a1e7b4d7cULL, 0x1f266a2599da44c0ULL, 0xa1c49548e2c55504ULL, 0x7ef04287126f15ccULL,
0xfed1659dbd30ef15ULL, 0x8b4ab9eec4e0277bULL, 0x884d6236a5df3291ULL, 0x1fd96ea6bf5cf788ULL,
0x42a161981f190d9aULL, 0x61d849507e6052c1ULL, 0x9fe113bf285a2cd5ULL, 0x7c22d676dbad85d8ULL,
0x82e770ed2bfbd27dULL, 0x4c05b2ece996f5a5ULL, 0xcd40a9c2b0900150ULL, 0x5895319213d9bf64ULL,
0xe7cc5d703fea2e08ULL, 0xb50c491258e2188cULL, 0xcce30baa48205bf0ULL, 0x537c659ccfa32d62ULL,
0x37b6623a98cfc088ULL, 0xfe9bed1fa4d6aca4ULL, 0x04d29b8e56a8d1b0ULL, 0x725f71c40b519575ULL,
0x28c7f89cd0339ce6ULL, 0x8367b14469ddc18bULL, 0x883ada83a6a1652cULL, 0x585f1974034d6c17ULL,
0x89cfb266f1b19188ULL, 0xe63b4863e7c35217ULL, 0xd88c9da6b4c0526aULL, 0x3e035c9df0954635ULL,
0xdd9d5412fb45de9dULL, 0xdd684532e4cff40dULL, 0x4b5c999b151d671cULL, 0x2d8c2cc811e7f690ULL,
0x7f54be1d90055d40ULL, 0xa464c5df464aaf40ULL, 0x33979624f0e917beULL, 0x2c018dc527356b30ULL,
0xa5415024e330b3d4ULL, 0x73ff3d96691652d3ULL, 0x94ec42c4ef9b59f1ULL, 0x0747201618d08e5aULL,
0x4d6ca48aca411c53ULL, 0x66415f2fcfa66119ULL, 0x9c4dd40051e227ffULL, 0x59810bc09a02f7ebULL,
0x2a7eb171b3dc101dULL, 0x441c5ab99ffef68eULL, 0x32025c9b93b359eaULL, 0x5e8ce0a71e9d112fULL,
0xbfcccb92429503fdULL, 0xd271ba752f095d55ULL, 0x345ead5e972d091eULL, 0x18c8df11a83103baULL,
0x90cd949a9aed0f4cULL, 0xc5d1f4cb6660e37eULL, 0xb8cac52d56c52e0bULL, 0x6e42e400c5808e0dULL,
0xa3b46966eeaefd23ULL, 0x0c4f1f0be39ecdcaULL, 0x189dc8c9d683a51dULL, 0x51f27f054c09351bULL,
0x4c487ccd2a320682ULL, 0x587ea95bb3df1c96ULL, 0xc8ccf79e555cb8e8ULL, 0x547dc829a206d73dULL,
0xb822a6cd80c39b06ULL, 0xe96d54732000d4c6ULL, 0x28535b6f91463b4dULL, 0x228f4660e2486e1dULL,
0x98799538de8d3abfULL, 0x8cd8330045ebca6eULL, 0x79952a008221e738ULL, 0x4322e1a7535cd2bbULL,
0xb114c11819d1801cULL, 0x2016e4d84f3f5ec7ULL, 0xdd0e2df409260f4cULL, 0x5ec362c0ae5f7266ULL,
0xc0462b18b8b2b4eeULL, 0x7cc8d950274d1afbULL, 0xf25f7105436b02d2ULL, 0x43bbf8dcbff9ccd3ULL,
0xb6ad1767a039e9dfULL, 0xb0714da8f69d3583ULL, 0x5e55fa18b42931f5ULL, 0x4ed5558f33c60961ULL,
0x1fe37901c647a5ddULL, 0x593ddf1f8081d357ULL, 0x0249a4fd813fd7a6ULL, 0x69acca274e9caf61ULL,
0x047ba3ea330721c9ULL, 0x83423fc20e7e1ea0ULL, 0x1df4c0af01314a60ULL, 0x09a62dab89289527ULL,
0xa5b325a49cc6cb00ULL, 0xe94b5dc654b56cb6ULL, 0x3be28779adc994a0ULL, 0x4296e8f8ba3a4aadULL,
0x328689761e451eabULL, 0x2e4d598bff59594aULL, 0x49b96853d7a7084aULL, 0x4980a319601420a8ULL,
0x9565b9e12f552c42ULL, 0x8a5318db7100fe96ULL, 0x05c90b4d43add0d7ULL, 0x538b4cd66a5d4edaULL,
0xf4e94fc3e89f039fULL, 0x592c9af26f618045ULL, 0x08a36eb5fd4b9550ULL, 0x25fffaf6c2ed1419ULL,
0x34434459cc79d354ULL, 0xeeecbfb4b1d5476bULL, 0xddeb34a061615d99ULL, 0x5129cecceb64b773ULL,
0xee43215894993520ULL, 0x772f9c7cf14c0b3bULL, 0xd2e2fce306bedad5ULL, 0x715f42b546f06a97ULL,
0x434ecdceda5b5f1aULL, 0x0da17115a49741a9ULL, 0x680bd77c73edad2eULL, 0x487c02354edd9041ULL,
0xb8efeff3a70ed9c4ULL, 0x56a32aa3e857e302ULL, 0xdf3a68bd48a2a5a0ULL, 0x07f650b73176c444ULL,
0xe38b9b1626e0ccb1ULL, 0x79e053c18b09fb36ULL, 0x56d90319c9f94964ULL, 0x1ca941e7ac9ff5c4ULL,
0x49c4df29162fa0bbULL, 0x8488cf3282b33305ULL, 0x95dfda14cabb437dULL, 0x3391f78264d5ad86ULL,
0x729ae06ae2b5095dULL, 0xd58a58d73259a946ULL, 0xe9834262d13921edULL, 0x27fedafaa54bb592ULL,
0xa99dc5b829ad48bbULL, 0x5f025742499ee260ULL, 0x802c8ecd5d7513fdULL, 0x78ceb3ef3f6dd938ULL,
0xc342f44f8a135d94ULL, 0x7b9edb44828cdda3ULL, 0x9436d11a0537cfe7ULL, 0x5064b164ec1ab4c8ULL,
0x7020eccfd37eb2fcULL, 0x1f31ea3ed90d25fcULL, 0x1b930d7bdfa1bb34ULL, 0x5344467a48113044ULL,
0x70073170f25e6dfbULL, 0xe385dc1a50114cc8ULL, 0x2348698ac8fc4f00ULL, 0x2a77a55284dd40d8ULL,
0xfe06afe0c98c6ce4ULL, 0xc235df96dddfd6e4ULL, 0x1428d01e33bf1ed3ULL, 0x785768ec9300bdafULL,
0x9702e57a91deb63bULL, 0x61bdb8bfe5ce8b80ULL, 0x645b426f3d1d58acULL, 0x4804a82227a557bcULL,
0x8e57048ab44d2601ULL, 0x68d6501a4b3a6935ULL, 0xc39c9ec3f9e1c293ULL, 0x4172f257d4de63e2ULL,
0xd368b450330c6401ULL, 0x040d3017418f2391ULL, 0x2c34bb6090b7d90dULL, 0x16f649228fdfd51fULL,
0xbea6818e2b928ef5ULL, 0xe28ccf91cdc11e72ULL, 0x594aaa68e77a36cdULL, 0x313034806c7ffd0fULL,
0x8a9d27ac2249bd65ULL, 0x19a3b464018e9512ULL, 0xc26ccff352b37ec7ULL, 0x056f68341d797b21ULL,
0x5e79d6757efd2327ULL, 0xfabdbcb6553afe15ULL, 0xd3e7222c6eaf5a60ULL, 0x7046c76d4dae743bULL,
0x660be872b18d4a55ULL, 0x19992518574e1496ULL, 0xc103053a302bdcbbULL, 0x3ed8e9800b218e8eULL,
0x7b0b9239fa75e03eULL, 0xefe9fb684633c083ULL, 0x98a35fbe391a7793ULL, 0x6065510fe2d0fe34ULL,
0x55cb668548abad0cULL, 0xb4584548da87e527ULL, 0x2c43ecea0107c1ddULL, 0x526028809372de35ULL,
0x3415c56af9213b1fULL, 0x5bee1a4d017e98dbULL, 0x13f6b105b5cf709bULL, 0x5ff20e3482b29ab6ULL,
0x0aa29c75cc2e6c90ULL, 0xfc7d73ca3a70e206ULL, 0x899fc38fc4b5c515ULL, 0x250386b124ffc207ULL,
0x54ea28d5ae3d2b56ULL, 0x9913149dd6de60ceULL, 0x16694fc58f06d6c1ULL, 0x46b23975eb018fc7ULL,
0x470a6a0fb4b7b4e2ULL, 0x5d92475a8f7253deULL, 0xabeee5b52fbd3adbULL, 0x7fa20801a0806968ULL,
0x76f3faf19f7714d2ULL, 0xb3e840c12f4660c3ULL, 0x0fb4cd8df212744eULL, 0x4b065a251d3a2dd2ULL,
0x5cebde383d77cd4aULL, 0x6adf39df882c9cb1ULL, 0xa2dd242eb09af759ULL, 0x3147c0e50e5f6422ULL,
0x164ca5101d1350dbULL, 0xf8d13479c33fc962ULL, 0xe640ce4d13e5da08ULL, 0x4bdee0c45061f8baULL,
0xd7c46dc1a4edb1c9ULL, 0x5514d7b6437fd98aULL, 0x58942f6bb2a1c00bULL, 0x2dffb2ab1d70710eULL,
0xccdfcf2fc18b6d68ULL, 0xa8ebcba8b7806167ULL, 0x980697f95e2937e3ULL, 0x02fbba1cd0126e8cULL
};
static void curve25519_ever64_base(u8 *out, const u8 *priv)
{
u64 swap = 1;
int i, j, k;
u64 tmp[16 + 32 + 4];
u64 *x1 = &tmp[0];
u64 *z1 = &tmp[4];
u64 *x2 = &tmp[8];
u64 *z2 = &tmp[12];
u64 *xz1 = &tmp[0];
u64 *xz2 = &tmp[8];
u64 *a = &tmp[0 + 16];
u64 *b = &tmp[4 + 16];
u64 *c = &tmp[8 + 16];
u64 *ab = &tmp[0 + 16];
u64 *abcd = &tmp[0 + 16];
u64 *ef = &tmp[16 + 16];
u64 *efgh = &tmp[16 + 16];
u64 *key = &tmp[0 + 16 + 32];
memcpy(key, priv, 32);
((u8 *)key)[0] &= 248;
((u8 *)key)[31] = (((u8 *)key)[31] & 127) | 64;
x1[0] = 1, x1[1] = x1[2] = x1[3] = 0;
z1[0] = 1, z1[1] = z1[2] = z1[3] = 0;
z2[0] = 1, z2[1] = z2[2] = z2[3] = 0;
memcpy(x2, p_minus_s, sizeof(p_minus_s));
j = 3;
for (i = 0; i < 4; ++i) {
while (j < (const int[]){ 64, 64, 64, 63 }[i]) {
u64 bit = (key[i] >> j) & 1;
k = (64 * i + j - 3);
swap = swap ^ bit;
cswap2(swap, xz1, xz2);
swap = bit;
fsub(b, x1, z1);
fadd(a, x1, z1);
fmul(c, &table_ladder[4 * k], b, ef);
fsub(b, a, c);
fadd(a, a, c);
fsqr2(ab, ab, efgh);
fmul2(xz1, xz2, ab, efgh);
++j;
}
j = 0;
}
point_double(xz1, abcd, efgh);
point_double(xz1, abcd, efgh);
point_double(xz1, abcd, efgh);
encode_point(out, xz1);
memzero_explicit(tmp, sizeof(tmp));
}
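/* Fixed-base note (a sketch, not from the original sources): the
 * routine above is a table-driven ladder.  Each processed scalar bit
 * consumes one precomputed entry table_ladder[4 * k], generated from
 * successive doublings of the base point by the sage script above, so a
 * step needs one fmul against a constant plus a combined fsqr2/fmul2
 * rather than a full differential add-and-double.  x2 is seeded with
 * p_minus_s, the x-coordinate of lift_x(9) - lift_x(1), to set up the
 * offset form the table assumes.
 */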
static __ro_after_init DEFINE_STATIC_KEY_FALSE(curve25519_use_bmi2_adx);
void curve25519_arch(u8 mypublic[CURVE25519_KEY_SIZE],
const u8 secret[CURVE25519_KEY_SIZE],
const u8 basepoint[CURVE25519_KEY_SIZE])
{
if (static_branch_likely(&curve25519_use_bmi2_adx))
curve25519_ever64(mypublic, secret, basepoint);
else
curve25519_generic(mypublic, secret, basepoint);
}
EXPORT_SYMBOL(curve25519_arch);
void curve25519_base_arch(u8 pub[CURVE25519_KEY_SIZE],
const u8 secret[CURVE25519_KEY_SIZE])
{
if (static_branch_likely(&curve25519_use_bmi2_adx))
curve25519_ever64_base(pub, secret);
else
curve25519_generic(pub, secret, curve25519_base_point);
}
EXPORT_SYMBOL(curve25519_base_arch);
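/* Dispatch note: curve25519_use_bmi2_adx is a static branch enabled
 * once at module init when the CPU has both BMI2 (mulx) and ADX
 * (adcx/adox), so the wrappers above cost a patched jump per call
 * instead of a runtime feature test.
 */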
static int curve25519_set_secret(struct crypto_kpp *tfm, const void *buf,
unsigned int len)
{
u8 *secret = kpp_tfm_ctx(tfm);
if (!len)
curve25519_generate_secret(secret);
else if (len == CURVE25519_KEY_SIZE &&
crypto_memneq(buf, curve25519_null_point, CURVE25519_KEY_SIZE))
memcpy(secret, buf, CURVE25519_KEY_SIZE);
else
return -EINVAL;
return 0;
}
static int curve25519_generate_public_key(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
const u8 *secret = kpp_tfm_ctx(tfm);
u8 buf[CURVE25519_KEY_SIZE];
int copied, nbytes;
if (req->src)
return -EINVAL;
curve25519_base_arch(buf, secret);
	/* the caller might want fewer bytes than we produced */
nbytes = min_t(size_t, CURVE25519_KEY_SIZE, req->dst_len);
copied = sg_copy_from_buffer(req->dst, sg_nents_for_len(req->dst,
nbytes),
buf, nbytes);
if (copied != nbytes)
return -EINVAL;
return 0;
}
static int curve25519_compute_shared_secret(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
const u8 *secret = kpp_tfm_ctx(tfm);
u8 public_key[CURVE25519_KEY_SIZE];
u8 buf[CURVE25519_KEY_SIZE];
int copied, nbytes;
if (!req->src)
return -EINVAL;
copied = sg_copy_to_buffer(req->src,
sg_nents_for_len(req->src,
CURVE25519_KEY_SIZE),
public_key, CURVE25519_KEY_SIZE);
if (copied != CURVE25519_KEY_SIZE)
return -EINVAL;
curve25519_arch(buf, secret, public_key);
	/* the caller might want fewer bytes than we produced */
nbytes = min_t(size_t, CURVE25519_KEY_SIZE, req->dst_len);
copied = sg_copy_from_buffer(req->dst, sg_nents_for_len(req->dst,
nbytes),
buf, nbytes);
if (copied != nbytes)
return -EINVAL;
return 0;
}
static unsigned int curve25519_max_size(struct crypto_kpp *tfm)
{
return CURVE25519_KEY_SIZE;
}
static struct kpp_alg curve25519_alg = {
.base.cra_name = "curve25519",
.base.cra_driver_name = "curve25519-x86",
.base.cra_priority = 200,
.base.cra_module = THIS_MODULE,
.base.cra_ctxsize = CURVE25519_KEY_SIZE,
.set_secret = curve25519_set_secret,
.generate_public_key = curve25519_generate_public_key,
.compute_shared_secret = curve25519_compute_shared_secret,
.max_size = curve25519_max_size,
};
static int __init curve25519_mod_init(void)
{
if (boot_cpu_has(X86_FEATURE_BMI2) && boot_cpu_has(X86_FEATURE_ADX))
static_branch_enable(&curve25519_use_bmi2_adx);
else
return 0;
return IS_REACHABLE(CONFIG_CRYPTO_KPP) ?
crypto_register_kpp(&curve25519_alg) : 0;
}
static void __exit curve25519_mod_exit(void)
{
if (IS_REACHABLE(CONFIG_CRYPTO_KPP) &&
static_branch_likely(&curve25519_use_bmi2_adx))
crypto_unregister_kpp(&curve25519_alg);
}
module_init(curve25519_mod_init);
module_exit(curve25519_mod_exit);
MODULE_ALIAS_CRYPTO("curve25519");
MODULE_ALIAS_CRYPTO("curve25519-x86");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jason A. Donenfeld <[email protected]>");
| linux-master | arch/x86/crypto/curve25519-x86_64.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Glue code for POLYVAL using PCLMULQDQ-NI
*
* Copyright (c) 2007 Nokia Siemens Networks - Mikko Herranen <[email protected]>
* Copyright (c) 2009 Intel Corp.
* Author: Huang Ying <[email protected]>
* Copyright 2021 Google LLC
*/
/*
* Glue code based on ghash-clmulni-intel_glue.c.
*
 * This implementation of POLYVAL uses Montgomery multiplication
* accelerated by PCLMULQDQ-NI to implement the finite field
* operations.
*/
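/*
 * Background, for reference: POLYVAL is the little-endian counterpart
 * of GHASH used by AES-GCM-SIV (RFC 8452), over GF(2^128) with the
 * reduction polynomial x^128 + x^127 + x^126 + x^121 + 1.  "Montgomery
 * multiplication" here means each product carries an implicit x^(-128)
 * factor, which is what keeps the PCLMULQDQ-based reduction cheap.
 */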
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <crypto/polyval.h>
#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/cpu_device_id.h>
#include <asm/simd.h>
#define POLYVAL_ALIGN 16
#define POLYVAL_ALIGN_ATTR __aligned(POLYVAL_ALIGN)
#define POLYVAL_ALIGN_EXTRA ((POLYVAL_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1))
#define POLYVAL_CTX_SIZE (sizeof(struct polyval_tfm_ctx) + POLYVAL_ALIGN_EXTRA)
#define NUM_KEY_POWERS 8
struct polyval_tfm_ctx {
/*
* These powers must be in the order h^8, ..., h^1.
*/
u8 key_powers[NUM_KEY_POWERS][POLYVAL_BLOCK_SIZE] POLYVAL_ALIGN_ATTR;
};
struct polyval_desc_ctx {
u8 buffer[POLYVAL_BLOCK_SIZE];
u32 bytes;
};
asmlinkage void clmul_polyval_update(const struct polyval_tfm_ctx *keys,
const u8 *in, size_t nblocks, u8 *accumulator);
asmlinkage void clmul_polyval_mul(u8 *op1, const u8 *op2);
static inline struct polyval_tfm_ctx *polyval_tfm_ctx(struct crypto_shash *tfm)
{
return PTR_ALIGN(crypto_shash_ctx(tfm), POLYVAL_ALIGN);
}
static void internal_polyval_update(const struct polyval_tfm_ctx *keys,
const u8 *in, size_t nblocks, u8 *accumulator)
{
if (likely(crypto_simd_usable())) {
kernel_fpu_begin();
clmul_polyval_update(keys, in, nblocks, accumulator);
kernel_fpu_end();
} else {
polyval_update_non4k(keys->key_powers[NUM_KEY_POWERS-1], in,
nblocks, accumulator);
}
}
static void internal_polyval_mul(u8 *op1, const u8 *op2)
{
if (likely(crypto_simd_usable())) {
kernel_fpu_begin();
clmul_polyval_mul(op1, op2);
kernel_fpu_end();
} else {
polyval_mul_non4k(op1, op2);
}
}
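/* Both helpers above follow the common x86 SIMD-glue pattern: the
 * PCLMULQDQ path must run between kernel_fpu_begin()/kernel_fpu_end()
 * and only when crypto_simd_usable() says the FPU state may be touched;
 * otherwise the generic software fallbacks (polyval_*_non4k) take over.
 */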
static int polyval_x86_setkey(struct crypto_shash *tfm,
const u8 *key, unsigned int keylen)
{
struct polyval_tfm_ctx *tctx = polyval_tfm_ctx(tfm);
int i;
if (keylen != POLYVAL_BLOCK_SIZE)
return -EINVAL;
memcpy(tctx->key_powers[NUM_KEY_POWERS-1], key, POLYVAL_BLOCK_SIZE);
for (i = NUM_KEY_POWERS-2; i >= 0; i--) {
memcpy(tctx->key_powers[i], key, POLYVAL_BLOCK_SIZE);
internal_polyval_mul(tctx->key_powers[i],
tctx->key_powers[i+1]);
}
return 0;
}
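/* Key-schedule note: after setkey, key_powers[i] holds h^(8 - i), i.e.
 * h^8 first and h itself last, matching the order the asm routine
 * expects so that eight message blocks can be folded into the
 * accumulator per batch (Horner's rule unrolled by eight).
 */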
static int polyval_x86_init(struct shash_desc *desc)
{
struct polyval_desc_ctx *dctx = shash_desc_ctx(desc);
memset(dctx, 0, sizeof(*dctx));
return 0;
}
static int polyval_x86_update(struct shash_desc *desc,
const u8 *src, unsigned int srclen)
{
struct polyval_desc_ctx *dctx = shash_desc_ctx(desc);
const struct polyval_tfm_ctx *tctx = polyval_tfm_ctx(desc->tfm);
u8 *pos;
unsigned int nblocks;
unsigned int n;
if (dctx->bytes) {
n = min(srclen, dctx->bytes);
pos = dctx->buffer + POLYVAL_BLOCK_SIZE - dctx->bytes;
dctx->bytes -= n;
srclen -= n;
while (n--)
*pos++ ^= *src++;
if (!dctx->bytes)
internal_polyval_mul(dctx->buffer,
tctx->key_powers[NUM_KEY_POWERS-1]);
}
while (srclen >= POLYVAL_BLOCK_SIZE) {
/* Allow rescheduling every 4K bytes. */
nblocks = min(srclen, 4096U) / POLYVAL_BLOCK_SIZE;
internal_polyval_update(tctx, src, nblocks, dctx->buffer);
srclen -= nblocks * POLYVAL_BLOCK_SIZE;
src += nblocks * POLYVAL_BLOCK_SIZE;
}
if (srclen) {
dctx->bytes = POLYVAL_BLOCK_SIZE - srclen;
pos = dctx->buffer;
while (srclen--)
*pos++ ^= *src++;
}
return 0;
}
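/* Buffering sketch for the update path above: POLYVAL accumulates by
 * XOR-then-multiply, so partial input is XORed into dctx->buffer and a
 * single multiplication by h folds the block in once it completes;
 * full blocks are streamed to the asm helper in chunks of at most
 * 4 KiB so each kernel_fpu_begin() section stays short enough not to
 * hurt scheduling latency.
 */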
static int polyval_x86_final(struct shash_desc *desc, u8 *dst)
{
struct polyval_desc_ctx *dctx = shash_desc_ctx(desc);
const struct polyval_tfm_ctx *tctx = polyval_tfm_ctx(desc->tfm);
if (dctx->bytes) {
internal_polyval_mul(dctx->buffer,
tctx->key_powers[NUM_KEY_POWERS-1]);
}
memcpy(dst, dctx->buffer, POLYVAL_BLOCK_SIZE);
return 0;
}
static struct shash_alg polyval_alg = {
.digestsize = POLYVAL_DIGEST_SIZE,
.init = polyval_x86_init,
.update = polyval_x86_update,
.final = polyval_x86_final,
.setkey = polyval_x86_setkey,
.descsize = sizeof(struct polyval_desc_ctx),
.base = {
.cra_name = "polyval",
.cra_driver_name = "polyval-clmulni",
.cra_priority = 200,
.cra_blocksize = POLYVAL_BLOCK_SIZE,
.cra_ctxsize = POLYVAL_CTX_SIZE,
.cra_module = THIS_MODULE,
},
};
__maybe_unused static const struct x86_cpu_id pcmul_cpu_id[] = {
X86_MATCH_FEATURE(X86_FEATURE_PCLMULQDQ, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, pcmul_cpu_id);
static int __init polyval_clmulni_mod_init(void)
{
if (!x86_match_cpu(pcmul_cpu_id))
return -ENODEV;
if (!boot_cpu_has(X86_FEATURE_AVX))
return -ENODEV;
return crypto_register_shash(&polyval_alg);
}
static void __exit polyval_clmulni_mod_exit(void)
{
crypto_unregister_shash(&polyval_alg);
}
module_init(polyval_clmulni_mod_init);
module_exit(polyval_clmulni_mod_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("POLYVAL hash function accelerated by PCLMULQDQ-NI");
MODULE_ALIAS_CRYPTO("polyval");
MODULE_ALIAS_CRYPTO("polyval-clmulni");
| linux-master | arch/x86/crypto/polyval-clmulni_glue.c |
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * SM4 Cipher Algorithm, AES-NI/AVX optimized,
 * as specified in
* https://tools.ietf.org/id/draft-ribose-cfrg-sm4-10.html
*
* Copyright (c) 2021, Alibaba Group.
* Copyright (c) 2021 Tianjia Zhang <[email protected]>
*/
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <asm/simd.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/sm4.h>
#include "sm4-avx.h"
#define SM4_CRYPT8_BLOCK_SIZE (SM4_BLOCK_SIZE * 8)
asmlinkage void sm4_aesni_avx_crypt4(const u32 *rk, u8 *dst,
const u8 *src, int nblocks);
asmlinkage void sm4_aesni_avx_crypt8(const u32 *rk, u8 *dst,
const u8 *src, int nblocks);
asmlinkage void sm4_aesni_avx_ctr_enc_blk8(const u32 *rk, u8 *dst,
const u8 *src, u8 *iv);
asmlinkage void sm4_aesni_avx_cbc_dec_blk8(const u32 *rk, u8 *dst,
const u8 *src, u8 *iv);
asmlinkage void sm4_aesni_avx_cfb_dec_blk8(const u32 *rk, u8 *dst,
const u8 *src, u8 *iv);
static int sm4_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int key_len)
{
struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
return sm4_expandkey(ctx, key, key_len);
}
static int ecb_do_crypt(struct skcipher_request *req, const u32 *rkey)
{
struct skcipher_walk walk;
unsigned int nbytes;
int err;
err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes) > 0) {
const u8 *src = walk.src.virt.addr;
u8 *dst = walk.dst.virt.addr;
kernel_fpu_begin();
while (nbytes >= SM4_CRYPT8_BLOCK_SIZE) {
sm4_aesni_avx_crypt8(rkey, dst, src, 8);
dst += SM4_CRYPT8_BLOCK_SIZE;
src += SM4_CRYPT8_BLOCK_SIZE;
nbytes -= SM4_CRYPT8_BLOCK_SIZE;
}
while (nbytes >= SM4_BLOCK_SIZE) {
unsigned int nblocks = min(nbytes >> 4, 4u);
sm4_aesni_avx_crypt4(rkey, dst, src, nblocks);
dst += nblocks * SM4_BLOCK_SIZE;
src += nblocks * SM4_BLOCK_SIZE;
nbytes -= nblocks * SM4_BLOCK_SIZE;
}
kernel_fpu_end();
err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
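/* The ECB walk above keeps each segment inside a single
 * kernel_fpu_begin()/end() pair: 128-byte strides go to the eight-block
 * asm routine and the remaining full blocks are finished in groups of
 * up to four, so no per-block scalar fallback is needed.
 */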
int sm4_avx_ecb_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
return ecb_do_crypt(req, ctx->rkey_enc);
}
EXPORT_SYMBOL_GPL(sm4_avx_ecb_encrypt);
int sm4_avx_ecb_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
return ecb_do_crypt(req, ctx->rkey_dec);
}
EXPORT_SYMBOL_GPL(sm4_avx_ecb_decrypt);
int sm4_cbc_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk walk;
unsigned int nbytes;
int err;
err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes) > 0) {
const u8 *iv = walk.iv;
const u8 *src = walk.src.virt.addr;
u8 *dst = walk.dst.virt.addr;
while (nbytes >= SM4_BLOCK_SIZE) {
crypto_xor_cpy(dst, src, iv, SM4_BLOCK_SIZE);
sm4_crypt_block(ctx->rkey_enc, dst, dst);
iv = dst;
src += SM4_BLOCK_SIZE;
dst += SM4_BLOCK_SIZE;
nbytes -= SM4_BLOCK_SIZE;
}
if (iv != walk.iv)
memcpy(walk.iv, iv, SM4_BLOCK_SIZE);
err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
EXPORT_SYMBOL_GPL(sm4_cbc_encrypt);
int sm4_avx_cbc_decrypt(struct skcipher_request *req,
unsigned int bsize, sm4_crypt_func func)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk walk;
unsigned int nbytes;
int err;
err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes) > 0) {
const u8 *src = walk.src.virt.addr;
u8 *dst = walk.dst.virt.addr;
kernel_fpu_begin();
while (nbytes >= bsize) {
func(ctx->rkey_dec, dst, src, walk.iv);
dst += bsize;
src += bsize;
nbytes -= bsize;
}
while (nbytes >= SM4_BLOCK_SIZE) {
u8 keystream[SM4_BLOCK_SIZE * 8];
u8 iv[SM4_BLOCK_SIZE];
unsigned int nblocks = min(nbytes >> 4, 8u);
int i;
sm4_aesni_avx_crypt8(ctx->rkey_dec, keystream,
src, nblocks);
src += ((int)nblocks - 2) * SM4_BLOCK_SIZE;
dst += (nblocks - 1) * SM4_BLOCK_SIZE;
memcpy(iv, src + SM4_BLOCK_SIZE, SM4_BLOCK_SIZE);
for (i = nblocks - 1; i > 0; i--) {
crypto_xor_cpy(dst, src,
&keystream[i * SM4_BLOCK_SIZE],
SM4_BLOCK_SIZE);
src -= SM4_BLOCK_SIZE;
dst -= SM4_BLOCK_SIZE;
}
crypto_xor_cpy(dst, walk.iv, keystream, SM4_BLOCK_SIZE);
memcpy(walk.iv, iv, SM4_BLOCK_SIZE);
dst += nblocks * SM4_BLOCK_SIZE;
src += (nblocks + 1) * SM4_BLOCK_SIZE;
nbytes -= nblocks * SM4_BLOCK_SIZE;
}
kernel_fpu_end();
err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
EXPORT_SYMBOL_GPL(sm4_avx_cbc_decrypt);
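/* Tail-handling subtlety above: the last 1-8 blocks are decrypted into
 * keystream[] first, and the CBC chaining XOR is then applied walking
 * backwards from the last block.  Processing in reverse keeps in-place
 * (dst == src) requests correct, since each ciphertext block is still
 * intact when it is needed as the previous block's chaining value.
 */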
static int cbc_decrypt(struct skcipher_request *req)
{
return sm4_avx_cbc_decrypt(req, SM4_CRYPT8_BLOCK_SIZE,
sm4_aesni_avx_cbc_dec_blk8);
}
int sm4_cfb_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk walk;
unsigned int nbytes;
int err;
err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes) > 0) {
u8 keystream[SM4_BLOCK_SIZE];
const u8 *iv = walk.iv;
const u8 *src = walk.src.virt.addr;
u8 *dst = walk.dst.virt.addr;
while (nbytes >= SM4_BLOCK_SIZE) {
sm4_crypt_block(ctx->rkey_enc, keystream, iv);
crypto_xor_cpy(dst, src, keystream, SM4_BLOCK_SIZE);
iv = dst;
src += SM4_BLOCK_SIZE;
dst += SM4_BLOCK_SIZE;
nbytes -= SM4_BLOCK_SIZE;
}
if (iv != walk.iv)
memcpy(walk.iv, iv, SM4_BLOCK_SIZE);
/* tail */
if (walk.nbytes == walk.total && nbytes > 0) {
sm4_crypt_block(ctx->rkey_enc, keystream, walk.iv);
crypto_xor_cpy(dst, src, keystream, nbytes);
nbytes = 0;
}
err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
EXPORT_SYMBOL_GPL(sm4_cfb_encrypt);
int sm4_avx_cfb_decrypt(struct skcipher_request *req,
unsigned int bsize, sm4_crypt_func func)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk walk;
unsigned int nbytes;
int err;
err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes) > 0) {
const u8 *src = walk.src.virt.addr;
u8 *dst = walk.dst.virt.addr;
kernel_fpu_begin();
while (nbytes >= bsize) {
func(ctx->rkey_enc, dst, src, walk.iv);
dst += bsize;
src += bsize;
nbytes -= bsize;
}
while (nbytes >= SM4_BLOCK_SIZE) {
u8 keystream[SM4_BLOCK_SIZE * 8];
unsigned int nblocks = min(nbytes >> 4, 8u);
memcpy(keystream, walk.iv, SM4_BLOCK_SIZE);
if (nblocks > 1)
memcpy(&keystream[SM4_BLOCK_SIZE], src,
(nblocks - 1) * SM4_BLOCK_SIZE);
memcpy(walk.iv, src + (nblocks - 1) * SM4_BLOCK_SIZE,
SM4_BLOCK_SIZE);
sm4_aesni_avx_crypt8(ctx->rkey_enc, keystream,
keystream, nblocks);
crypto_xor_cpy(dst, src, keystream,
nblocks * SM4_BLOCK_SIZE);
dst += nblocks * SM4_BLOCK_SIZE;
src += nblocks * SM4_BLOCK_SIZE;
nbytes -= nblocks * SM4_BLOCK_SIZE;
}
kernel_fpu_end();
/* tail */
if (walk.nbytes == walk.total && nbytes > 0) {
u8 keystream[SM4_BLOCK_SIZE];
sm4_crypt_block(ctx->rkey_enc, keystream, walk.iv);
crypto_xor_cpy(dst, src, keystream, nbytes);
nbytes = 0;
}
err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
EXPORT_SYMBOL_GPL(sm4_avx_cfb_decrypt);
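/* CFB decryption above copies (IV, c[0], ..., c[n-2]) into the local
 * keystream buffer before encrypting it, capturing the source blocks
 * ahead of any overwrite; that is what keeps the bulk path correct for
 * in-place (dst == src) requests.
 */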
static int cfb_decrypt(struct skcipher_request *req)
{
return sm4_avx_cfb_decrypt(req, SM4_CRYPT8_BLOCK_SIZE,
sm4_aesni_avx_cfb_dec_blk8);
}
int sm4_avx_ctr_crypt(struct skcipher_request *req,
unsigned int bsize, sm4_crypt_func func)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk walk;
unsigned int nbytes;
int err;
err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes) > 0) {
const u8 *src = walk.src.virt.addr;
u8 *dst = walk.dst.virt.addr;
kernel_fpu_begin();
while (nbytes >= bsize) {
func(ctx->rkey_enc, dst, src, walk.iv);
dst += bsize;
src += bsize;
nbytes -= bsize;
}
while (nbytes >= SM4_BLOCK_SIZE) {
u8 keystream[SM4_BLOCK_SIZE * 8];
unsigned int nblocks = min(nbytes >> 4, 8u);
int i;
for (i = 0; i < nblocks; i++) {
memcpy(&keystream[i * SM4_BLOCK_SIZE],
walk.iv, SM4_BLOCK_SIZE);
crypto_inc(walk.iv, SM4_BLOCK_SIZE);
}
sm4_aesni_avx_crypt8(ctx->rkey_enc, keystream,
keystream, nblocks);
crypto_xor_cpy(dst, src, keystream,
nblocks * SM4_BLOCK_SIZE);
dst += nblocks * SM4_BLOCK_SIZE;
src += nblocks * SM4_BLOCK_SIZE;
nbytes -= nblocks * SM4_BLOCK_SIZE;
}
kernel_fpu_end();
/* tail */
if (walk.nbytes == walk.total && nbytes > 0) {
u8 keystream[SM4_BLOCK_SIZE];
memcpy(keystream, walk.iv, SM4_BLOCK_SIZE);
crypto_inc(walk.iv, SM4_BLOCK_SIZE);
sm4_crypt_block(ctx->rkey_enc, keystream, keystream);
crypto_xor_cpy(dst, src, keystream, nbytes);
dst += nbytes;
src += nbytes;
nbytes = 0;
}
err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
EXPORT_SYMBOL_GPL(sm4_avx_ctr_crypt);
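/* CTR sketch for the residual path above: up to eight counter blocks
 * are materialized with crypto_inc(), encrypted in one
 * sm4_aesni_avx_crypt8() call and XORed onto the data; the final
 * partial block uses a single-block encryption, so walk.iv always
 * advances by exactly the number of blocks consumed.
 */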
static int ctr_crypt(struct skcipher_request *req)
{
return sm4_avx_ctr_crypt(req, SM4_CRYPT8_BLOCK_SIZE,
sm4_aesni_avx_ctr_enc_blk8);
}
static struct skcipher_alg sm4_aesni_avx_skciphers[] = {
{
.base = {
.cra_name = "__ecb(sm4)",
.cra_driver_name = "__ecb-sm4-aesni-avx",
.cra_priority = 400,
.cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = SM4_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sm4_ctx),
.cra_module = THIS_MODULE,
},
.min_keysize = SM4_KEY_SIZE,
.max_keysize = SM4_KEY_SIZE,
.walksize = 8 * SM4_BLOCK_SIZE,
.setkey = sm4_skcipher_setkey,
.encrypt = sm4_avx_ecb_encrypt,
.decrypt = sm4_avx_ecb_decrypt,
}, {
.base = {
.cra_name = "__cbc(sm4)",
.cra_driver_name = "__cbc-sm4-aesni-avx",
.cra_priority = 400,
.cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = SM4_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sm4_ctx),
.cra_module = THIS_MODULE,
},
.min_keysize = SM4_KEY_SIZE,
.max_keysize = SM4_KEY_SIZE,
.ivsize = SM4_BLOCK_SIZE,
.walksize = 8 * SM4_BLOCK_SIZE,
.setkey = sm4_skcipher_setkey,
.encrypt = sm4_cbc_encrypt,
.decrypt = cbc_decrypt,
}, {
.base = {
.cra_name = "__cfb(sm4)",
.cra_driver_name = "__cfb-sm4-aesni-avx",
.cra_priority = 400,
.cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct sm4_ctx),
.cra_module = THIS_MODULE,
},
.min_keysize = SM4_KEY_SIZE,
.max_keysize = SM4_KEY_SIZE,
.ivsize = SM4_BLOCK_SIZE,
.chunksize = SM4_BLOCK_SIZE,
.walksize = 8 * SM4_BLOCK_SIZE,
.setkey = sm4_skcipher_setkey,
.encrypt = sm4_cfb_encrypt,
.decrypt = cfb_decrypt,
}, {
.base = {
.cra_name = "__ctr(sm4)",
.cra_driver_name = "__ctr-sm4-aesni-avx",
.cra_priority = 400,
.cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct sm4_ctx),
.cra_module = THIS_MODULE,
},
.min_keysize = SM4_KEY_SIZE,
.max_keysize = SM4_KEY_SIZE,
.ivsize = SM4_BLOCK_SIZE,
.chunksize = SM4_BLOCK_SIZE,
.walksize = 8 * SM4_BLOCK_SIZE,
.setkey = sm4_skcipher_setkey,
.encrypt = ctr_crypt,
.decrypt = ctr_crypt,
}
};
static struct simd_skcipher_alg *
simd_sm4_aesni_avx_skciphers[ARRAY_SIZE(sm4_aesni_avx_skciphers)];
static int __init sm4_init(void)
{
const char *feature_name;
if (!boot_cpu_has(X86_FEATURE_AVX) ||
!boot_cpu_has(X86_FEATURE_AES) ||
!boot_cpu_has(X86_FEATURE_OSXSAVE)) {
pr_info("AVX or AES-NI instructions are not detected.\n");
return -ENODEV;
}
if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
&feature_name)) {
pr_info("CPU feature '%s' is not supported.\n", feature_name);
return -ENODEV;
}
return simd_register_skciphers_compat(sm4_aesni_avx_skciphers,
ARRAY_SIZE(sm4_aesni_avx_skciphers),
simd_sm4_aesni_avx_skciphers);
}
static void __exit sm4_exit(void)
{
simd_unregister_skciphers(sm4_aesni_avx_skciphers,
ARRAY_SIZE(sm4_aesni_avx_skciphers),
simd_sm4_aesni_avx_skciphers);
}
module_init(sm4_init);
module_exit(sm4_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Tianjia Zhang <[email protected]>");
MODULE_DESCRIPTION("SM4 Cipher Algorithm, AES-NI/AVX optimized");
MODULE_ALIAS_CRYPTO("sm4");
MODULE_ALIAS_CRYPTO("sm4-aesni-avx");
| linux-master | arch/x86/crypto/sm4_aesni_avx_glue.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Glue Code for SSE2 assembler versions of Serpent Cipher
*
* Copyright (c) 2011 Jussi Kivilinna <[email protected]>
*
* Glue code based on aesni-intel_glue.c by:
* Copyright (C) 2008, Intel Corp.
* Author: Huang Ying <[email protected]>
*
 * CBC & ECB parts based on code (crypto/cbc.c, ecb.c) by:
* Copyright (c) 2006 Herbert Xu <[email protected]>
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/b128ops.h>
#include <crypto/internal/simd.h>
#include <crypto/serpent.h>
#include "serpent-sse2.h"
#include "ecb_cbc_helpers.h"
static int serpent_setkey_skcipher(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen)
{
return __serpent_setkey(crypto_skcipher_ctx(tfm), key, keylen);
}
static void serpent_decrypt_cbc_xway(const void *ctx, u8 *dst, const u8 *src)
{
u8 buf[SERPENT_PARALLEL_BLOCKS - 1][SERPENT_BLOCK_SIZE];
const u8 *s = src;
if (dst == src)
s = memcpy(buf, src, sizeof(buf));
serpent_dec_blk_xway(ctx, dst, src);
crypto_xor(dst + SERPENT_BLOCK_SIZE, s, sizeof(buf));
}
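
/*
 * Reference sketch (illustrative, not part of the original file): the
 * xway helper above implements the usual CBC decryption recurrence,
 * P[i] = D(C[i]) ^ C[i-1], for SERPENT_PARALLEL_BLOCKS blocks at once.
 * A scalar equivalent over one contiguous buffer might look like this;
 * serpent_cbc_dec_ref() is a hypothetical name, and as with the helper
 * above, XORing the IV into the first block is left to the caller.
 */
#if 0	/* example only, not compiled as part of this file */
static void serpent_cbc_dec_ref(const struct serpent_ctx *ctx, u8 *dst,
				const u8 *src, unsigned int nblocks)
{
	unsigned int i;

	if (!nblocks)
		return;
	/* Walk backwards so in-place (dst == src) operation stays safe. */
	for (i = nblocks - 1; i > 0; i--) {
		__serpent_decrypt(ctx, dst + i * SERPENT_BLOCK_SIZE,
				  src + i * SERPENT_BLOCK_SIZE);
		crypto_xor(dst + i * SERPENT_BLOCK_SIZE,
			   src + (i - 1) * SERPENT_BLOCK_SIZE,
			   SERPENT_BLOCK_SIZE);
	}
	__serpent_decrypt(ctx, dst, src);
}
#endif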
static int ecb_encrypt(struct skcipher_request *req)
{
ECB_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS);
ECB_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_enc_blk_xway);
ECB_BLOCK(1, __serpent_encrypt);
ECB_WALK_END();
}
static int ecb_decrypt(struct skcipher_request *req)
{
ECB_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS);
ECB_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_dec_blk_xway);
ECB_BLOCK(1, __serpent_decrypt);
ECB_WALK_END();
}
static int cbc_encrypt(struct skcipher_request *req)
{
CBC_WALK_START(req, SERPENT_BLOCK_SIZE, -1);
CBC_ENC_BLOCK(__serpent_encrypt);
CBC_WALK_END();
}
static int cbc_decrypt(struct skcipher_request *req)
{
CBC_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS);
CBC_DEC_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_decrypt_cbc_xway);
CBC_DEC_BLOCK(1, __serpent_decrypt);
CBC_WALK_END();
}
static struct skcipher_alg serpent_algs[] = {
{
.base.cra_name = "__ecb(serpent)",
.base.cra_driver_name = "__ecb-serpent-sse2",
.base.cra_priority = 400,
.base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = SERPENT_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct serpent_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = SERPENT_MIN_KEY_SIZE,
.max_keysize = SERPENT_MAX_KEY_SIZE,
.setkey = serpent_setkey_skcipher,
.encrypt = ecb_encrypt,
.decrypt = ecb_decrypt,
}, {
.base.cra_name = "__cbc(serpent)",
.base.cra_driver_name = "__cbc-serpent-sse2",
.base.cra_priority = 400,
.base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = SERPENT_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct serpent_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = SERPENT_MIN_KEY_SIZE,
.max_keysize = SERPENT_MAX_KEY_SIZE,
.ivsize = SERPENT_BLOCK_SIZE,
.setkey = serpent_setkey_skcipher,
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
},
};
static struct simd_skcipher_alg *serpent_simd_algs[ARRAY_SIZE(serpent_algs)];
static int __init serpent_sse2_init(void)
{
if (!boot_cpu_has(X86_FEATURE_XMM2)) {
printk(KERN_INFO "SSE2 instructions are not detected.\n");
return -ENODEV;
}
return simd_register_skciphers_compat(serpent_algs,
ARRAY_SIZE(serpent_algs),
serpent_simd_algs);
}
static void __exit serpent_sse2_exit(void)
{
simd_unregister_skciphers(serpent_algs, ARRAY_SIZE(serpent_algs),
serpent_simd_algs);
}
module_init(serpent_sse2_init);
module_exit(serpent_sse2_exit);
MODULE_DESCRIPTION("Serpent Cipher Algorithm, SSE2 optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("serpent");
| linux-master | arch/x86/crypto/serpent_sse2_glue.c |
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * SM4 Cipher Algorithm, AES-NI/AVX2 optimized,
 * as specified in
* https://tools.ietf.org/id/draft-ribose-cfrg-sm4-10.html
*
* Copyright (c) 2021, Alibaba Group.
* Copyright (c) 2021 Tianjia Zhang <[email protected]>
*/
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <asm/simd.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/sm4.h>
#include "sm4-avx.h"
#define SM4_CRYPT16_BLOCK_SIZE (SM4_BLOCK_SIZE * 16)
asmlinkage void sm4_aesni_avx2_ctr_enc_blk16(const u32 *rk, u8 *dst,
const u8 *src, u8 *iv);
asmlinkage void sm4_aesni_avx2_cbc_dec_blk16(const u32 *rk, u8 *dst,
const u8 *src, u8 *iv);
asmlinkage void sm4_aesni_avx2_cfb_dec_blk16(const u32 *rk, u8 *dst,
const u8 *src, u8 *iv);
static int sm4_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int key_len)
{
struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
return sm4_expandkey(ctx, key, key_len);
}
static int cbc_decrypt(struct skcipher_request *req)
{
return sm4_avx_cbc_decrypt(req, SM4_CRYPT16_BLOCK_SIZE,
sm4_aesni_avx2_cbc_dec_blk16);
}
static int cfb_decrypt(struct skcipher_request *req)
{
return sm4_avx_cfb_decrypt(req, SM4_CRYPT16_BLOCK_SIZE,
sm4_aesni_avx2_cfb_dec_blk16);
}
static int ctr_crypt(struct skcipher_request *req)
{
return sm4_avx_ctr_crypt(req, SM4_CRYPT16_BLOCK_SIZE,
sm4_aesni_avx2_ctr_enc_blk16);
}
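
/*
 * The three wrappers above reuse the sm4-avx walk helpers, only
 * swapping in the 16-block AVX2 bulk primitives: inside those helpers,
 * anything shorter than SM4_CRYPT16_BLOCK_SIZE falls back to the
 * 8-block AVX path and finally to single-block scalar code.
 */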
static struct skcipher_alg sm4_aesni_avx2_skciphers[] = {
{
.base = {
.cra_name = "__ecb(sm4)",
.cra_driver_name = "__ecb-sm4-aesni-avx2",
.cra_priority = 500,
.cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = SM4_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sm4_ctx),
.cra_module = THIS_MODULE,
},
.min_keysize = SM4_KEY_SIZE,
.max_keysize = SM4_KEY_SIZE,
.walksize = 16 * SM4_BLOCK_SIZE,
.setkey = sm4_skcipher_setkey,
.encrypt = sm4_avx_ecb_encrypt,
.decrypt = sm4_avx_ecb_decrypt,
}, {
.base = {
.cra_name = "__cbc(sm4)",
.cra_driver_name = "__cbc-sm4-aesni-avx2",
.cra_priority = 500,
.cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = SM4_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sm4_ctx),
.cra_module = THIS_MODULE,
},
.min_keysize = SM4_KEY_SIZE,
.max_keysize = SM4_KEY_SIZE,
.ivsize = SM4_BLOCK_SIZE,
.walksize = 16 * SM4_BLOCK_SIZE,
.setkey = sm4_skcipher_setkey,
.encrypt = sm4_cbc_encrypt,
.decrypt = cbc_decrypt,
}, {
.base = {
.cra_name = "__cfb(sm4)",
.cra_driver_name = "__cfb-sm4-aesni-avx2",
.cra_priority = 500,
.cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct sm4_ctx),
.cra_module = THIS_MODULE,
},
.min_keysize = SM4_KEY_SIZE,
.max_keysize = SM4_KEY_SIZE,
.ivsize = SM4_BLOCK_SIZE,
.chunksize = SM4_BLOCK_SIZE,
.walksize = 16 * SM4_BLOCK_SIZE,
.setkey = sm4_skcipher_setkey,
.encrypt = sm4_cfb_encrypt,
.decrypt = cfb_decrypt,
}, {
.base = {
.cra_name = "__ctr(sm4)",
.cra_driver_name = "__ctr-sm4-aesni-avx2",
.cra_priority = 500,
.cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct sm4_ctx),
.cra_module = THIS_MODULE,
},
.min_keysize = SM4_KEY_SIZE,
.max_keysize = SM4_KEY_SIZE,
.ivsize = SM4_BLOCK_SIZE,
.chunksize = SM4_BLOCK_SIZE,
.walksize = 16 * SM4_BLOCK_SIZE,
.setkey = sm4_skcipher_setkey,
.encrypt = ctr_crypt,
.decrypt = ctr_crypt,
}
};
static struct simd_skcipher_alg *
simd_sm4_aesni_avx2_skciphers[ARRAY_SIZE(sm4_aesni_avx2_skciphers)];
static int __init sm4_init(void)
{
const char *feature_name;
if (!boot_cpu_has(X86_FEATURE_AVX) ||
!boot_cpu_has(X86_FEATURE_AVX2) ||
!boot_cpu_has(X86_FEATURE_AES) ||
!boot_cpu_has(X86_FEATURE_OSXSAVE)) {
pr_info("AVX2 or AES-NI instructions are not detected.\n");
return -ENODEV;
}
if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
&feature_name)) {
pr_info("CPU feature '%s' is not supported.\n", feature_name);
return -ENODEV;
}
return simd_register_skciphers_compat(sm4_aesni_avx2_skciphers,
ARRAY_SIZE(sm4_aesni_avx2_skciphers),
simd_sm4_aesni_avx2_skciphers);
}
static void __exit sm4_exit(void)
{
simd_unregister_skciphers(sm4_aesni_avx2_skciphers,
ARRAY_SIZE(sm4_aesni_avx2_skciphers),
simd_sm4_aesni_avx2_skciphers);
}
module_init(sm4_init);
module_exit(sm4_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Tianjia Zhang <[email protected]>");
MODULE_DESCRIPTION("SM4 Cipher Algorithm, AES-NI/AVX2 optimized");
MODULE_ALIAS_CRYPTO("sm4");
MODULE_ALIAS_CRYPTO("sm4-aesni-avx2");
| linux-master | arch/x86/crypto/sm4_aesni_avx2_glue.c |
/*
* Cryptographic API.
*
* Glue code for the SHA512 Secure Hash Algorithm assembler
* implementation using supplemental SSE3 / AVX / AVX2 instructions.
*
* This file is based on sha512_generic.c
*
* Copyright (C) 2013 Intel Corporation
* Author: Tim Chen <[email protected]>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/types.h>
#include <crypto/sha2.h>
#include <crypto/sha512_base.h>
#include <asm/cpu_device_id.h>
#include <asm/simd.h>
asmlinkage void sha512_transform_ssse3(struct sha512_state *state,
const u8 *data, int blocks);
static int sha512_update(struct shash_desc *desc, const u8 *data,
unsigned int len, sha512_block_fn *sha512_xform)
{
struct sha512_state *sctx = shash_desc_ctx(desc);
if (!crypto_simd_usable() ||
(sctx->count[0] % SHA512_BLOCK_SIZE) + len < SHA512_BLOCK_SIZE)
return crypto_sha512_update(desc, data, len);
/*
* Make sure struct sha512_state begins directly with the SHA512
* 512-bit internal state, as this is what the asm functions expect.
*/
BUILD_BUG_ON(offsetof(struct sha512_state, state) != 0);
kernel_fpu_begin();
sha512_base_do_update(desc, data, len, sha512_xform);
kernel_fpu_end();
return 0;
}
static int sha512_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out, sha512_block_fn *sha512_xform)
{
if (!crypto_simd_usable())
return crypto_sha512_finup(desc, data, len, out);
kernel_fpu_begin();
if (len)
sha512_base_do_update(desc, data, len, sha512_xform);
sha512_base_do_finalize(desc, sha512_xform);
kernel_fpu_end();
return sha512_base_finish(desc, out);
}
static int sha512_ssse3_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
return sha512_update(desc, data, len, sha512_transform_ssse3);
}
static int sha512_ssse3_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
return sha512_finup(desc, data, len, out, sha512_transform_ssse3);
}
/* Add padding and return the message digest. */
static int sha512_ssse3_final(struct shash_desc *desc, u8 *out)
{
return sha512_ssse3_finup(desc, NULL, 0, out);
}
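
/*
 * Illustrative sketch (not part of the original file): a digest request
 * through the shash API picks up whichever registered "sha512" variant
 * below (ssse3/avx/avx2) has the highest priority on the running CPU.
 * sha512_digest_demo() is a hypothetical helper; a real user would need
 * <crypto/hash.h>.
 */
#if 0	/* example only, not compiled as part of this file */
static int sha512_digest_demo(const u8 *data, unsigned int len,
			      u8 out[SHA512_DIGEST_SIZE])
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("sha512", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		err = crypto_shash_digest(desc, data, len, out);
	}

	crypto_free_shash(tfm);
	return err;
}
#endif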
static struct shash_alg sha512_ssse3_algs[] = { {
.digestsize = SHA512_DIGEST_SIZE,
.init = sha512_base_init,
.update = sha512_ssse3_update,
.final = sha512_ssse3_final,
.finup = sha512_ssse3_finup,
.descsize = sizeof(struct sha512_state),
.base = {
.cra_name = "sha512",
.cra_driver_name = "sha512-ssse3",
.cra_priority = 150,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
}, {
.digestsize = SHA384_DIGEST_SIZE,
.init = sha384_base_init,
.update = sha512_ssse3_update,
.final = sha512_ssse3_final,
.finup = sha512_ssse3_finup,
.descsize = sizeof(struct sha512_state),
.base = {
.cra_name = "sha384",
.cra_driver_name = "sha384-ssse3",
.cra_priority = 150,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
} };
static int register_sha512_ssse3(void)
{
if (boot_cpu_has(X86_FEATURE_SSSE3))
return crypto_register_shashes(sha512_ssse3_algs,
ARRAY_SIZE(sha512_ssse3_algs));
return 0;
}
static void unregister_sha512_ssse3(void)
{
if (boot_cpu_has(X86_FEATURE_SSSE3))
crypto_unregister_shashes(sha512_ssse3_algs,
ARRAY_SIZE(sha512_ssse3_algs));
}
asmlinkage void sha512_transform_avx(struct sha512_state *state,
const u8 *data, int blocks);
static bool avx_usable(void)
{
if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) {
if (boot_cpu_has(X86_FEATURE_AVX))
pr_info("AVX detected but unusable.\n");
return false;
}
return true;
}
static int sha512_avx_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
return sha512_update(desc, data, len, sha512_transform_avx);
}
static int sha512_avx_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
return sha512_finup(desc, data, len, out, sha512_transform_avx);
}
/* Add padding and return the message digest. */
static int sha512_avx_final(struct shash_desc *desc, u8 *out)
{
return sha512_avx_finup(desc, NULL, 0, out);
}
static struct shash_alg sha512_avx_algs[] = { {
.digestsize = SHA512_DIGEST_SIZE,
.init = sha512_base_init,
.update = sha512_avx_update,
.final = sha512_avx_final,
.finup = sha512_avx_finup,
.descsize = sizeof(struct sha512_state),
.base = {
.cra_name = "sha512",
.cra_driver_name = "sha512-avx",
.cra_priority = 160,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
}, {
.digestsize = SHA384_DIGEST_SIZE,
.init = sha384_base_init,
.update = sha512_avx_update,
.final = sha512_avx_final,
.finup = sha512_avx_finup,
.descsize = sizeof(struct sha512_state),
.base = {
.cra_name = "sha384",
.cra_driver_name = "sha384-avx",
.cra_priority = 160,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
} };
static int register_sha512_avx(void)
{
if (avx_usable())
return crypto_register_shashes(sha512_avx_algs,
ARRAY_SIZE(sha512_avx_algs));
return 0;
}
static void unregister_sha512_avx(void)
{
if (avx_usable())
crypto_unregister_shashes(sha512_avx_algs,
ARRAY_SIZE(sha512_avx_algs));
}
asmlinkage void sha512_transform_rorx(struct sha512_state *state,
const u8 *data, int blocks);
static int sha512_avx2_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
return sha512_update(desc, data, len, sha512_transform_rorx);
}
static int sha512_avx2_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
return sha512_finup(desc, data, len, out, sha512_transform_rorx);
}
/* Add padding and return the message digest. */
static int sha512_avx2_final(struct shash_desc *desc, u8 *out)
{
return sha512_avx2_finup(desc, NULL, 0, out);
}
static struct shash_alg sha512_avx2_algs[] = { {
.digestsize = SHA512_DIGEST_SIZE,
.init = sha512_base_init,
.update = sha512_avx2_update,
.final = sha512_avx2_final,
.finup = sha512_avx2_finup,
.descsize = sizeof(struct sha512_state),
.base = {
.cra_name = "sha512",
.cra_driver_name = "sha512-avx2",
.cra_priority = 170,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
}, {
.digestsize = SHA384_DIGEST_SIZE,
.init = sha384_base_init,
.update = sha512_avx2_update,
.final = sha512_avx2_final,
.finup = sha512_avx2_finup,
.descsize = sizeof(struct sha512_state),
.base = {
.cra_name = "sha384",
.cra_driver_name = "sha384-avx2",
.cra_priority = 170,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
} };
static bool avx2_usable(void)
{
if (avx_usable() && boot_cpu_has(X86_FEATURE_AVX2) &&
boot_cpu_has(X86_FEATURE_BMI2))
return true;
return false;
}
static int register_sha512_avx2(void)
{
if (avx2_usable())
return crypto_register_shashes(sha512_avx2_algs,
ARRAY_SIZE(sha512_avx2_algs));
return 0;
}
static const struct x86_cpu_id module_cpu_ids[] = {
X86_MATCH_FEATURE(X86_FEATURE_AVX2, NULL),
X86_MATCH_FEATURE(X86_FEATURE_AVX, NULL),
X86_MATCH_FEATURE(X86_FEATURE_SSSE3, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, module_cpu_ids);
static void unregister_sha512_avx2(void)
{
if (avx2_usable())
crypto_unregister_shashes(sha512_avx2_algs,
ARRAY_SIZE(sha512_avx2_algs));
}
static int __init sha512_ssse3_mod_init(void)
{
if (!x86_match_cpu(module_cpu_ids))
return -ENODEV;
if (register_sha512_ssse3())
goto fail;
if (register_sha512_avx()) {
unregister_sha512_ssse3();
goto fail;
}
if (register_sha512_avx2()) {
unregister_sha512_avx();
unregister_sha512_ssse3();
goto fail;
}
return 0;
fail:
return -ENODEV;
}
static void __exit sha512_ssse3_mod_fini(void)
{
unregister_sha512_avx2();
unregister_sha512_avx();
unregister_sha512_ssse3();
}
module_init(sha512_ssse3_mod_init);
module_exit(sha512_ssse3_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA512 Secure Hash Algorithm, Supplemental SSE3 accelerated");
MODULE_ALIAS_CRYPTO("sha512");
MODULE_ALIAS_CRYPTO("sha512-ssse3");
MODULE_ALIAS_CRYPTO("sha512-avx");
MODULE_ALIAS_CRYPTO("sha512-avx2");
MODULE_ALIAS_CRYPTO("sha384");
MODULE_ALIAS_CRYPTO("sha384-ssse3");
MODULE_ALIAS_CRYPTO("sha384-avx");
MODULE_ALIAS_CRYPTO("sha384-avx2");
| linux-master | arch/x86/crypto/sha512_ssse3_glue.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Using the hardware-provided CRC32 instruction to accelerate CRC32c computation.
 * CRC32C polynomial: 0x1EDC6F41 (BE) / 0x82F63B78 (LE)
* CRC32 is a new instruction in Intel SSE4.2, the reference can be found at:
* http://www.intel.com/products/processor/manuals/
* Intel(R) 64 and IA-32 Architectures Software Developer's Manual
* Volume 2A: Instruction Set Reference, A-M
*
* Copyright (C) 2008 Intel Corporation
* Authors: Austin Zhang <[email protected]>
* Kent Liu <[email protected]>
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <asm/cpufeatures.h>
#include <asm/cpu_device_id.h>
#include <asm/simd.h>
#define CHKSUM_BLOCK_SIZE 1
#define CHKSUM_DIGEST_SIZE 4
#define SCALE_F sizeof(unsigned long)
#ifdef CONFIG_X86_64
#define CRC32_INST "crc32q %1, %q0"
#else
#define CRC32_INST "crc32l %1, %0"
#endif
#ifdef CONFIG_X86_64
/*
 * Use the carryless multiply version of crc32c when the buffer
 * size is >= 512 bytes, to account for the fpu state save/restore
 * overhead.
 */
#define CRC32C_PCL_BREAKEVEN 512
asmlinkage unsigned int crc_pcl(const u8 *buffer, int len,
unsigned int crc_init);
#endif /* CONFIG_X86_64 */
static u32 crc32c_intel_le_hw_byte(u32 crc, unsigned char const *data, size_t length)
{
while (length--) {
asm("crc32b %1, %0"
: "+r" (crc) : "rm" (*data));
data++;
}
return crc;
}
static u32 __pure crc32c_intel_le_hw(u32 crc, unsigned char const *p, size_t len)
{
unsigned int iquotient = len / SCALE_F;
unsigned int iremainder = len % SCALE_F;
unsigned long *ptmp = (unsigned long *)p;
while (iquotient--) {
asm(CRC32_INST
: "+r" (crc) : "rm" (*ptmp));
ptmp++;
}
if (iremainder)
crc = crc32c_intel_le_hw_byte(crc, (unsigned char *)ptmp,
iremainder);
return crc;
}
/*
 * Setting the seed allows arbitrary accumulators and a flexible XOR
 * policy.  If your algorithm starts with ~0, then XOR with ~0 before
 * you set the seed.
 */
static int crc32c_intel_setkey(struct crypto_shash *hash, const u8 *key,
unsigned int keylen)
{
u32 *mctx = crypto_shash_ctx(hash);
if (keylen != sizeof(u32))
return -EINVAL;
*mctx = le32_to_cpup((__le32 *)key);
return 0;
}
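
/*
 * Illustrative sketch (not part of the original file): resuming a CRC
 * that was started elsewhere.  Since ->final() below inverts the
 * accumulator on output, a caller holding an already-inverted running
 * value must undo that inversion before handing it in as the "key",
 * exactly as the comment above setkey describes.  crc32c_resume_demo()
 * is a hypothetical name.
 */
#if 0	/* example only, not compiled as part of this file */
static int crc32c_resume_demo(struct crypto_shash *tfm, u32 inverted_crc)
{
	__le32 seed = cpu_to_le32(~inverted_crc);

	return crypto_shash_setkey(tfm, (const u8 *)&seed, sizeof(seed));
}
#endif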
static int crc32c_intel_init(struct shash_desc *desc)
{
u32 *mctx = crypto_shash_ctx(desc->tfm);
u32 *crcp = shash_desc_ctx(desc);
*crcp = *mctx;
return 0;
}
static int crc32c_intel_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
u32 *crcp = shash_desc_ctx(desc);
*crcp = crc32c_intel_le_hw(*crcp, data, len);
return 0;
}
static int __crc32c_intel_finup(u32 *crcp, const u8 *data, unsigned int len,
u8 *out)
{
*(__le32 *)out = ~cpu_to_le32(crc32c_intel_le_hw(*crcp, data, len));
return 0;
}
static int crc32c_intel_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
return __crc32c_intel_finup(shash_desc_ctx(desc), data, len, out);
}
static int crc32c_intel_final(struct shash_desc *desc, u8 *out)
{
u32 *crcp = shash_desc_ctx(desc);
*(__le32 *)out = ~cpu_to_le32p(crcp);
return 0;
}
static int crc32c_intel_digest(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
return __crc32c_intel_finup(crypto_shash_ctx(desc->tfm), data, len,
out);
}
static int crc32c_intel_cra_init(struct crypto_tfm *tfm)
{
u32 *key = crypto_tfm_ctx(tfm);
*key = ~0;
return 0;
}
#ifdef CONFIG_X86_64
static int crc32c_pcl_intel_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
u32 *crcp = shash_desc_ctx(desc);
/*
* use faster PCL version if datasize is large enough to
* overcome kernel fpu state save/restore overhead
*/
if (len >= CRC32C_PCL_BREAKEVEN && crypto_simd_usable()) {
kernel_fpu_begin();
*crcp = crc_pcl(data, len, *crcp);
kernel_fpu_end();
} else
*crcp = crc32c_intel_le_hw(*crcp, data, len);
return 0;
}
static int __crc32c_pcl_intel_finup(u32 *crcp, const u8 *data, unsigned int len,
u8 *out)
{
if (len >= CRC32C_PCL_BREAKEVEN && crypto_simd_usable()) {
kernel_fpu_begin();
*(__le32 *)out = ~cpu_to_le32(crc_pcl(data, len, *crcp));
kernel_fpu_end();
} else
*(__le32 *)out =
~cpu_to_le32(crc32c_intel_le_hw(*crcp, data, len));
return 0;
}
static int crc32c_pcl_intel_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
return __crc32c_pcl_intel_finup(shash_desc_ctx(desc), data, len, out);
}
static int crc32c_pcl_intel_digest(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
return __crc32c_pcl_intel_finup(crypto_shash_ctx(desc->tfm), data, len,
out);
}
#endif /* CONFIG_X86_64 */
static struct shash_alg alg = {
.setkey = crc32c_intel_setkey,
.init = crc32c_intel_init,
.update = crc32c_intel_update,
.final = crc32c_intel_final,
.finup = crc32c_intel_finup,
.digest = crc32c_intel_digest,
.descsize = sizeof(u32),
.digestsize = CHKSUM_DIGEST_SIZE,
.base = {
.cra_name = "crc32c",
.cra_driver_name = "crc32c-intel",
.cra_priority = 200,
.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_ctxsize = sizeof(u32),
.cra_module = THIS_MODULE,
.cra_init = crc32c_intel_cra_init,
}
};
static const struct x86_cpu_id crc32c_cpu_id[] = {
X86_MATCH_FEATURE(X86_FEATURE_XMM4_2, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, crc32c_cpu_id);
static int __init crc32c_intel_mod_init(void)
{
if (!x86_match_cpu(crc32c_cpu_id))
return -ENODEV;
#ifdef CONFIG_X86_64
if (boot_cpu_has(X86_FEATURE_PCLMULQDQ)) {
alg.update = crc32c_pcl_intel_update;
alg.finup = crc32c_pcl_intel_finup;
alg.digest = crc32c_pcl_intel_digest;
}
#endif
return crypto_register_shash(&alg);
}
static void __exit crc32c_intel_mod_fini(void)
{
crypto_unregister_shash(&alg);
}
module_init(crc32c_intel_mod_init);
module_exit(crc32c_intel_mod_fini);
MODULE_AUTHOR("Austin Zhang <[email protected]>, Kent Liu <[email protected]>");
MODULE_DESCRIPTION("CRC32c (Castagnoli) optimization using Intel Hardware.");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("crc32c");
MODULE_ALIAS_CRYPTO("crc32c-intel");
| linux-master | arch/x86/crypto/crc32c-intel_glue.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Support for Intel AES-NI instructions. This file contains glue
 * code; the real AES implementation is in aesni-intel_asm.S.
*
* Copyright (C) 2008, Intel Corp.
* Author: Huang Ying <[email protected]>
*
* Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
* interface for 64-bit kernels.
* Authors: Adrian Hoban <[email protected]>
* Gabriele Paoloni <[email protected]>
* Tadeusz Struk ([email protected])
* Aidan O'Mahony ([email protected])
* Copyright (c) 2010, Intel Corporation.
*/
#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/b128ops.h>
#include <crypto/gcm.h>
#include <crypto/xts.h>
#include <asm/cpu_device_id.h>
#include <asm/simd.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <linux/jump_label.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/static_call.h>
#define AESNI_ALIGN 16
#define AESNI_ALIGN_ATTR __attribute__ ((__aligned__(AESNI_ALIGN)))
#define AES_BLOCK_MASK (~(AES_BLOCK_SIZE - 1))
#define RFC4106_HASH_SUBKEY_SIZE 16
#define AESNI_ALIGN_EXTRA ((AESNI_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1))
#define CRYPTO_AES_CTX_SIZE (sizeof(struct crypto_aes_ctx) + AESNI_ALIGN_EXTRA)
#define XTS_AES_CTX_SIZE (sizeof(struct aesni_xts_ctx) + AESNI_ALIGN_EXTRA)
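/*
 * The two context-size macros above reserve AESNI_ALIGN_EXTRA bytes
 * beyond the structure itself so that aes_ctx() below can round the
 * (only CRYPTO_MINALIGN-aligned) tfm context pointer up to the 16-byte
 * boundary the assembler routines require.
 */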
/*
 * This data is stored at the end of the crypto_tfm struct.
 * It is a per-"session" data storage location and needs to be 16-byte
 * aligned.
 */
struct aesni_rfc4106_gcm_ctx {
u8 hash_subkey[16] AESNI_ALIGN_ATTR;
struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
u8 nonce[4];
};
struct generic_gcmaes_ctx {
u8 hash_subkey[16] AESNI_ALIGN_ATTR;
struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
};
struct aesni_xts_ctx {
u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
};
#define GCM_BLOCK_LEN 16
struct gcm_context_data {
/* init, update and finalize context data */
u8 aad_hash[GCM_BLOCK_LEN];
u64 aad_length;
u64 in_length;
u8 partial_block_enc_key[GCM_BLOCK_LEN];
u8 orig_IV[GCM_BLOCK_LEN];
u8 current_counter[GCM_BLOCK_LEN];
u64 partial_block_len;
u64 unused;
u8 hash_keys[GCM_BLOCK_LEN * 16];
};
asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
unsigned int key_len);
asmlinkage void aesni_enc(const void *ctx, u8 *out, const u8 *in);
asmlinkage void aesni_dec(const void *ctx, u8 *out, const u8 *in);
asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
const u8 *in, unsigned int len);
asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
const u8 *in, unsigned int len);
asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_cts_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_cts_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
const u8 *in, unsigned int len, u8 *iv);
#define AVX_GEN2_OPTSIZE 640
#define AVX_GEN4_OPTSIZE 4096
asmlinkage void aesni_xts_encrypt(const struct crypto_aes_ctx *ctx, u8 *out,
const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_xts_decrypt(const struct crypto_aes_ctx *ctx, u8 *out,
const u8 *in, unsigned int len, u8 *iv);
#ifdef CONFIG_X86_64
asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
const u8 *in, unsigned int len, u8 *iv);
DEFINE_STATIC_CALL(aesni_ctr_enc_tfm, aesni_ctr_enc);
/* Scatter / Gather routines, with args similar to above */
asmlinkage void aesni_gcm_init(void *ctx,
struct gcm_context_data *gdata,
u8 *iv,
u8 *hash_subkey, const u8 *aad,
unsigned long aad_len);
asmlinkage void aesni_gcm_enc_update(void *ctx,
struct gcm_context_data *gdata, u8 *out,
const u8 *in, unsigned long plaintext_len);
asmlinkage void aesni_gcm_dec_update(void *ctx,
struct gcm_context_data *gdata, u8 *out,
const u8 *in,
unsigned long ciphertext_len);
asmlinkage void aesni_gcm_finalize(void *ctx,
struct gcm_context_data *gdata,
u8 *auth_tag, unsigned long auth_tag_len);
asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
void *keys, u8 *out, unsigned int num_bytes);
asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
void *keys, u8 *out, unsigned int num_bytes);
asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv,
void *keys, u8 *out, unsigned int num_bytes);
asmlinkage void aes_xctr_enc_128_avx_by8(const u8 *in, const u8 *iv,
const void *keys, u8 *out, unsigned int num_bytes,
unsigned int byte_ctr);
asmlinkage void aes_xctr_enc_192_avx_by8(const u8 *in, const u8 *iv,
const void *keys, u8 *out, unsigned int num_bytes,
unsigned int byte_ctr);
asmlinkage void aes_xctr_enc_256_avx_by8(const u8 *in, const u8 *iv,
const void *keys, u8 *out, unsigned int num_bytes,
unsigned int byte_ctr);
/*
* asmlinkage void aesni_gcm_init_avx_gen2()
* gcm_data *my_ctx_data, context data
* u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
*/
asmlinkage void aesni_gcm_init_avx_gen2(void *my_ctx_data,
struct gcm_context_data *gdata,
u8 *iv,
u8 *hash_subkey,
const u8 *aad,
unsigned long aad_len);
asmlinkage void aesni_gcm_enc_update_avx_gen2(void *ctx,
struct gcm_context_data *gdata, u8 *out,
const u8 *in, unsigned long plaintext_len);
asmlinkage void aesni_gcm_dec_update_avx_gen2(void *ctx,
struct gcm_context_data *gdata, u8 *out,
const u8 *in,
unsigned long ciphertext_len);
asmlinkage void aesni_gcm_finalize_avx_gen2(void *ctx,
struct gcm_context_data *gdata,
u8 *auth_tag, unsigned long auth_tag_len);
/*
* asmlinkage void aesni_gcm_init_avx_gen4()
* gcm_data *my_ctx_data, context data
* u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
*/
asmlinkage void aesni_gcm_init_avx_gen4(void *my_ctx_data,
struct gcm_context_data *gdata,
u8 *iv,
u8 *hash_subkey,
const u8 *aad,
unsigned long aad_len);
asmlinkage void aesni_gcm_enc_update_avx_gen4(void *ctx,
struct gcm_context_data *gdata, u8 *out,
const u8 *in, unsigned long plaintext_len);
asmlinkage void aesni_gcm_dec_update_avx_gen4(void *ctx,
struct gcm_context_data *gdata, u8 *out,
const u8 *in,
unsigned long ciphertext_len);
asmlinkage void aesni_gcm_finalize_avx_gen4(void *ctx,
struct gcm_context_data *gdata,
u8 *auth_tag, unsigned long auth_tag_len);
static __ro_after_init DEFINE_STATIC_KEY_FALSE(gcm_use_avx);
static __ro_after_init DEFINE_STATIC_KEY_FALSE(gcm_use_avx2);
static inline struct
aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
{
unsigned long align = AESNI_ALIGN;
if (align <= crypto_tfm_ctx_alignment())
align = 1;
return PTR_ALIGN(crypto_aead_ctx(tfm), align);
}
static inline struct
generic_gcmaes_ctx *generic_gcmaes_ctx_get(struct crypto_aead *tfm)
{
unsigned long align = AESNI_ALIGN;
if (align <= crypto_tfm_ctx_alignment())
align = 1;
return PTR_ALIGN(crypto_aead_ctx(tfm), align);
}
#endif
static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
{
unsigned long addr = (unsigned long)raw_ctx;
unsigned long align = AESNI_ALIGN;
if (align <= crypto_tfm_ctx_alignment())
align = 1;
return (struct crypto_aes_ctx *)ALIGN(addr, align);
}
static int aes_set_key_common(struct crypto_aes_ctx *ctx,
const u8 *in_key, unsigned int key_len)
{
int err;
if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
key_len != AES_KEYSIZE_256)
return -EINVAL;
if (!crypto_simd_usable())
err = aes_expandkey(ctx, in_key, key_len);
else {
kernel_fpu_begin();
err = aesni_set_key(ctx, in_key, key_len);
kernel_fpu_end();
}
return err;
}
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
return aes_set_key_common(aes_ctx(crypto_tfm_ctx(tfm)), in_key,
key_len);
}
static void aesni_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
if (!crypto_simd_usable()) {
aes_encrypt(ctx, dst, src);
} else {
kernel_fpu_begin();
aesni_enc(ctx, dst, src);
kernel_fpu_end();
}
}
static void aesni_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
if (!crypto_simd_usable()) {
aes_decrypt(ctx, dst, src);
} else {
kernel_fpu_begin();
aesni_dec(ctx, dst, src);
kernel_fpu_end();
}
}
static int aesni_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int len)
{
return aes_set_key_common(aes_ctx(crypto_skcipher_ctx(tfm)), key, len);
}
static int ecb_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
struct skcipher_walk walk;
unsigned int nbytes;
int err;
err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes)) {
kernel_fpu_begin();
aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
nbytes & AES_BLOCK_MASK);
kernel_fpu_end();
nbytes &= AES_BLOCK_SIZE - 1;
err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
static int ecb_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
struct skcipher_walk walk;
unsigned int nbytes;
int err;
err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes)) {
kernel_fpu_begin();
aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
nbytes & AES_BLOCK_MASK);
kernel_fpu_end();
nbytes &= AES_BLOCK_SIZE - 1;
err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
static int cbc_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
struct skcipher_walk walk;
unsigned int nbytes;
int err;
err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes)) {
kernel_fpu_begin();
aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
nbytes & AES_BLOCK_MASK, walk.iv);
kernel_fpu_end();
nbytes &= AES_BLOCK_SIZE - 1;
err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
static int cbc_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
struct skcipher_walk walk;
unsigned int nbytes;
int err;
err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes)) {
kernel_fpu_begin();
aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
nbytes & AES_BLOCK_MASK, walk.iv);
kernel_fpu_end();
nbytes &= AES_BLOCK_SIZE - 1;
err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
static int cts_cbc_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
struct scatterlist *src = req->src, *dst = req->dst;
struct scatterlist sg_src[2], sg_dst[2];
struct skcipher_request subreq;
struct skcipher_walk walk;
int err;
skcipher_request_set_tfm(&subreq, tfm);
skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
NULL, NULL);
if (req->cryptlen <= AES_BLOCK_SIZE) {
if (req->cryptlen < AES_BLOCK_SIZE)
return -EINVAL;
cbc_blocks = 1;
}
if (cbc_blocks > 0) {
skcipher_request_set_crypt(&subreq, req->src, req->dst,
cbc_blocks * AES_BLOCK_SIZE,
req->iv);
err = cbc_encrypt(&subreq);
if (err)
return err;
if (req->cryptlen == AES_BLOCK_SIZE)
return 0;
dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
if (req->dst != req->src)
dst = scatterwalk_ffwd(sg_dst, req->dst,
subreq.cryptlen);
}
/* handle ciphertext stealing */
skcipher_request_set_crypt(&subreq, src, dst,
req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
req->iv);
err = skcipher_walk_virt(&walk, &subreq, false);
if (err)
return err;
kernel_fpu_begin();
aesni_cts_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
walk.nbytes, walk.iv);
kernel_fpu_end();
return skcipher_walk_done(&walk, 0);
}
static int cts_cbc_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
struct scatterlist *src = req->src, *dst = req->dst;
struct scatterlist sg_src[2], sg_dst[2];
struct skcipher_request subreq;
struct skcipher_walk walk;
int err;
skcipher_request_set_tfm(&subreq, tfm);
skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
NULL, NULL);
if (req->cryptlen <= AES_BLOCK_SIZE) {
if (req->cryptlen < AES_BLOCK_SIZE)
return -EINVAL;
cbc_blocks = 1;
}
if (cbc_blocks > 0) {
skcipher_request_set_crypt(&subreq, req->src, req->dst,
cbc_blocks * AES_BLOCK_SIZE,
req->iv);
err = cbc_decrypt(&subreq);
if (err)
return err;
if (req->cryptlen == AES_BLOCK_SIZE)
return 0;
dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
if (req->dst != req->src)
dst = scatterwalk_ffwd(sg_dst, req->dst,
subreq.cryptlen);
}
/* handle ciphertext stealing */
skcipher_request_set_crypt(&subreq, src, dst,
req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
req->iv);
err = skcipher_walk_virt(&walk, &subreq, false);
if (err)
return err;
kernel_fpu_begin();
aesni_cts_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
walk.nbytes, walk.iv);
kernel_fpu_end();
return skcipher_walk_done(&walk, 0);
}
#ifdef CONFIG_X86_64
static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
const u8 *in, unsigned int len, u8 *iv)
{
	/*
	 * Based on the key length, dispatch to the matching by8 version
	 * of ctr mode encryption/decryption for improved performance.
	 * aes_set_key_common() ensures that the key length is one of
	 * {128,192,256} bits.
	 */
if (ctx->key_length == AES_KEYSIZE_128)
aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len);
else if (ctx->key_length == AES_KEYSIZE_192)
aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len);
else
aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
}
static int ctr_crypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
u8 keystream[AES_BLOCK_SIZE];
struct skcipher_walk walk;
unsigned int nbytes;
int err;
err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes) > 0) {
kernel_fpu_begin();
if (nbytes & AES_BLOCK_MASK)
static_call(aesni_ctr_enc_tfm)(ctx, walk.dst.virt.addr,
walk.src.virt.addr,
nbytes & AES_BLOCK_MASK,
walk.iv);
nbytes &= ~AES_BLOCK_MASK;
if (walk.nbytes == walk.total && nbytes > 0) {
aesni_enc(ctx, keystream, walk.iv);
crypto_xor_cpy(walk.dst.virt.addr + walk.nbytes - nbytes,
walk.src.virt.addr + walk.nbytes - nbytes,
keystream, nbytes);
crypto_inc(walk.iv, AES_BLOCK_SIZE);
nbytes = 0;
}
kernel_fpu_end();
err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
static void aesni_xctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
const u8 *in, unsigned int len, u8 *iv,
unsigned int byte_ctr)
{
if (ctx->key_length == AES_KEYSIZE_128)
aes_xctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len,
byte_ctr);
else if (ctx->key_length == AES_KEYSIZE_192)
aes_xctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len,
byte_ctr);
else
aes_xctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len,
byte_ctr);
}
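
/*
 * Note that XCTR (used by HCTR2) differs from CTR: the block counter
 * (i + 1 for the i-th keystream block, counting from zero) is XORed,
 * little-endian, into the first word of the IV, rather than treating
 * the whole IV as a big-endian counter.  The tail handling below shows
 * the same construction in scalar form.
 */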
static int xctr_crypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
u8 keystream[AES_BLOCK_SIZE];
struct skcipher_walk walk;
unsigned int nbytes;
unsigned int byte_ctr = 0;
int err;
__le32 block[AES_BLOCK_SIZE / sizeof(__le32)];
err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes) > 0) {
kernel_fpu_begin();
if (nbytes & AES_BLOCK_MASK)
aesni_xctr_enc_avx_tfm(ctx, walk.dst.virt.addr,
walk.src.virt.addr, nbytes & AES_BLOCK_MASK,
walk.iv, byte_ctr);
nbytes &= ~AES_BLOCK_MASK;
byte_ctr += walk.nbytes - nbytes;
if (walk.nbytes == walk.total && nbytes > 0) {
memcpy(block, walk.iv, AES_BLOCK_SIZE);
block[0] ^= cpu_to_le32(1 + byte_ctr / AES_BLOCK_SIZE);
aesni_enc(ctx, keystream, (u8 *)block);
crypto_xor_cpy(walk.dst.virt.addr + walk.nbytes -
nbytes, walk.src.virt.addr + walk.nbytes
- nbytes, keystream, nbytes);
byte_ctr += nbytes;
nbytes = 0;
}
kernel_fpu_end();
err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
static int
rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
{
struct crypto_aes_ctx ctx;
int ret;
ret = aes_expandkey(&ctx, key, key_len);
if (ret)
return ret;
	/*
	 * Clear the hash sub key container to zero: we want to encrypt
	 * an all-zero block to create the hash sub key.
	 */
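	/* In GCM terms this computes the hash subkey H = E_K(0^128). */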
memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);
aes_encrypt(&ctx, hash_subkey, hash_subkey);
memzero_explicit(&ctx, sizeof(ctx));
return 0;
}
static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
unsigned int key_len)
{
struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead);
if (key_len < 4)
return -EINVAL;
	/* Account for the 4-byte nonce at the end. */
key_len -= 4;
memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
return aes_set_key_common(&ctx->aes_key_expanded, key, key_len) ?:
rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
}
/*
 * This is the Integrity Check Value (aka the authentication tag) length
 * and can be 8, 12 or 16 bytes long.
 */
static int common_rfc4106_set_authsize(struct crypto_aead *aead,
unsigned int authsize)
{
switch (authsize) {
case 8:
case 12:
case 16:
break;
default:
return -EINVAL;
}
return 0;
}
static int generic_gcmaes_set_authsize(struct crypto_aead *tfm,
unsigned int authsize)
{
switch (authsize) {
case 4:
case 8:
case 12:
case 13:
case 14:
case 15:
case 16:
break;
default:
return -EINVAL;
}
return 0;
}
static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
unsigned int assoclen, u8 *hash_subkey,
u8 *iv, void *aes_ctx, u8 *auth_tag,
unsigned long auth_tag_len)
{
u8 databuf[sizeof(struct gcm_context_data) + (AESNI_ALIGN - 8)] __aligned(8);
struct gcm_context_data *data = PTR_ALIGN((void *)databuf, AESNI_ALIGN);
unsigned long left = req->cryptlen;
struct scatter_walk assoc_sg_walk;
struct skcipher_walk walk;
bool do_avx, do_avx2;
u8 *assocmem = NULL;
u8 *assoc;
int err;
if (!enc)
left -= auth_tag_len;
do_avx = (left >= AVX_GEN2_OPTSIZE);
do_avx2 = (left >= AVX_GEN4_OPTSIZE);
/* Linearize assoc, if not already linear */
if (req->src->length >= assoclen && req->src->length) {
scatterwalk_start(&assoc_sg_walk, req->src);
assoc = scatterwalk_map(&assoc_sg_walk);
} else {
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
/* assoc can be any length, so must be on heap */
assocmem = kmalloc(assoclen, flags);
if (unlikely(!assocmem))
return -ENOMEM;
assoc = assocmem;
scatterwalk_map_and_copy(assoc, req->src, 0, assoclen, 0);
}
kernel_fpu_begin();
if (static_branch_likely(&gcm_use_avx2) && do_avx2)
aesni_gcm_init_avx_gen4(aes_ctx, data, iv, hash_subkey, assoc,
assoclen);
else if (static_branch_likely(&gcm_use_avx) && do_avx)
aesni_gcm_init_avx_gen2(aes_ctx, data, iv, hash_subkey, assoc,
assoclen);
else
aesni_gcm_init(aes_ctx, data, iv, hash_subkey, assoc, assoclen);
kernel_fpu_end();
if (!assocmem)
scatterwalk_unmap(assoc);
else
kfree(assocmem);
err = enc ? skcipher_walk_aead_encrypt(&walk, req, false)
: skcipher_walk_aead_decrypt(&walk, req, false);
while (walk.nbytes > 0) {
kernel_fpu_begin();
if (static_branch_likely(&gcm_use_avx2) && do_avx2) {
if (enc)
aesni_gcm_enc_update_avx_gen4(aes_ctx, data,
walk.dst.virt.addr,
walk.src.virt.addr,
walk.nbytes);
else
aesni_gcm_dec_update_avx_gen4(aes_ctx, data,
walk.dst.virt.addr,
walk.src.virt.addr,
walk.nbytes);
} else if (static_branch_likely(&gcm_use_avx) && do_avx) {
if (enc)
aesni_gcm_enc_update_avx_gen2(aes_ctx, data,
walk.dst.virt.addr,
walk.src.virt.addr,
walk.nbytes);
else
aesni_gcm_dec_update_avx_gen2(aes_ctx, data,
walk.dst.virt.addr,
walk.src.virt.addr,
walk.nbytes);
} else if (enc) {
aesni_gcm_enc_update(aes_ctx, data, walk.dst.virt.addr,
walk.src.virt.addr, walk.nbytes);
} else {
aesni_gcm_dec_update(aes_ctx, data, walk.dst.virt.addr,
walk.src.virt.addr, walk.nbytes);
}
kernel_fpu_end();
err = skcipher_walk_done(&walk, 0);
}
if (err)
return err;
kernel_fpu_begin();
if (static_branch_likely(&gcm_use_avx2) && do_avx2)
aesni_gcm_finalize_avx_gen4(aes_ctx, data, auth_tag,
auth_tag_len);
else if (static_branch_likely(&gcm_use_avx) && do_avx)
aesni_gcm_finalize_avx_gen2(aes_ctx, data, auth_tag,
auth_tag_len);
else
aesni_gcm_finalize(aes_ctx, data, auth_tag, auth_tag_len);
kernel_fpu_end();
return 0;
}
static int gcmaes_encrypt(struct aead_request *req, unsigned int assoclen,
u8 *hash_subkey, u8 *iv, void *aes_ctx)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
unsigned long auth_tag_len = crypto_aead_authsize(tfm);
u8 auth_tag[16];
int err;
err = gcmaes_crypt_by_sg(true, req, assoclen, hash_subkey, iv, aes_ctx,
auth_tag, auth_tag_len);
if (err)
return err;
scatterwalk_map_and_copy(auth_tag, req->dst,
req->assoclen + req->cryptlen,
auth_tag_len, 1);
return 0;
}
static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen,
u8 *hash_subkey, u8 *iv, void *aes_ctx)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
unsigned long auth_tag_len = crypto_aead_authsize(tfm);
u8 auth_tag_msg[16];
u8 auth_tag[16];
int err;
err = gcmaes_crypt_by_sg(false, req, assoclen, hash_subkey, iv, aes_ctx,
auth_tag, auth_tag_len);
if (err)
return err;
/* Copy out original auth_tag */
scatterwalk_map_and_copy(auth_tag_msg, req->src,
req->assoclen + req->cryptlen - auth_tag_len,
auth_tag_len, 0);
/* Compare generated tag with passed in tag. */
if (crypto_memneq(auth_tag_msg, auth_tag, auth_tag_len)) {
memzero_explicit(auth_tag, sizeof(auth_tag));
return -EBADMSG;
}
return 0;
}
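
/*
 * Illustrative sketch (not part of the original file): driving the
 * "gcm(aes)" AEAD that the helpers above implement.  Per the AEAD API
 * convention, the scatterlist carries assoclen bytes of AAD followed by
 * the payload, with room for the 16-byte tag appended on encryption.
 * gcm_aes_seal_demo() and the buffer layout are hypothetical; a real
 * user would also need <crypto/aead.h>.
 */
#if 0	/* example only, not compiled as part of this file */
static int gcm_aes_seal_demo(const u8 *key, unsigned int keylen,
			     u8 *buf, unsigned int assoclen,
			     unsigned int ptlen, u8 iv[GCM_AES_IV_SIZE])
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setkey(tfm, key, keylen) ?:
	      crypto_aead_setauthsize(tfm, 16);
	if (err)
		goto out_free_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	/* buf must have room for assoclen + ptlen + 16 tag bytes. */
	sg_init_one(&sg, buf, assoclen + ptlen + 16);
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				  crypto_req_done, &wait);
	aead_request_set_ad(req, assoclen);
	aead_request_set_crypt(req, &sg, &sg, ptlen, iv);

	err = crypto_wait_req(crypto_aead_encrypt(req), &wait);

	aead_request_free(req);
out_free_tfm:
	crypto_free_aead(tfm);
	return err;
}
#endif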
static int helper_rfc4106_encrypt(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
void *aes_ctx = &(ctx->aes_key_expanded);
u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
unsigned int i;
__be32 counter = cpu_to_be32(1);
	/*
	 * Assuming we are supporting rfc4106 64-bit extended sequence
	 * numbers, the AAD length must be 16 or 20 bytes.
	 */
if (unlikely(req->assoclen != 16 && req->assoclen != 20))
return -EINVAL;
	/* The IV is built below. */
for (i = 0; i < 4; i++)
*(iv+i) = ctx->nonce[i];
for (i = 0; i < 8; i++)
*(iv+4+i) = req->iv[i];
*((__be32 *)(iv+12)) = counter;
return gcmaes_encrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
aes_ctx);
}
static int helper_rfc4106_decrypt(struct aead_request *req)
{
__be32 counter = cpu_to_be32(1);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
void *aes_ctx = &(ctx->aes_key_expanded);
u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
unsigned int i;
if (unlikely(req->assoclen != 16 && req->assoclen != 20))
return -EINVAL;
	/*
	 * Assuming we are supporting rfc4106 64-bit extended sequence
	 * numbers, the AAD length must be 16 or 20 bytes (checked above).
	 */
	/* The IV is built below. */
for (i = 0; i < 4; i++)
*(iv+i) = ctx->nonce[i];
for (i = 0; i < 8; i++)
*(iv+4+i) = req->iv[i];
*((__be32 *)(iv+12)) = counter;
return gcmaes_decrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
aes_ctx);
}
#endif
static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
int err;
err = xts_verify_key(tfm, key, keylen);
if (err)
return err;
keylen /= 2;
/* first half of xts-key is for crypt */
err = aes_set_key_common(aes_ctx(ctx->raw_crypt_ctx), key, keylen);
if (err)
return err;
/* second half of xts-key is for tweak */
return aes_set_key_common(aes_ctx(ctx->raw_tweak_ctx), key + keylen,
keylen);
}
static int xts_crypt(struct skcipher_request *req, bool encrypt)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
int tail = req->cryptlen % AES_BLOCK_SIZE;
struct skcipher_request subreq;
struct skcipher_walk walk;
int err;
if (req->cryptlen < AES_BLOCK_SIZE)
return -EINVAL;
err = skcipher_walk_virt(&walk, req, false);
if (!walk.nbytes)
return err;
if (unlikely(tail > 0 && walk.nbytes < walk.total)) {
int blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
skcipher_walk_abort(&walk);
skcipher_request_set_tfm(&subreq, tfm);
skcipher_request_set_callback(&subreq,
skcipher_request_flags(req),
NULL, NULL);
skcipher_request_set_crypt(&subreq, req->src, req->dst,
blocks * AES_BLOCK_SIZE, req->iv);
req = &subreq;
err = skcipher_walk_virt(&walk, req, false);
if (!walk.nbytes)
return err;
} else {
tail = 0;
}
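	/*
	 * From here on the walk covers a whole number of blocks; the
	 * final full block plus the partial tail (ciphertext stealing)
	 * were split off into subreq above and are handled after the
	 * main loop.
	 */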
kernel_fpu_begin();
/* calculate first value of T */
aesni_enc(aes_ctx(ctx->raw_tweak_ctx), walk.iv, walk.iv);
while (walk.nbytes > 0) {
int nbytes = walk.nbytes;
if (nbytes < walk.total)
nbytes &= ~(AES_BLOCK_SIZE - 1);
if (encrypt)
aesni_xts_encrypt(aes_ctx(ctx->raw_crypt_ctx),
walk.dst.virt.addr, walk.src.virt.addr,
nbytes, walk.iv);
else
aesni_xts_decrypt(aes_ctx(ctx->raw_crypt_ctx),
walk.dst.virt.addr, walk.src.virt.addr,
nbytes, walk.iv);
kernel_fpu_end();
err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
if (walk.nbytes > 0)
kernel_fpu_begin();
}
if (unlikely(tail > 0 && !err)) {
struct scatterlist sg_src[2], sg_dst[2];
struct scatterlist *src, *dst;
dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
if (req->dst != req->src)
dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);
skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
req->iv);
err = skcipher_walk_virt(&walk, &subreq, false);
if (err)
return err;
kernel_fpu_begin();
if (encrypt)
aesni_xts_encrypt(aes_ctx(ctx->raw_crypt_ctx),
walk.dst.virt.addr, walk.src.virt.addr,
walk.nbytes, walk.iv);
else
aesni_xts_decrypt(aes_ctx(ctx->raw_crypt_ctx),
walk.dst.virt.addr, walk.src.virt.addr,
walk.nbytes, walk.iv);
kernel_fpu_end();
err = skcipher_walk_done(&walk, 0);
}
return err;
}
static int xts_encrypt(struct skcipher_request *req)
{
return xts_crypt(req, true);
}
static int xts_decrypt(struct skcipher_request *req)
{
return xts_crypt(req, false);
}
static struct crypto_alg aesni_cipher_alg = {
.cra_name = "aes",
.cra_driver_name = "aes-aesni",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = CRYPTO_AES_CTX_SIZE,
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
.cia_min_keysize = AES_MIN_KEY_SIZE,
.cia_max_keysize = AES_MAX_KEY_SIZE,
.cia_setkey = aes_set_key,
.cia_encrypt = aesni_encrypt,
.cia_decrypt = aesni_decrypt
}
}
};
static struct skcipher_alg aesni_skciphers[] = {
{
.base = {
.cra_name = "__ecb(aes)",
.cra_driver_name = "__ecb-aes-aesni",
.cra_priority = 400,
.cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = CRYPTO_AES_CTX_SIZE,
.cra_module = THIS_MODULE,
},
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = aesni_skcipher_setkey,
.encrypt = ecb_encrypt,
.decrypt = ecb_decrypt,
}, {
.base = {
.cra_name = "__cbc(aes)",
.cra_driver_name = "__cbc-aes-aesni",
.cra_priority = 400,
.cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = CRYPTO_AES_CTX_SIZE,
.cra_module = THIS_MODULE,
},
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = aesni_skcipher_setkey,
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
}, {
.base = {
.cra_name = "__cts(cbc(aes))",
.cra_driver_name = "__cts-cbc-aes-aesni",
.cra_priority = 400,
.cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = CRYPTO_AES_CTX_SIZE,
.cra_module = THIS_MODULE,
},
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.walksize = 2 * AES_BLOCK_SIZE,
.setkey = aesni_skcipher_setkey,
.encrypt = cts_cbc_encrypt,
.decrypt = cts_cbc_decrypt,
#ifdef CONFIG_X86_64
}, {
.base = {
.cra_name = "__ctr(aes)",
.cra_driver_name = "__ctr-aes-aesni",
.cra_priority = 400,
.cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = 1,
.cra_ctxsize = CRYPTO_AES_CTX_SIZE,
.cra_module = THIS_MODULE,
},
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.chunksize = AES_BLOCK_SIZE,
.setkey = aesni_skcipher_setkey,
.encrypt = ctr_crypt,
.decrypt = ctr_crypt,
#endif
}, {
.base = {
.cra_name = "__xts(aes)",
.cra_driver_name = "__xts-aes-aesni",
.cra_priority = 401,
.cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = XTS_AES_CTX_SIZE,
.cra_module = THIS_MODULE,
},
.min_keysize = 2 * AES_MIN_KEY_SIZE,
.max_keysize = 2 * AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.walksize = 2 * AES_BLOCK_SIZE,
.setkey = xts_aesni_setkey,
.encrypt = xts_encrypt,
.decrypt = xts_decrypt,
}
};
static
struct simd_skcipher_alg *aesni_simd_skciphers[ARRAY_SIZE(aesni_skciphers)];
#ifdef CONFIG_X86_64
/*
* XCTR does not have a non-AVX implementation, so it must be enabled
* conditionally.
*/
static struct skcipher_alg aesni_xctr = {
.base = {
.cra_name = "__xctr(aes)",
.cra_driver_name = "__xctr-aes-aesni",
.cra_priority = 400,
.cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = 1,
.cra_ctxsize = CRYPTO_AES_CTX_SIZE,
.cra_module = THIS_MODULE,
},
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.chunksize = AES_BLOCK_SIZE,
.setkey = aesni_skcipher_setkey,
.encrypt = xctr_crypt,
.decrypt = xctr_crypt,
};
static struct simd_skcipher_alg *aesni_simd_xctr;
#endif /* CONFIG_X86_64 */
#ifdef CONFIG_X86_64
static int generic_gcmaes_set_key(struct crypto_aead *aead, const u8 *key,
unsigned int key_len)
{
struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(aead);
return aes_set_key_common(&ctx->aes_key_expanded, key, key_len) ?:
rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
}
static int generic_gcmaes_encrypt(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
void *aes_ctx = &(ctx->aes_key_expanded);
u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
__be32 counter = cpu_to_be32(1);
memcpy(iv, req->iv, 12);
*((__be32 *)(iv+12)) = counter;
return gcmaes_encrypt(req, req->assoclen, ctx->hash_subkey, iv,
aes_ctx);
}
static int generic_gcmaes_decrypt(struct aead_request *req)
{
__be32 counter = cpu_to_be32(1);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
void *aes_ctx = &(ctx->aes_key_expanded);
u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
memcpy(iv, req->iv, 12);
*((__be32 *)(iv+12)) = counter;
return gcmaes_decrypt(req, req->assoclen, ctx->hash_subkey, iv,
aes_ctx);
}
static struct aead_alg aesni_aeads[] = { {
.setkey = common_rfc4106_set_key,
.setauthsize = common_rfc4106_set_authsize,
.encrypt = helper_rfc4106_encrypt,
.decrypt = helper_rfc4106_decrypt,
.ivsize = GCM_RFC4106_IV_SIZE,
.maxauthsize = 16,
.base = {
.cra_name = "__rfc4106(gcm(aes))",
.cra_driver_name = "__rfc4106-gcm-aesni",
.cra_priority = 400,
.cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
}, {
.setkey = generic_gcmaes_set_key,
.setauthsize = generic_gcmaes_set_authsize,
.encrypt = generic_gcmaes_encrypt,
.decrypt = generic_gcmaes_decrypt,
.ivsize = GCM_AES_IV_SIZE,
.maxauthsize = 16,
.base = {
.cra_name = "__gcm(aes)",
.cra_driver_name = "__generic-gcm-aesni",
.cra_priority = 400,
.cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct generic_gcmaes_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
} };
#else
static struct aead_alg aesni_aeads[0];
#endif
static struct simd_aead_alg *aesni_simd_aeads[ARRAY_SIZE(aesni_aeads)];
static const struct x86_cpu_id aesni_cpu_id[] = {
X86_MATCH_FEATURE(X86_FEATURE_AES, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
static int __init aesni_init(void)
{
int err;
if (!x86_match_cpu(aesni_cpu_id))
return -ENODEV;
#ifdef CONFIG_X86_64
if (boot_cpu_has(X86_FEATURE_AVX2)) {
pr_info("AVX2 version of gcm_enc/dec engaged.\n");
static_branch_enable(&gcm_use_avx);
static_branch_enable(&gcm_use_avx2);
} else if (boot_cpu_has(X86_FEATURE_AVX)) {
pr_info("AVX version of gcm_enc/dec engaged.\n");
static_branch_enable(&gcm_use_avx);
} else {
pr_info("SSE version of gcm_enc/dec engaged.\n");
}
if (boot_cpu_has(X86_FEATURE_AVX)) {
/* optimize performance of ctr mode encryption transform */
static_call_update(aesni_ctr_enc_tfm, aesni_ctr_enc_avx_tfm);
pr_info("AES CTR mode by8 optimization enabled\n");
}
#endif /* CONFIG_X86_64 */
err = crypto_register_alg(&aesni_cipher_alg);
if (err)
return err;
err = simd_register_skciphers_compat(aesni_skciphers,
ARRAY_SIZE(aesni_skciphers),
aesni_simd_skciphers);
if (err)
goto unregister_cipher;
err = simd_register_aeads_compat(aesni_aeads, ARRAY_SIZE(aesni_aeads),
aesni_simd_aeads);
if (err)
goto unregister_skciphers;
#ifdef CONFIG_X86_64
if (boot_cpu_has(X86_FEATURE_AVX))
err = simd_register_skciphers_compat(&aesni_xctr, 1,
&aesni_simd_xctr);
if (err)
goto unregister_aeads;
#endif /* CONFIG_X86_64 */
return 0;
#ifdef CONFIG_X86_64
unregister_aeads:
simd_unregister_aeads(aesni_aeads, ARRAY_SIZE(aesni_aeads),
aesni_simd_aeads);
#endif /* CONFIG_X86_64 */
unregister_skciphers:
simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
aesni_simd_skciphers);
unregister_cipher:
crypto_unregister_alg(&aesni_cipher_alg);
return err;
}
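/*
 * Editor's note: aesni_init() above follows the usual kernel unwind
 * ladder -- each successful registration adds one more label to jump
 * back through when a later step fails, so teardown runs in exact
 * reverse order of setup. The general shape, with hypothetical names:
 */
#if 0
err = register_a();
if (err)
	return err;
err = register_b();
if (err)
	goto out_a;
err = register_c();
if (err)
	goto out_b;
return 0;
out_b:
unregister_b();
out_a:
unregister_a();
return err;
#endif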
static void __exit aesni_exit(void)
{
simd_unregister_aeads(aesni_aeads, ARRAY_SIZE(aesni_aeads),
aesni_simd_aeads);
simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
aesni_simd_skciphers);
crypto_unregister_alg(&aesni_cipher_alg);
#ifdef CONFIG_X86_64
if (boot_cpu_has(X86_FEATURE_AVX))
simd_unregister_skciphers(&aesni_xctr, 1, &aesni_simd_xctr);
#endif /* CONFIG_X86_64 */
}
late_initcall(aesni_init);
module_exit(aesni_exit);
MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("aes");
| linux-master | arch/x86/crypto/aesni-intel_glue.c |
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Glue Code for the AVX512/GFNI assembler implementation of the ARIA Cipher
*
* Copyright (c) 2022 Taehee Yoo <[email protected]>
*/
#include <crypto/algapi.h>
#include <crypto/internal/simd.h>
#include <crypto/aria.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/types.h>
#include "ecb_cbc_helpers.h"
#include "aria-avx.h"
asmlinkage void aria_gfni_avx512_encrypt_64way(const void *ctx, u8 *dst,
const u8 *src);
asmlinkage void aria_gfni_avx512_decrypt_64way(const void *ctx, u8 *dst,
const u8 *src);
asmlinkage void aria_gfni_avx512_ctr_crypt_64way(const void *ctx, u8 *dst,
const u8 *src,
u8 *keystream, u8 *iv);
static struct aria_avx_ops aria_ops;
struct aria_avx512_request_ctx {
u8 keystream[ARIA_GFNI_AVX512_PARALLEL_BLOCK_SIZE];
};
static int ecb_do_encrypt(struct skcipher_request *req, const u32 *rkey)
{
ECB_WALK_START(req, ARIA_BLOCK_SIZE, ARIA_AESNI_PARALLEL_BLOCKS);
ECB_BLOCK(ARIA_GFNI_AVX512_PARALLEL_BLOCKS, aria_ops.aria_encrypt_64way);
ECB_BLOCK(ARIA_AESNI_AVX2_PARALLEL_BLOCKS, aria_ops.aria_encrypt_32way);
ECB_BLOCK(ARIA_AESNI_PARALLEL_BLOCKS, aria_ops.aria_encrypt_16way);
ECB_BLOCK(1, aria_encrypt);
ECB_WALK_END();
}
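/*
 * Editor's note: the ECB_BLOCK() entries above must be listed widest
 * batch first; each loop drains whole batches of its width before the
 * remainder falls through to the next, narrower one. Hand-expanded,
 * the cascade has roughly this shape (illustrative only -- the real
 * macro bodies live in ecb_cbc_helpers.h):
 */
#if 0
while (nbytes >= 64 * ARIA_BLOCK_SIZE) {	/* 64-way AVX512 batches */
	encrypt_64way(ctx, dst, src);		/* hypothetical name */
	src += 64 * ARIA_BLOCK_SIZE;
	dst += 64 * ARIA_BLOCK_SIZE;
	nbytes -= 64 * ARIA_BLOCK_SIZE;
}
/* ...then 32-way, then 16-way, then one block at a time. */
#endif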
static int ecb_do_decrypt(struct skcipher_request *req, const u32 *rkey)
{
ECB_WALK_START(req, ARIA_BLOCK_SIZE, ARIA_AESNI_PARALLEL_BLOCKS);
ECB_BLOCK(ARIA_GFNI_AVX512_PARALLEL_BLOCKS, aria_ops.aria_decrypt_64way);
ECB_BLOCK(ARIA_AESNI_AVX2_PARALLEL_BLOCKS, aria_ops.aria_decrypt_32way);
ECB_BLOCK(ARIA_AESNI_PARALLEL_BLOCKS, aria_ops.aria_decrypt_16way);
ECB_BLOCK(1, aria_decrypt);
ECB_WALK_END();
}
static int aria_avx512_ecb_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct aria_ctx *ctx = crypto_skcipher_ctx(tfm);
return ecb_do_encrypt(req, ctx->enc_key[0]);
}
static int aria_avx512_ecb_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct aria_ctx *ctx = crypto_skcipher_ctx(tfm);
return ecb_do_decrypt(req, ctx->dec_key[0]);
}
static int aria_avx512_set_key(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
return aria_set_key(&tfm->base, key, keylen);
}
static int aria_avx512_ctr_encrypt(struct skcipher_request *req)
{
struct aria_avx512_request_ctx *req_ctx = skcipher_request_ctx(req);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct aria_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk walk;
unsigned int nbytes;
int err;
err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes) > 0) {
const u8 *src = walk.src.virt.addr;
u8 *dst = walk.dst.virt.addr;
while (nbytes >= ARIA_GFNI_AVX512_PARALLEL_BLOCK_SIZE) {
kernel_fpu_begin();
aria_ops.aria_ctr_crypt_64way(ctx, dst, src,
&req_ctx->keystream[0],
walk.iv);
kernel_fpu_end();
dst += ARIA_GFNI_AVX512_PARALLEL_BLOCK_SIZE;
src += ARIA_GFNI_AVX512_PARALLEL_BLOCK_SIZE;
nbytes -= ARIA_GFNI_AVX512_PARALLEL_BLOCK_SIZE;
}
while (nbytes >= ARIA_AESNI_AVX2_PARALLEL_BLOCK_SIZE) {
kernel_fpu_begin();
aria_ops.aria_ctr_crypt_32way(ctx, dst, src,
&req_ctx->keystream[0],
walk.iv);
kernel_fpu_end();
dst += ARIA_AESNI_AVX2_PARALLEL_BLOCK_SIZE;
src += ARIA_AESNI_AVX2_PARALLEL_BLOCK_SIZE;
nbytes -= ARIA_AESNI_AVX2_PARALLEL_BLOCK_SIZE;
}
while (nbytes >= ARIA_AESNI_PARALLEL_BLOCK_SIZE) {
kernel_fpu_begin();
aria_ops.aria_ctr_crypt_16way(ctx, dst, src,
&req_ctx->keystream[0],
walk.iv);
kernel_fpu_end();
dst += ARIA_AESNI_PARALLEL_BLOCK_SIZE;
src += ARIA_AESNI_PARALLEL_BLOCK_SIZE;
nbytes -= ARIA_AESNI_PARALLEL_BLOCK_SIZE;
}
while (nbytes >= ARIA_BLOCK_SIZE) {
memcpy(&req_ctx->keystream[0], walk.iv,
ARIA_BLOCK_SIZE);
crypto_inc(walk.iv, ARIA_BLOCK_SIZE);
aria_encrypt(ctx, &req_ctx->keystream[0],
&req_ctx->keystream[0]);
crypto_xor_cpy(dst, src, &req_ctx->keystream[0],
ARIA_BLOCK_SIZE);
dst += ARIA_BLOCK_SIZE;
src += ARIA_BLOCK_SIZE;
nbytes -= ARIA_BLOCK_SIZE;
}
if (walk.nbytes == walk.total && nbytes > 0) {
memcpy(&req_ctx->keystream[0], walk.iv,
ARIA_BLOCK_SIZE);
crypto_inc(walk.iv, ARIA_BLOCK_SIZE);
aria_encrypt(ctx, &req_ctx->keystream[0],
&req_ctx->keystream[0]);
crypto_xor_cpy(dst, src, &req_ctx->keystream[0],
nbytes);
dst += nbytes;
src += nbytes;
nbytes = 0;
}
err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
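/*
 * Editor's note on the final branch of the loop above: CTR turns the
 * block cipher into a stream cipher, so a trailing partial block is
 * handled by generating one more keystream block and XORing only the
 * bytes that remain. Reduced to its essentials (illustrative helper,
 * not from this file):
 */
#if 0
static void ctr_final_partial(void *ctx, u8 *dst, const u8 *src,
			      unsigned int tail, u8 *iv, u8 *ks)
{
	memcpy(ks, iv, ARIA_BLOCK_SIZE);
	crypto_inc(iv, ARIA_BLOCK_SIZE);
	aria_encrypt(ctx, ks, ks);
	crypto_xor_cpy(dst, src, ks, tail);	/* only 'tail' bytes */
}
#endif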
static int aria_avx512_init_tfm(struct crypto_skcipher *tfm)
{
crypto_skcipher_set_reqsize(tfm,
sizeof(struct aria_avx512_request_ctx));
return 0;
}
static struct skcipher_alg aria_algs[] = {
{
.base.cra_name = "__ecb(aria)",
.base.cra_driver_name = "__ecb-aria-avx512",
.base.cra_priority = 600,
.base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = ARIA_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct aria_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = ARIA_MIN_KEY_SIZE,
.max_keysize = ARIA_MAX_KEY_SIZE,
.setkey = aria_avx512_set_key,
.encrypt = aria_avx512_ecb_encrypt,
.decrypt = aria_avx512_ecb_decrypt,
}, {
.base.cra_name = "__ctr(aria)",
.base.cra_driver_name = "__ctr-aria-avx512",
.base.cra_priority = 600,
.base.cra_flags = CRYPTO_ALG_INTERNAL |
CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct aria_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = ARIA_MIN_KEY_SIZE,
.max_keysize = ARIA_MAX_KEY_SIZE,
.ivsize = ARIA_BLOCK_SIZE,
.chunksize = ARIA_BLOCK_SIZE,
.setkey = aria_avx512_set_key,
.encrypt = aria_avx512_ctr_encrypt,
.decrypt = aria_avx512_ctr_encrypt,
.init = aria_avx512_init_tfm,
}
};
static struct simd_skcipher_alg *aria_simd_algs[ARRAY_SIZE(aria_algs)];
static int __init aria_avx512_init(void)
{
const char *feature_name;
if (!boot_cpu_has(X86_FEATURE_AVX) ||
!boot_cpu_has(X86_FEATURE_AVX2) ||
!boot_cpu_has(X86_FEATURE_AVX512F) ||
!boot_cpu_has(X86_FEATURE_AVX512VL) ||
!boot_cpu_has(X86_FEATURE_GFNI) ||
!boot_cpu_has(X86_FEATURE_OSXSAVE)) {
pr_info("AVX512/GFNI instructions are not detected.\n");
return -ENODEV;
}
if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM |
XFEATURE_MASK_AVX512, &feature_name)) {
pr_info("CPU feature '%s' is not supported.\n", feature_name);
return -ENODEV;
}
aria_ops.aria_encrypt_16way = aria_aesni_avx_gfni_encrypt_16way;
aria_ops.aria_decrypt_16way = aria_aesni_avx_gfni_decrypt_16way;
aria_ops.aria_ctr_crypt_16way = aria_aesni_avx_gfni_ctr_crypt_16way;
aria_ops.aria_encrypt_32way = aria_aesni_avx2_gfni_encrypt_32way;
aria_ops.aria_decrypt_32way = aria_aesni_avx2_gfni_decrypt_32way;
aria_ops.aria_ctr_crypt_32way = aria_aesni_avx2_gfni_ctr_crypt_32way;
aria_ops.aria_encrypt_64way = aria_gfni_avx512_encrypt_64way;
aria_ops.aria_decrypt_64way = aria_gfni_avx512_decrypt_64way;
aria_ops.aria_ctr_crypt_64way = aria_gfni_avx512_ctr_crypt_64way;
return simd_register_skciphers_compat(aria_algs,
ARRAY_SIZE(aria_algs),
aria_simd_algs);
}
static void __exit aria_avx512_exit(void)
{
simd_unregister_skciphers(aria_algs, ARRAY_SIZE(aria_algs),
aria_simd_algs);
}
module_init(aria_avx512_init);
module_exit(aria_avx512_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Taehee Yoo <[email protected]>");
MODULE_DESCRIPTION("ARIA Cipher Algorithm, AVX512/GFNI optimized");
MODULE_ALIAS_CRYPTO("aria");
MODULE_ALIAS_CRYPTO("aria-gfni-avx512");
| linux-master | arch/x86/crypto/aria_gfni_avx512_glue.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Glue Code for the AVX assembler implementation of the Cast6 Cipher
*
* Copyright (C) 2012 Johannes Goetzfried
* <[email protected]>
*
* Copyright © 2013 Jussi Kivilinna <[email protected]>
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/cast6.h>
#include <crypto/internal/simd.h>
#include "ecb_cbc_helpers.h"
#define CAST6_PARALLEL_BLOCKS 8
asmlinkage void cast6_ecb_enc_8way(const void *ctx, u8 *dst, const u8 *src);
asmlinkage void cast6_ecb_dec_8way(const void *ctx, u8 *dst, const u8 *src);
asmlinkage void cast6_cbc_dec_8way(const void *ctx, u8 *dst, const u8 *src);
static int cast6_setkey_skcipher(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen)
{
return cast6_setkey(&tfm->base, key, keylen);
}
static int ecb_encrypt(struct skcipher_request *req)
{
ECB_WALK_START(req, CAST6_BLOCK_SIZE, CAST6_PARALLEL_BLOCKS);
ECB_BLOCK(CAST6_PARALLEL_BLOCKS, cast6_ecb_enc_8way);
ECB_BLOCK(1, __cast6_encrypt);
ECB_WALK_END();
}
static int ecb_decrypt(struct skcipher_request *req)
{
ECB_WALK_START(req, CAST6_BLOCK_SIZE, CAST6_PARALLEL_BLOCKS);
ECB_BLOCK(CAST6_PARALLEL_BLOCKS, cast6_ecb_dec_8way);
ECB_BLOCK(1, __cast6_decrypt);
ECB_WALK_END();
}
static int cbc_encrypt(struct skcipher_request *req)
{
CBC_WALK_START(req, CAST6_BLOCK_SIZE, -1);
CBC_ENC_BLOCK(__cast6_encrypt);
CBC_WALK_END();
}
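/*
 * Editor's note: CBC encryption is inherently serial,
 *
 *	C[i] = E_k(P[i] ^ C[i-1])
 *
 * so each block needs the previous ciphertext before it can start and
 * no multi-way SIMD path is possible; the -1 above asks the walk helper
 * for no kernel_fpu_begin()/end() sections (see ecb_cbc_helpers.h).
 * Decryption,
 *
 *	P[i] = D_k(C[i]) ^ C[i-1]
 *
 * reads only ciphertext that is already in hand, which is why the
 * 8-way batched cbc_dec path below exists.
 */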
static int cbc_decrypt(struct skcipher_request *req)
{
CBC_WALK_START(req, CAST6_BLOCK_SIZE, CAST6_PARALLEL_BLOCKS);
CBC_DEC_BLOCK(CAST6_PARALLEL_BLOCKS, cast6_cbc_dec_8way);
CBC_DEC_BLOCK(1, __cast6_decrypt);
CBC_WALK_END();
}
static struct skcipher_alg cast6_algs[] = {
{
.base.cra_name = "__ecb(cast6)",
.base.cra_driver_name = "__ecb-cast6-avx",
.base.cra_priority = 200,
.base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = CAST6_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct cast6_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = CAST6_MIN_KEY_SIZE,
.max_keysize = CAST6_MAX_KEY_SIZE,
.setkey = cast6_setkey_skcipher,
.encrypt = ecb_encrypt,
.decrypt = ecb_decrypt,
}, {
.base.cra_name = "__cbc(cast6)",
.base.cra_driver_name = "__cbc-cast6-avx",
.base.cra_priority = 200,
.base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = CAST6_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct cast6_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = CAST6_MIN_KEY_SIZE,
.max_keysize = CAST6_MAX_KEY_SIZE,
.ivsize = CAST6_BLOCK_SIZE,
.setkey = cast6_setkey_skcipher,
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
},
};
static struct simd_skcipher_alg *cast6_simd_algs[ARRAY_SIZE(cast6_algs)];
static int __init cast6_init(void)
{
const char *feature_name;
if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
&feature_name)) {
pr_info("CPU feature '%s' is not supported.\n", feature_name);
return -ENODEV;
}
return simd_register_skciphers_compat(cast6_algs,
ARRAY_SIZE(cast6_algs),
cast6_simd_algs);
}
static void __exit cast6_exit(void)
{
simd_unregister_skciphers(cast6_algs, ARRAY_SIZE(cast6_algs),
cast6_simd_algs);
}
module_init(cast6_init);
module_exit(cast6_exit);
MODULE_DESCRIPTION("Cast6 Cipher Algorithm, AVX optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("cast6");
| linux-master | arch/x86/crypto/cast6_avx_glue.c |
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* SM3 Secure Hash Algorithm, AVX assembler accelerated,
* as specified in: https://datatracker.ietf.org/doc/html/draft-sca-cfrg-sm3-02
*
* Copyright (C) 2021 Tianjia Zhang <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <crypto/sm3.h>
#include <crypto/sm3_base.h>
#include <asm/simd.h>
asmlinkage void sm3_transform_avx(struct sm3_state *state,
const u8 *data, int nblocks);
static int sm3_avx_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
struct sm3_state *sctx = shash_desc_ctx(desc);
if (!crypto_simd_usable() ||
(sctx->count % SM3_BLOCK_SIZE) + len < SM3_BLOCK_SIZE) {
sm3_update(sctx, data, len);
return 0;
}
/*
* Make sure struct sm3_state begins directly with the SM3
* 256-bit internal state, as this is what the asm functions expect.
*/
BUILD_BUG_ON(offsetof(struct sm3_state, state) != 0);
kernel_fpu_begin();
sm3_base_do_update(desc, data, len, sm3_transform_avx);
kernel_fpu_end();
return 0;
}
static int sm3_avx_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
if (!crypto_simd_usable()) {
struct sm3_state *sctx = shash_desc_ctx(desc);
if (len)
sm3_update(sctx, data, len);
sm3_final(sctx, out);
return 0;
}
kernel_fpu_begin();
if (len)
sm3_base_do_update(desc, data, len, sm3_transform_avx);
sm3_base_do_finalize(desc, sm3_transform_avx);
kernel_fpu_end();
return sm3_base_finish(desc, out);
}
static int sm3_avx_final(struct shash_desc *desc, u8 *out)
{
if (!crypto_simd_usable()) {
sm3_final(shash_desc_ctx(desc), out);
return 0;
}
kernel_fpu_begin();
sm3_base_do_finalize(desc, sm3_transform_avx);
kernel_fpu_end();
return sm3_base_finish(desc, out);
}
static struct shash_alg sm3_avx_alg = {
.digestsize = SM3_DIGEST_SIZE,
.init = sm3_base_init,
.update = sm3_avx_update,
.final = sm3_avx_final,
.finup = sm3_avx_finup,
.descsize = sizeof(struct sm3_state),
.base = {
.cra_name = "sm3",
.cra_driver_name = "sm3-avx",
.cra_priority = 300,
.cra_blocksize = SM3_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
};
static int __init sm3_avx_mod_init(void)
{
const char *feature_name;
if (!boot_cpu_has(X86_FEATURE_AVX)) {
pr_info("AVX instruction are not detected.\n");
return -ENODEV;
}
if (!boot_cpu_has(X86_FEATURE_BMI2)) {
pr_info("BMI2 instruction are not detected.\n");
return -ENODEV;
}
if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
&feature_name)) {
pr_info("CPU feature '%s' is not supported.\n", feature_name);
return -ENODEV;
}
return crypto_register_shash(&sm3_avx_alg);
}
static void __exit sm3_avx_mod_exit(void)
{
crypto_unregister_shash(&sm3_avx_alg);
}
module_init(sm3_avx_mod_init);
module_exit(sm3_avx_mod_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Tianjia Zhang <[email protected]>");
MODULE_DESCRIPTION("SM3 Secure Hash Algorithm, AVX assembler accelerated");
MODULE_ALIAS_CRYPTO("sm3");
MODULE_ALIAS_CRYPTO("sm3-avx");
| linux-master | arch/x86/crypto/sm3_avx_glue.c |
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Glue Code for the AVX2/AES-NI/GFNI assembler implementation of the ARIA Cipher
*
* Copyright (c) 2022 Taehee Yoo <[email protected]>
*/
#include <crypto/algapi.h>
#include <crypto/internal/simd.h>
#include <crypto/aria.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/types.h>
#include "ecb_cbc_helpers.h"
#include "aria-avx.h"
asmlinkage void aria_aesni_avx2_encrypt_32way(const void *ctx, u8 *dst,
const u8 *src);
EXPORT_SYMBOL_GPL(aria_aesni_avx2_encrypt_32way);
asmlinkage void aria_aesni_avx2_decrypt_32way(const void *ctx, u8 *dst,
const u8 *src);
EXPORT_SYMBOL_GPL(aria_aesni_avx2_decrypt_32way);
asmlinkage void aria_aesni_avx2_ctr_crypt_32way(const void *ctx, u8 *dst,
const u8 *src,
u8 *keystream, u8 *iv);
EXPORT_SYMBOL_GPL(aria_aesni_avx2_ctr_crypt_32way);
#ifdef CONFIG_AS_GFNI
asmlinkage void aria_aesni_avx2_gfni_encrypt_32way(const void *ctx, u8 *dst,
const u8 *src);
EXPORT_SYMBOL_GPL(aria_aesni_avx2_gfni_encrypt_32way);
asmlinkage void aria_aesni_avx2_gfni_decrypt_32way(const void *ctx, u8 *dst,
const u8 *src);
EXPORT_SYMBOL_GPL(aria_aesni_avx2_gfni_decrypt_32way);
asmlinkage void aria_aesni_avx2_gfni_ctr_crypt_32way(const void *ctx, u8 *dst,
const u8 *src,
u8 *keystream, u8 *iv);
EXPORT_SYMBOL_GPL(aria_aesni_avx2_gfni_ctr_crypt_32way);
#endif /* CONFIG_AS_GFNI */
static struct aria_avx_ops aria_ops;
struct aria_avx2_request_ctx {
u8 keystream[ARIA_AESNI_AVX2_PARALLEL_BLOCK_SIZE];
};
static int ecb_do_encrypt(struct skcipher_request *req, const u32 *rkey)
{
ECB_WALK_START(req, ARIA_BLOCK_SIZE, ARIA_AESNI_PARALLEL_BLOCKS);
ECB_BLOCK(ARIA_AESNI_AVX2_PARALLEL_BLOCKS, aria_ops.aria_encrypt_32way);
ECB_BLOCK(ARIA_AESNI_PARALLEL_BLOCKS, aria_ops.aria_encrypt_16way);
ECB_BLOCK(1, aria_encrypt);
ECB_WALK_END();
}
static int ecb_do_decrypt(struct skcipher_request *req, const u32 *rkey)
{
ECB_WALK_START(req, ARIA_BLOCK_SIZE, ARIA_AESNI_PARALLEL_BLOCKS);
ECB_BLOCK(ARIA_AESNI_AVX2_PARALLEL_BLOCKS, aria_ops.aria_decrypt_32way);
ECB_BLOCK(ARIA_AESNI_PARALLEL_BLOCKS, aria_ops.aria_decrypt_16way);
ECB_BLOCK(1, aria_decrypt);
ECB_WALK_END();
}
static int aria_avx2_ecb_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct aria_ctx *ctx = crypto_skcipher_ctx(tfm);
return ecb_do_encrypt(req, ctx->enc_key[0]);
}
static int aria_avx2_ecb_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct aria_ctx *ctx = crypto_skcipher_ctx(tfm);
return ecb_do_decrypt(req, ctx->dec_key[0]);
}
static int aria_avx2_set_key(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
return aria_set_key(&tfm->base, key, keylen);
}
static int aria_avx2_ctr_encrypt(struct skcipher_request *req)
{
struct aria_avx2_request_ctx *req_ctx = skcipher_request_ctx(req);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct aria_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk walk;
unsigned int nbytes;
int err;
err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes) > 0) {
const u8 *src = walk.src.virt.addr;
u8 *dst = walk.dst.virt.addr;
while (nbytes >= ARIA_AESNI_AVX2_PARALLEL_BLOCK_SIZE) {
kernel_fpu_begin();
aria_ops.aria_ctr_crypt_32way(ctx, dst, src,
&req_ctx->keystream[0],
walk.iv);
kernel_fpu_end();
dst += ARIA_AESNI_AVX2_PARALLEL_BLOCK_SIZE;
src += ARIA_AESNI_AVX2_PARALLEL_BLOCK_SIZE;
nbytes -= ARIA_AESNI_AVX2_PARALLEL_BLOCK_SIZE;
}
while (nbytes >= ARIA_AESNI_PARALLEL_BLOCK_SIZE) {
kernel_fpu_begin();
aria_ops.aria_ctr_crypt_16way(ctx, dst, src,
&req_ctx->keystream[0],
walk.iv);
kernel_fpu_end();
dst += ARIA_AESNI_PARALLEL_BLOCK_SIZE;
src += ARIA_AESNI_PARALLEL_BLOCK_SIZE;
nbytes -= ARIA_AESNI_PARALLEL_BLOCK_SIZE;
}
while (nbytes >= ARIA_BLOCK_SIZE) {
memcpy(&req_ctx->keystream[0], walk.iv, ARIA_BLOCK_SIZE);
crypto_inc(walk.iv, ARIA_BLOCK_SIZE);
aria_encrypt(ctx, &req_ctx->keystream[0],
&req_ctx->keystream[0]);
crypto_xor_cpy(dst, src, &req_ctx->keystream[0],
ARIA_BLOCK_SIZE);
dst += ARIA_BLOCK_SIZE;
src += ARIA_BLOCK_SIZE;
nbytes -= ARIA_BLOCK_SIZE;
}
if (walk.nbytes == walk.total && nbytes > 0) {
memcpy(&req_ctx->keystream[0], walk.iv,
ARIA_BLOCK_SIZE);
crypto_inc(walk.iv, ARIA_BLOCK_SIZE);
aria_encrypt(ctx, &req_ctx->keystream[0],
&req_ctx->keystream[0]);
crypto_xor_cpy(dst, src, &req_ctx->keystream[0],
nbytes);
dst += nbytes;
src += nbytes;
nbytes = 0;
}
err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
static int aria_avx2_init_tfm(struct crypto_skcipher *tfm)
{
crypto_skcipher_set_reqsize(tfm, sizeof(struct aria_avx2_request_ctx));
return 0;
}
static struct skcipher_alg aria_algs[] = {
{
.base.cra_name = "__ecb(aria)",
.base.cra_driver_name = "__ecb-aria-avx2",
.base.cra_priority = 500,
.base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = ARIA_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct aria_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = ARIA_MIN_KEY_SIZE,
.max_keysize = ARIA_MAX_KEY_SIZE,
.setkey = aria_avx2_set_key,
.encrypt = aria_avx2_ecb_encrypt,
.decrypt = aria_avx2_ecb_decrypt,
}, {
.base.cra_name = "__ctr(aria)",
.base.cra_driver_name = "__ctr-aria-avx2",
.base.cra_priority = 500,
.base.cra_flags = CRYPTO_ALG_INTERNAL |
CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct aria_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = ARIA_MIN_KEY_SIZE,
.max_keysize = ARIA_MAX_KEY_SIZE,
.ivsize = ARIA_BLOCK_SIZE,
.chunksize = ARIA_BLOCK_SIZE,
.setkey = aria_avx2_set_key,
.encrypt = aria_avx2_ctr_encrypt,
.decrypt = aria_avx2_ctr_encrypt,
.init = aria_avx2_init_tfm,
}
};
static struct simd_skcipher_alg *aria_simd_algs[ARRAY_SIZE(aria_algs)];
static int __init aria_avx2_init(void)
{
const char *feature_name;
if (!boot_cpu_has(X86_FEATURE_AVX) ||
!boot_cpu_has(X86_FEATURE_AVX2) ||
!boot_cpu_has(X86_FEATURE_AES) ||
!boot_cpu_has(X86_FEATURE_OSXSAVE)) {
pr_info("AVX2 or AES-NI instructions are not detected.\n");
return -ENODEV;
}
if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
&feature_name)) {
pr_info("CPU feature '%s' is not supported.\n", feature_name);
return -ENODEV;
}
if (boot_cpu_has(X86_FEATURE_GFNI) && IS_ENABLED(CONFIG_AS_GFNI)) {
aria_ops.aria_encrypt_16way = aria_aesni_avx_gfni_encrypt_16way;
aria_ops.aria_decrypt_16way = aria_aesni_avx_gfni_decrypt_16way;
aria_ops.aria_ctr_crypt_16way = aria_aesni_avx_gfni_ctr_crypt_16way;
aria_ops.aria_encrypt_32way = aria_aesni_avx2_gfni_encrypt_32way;
aria_ops.aria_decrypt_32way = aria_aesni_avx2_gfni_decrypt_32way;
aria_ops.aria_ctr_crypt_32way = aria_aesni_avx2_gfni_ctr_crypt_32way;
} else {
aria_ops.aria_encrypt_16way = aria_aesni_avx_encrypt_16way;
aria_ops.aria_decrypt_16way = aria_aesni_avx_decrypt_16way;
aria_ops.aria_ctr_crypt_16way = aria_aesni_avx_ctr_crypt_16way;
aria_ops.aria_encrypt_32way = aria_aesni_avx2_encrypt_32way;
aria_ops.aria_decrypt_32way = aria_aesni_avx2_decrypt_32way;
aria_ops.aria_ctr_crypt_32way = aria_aesni_avx2_ctr_crypt_32way;
}
return simd_register_skciphers_compat(aria_algs,
ARRAY_SIZE(aria_algs),
aria_simd_algs);
}
static void __exit aria_avx2_exit(void)
{
simd_unregister_skciphers(aria_algs, ARRAY_SIZE(aria_algs),
aria_simd_algs);
}
module_init(aria_avx2_init);
module_exit(aria_avx2_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Taehee Yoo <[email protected]>");
MODULE_DESCRIPTION("ARIA Cipher Algorithm, AVX2/AES-NI/GFNI optimized");
MODULE_ALIAS_CRYPTO("aria");
MODULE_ALIAS_CRYPTO("aria-aesni-avx2");
| linux-master | arch/x86/crypto/aria_aesni_avx2_glue.c |
// SPDX-License-Identifier: GPL-2.0
/*
* NHPoly1305 - ε-almost-∆-universal hash function for Adiantum
* (SSE2 accelerated version)
*
* Copyright 2018 Google LLC
*/
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <crypto/nhpoly1305.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <asm/simd.h>
asmlinkage void nh_sse2(const u32 *key, const u8 *message, size_t message_len,
__le64 hash[NH_NUM_PASSES]);
static int nhpoly1305_sse2_update(struct shash_desc *desc,
const u8 *src, unsigned int srclen)
{
if (srclen < 64 || !crypto_simd_usable())
return crypto_nhpoly1305_update(desc, src, srclen);
do {
unsigned int n = min_t(unsigned int, srclen, SZ_4K);
kernel_fpu_begin();
crypto_nhpoly1305_update_helper(desc, src, n, nh_sse2);
kernel_fpu_end();
src += n;
srclen -= n;
} while (srclen);
return 0;
}
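/*
 * Editor's sketch of the pattern used above: kernel_fpu_begin()
 * disables preemption, so long inputs are fed to the SIMD core at most
 * one page at a time to bound scheduling latency. Generic shape
 * (hypothetical names, not from this file):
 */
#if 0
static void fpu_in_page_chunks(const u8 *src, size_t len,
			       void (*step)(const u8 *src, size_t n))
{
	do {
		size_t n = min_t(size_t, len, SZ_4K);

		kernel_fpu_begin();
		step(src, n);
		kernel_fpu_end();
		src += n;
		len -= n;
	} while (len);
}
#endif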
static struct shash_alg nhpoly1305_alg = {
.base.cra_name = "nhpoly1305",
.base.cra_driver_name = "nhpoly1305-sse2",
.base.cra_priority = 200,
.base.cra_ctxsize = sizeof(struct nhpoly1305_key),
.base.cra_module = THIS_MODULE,
.digestsize = POLY1305_DIGEST_SIZE,
.init = crypto_nhpoly1305_init,
.update = nhpoly1305_sse2_update,
.final = crypto_nhpoly1305_final,
.setkey = crypto_nhpoly1305_setkey,
.descsize = sizeof(struct nhpoly1305_state),
};
static int __init nhpoly1305_mod_init(void)
{
if (!boot_cpu_has(X86_FEATURE_XMM2))
return -ENODEV;
return crypto_register_shash(&nhpoly1305_alg);
}
static void __exit nhpoly1305_mod_exit(void)
{
crypto_unregister_shash(&nhpoly1305_alg);
}
module_init(nhpoly1305_mod_init);
module_exit(nhpoly1305_mod_exit);
MODULE_DESCRIPTION("NHPoly1305 ε-almost-∆-universal hash function (SSE2-accelerated)");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Eric Biggers <[email protected]>");
MODULE_ALIAS_CRYPTO("nhpoly1305");
MODULE_ALIAS_CRYPTO("nhpoly1305-sse2");
| linux-master | arch/x86/crypto/nhpoly1305-sse2-glue.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright (C) 2015-2019 Jason A. Donenfeld <[email protected]>. All Rights Reserved.
*/
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/poly1305.h>
#include <crypto/internal/simd.h>
#include <linux/crypto.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <asm/intel-family.h>
#include <asm/simd.h>
asmlinkage void poly1305_init_x86_64(void *ctx,
const u8 key[POLY1305_BLOCK_SIZE]);
asmlinkage void poly1305_blocks_x86_64(void *ctx, const u8 *inp,
const size_t len, const u32 padbit);
asmlinkage void poly1305_emit_x86_64(void *ctx, u8 mac[POLY1305_DIGEST_SIZE],
const u32 nonce[4]);
asmlinkage void poly1305_emit_avx(void *ctx, u8 mac[POLY1305_DIGEST_SIZE],
const u32 nonce[4]);
asmlinkage void poly1305_blocks_avx(void *ctx, const u8 *inp, const size_t len,
const u32 padbit);
asmlinkage void poly1305_blocks_avx2(void *ctx, const u8 *inp, const size_t len,
const u32 padbit);
asmlinkage void poly1305_blocks_avx512(void *ctx, const u8 *inp,
const size_t len, const u32 padbit);
static __ro_after_init DEFINE_STATIC_KEY_FALSE(poly1305_use_avx);
static __ro_after_init DEFINE_STATIC_KEY_FALSE(poly1305_use_avx2);
static __ro_after_init DEFINE_STATIC_KEY_FALSE(poly1305_use_avx512);
struct poly1305_arch_internal {
union {
struct {
u32 h[5];
u32 is_base2_26;
};
u64 hs[3];
};
u64 r[2];
u64 pad;
struct { u32 r2, r1, r4, r3; } rn[9];
};
/* The AVX code uses base 2^26, while the scalar code uses base 2^64. If we hit
* the unfortunate situation of using AVX and then having to go back to scalar
* -- because the user is silly and has called the update function from two
* separate contexts -- then we need to convert back to the original base before
* proceeding. It is possible to reason that the initial reduction below is
* sufficient given the implementation invariants. However, for an avoidance of
* doubt and because this is not performance critical, we do the full reduction
* anyway. Z3 proof of below function: https://xn--4db.cc/ltPtHCKN/py
*/
static void convert_to_base2_64(void *ctx)
{
struct poly1305_arch_internal *state = ctx;
u32 cy;
if (!state->is_base2_26)
return;
cy = state->h[0] >> 26; state->h[0] &= 0x3ffffff; state->h[1] += cy;
cy = state->h[1] >> 26; state->h[1] &= 0x3ffffff; state->h[2] += cy;
cy = state->h[2] >> 26; state->h[2] &= 0x3ffffff; state->h[3] += cy;
cy = state->h[3] >> 26; state->h[3] &= 0x3ffffff; state->h[4] += cy;
state->hs[0] = ((u64)state->h[2] << 52) | ((u64)state->h[1] << 26) | state->h[0];
state->hs[1] = ((u64)state->h[4] << 40) | ((u64)state->h[3] << 14) | (state->h[2] >> 12);
state->hs[2] = state->h[4] >> 24;
#define ULT(a, b) ((a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1))
cy = (state->hs[2] >> 2) + (state->hs[2] & ~3ULL);
state->hs[2] &= 3;
state->hs[0] += cy;
state->hs[1] += (cy = ULT(state->hs[0], cy));
state->hs[2] += ULT(state->hs[1], cy);
#undef ULT
state->is_base2_26 = 0;
}
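/*
 * Editor's aside: the ULT() macro above is a branch-free unsigned
 * "a < b" -- the expression leaves the would-be borrow of a - b in the
 * top bit, which the shift then extracts as 0 or 1. A minimal check of
 * that identity (illustrative, u64 only):
 */
#if 0
static int ult_matches_native(u64 a, u64 b)
{
	u64 ult = (a ^ ((a ^ b) | ((a - b) ^ b))) >> 63;

	return ult == (a < b ? 1 : 0);
}
#endif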
static void poly1305_simd_init(void *ctx, const u8 key[POLY1305_BLOCK_SIZE])
{
poly1305_init_x86_64(ctx, key);
}
static void poly1305_simd_blocks(void *ctx, const u8 *inp, size_t len,
const u32 padbit)
{
struct poly1305_arch_internal *state = ctx;
/* SIMD disables preemption, so relax after processing each page. */
BUILD_BUG_ON(SZ_4K < POLY1305_BLOCK_SIZE ||
SZ_4K % POLY1305_BLOCK_SIZE);
if (!static_branch_likely(&poly1305_use_avx) ||
(len < (POLY1305_BLOCK_SIZE * 18) && !state->is_base2_26) ||
!crypto_simd_usable()) {
convert_to_base2_64(ctx);
poly1305_blocks_x86_64(ctx, inp, len, padbit);
return;
}
do {
const size_t bytes = min_t(size_t, len, SZ_4K);
kernel_fpu_begin();
if (IS_ENABLED(CONFIG_AS_AVX512) && static_branch_likely(&poly1305_use_avx512))
poly1305_blocks_avx512(ctx, inp, bytes, padbit);
else if (static_branch_likely(&poly1305_use_avx2))
poly1305_blocks_avx2(ctx, inp, bytes, padbit);
else
poly1305_blocks_avx(ctx, inp, bytes, padbit);
kernel_fpu_end();
len -= bytes;
inp += bytes;
} while (len);
}
static void poly1305_simd_emit(void *ctx, u8 mac[POLY1305_DIGEST_SIZE],
const u32 nonce[4])
{
if (!static_branch_likely(&poly1305_use_avx))
poly1305_emit_x86_64(ctx, mac, nonce);
else
poly1305_emit_avx(ctx, mac, nonce);
}
void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
{
poly1305_simd_init(&dctx->h, key);
dctx->s[0] = get_unaligned_le32(&key[16]);
dctx->s[1] = get_unaligned_le32(&key[20]);
dctx->s[2] = get_unaligned_le32(&key[24]);
dctx->s[3] = get_unaligned_le32(&key[28]);
dctx->buflen = 0;
dctx->sset = true;
}
EXPORT_SYMBOL(poly1305_init_arch);
static unsigned int crypto_poly1305_setdctxkey(struct poly1305_desc_ctx *dctx,
const u8 *inp, unsigned int len)
{
unsigned int acc = 0;
if (unlikely(!dctx->sset)) {
if (!dctx->rset && len >= POLY1305_BLOCK_SIZE) {
poly1305_simd_init(&dctx->h, inp);
inp += POLY1305_BLOCK_SIZE;
len -= POLY1305_BLOCK_SIZE;
acc += POLY1305_BLOCK_SIZE;
dctx->rset = 1;
}
if (len >= POLY1305_BLOCK_SIZE) {
dctx->s[0] = get_unaligned_le32(&inp[0]);
dctx->s[1] = get_unaligned_le32(&inp[4]);
dctx->s[2] = get_unaligned_le32(&inp[8]);
dctx->s[3] = get_unaligned_le32(&inp[12]);
acc += POLY1305_BLOCK_SIZE;
dctx->sset = true;
}
}
return acc;
}
void poly1305_update_arch(struct poly1305_desc_ctx *dctx, const u8 *src,
unsigned int srclen)
{
unsigned int bytes, used;
if (unlikely(dctx->buflen)) {
bytes = min(srclen, POLY1305_BLOCK_SIZE - dctx->buflen);
memcpy(dctx->buf + dctx->buflen, src, bytes);
src += bytes;
srclen -= bytes;
dctx->buflen += bytes;
if (dctx->buflen == POLY1305_BLOCK_SIZE) {
if (likely(!crypto_poly1305_setdctxkey(dctx, dctx->buf, POLY1305_BLOCK_SIZE)))
poly1305_simd_blocks(&dctx->h, dctx->buf, POLY1305_BLOCK_SIZE, 1);
dctx->buflen = 0;
}
}
if (likely(srclen >= POLY1305_BLOCK_SIZE)) {
bytes = round_down(srclen, POLY1305_BLOCK_SIZE);
srclen -= bytes;
used = crypto_poly1305_setdctxkey(dctx, src, bytes);
if (likely(bytes - used))
poly1305_simd_blocks(&dctx->h, src + used, bytes - used, 1);
src += bytes;
}
if (unlikely(srclen)) {
dctx->buflen = srclen;
memcpy(dctx->buf, src, srclen);
}
}
EXPORT_SYMBOL(poly1305_update_arch);
void poly1305_final_arch(struct poly1305_desc_ctx *dctx, u8 *dst)
{
if (unlikely(dctx->buflen)) {
dctx->buf[dctx->buflen++] = 1;
memset(dctx->buf + dctx->buflen, 0,
POLY1305_BLOCK_SIZE - dctx->buflen);
poly1305_simd_blocks(&dctx->h, dctx->buf, POLY1305_BLOCK_SIZE, 0);
}
poly1305_simd_emit(&dctx->h, dst, dctx->s);
memzero_explicit(dctx, sizeof(*dctx));
}
EXPORT_SYMBOL(poly1305_final_arch);
static int crypto_poly1305_init(struct shash_desc *desc)
{
struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
*dctx = (struct poly1305_desc_ctx){};
return 0;
}
static int crypto_poly1305_update(struct shash_desc *desc,
const u8 *src, unsigned int srclen)
{
struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
poly1305_update_arch(dctx, src, srclen);
return 0;
}
static int crypto_poly1305_final(struct shash_desc *desc, u8 *dst)
{
struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
if (unlikely(!dctx->sset))
return -ENOKEY;
poly1305_final_arch(dctx, dst);
return 0;
}
static struct shash_alg alg = {
.digestsize = POLY1305_DIGEST_SIZE,
.init = crypto_poly1305_init,
.update = crypto_poly1305_update,
.final = crypto_poly1305_final,
.descsize = sizeof(struct poly1305_desc_ctx),
.base = {
.cra_name = "poly1305",
.cra_driver_name = "poly1305-simd",
.cra_priority = 300,
.cra_blocksize = POLY1305_BLOCK_SIZE,
.cra_module = THIS_MODULE,
},
};
static int __init poly1305_simd_mod_init(void)
{
if (boot_cpu_has(X86_FEATURE_AVX) &&
cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL))
static_branch_enable(&poly1305_use_avx);
if (boot_cpu_has(X86_FEATURE_AVX) && boot_cpu_has(X86_FEATURE_AVX2) &&
cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL))
static_branch_enable(&poly1305_use_avx2);
if (IS_ENABLED(CONFIG_AS_AVX512) && boot_cpu_has(X86_FEATURE_AVX) &&
boot_cpu_has(X86_FEATURE_AVX2) && boot_cpu_has(X86_FEATURE_AVX512F) &&
cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM | XFEATURE_MASK_AVX512, NULL) &&
/* Skylake downclocks unacceptably much when using zmm, but later generations are fast. */
boot_cpu_data.x86_model != INTEL_FAM6_SKYLAKE_X)
static_branch_enable(&poly1305_use_avx512);
return IS_REACHABLE(CONFIG_CRYPTO_HASH) ? crypto_register_shash(&alg) : 0;
}
static void __exit poly1305_simd_mod_exit(void)
{
if (IS_REACHABLE(CONFIG_CRYPTO_HASH))
crypto_unregister_shash(&alg);
}
module_init(poly1305_simd_mod_init);
module_exit(poly1305_simd_mod_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jason A. Donenfeld <[email protected]>");
MODULE_DESCRIPTION("Poly1305 authenticator");
MODULE_ALIAS_CRYPTO("poly1305");
MODULE_ALIAS_CRYPTO("poly1305-simd");
| linux-master | arch/x86/crypto/poly1305_glue.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Glue Code for AVX assembler version of Twofish Cipher
*
* Copyright (C) 2012 Johannes Goetzfried
* <[email protected]>
*
* Copyright © 2013 Jussi Kivilinna <[email protected]>
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/internal/simd.h>
#include <crypto/twofish.h>
#include "twofish.h"
#include "ecb_cbc_helpers.h"
#define TWOFISH_PARALLEL_BLOCKS 8
/* 8-way parallel cipher functions */
asmlinkage void twofish_ecb_enc_8way(const void *ctx, u8 *dst, const u8 *src);
asmlinkage void twofish_ecb_dec_8way(const void *ctx, u8 *dst, const u8 *src);
asmlinkage void twofish_cbc_dec_8way(const void *ctx, u8 *dst, const u8 *src);
static int twofish_setkey_skcipher(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen)
{
return twofish_setkey(&tfm->base, key, keylen);
}
static inline void twofish_enc_blk_3way(const void *ctx, u8 *dst, const u8 *src)
{
__twofish_enc_blk_3way(ctx, dst, src, false);
}
static int ecb_encrypt(struct skcipher_request *req)
{
ECB_WALK_START(req, TF_BLOCK_SIZE, TWOFISH_PARALLEL_BLOCKS);
ECB_BLOCK(TWOFISH_PARALLEL_BLOCKS, twofish_ecb_enc_8way);
ECB_BLOCK(3, twofish_enc_blk_3way);
ECB_BLOCK(1, twofish_enc_blk);
ECB_WALK_END();
}
static int ecb_decrypt(struct skcipher_request *req)
{
ECB_WALK_START(req, TF_BLOCK_SIZE, TWOFISH_PARALLEL_BLOCKS);
ECB_BLOCK(TWOFISH_PARALLEL_BLOCKS, twofish_ecb_dec_8way);
ECB_BLOCK(3, twofish_dec_blk_3way);
ECB_BLOCK(1, twofish_dec_blk);
ECB_WALK_END();
}
static int cbc_encrypt(struct skcipher_request *req)
{
CBC_WALK_START(req, TF_BLOCK_SIZE, -1);
CBC_ENC_BLOCK(twofish_enc_blk);
CBC_WALK_END();
}
static int cbc_decrypt(struct skcipher_request *req)
{
CBC_WALK_START(req, TF_BLOCK_SIZE, TWOFISH_PARALLEL_BLOCKS);
CBC_DEC_BLOCK(TWOFISH_PARALLEL_BLOCKS, twofish_cbc_dec_8way);
CBC_DEC_BLOCK(3, twofish_dec_blk_cbc_3way);
CBC_DEC_BLOCK(1, twofish_dec_blk);
CBC_WALK_END();
}
static struct skcipher_alg twofish_algs[] = {
{
.base.cra_name = "__ecb(twofish)",
.base.cra_driver_name = "__ecb-twofish-avx",
.base.cra_priority = 400,
.base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = TF_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct twofish_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = TF_MIN_KEY_SIZE,
.max_keysize = TF_MAX_KEY_SIZE,
.setkey = twofish_setkey_skcipher,
.encrypt = ecb_encrypt,
.decrypt = ecb_decrypt,
}, {
.base.cra_name = "__cbc(twofish)",
.base.cra_driver_name = "__cbc-twofish-avx",
.base.cra_priority = 400,
.base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = TF_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct twofish_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = TF_MIN_KEY_SIZE,
.max_keysize = TF_MAX_KEY_SIZE,
.ivsize = TF_BLOCK_SIZE,
.setkey = twofish_setkey_skcipher,
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
},
};
static struct simd_skcipher_alg *twofish_simd_algs[ARRAY_SIZE(twofish_algs)];
static int __init twofish_init(void)
{
const char *feature_name;
if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, &feature_name)) {
pr_info("CPU feature '%s' is not supported.\n", feature_name);
return -ENODEV;
}
return simd_register_skciphers_compat(twofish_algs,
ARRAY_SIZE(twofish_algs),
twofish_simd_algs);
}
static void __exit twofish_exit(void)
{
simd_unregister_skciphers(twofish_algs, ARRAY_SIZE(twofish_algs),
twofish_simd_algs);
}
module_init(twofish_init);
module_exit(twofish_exit);
MODULE_DESCRIPTION("Twofish Cipher Algorithm, AVX optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("twofish");
| linux-master | arch/x86/crypto/twofish_avx_glue.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Glue Code for assembler optimized version of 3DES
*
* Copyright © 2014 Jussi Kivilinna <[email protected]>
*
* CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
* Copyright (c) 2006 Herbert Xu <[email protected]>
*/
#include <crypto/algapi.h>
#include <crypto/des.h>
#include <crypto/internal/skcipher.h>
#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
struct des3_ede_x86_ctx {
struct des3_ede_ctx enc;
struct des3_ede_ctx dec;
};
/* regular block cipher functions */
asmlinkage void des3_ede_x86_64_crypt_blk(const u32 *expkey, u8 *dst,
const u8 *src);
/* 3-way parallel cipher functions */
asmlinkage void des3_ede_x86_64_crypt_blk_3way(const u32 *expkey, u8 *dst,
const u8 *src);
static inline void des3_ede_enc_blk(struct des3_ede_x86_ctx *ctx, u8 *dst,
const u8 *src)
{
u32 *enc_ctx = ctx->enc.expkey;
des3_ede_x86_64_crypt_blk(enc_ctx, dst, src);
}
static inline void des3_ede_dec_blk(struct des3_ede_x86_ctx *ctx, u8 *dst,
const u8 *src)
{
u32 *dec_ctx = ctx->dec.expkey;
des3_ede_x86_64_crypt_blk(dec_ctx, dst, src);
}
static inline void des3_ede_dec_blk_3way(struct des3_ede_x86_ctx *ctx, u8 *dst,
const u8 *src)
{
u32 *dec_ctx = ctx->dec.expkey;
des3_ede_x86_64_crypt_blk_3way(dec_ctx, dst, src);
}
static void des3_ede_x86_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
des3_ede_enc_blk(crypto_tfm_ctx(tfm), dst, src);
}
static void des3_ede_x86_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
des3_ede_dec_blk(crypto_tfm_ctx(tfm), dst, src);
}
static int ecb_crypt(struct skcipher_request *req, const u32 *expkey)
{
const unsigned int bsize = DES3_EDE_BLOCK_SIZE;
struct skcipher_walk walk;
unsigned int nbytes;
int err;
err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes)) {
u8 *wsrc = walk.src.virt.addr;
u8 *wdst = walk.dst.virt.addr;
/* Process three block batch */
if (nbytes >= bsize * 3) {
do {
des3_ede_x86_64_crypt_blk_3way(expkey, wdst,
wsrc);
wsrc += bsize * 3;
wdst += bsize * 3;
nbytes -= bsize * 3;
} while (nbytes >= bsize * 3);
if (nbytes < bsize)
goto done;
}
/* Handle leftovers */
do {
des3_ede_x86_64_crypt_blk(expkey, wdst, wsrc);
wsrc += bsize;
wdst += bsize;
nbytes -= bsize;
} while (nbytes >= bsize);
done:
err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
static int ecb_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct des3_ede_x86_ctx *ctx = crypto_skcipher_ctx(tfm);
return ecb_crypt(req, ctx->enc.expkey);
}
static int ecb_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct des3_ede_x86_ctx *ctx = crypto_skcipher_ctx(tfm);
return ecb_crypt(req, ctx->dec.expkey);
}
static unsigned int __cbc_encrypt(struct des3_ede_x86_ctx *ctx,
struct skcipher_walk *walk)
{
unsigned int bsize = DES3_EDE_BLOCK_SIZE;
unsigned int nbytes = walk->nbytes;
u64 *src = (u64 *)walk->src.virt.addr;
u64 *dst = (u64 *)walk->dst.virt.addr;
u64 *iv = (u64 *)walk->iv;
do {
*dst = *src ^ *iv;
des3_ede_enc_blk(ctx, (u8 *)dst, (u8 *)dst);
iv = dst;
src += 1;
dst += 1;
nbytes -= bsize;
} while (nbytes >= bsize);
*(u64 *)walk->iv = *iv;
return nbytes;
}
static int cbc_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct des3_ede_x86_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk walk;
unsigned int nbytes;
int err;
err = skcipher_walk_virt(&walk, req, false);
while (walk.nbytes) {
nbytes = __cbc_encrypt(ctx, &walk);
err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
static unsigned int __cbc_decrypt(struct des3_ede_x86_ctx *ctx,
struct skcipher_walk *walk)
{
unsigned int bsize = DES3_EDE_BLOCK_SIZE;
unsigned int nbytes = walk->nbytes;
u64 *src = (u64 *)walk->src.virt.addr;
u64 *dst = (u64 *)walk->dst.virt.addr;
u64 ivs[3 - 1];
u64 last_iv;
/* Start of the last block. */
src += nbytes / bsize - 1;
dst += nbytes / bsize - 1;
last_iv = *src;
/* Process three block batch */
if (nbytes >= bsize * 3) {
do {
nbytes -= bsize * 3 - bsize;
src -= 3 - 1;
dst -= 3 - 1;
ivs[0] = src[0];
ivs[1] = src[1];
des3_ede_dec_blk_3way(ctx, (u8 *)dst, (u8 *)src);
dst[1] ^= ivs[0];
dst[2] ^= ivs[1];
nbytes -= bsize;
if (nbytes < bsize)
goto done;
*dst ^= *(src - 1);
src -= 1;
dst -= 1;
} while (nbytes >= bsize * 3);
}
/* Handle leftovers */
for (;;) {
des3_ede_dec_blk(ctx, (u8 *)dst, (u8 *)src);
nbytes -= bsize;
if (nbytes < bsize)
break;
*dst ^= *(src - 1);
src -= 1;
dst -= 1;
}
done:
*dst ^= *(u64 *)walk->iv;
*(u64 *)walk->iv = last_iv;
return nbytes;
}
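/*
 * Editor's note on __cbc_decrypt() above: it walks from the last block
 * toward the first so the 3-way batch can decrypt in place while the
 * preceding ciphertext blocks it still needs as chaining values are
 * saved in ivs[]/last_iv before being overwritten. For contrast, a
 * minimal front-to-back scalar reference (illustrative, also in-place
 * safe because the chaining value is copied first):
 */
#if 0
static void cbc_decrypt_ref(struct des3_ede_x86_ctx *ctx, u8 *dst,
			    const u8 *src, unsigned int nblocks, u64 *iv)
{
	unsigned int i;

	for (i = 0; i < nblocks; i++) {
		u64 prev = ((const u64 *)src)[i];	/* save chaining value */

		des3_ede_dec_blk(ctx, dst + i * DES3_EDE_BLOCK_SIZE,
				 src + i * DES3_EDE_BLOCK_SIZE);
		((u64 *)dst)[i] ^= *iv;
		*iv = prev;
	}
}
#endif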
static int cbc_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct des3_ede_x86_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk walk;
unsigned int nbytes;
int err;
err = skcipher_walk_virt(&walk, req, false);
while (walk.nbytes) {
nbytes = __cbc_decrypt(ctx, &walk);
err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
static int des3_ede_x86_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen)
{
struct des3_ede_x86_ctx *ctx = crypto_tfm_ctx(tfm);
u32 i, j, tmp;
int err;
err = des3_ede_expand_key(&ctx->enc, key, keylen);
if (err == -ENOKEY) {
if (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)
err = -EINVAL;
else
err = 0;
}
if (err) {
memset(ctx, 0, sizeof(*ctx));
return err;
}
/* Fix encryption context for this implementation and form decryption
* context. */
j = DES3_EDE_EXPKEY_WORDS - 2;
for (i = 0; i < DES3_EDE_EXPKEY_WORDS; i += 2, j -= 2) {
tmp = ror32(ctx->enc.expkey[i + 1], 4);
ctx->enc.expkey[i + 1] = tmp;
ctx->dec.expkey[j + 0] = ctx->enc.expkey[i + 0];
ctx->dec.expkey[j + 1] = tmp;
}
return 0;
}
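/*
 * Editor's aside on the loop above: DES (and hence each pass of 3DES)
 * decrypts by running the identical round function with the subkeys in
 * reverse order, so the decryption schedule is the encryption schedule
 * mirrored pairwise -- schematically, with N = DES3_EDE_EXPKEY_WORDS:
 *
 *	dec[N-2], dec[N-1]  <-  enc[0], enc[1]
 *	dec[N-4], dec[N-3]  <-  enc[2], enc[3]
 *	...
 *
 * The extra ror32() only adapts the key words to the rotation
 * convention this assembler implementation expects.
 */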
static int des3_ede_x86_setkey_skcipher(struct crypto_skcipher *tfm,
const u8 *key,
unsigned int keylen)
{
return des3_ede_x86_setkey(&tfm->base, key, keylen);
}
static struct crypto_alg des3_ede_cipher = {
.cra_name = "des3_ede",
.cra_driver_name = "des3_ede-asm",
.cra_priority = 200,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct des3_ede_x86_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
.cia_min_keysize = DES3_EDE_KEY_SIZE,
.cia_max_keysize = DES3_EDE_KEY_SIZE,
.cia_setkey = des3_ede_x86_setkey,
.cia_encrypt = des3_ede_x86_encrypt,
.cia_decrypt = des3_ede_x86_decrypt,
}
}
};
static struct skcipher_alg des3_ede_skciphers[] = {
{
.base.cra_name = "ecb(des3_ede)",
.base.cra_driver_name = "ecb-des3_ede-asm",
.base.cra_priority = 300,
.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct des3_ede_x86_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.setkey = des3_ede_x86_setkey_skcipher,
.encrypt = ecb_encrypt,
.decrypt = ecb_decrypt,
}, {
.base.cra_name = "cbc(des3_ede)",
.base.cra_driver_name = "cbc-des3_ede-asm",
.base.cra_priority = 300,
.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct des3_ede_x86_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES3_EDE_BLOCK_SIZE,
.setkey = des3_ede_x86_setkey_skcipher,
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
}
};
static bool is_blacklisted_cpu(void)
{
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
return false;
if (boot_cpu_data.x86 == 0x0f) {
/*
* On Pentium 4, des3_ede-x86_64 is slower than the generic C
* implementation because of its use of 64-bit rotates (which are
* really slow on P4). Therefore blacklist P4s.
*/
return true;
}
return false;
}
static int force;
module_param(force, int, 0);
MODULE_PARM_DESC(force, "Force module load, ignore CPU blacklist");
static int __init des3_ede_x86_init(void)
{
int err;
if (!force && is_blacklisted_cpu()) {
pr_info("des3_ede-x86_64: performance on this CPU would be suboptimal: disabling des3_ede-x86_64.\n");
return -ENODEV;
}
err = crypto_register_alg(&des3_ede_cipher);
if (err)
return err;
err = crypto_register_skciphers(des3_ede_skciphers,
ARRAY_SIZE(des3_ede_skciphers));
if (err)
crypto_unregister_alg(&des3_ede_cipher);
return err;
}
static void __exit des3_ede_x86_fini(void)
{
crypto_unregister_alg(&des3_ede_cipher);
crypto_unregister_skciphers(des3_ede_skciphers,
ARRAY_SIZE(des3_ede_skciphers));
}
module_init(des3_ede_x86_init);
module_exit(des3_ede_x86_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Triple DES EDE Cipher Algorithm, asm optimized");
MODULE_ALIAS_CRYPTO("des3_ede");
MODULE_ALIAS_CRYPTO("des3_ede-asm");
MODULE_AUTHOR("Jussi Kivilinna <[email protected]>");
| linux-master | arch/x86/crypto/des3_ede_glue.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Cryptographic API.
*
* Glue code for the SHA1 Secure Hash Algorithm assembler implementation using
* Supplemental SSE3 instructions.
*
* This file is based on sha1_generic.c
*
* Copyright (c) Alan Smithee.
* Copyright (c) Andrew McDonald <[email protected]>
* Copyright (c) Jean-Francois Dive <[email protected]>
* Copyright (c) Mathias Krause <[email protected]>
* Copyright (c) Chandramouli Narayanan <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <crypto/sha1.h>
#include <crypto/sha1_base.h>
#include <asm/simd.h>
static int sha1_update(struct shash_desc *desc, const u8 *data,
unsigned int len, sha1_block_fn *sha1_xform)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
if (!crypto_simd_usable() ||
(sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE)
return crypto_sha1_update(desc, data, len);
/*
* Make sure struct sha1_state begins directly with the SHA1
* 160-bit internal state, as this is what the asm functions expect.
*/
BUILD_BUG_ON(offsetof(struct sha1_state, state) != 0);
kernel_fpu_begin();
sha1_base_do_update(desc, data, len, sha1_xform);
kernel_fpu_end();
return 0;
}
static int sha1_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out, sha1_block_fn *sha1_xform)
{
if (!crypto_simd_usable())
return crypto_sha1_finup(desc, data, len, out);
kernel_fpu_begin();
if (len)
sha1_base_do_update(desc, data, len, sha1_xform);
sha1_base_do_finalize(desc, sha1_xform);
kernel_fpu_end();
return sha1_base_finish(desc, out);
}
asmlinkage void sha1_transform_ssse3(struct sha1_state *state,
const u8 *data, int blocks);
static int sha1_ssse3_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
return sha1_update(desc, data, len, sha1_transform_ssse3);
}
static int sha1_ssse3_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
return sha1_finup(desc, data, len, out, sha1_transform_ssse3);
}
/* Add padding and return the message digest. */
static int sha1_ssse3_final(struct shash_desc *desc, u8 *out)
{
return sha1_ssse3_finup(desc, NULL, 0, out);
}
static struct shash_alg sha1_ssse3_alg = {
.digestsize = SHA1_DIGEST_SIZE,
.init = sha1_base_init,
.update = sha1_ssse3_update,
.final = sha1_ssse3_final,
.finup = sha1_ssse3_finup,
.descsize = sizeof(struct sha1_state),
.base = {
.cra_name = "sha1",
.cra_driver_name = "sha1-ssse3",
.cra_priority = 150,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
};
static int register_sha1_ssse3(void)
{
if (boot_cpu_has(X86_FEATURE_SSSE3))
return crypto_register_shash(&sha1_ssse3_alg);
return 0;
}
static void unregister_sha1_ssse3(void)
{
if (boot_cpu_has(X86_FEATURE_SSSE3))
crypto_unregister_shash(&sha1_ssse3_alg);
}
asmlinkage void sha1_transform_avx(struct sha1_state *state,
const u8 *data, int blocks);
static int sha1_avx_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
return sha1_update(desc, data, len, sha1_transform_avx);
}
static int sha1_avx_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
return sha1_finup(desc, data, len, out, sha1_transform_avx);
}
static int sha1_avx_final(struct shash_desc *desc, u8 *out)
{
return sha1_avx_finup(desc, NULL, 0, out);
}
static struct shash_alg sha1_avx_alg = {
.digestsize = SHA1_DIGEST_SIZE,
.init = sha1_base_init,
.update = sha1_avx_update,
.final = sha1_avx_final,
.finup = sha1_avx_finup,
.descsize = sizeof(struct sha1_state),
.base = {
.cra_name = "sha1",
.cra_driver_name = "sha1-avx",
.cra_priority = 160,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
};
static bool avx_usable(void)
{
if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) {
if (boot_cpu_has(X86_FEATURE_AVX))
pr_info("AVX detected but unusable.\n");
return false;
}
return true;
}
static int register_sha1_avx(void)
{
if (avx_usable())
return crypto_register_shash(&sha1_avx_alg);
return 0;
}
static void unregister_sha1_avx(void)
{
if (avx_usable())
crypto_unregister_shash(&sha1_avx_alg);
}
#define SHA1_AVX2_BLOCK_OPTSIZE 4 /* optimal 4*64 bytes of SHA1 blocks */
asmlinkage void sha1_transform_avx2(struct sha1_state *state,
const u8 *data, int blocks);
static bool avx2_usable(void)
{
if (avx_usable() && boot_cpu_has(X86_FEATURE_AVX2)
&& boot_cpu_has(X86_FEATURE_BMI1)
&& boot_cpu_has(X86_FEATURE_BMI2))
return true;
return false;
}
static void sha1_apply_transform_avx2(struct sha1_state *state,
const u8 *data, int blocks)
{
/* Select the optimal transform based on data block size */
if (blocks >= SHA1_AVX2_BLOCK_OPTSIZE)
sha1_transform_avx2(state, data, blocks);
else
sha1_transform_avx(state, data, blocks);
}
static int sha1_avx2_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
return sha1_update(desc, data, len, sha1_apply_transform_avx2);
}
static int sha1_avx2_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
return sha1_finup(desc, data, len, out, sha1_apply_transform_avx2);
}
static int sha1_avx2_final(struct shash_desc *desc, u8 *out)
{
return sha1_avx2_finup(desc, NULL, 0, out);
}
static struct shash_alg sha1_avx2_alg = {
.digestsize = SHA1_DIGEST_SIZE,
.init = sha1_base_init,
.update = sha1_avx2_update,
.final = sha1_avx2_final,
.finup = sha1_avx2_finup,
.descsize = sizeof(struct sha1_state),
.base = {
.cra_name = "sha1",
.cra_driver_name = "sha1-avx2",
.cra_priority = 170,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
};
static int register_sha1_avx2(void)
{
if (avx2_usable())
return crypto_register_shash(&sha1_avx2_alg);
return 0;
}
static void unregister_sha1_avx2(void)
{
if (avx2_usable())
crypto_unregister_shash(&sha1_avx2_alg);
}
#ifdef CONFIG_AS_SHA1_NI
asmlinkage void sha1_ni_transform(struct sha1_state *digest, const u8 *data,
int rounds);
static int sha1_ni_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
return sha1_update(desc, data, len, sha1_ni_transform);
}
static int sha1_ni_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
return sha1_finup(desc, data, len, out, sha1_ni_transform);
}
static int sha1_ni_final(struct shash_desc *desc, u8 *out)
{
return sha1_ni_finup(desc, NULL, 0, out);
}
static struct shash_alg sha1_ni_alg = {
.digestsize = SHA1_DIGEST_SIZE,
.init = sha1_base_init,
.update = sha1_ni_update,
.final = sha1_ni_final,
.finup = sha1_ni_finup,
.descsize = sizeof(struct sha1_state),
.base = {
.cra_name = "sha1",
.cra_driver_name = "sha1-ni",
.cra_priority = 250,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
};
static int register_sha1_ni(void)
{
if (boot_cpu_has(X86_FEATURE_SHA_NI))
return crypto_register_shash(&sha1_ni_alg);
return 0;
}
static void unregister_sha1_ni(void)
{
if (boot_cpu_has(X86_FEATURE_SHA_NI))
crypto_unregister_shash(&sha1_ni_alg);
}
#else
static inline int register_sha1_ni(void) { return 0; }
static inline void unregister_sha1_ni(void) { }
#endif
static int __init sha1_ssse3_mod_init(void)
{
if (register_sha1_ssse3())
goto fail;
if (register_sha1_avx()) {
unregister_sha1_ssse3();
goto fail;
}
if (register_sha1_avx2()) {
unregister_sha1_avx();
unregister_sha1_ssse3();
goto fail;
}
if (register_sha1_ni()) {
unregister_sha1_avx2();
unregister_sha1_avx();
unregister_sha1_ssse3();
goto fail;
}
return 0;
fail:
return -ENODEV;
}
static void __exit sha1_ssse3_mod_fini(void)
{
unregister_sha1_ni();
unregister_sha1_avx2();
unregister_sha1_avx();
unregister_sha1_ssse3();
}
module_init(sha1_ssse3_mod_init);
module_exit(sha1_ssse3_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, Supplemental SSE3 accelerated");
MODULE_ALIAS_CRYPTO("sha1");
MODULE_ALIAS_CRYPTO("sha1-ssse3");
MODULE_ALIAS_CRYPTO("sha1-avx");
MODULE_ALIAS_CRYPTO("sha1-avx2");
#ifdef CONFIG_AS_SHA1_NI
MODULE_ALIAS_CRYPTO("sha1-ni");
#endif
| linux-master | arch/x86/crypto/sha1_ssse3_glue.c |
/*
* Glue Code for assembler optimized version of TWOFISH
*
* Originally Twofish for GPG
* By Matthew Skala <[email protected]>, July 26, 1998
* 256-bit key length added March 20, 1999
* Some modifications to reduce the text size by Werner Koch, April, 1998
* Ported to the kerneli patch by Marc Mutz <[email protected]>
* Ported to CryptoAPI by Colin Slater <[email protected]>
*
* The original author has disclaimed all copyright interest in this
* code and thus put it in the public domain. The subsequent authors
* have put this under the GNU General Public License.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA
*
* This code is a "clean room" implementation, written from the paper
* _Twofish: A 128-Bit Block Cipher_ by Bruce Schneier, John Kelsey,
* Doug Whiting, David Wagner, Chris Hall, and Niels Ferguson, available
* through http://www.counterpane.com/twofish.html
*
* For background information on multiplication in finite fields, used for
* the matrix operations in the key schedule, see the book _Contemporary
* Abstract Algebra_ by Joseph A. Gallian, especially chapter 22 in the
* Third Edition.
*/
#include <crypto/algapi.h>
#include <crypto/twofish.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
asmlinkage void twofish_enc_blk(struct twofish_ctx *ctx, u8 *dst,
const u8 *src);
EXPORT_SYMBOL_GPL(twofish_enc_blk);
asmlinkage void twofish_dec_blk(struct twofish_ctx *ctx, u8 *dst,
const u8 *src);
EXPORT_SYMBOL_GPL(twofish_dec_blk);
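/*
 * The single-block asm routines are exported for reuse by the 3-way and
 * AVX twofish glue modules, which fall back to them for leftover blocks.
 */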
static void twofish_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
twofish_enc_blk(crypto_tfm_ctx(tfm), dst, src);
}
static void twofish_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
twofish_dec_blk(crypto_tfm_ctx(tfm), dst, src);
}
static struct crypto_alg alg = {
.cra_name = "twofish",
.cra_driver_name = "twofish-asm",
.cra_priority = 200,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = TF_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct twofish_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
.cia_min_keysize = TF_MIN_KEY_SIZE,
.cia_max_keysize = TF_MAX_KEY_SIZE,
.cia_setkey = twofish_setkey,
.cia_encrypt = twofish_encrypt,
.cia_decrypt = twofish_decrypt
}
}
};
static int __init twofish_glue_init(void)
{
return crypto_register_alg(&alg);
}
static void __exit twofish_glue_fini(void)
{
crypto_unregister_alg(&alg);
}
module_init(twofish_glue_init);
module_exit(twofish_glue_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Twofish Cipher Algorithm, asm optimized");
MODULE_ALIAS_CRYPTO("twofish");
MODULE_ALIAS_CRYPTO("twofish-asm");
| linux-master | arch/x86/crypto/twofish_glue.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Glue Code for x86_64/AVX2/AES-NI assembler optimized version of Camellia
*
* Copyright © 2013 Jussi Kivilinna <[email protected]>
*/
#include <crypto/algapi.h>
#include <crypto/internal/simd.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/types.h>
#include "camellia.h"
#include "ecb_cbc_helpers.h"
#define CAMELLIA_AESNI_PARALLEL_BLOCKS 16
#define CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS 32
/* 32-way AVX2/AES-NI parallel cipher functions */
asmlinkage void camellia_ecb_enc_32way(const void *ctx, u8 *dst, const u8 *src);
asmlinkage void camellia_ecb_dec_32way(const void *ctx, u8 *dst, const u8 *src);
asmlinkage void camellia_cbc_dec_32way(const void *ctx, u8 *dst, const u8 *src);
static int camellia_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
return __camellia_setkey(crypto_skcipher_ctx(tfm), key, keylen);
}
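/*
 * The ECB_BLOCK()/CBC_DEC_BLOCK() helpers below are tried in the order
 * listed: the 32-way AVX2 routine consumes the bulk of the data, and the
 * 16-way, 2-way and single-block routines mop up the remainder.
 */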
static int ecb_encrypt(struct skcipher_request *req)
{
ECB_WALK_START(req, CAMELLIA_BLOCK_SIZE, CAMELLIA_AESNI_PARALLEL_BLOCKS);
ECB_BLOCK(CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS, camellia_ecb_enc_32way);
ECB_BLOCK(CAMELLIA_AESNI_PARALLEL_BLOCKS, camellia_ecb_enc_16way);
ECB_BLOCK(2, camellia_enc_blk_2way);
ECB_BLOCK(1, camellia_enc_blk);
ECB_WALK_END();
}
static int ecb_decrypt(struct skcipher_request *req)
{
ECB_WALK_START(req, CAMELLIA_BLOCK_SIZE, CAMELLIA_AESNI_PARALLEL_BLOCKS);
ECB_BLOCK(CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS, camellia_ecb_dec_32way);
ECB_BLOCK(CAMELLIA_AESNI_PARALLEL_BLOCKS, camellia_ecb_dec_16way);
ECB_BLOCK(2, camellia_dec_blk_2way);
ECB_BLOCK(1, camellia_dec_blk);
ECB_WALK_END();
}
static int cbc_encrypt(struct skcipher_request *req)
{
CBC_WALK_START(req, CAMELLIA_BLOCK_SIZE, -1);
CBC_ENC_BLOCK(camellia_enc_blk);
CBC_WALK_END();
}
static int cbc_decrypt(struct skcipher_request *req)
{
CBC_WALK_START(req, CAMELLIA_BLOCK_SIZE, CAMELLIA_AESNI_PARALLEL_BLOCKS);
CBC_DEC_BLOCK(CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS, camellia_cbc_dec_32way);
CBC_DEC_BLOCK(CAMELLIA_AESNI_PARALLEL_BLOCKS, camellia_cbc_dec_16way);
CBC_DEC_BLOCK(2, camellia_decrypt_cbc_2way);
CBC_DEC_BLOCK(1, camellia_dec_blk);
CBC_WALK_END();
}
static struct skcipher_alg camellia_algs[] = {
{
.base.cra_name = "__ecb(camellia)",
.base.cra_driver_name = "__ecb-camellia-aesni-avx2",
.base.cra_priority = 500,
.base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = CAMELLIA_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct camellia_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = CAMELLIA_MIN_KEY_SIZE,
.max_keysize = CAMELLIA_MAX_KEY_SIZE,
.setkey = camellia_setkey,
.encrypt = ecb_encrypt,
.decrypt = ecb_decrypt,
}, {
.base.cra_name = "__cbc(camellia)",
.base.cra_driver_name = "__cbc-camellia-aesni-avx2",
.base.cra_priority = 500,
.base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = CAMELLIA_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct camellia_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = CAMELLIA_MIN_KEY_SIZE,
.max_keysize = CAMELLIA_MAX_KEY_SIZE,
.ivsize = CAMELLIA_BLOCK_SIZE,
.setkey = camellia_setkey,
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
},
};
static struct simd_skcipher_alg *camellia_simd_algs[ARRAY_SIZE(camellia_algs)];
static int __init camellia_aesni_init(void)
{
const char *feature_name;
if (!boot_cpu_has(X86_FEATURE_AVX) ||
!boot_cpu_has(X86_FEATURE_AVX2) ||
!boot_cpu_has(X86_FEATURE_AES) ||
!boot_cpu_has(X86_FEATURE_OSXSAVE)) {
pr_info("AVX2 or AES-NI instructions are not detected.\n");
return -ENODEV;
}
if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
&feature_name)) {
pr_info("CPU feature '%s' is not supported.\n", feature_name);
return -ENODEV;
}
return simd_register_skciphers_compat(camellia_algs,
ARRAY_SIZE(camellia_algs),
camellia_simd_algs);
}
static void __exit camellia_aesni_fini(void)
{
simd_unregister_skciphers(camellia_algs, ARRAY_SIZE(camellia_algs),
camellia_simd_algs);
}
module_init(camellia_aesni_init);
module_exit(camellia_aesni_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Camellia Cipher Algorithm, AES-NI/AVX2 optimized");
MODULE_ALIAS_CRYPTO("camellia");
MODULE_ALIAS_CRYPTO("camellia-asm");
| linux-master | arch/x86/crypto/camellia_aesni_avx2_glue.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Glue Code for AVX assembler versions of Serpent Cipher
*
* Copyright (C) 2012 Johannes Goetzfried
* <[email protected]>
*
* Copyright © 2011-2013 Jussi Kivilinna <[email protected]>
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/internal/simd.h>
#include <crypto/serpent.h>
#include "serpent-avx.h"
#include "ecb_cbc_helpers.h"
/* 8-way parallel cipher functions */
asmlinkage void serpent_ecb_enc_8way_avx(const void *ctx, u8 *dst,
const u8 *src);
EXPORT_SYMBOL_GPL(serpent_ecb_enc_8way_avx);
asmlinkage void serpent_ecb_dec_8way_avx(const void *ctx, u8 *dst,
const u8 *src);
EXPORT_SYMBOL_GPL(serpent_ecb_dec_8way_avx);
asmlinkage void serpent_cbc_dec_8way_avx(const void *ctx, u8 *dst,
const u8 *src);
EXPORT_SYMBOL_GPL(serpent_cbc_dec_8way_avx);
static int serpent_setkey_skcipher(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen)
{
return __serpent_setkey(crypto_skcipher_ctx(tfm), key, keylen);
}
static int ecb_encrypt(struct skcipher_request *req)
{
ECB_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS);
ECB_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_ecb_enc_8way_avx);
ECB_BLOCK(1, __serpent_encrypt);
ECB_WALK_END();
}
static int ecb_decrypt(struct skcipher_request *req)
{
ECB_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS);
ECB_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_ecb_dec_8way_avx);
ECB_BLOCK(1, __serpent_decrypt);
ECB_WALK_END();
}
static int cbc_encrypt(struct skcipher_request *req)
{
CBC_WALK_START(req, SERPENT_BLOCK_SIZE, -1);
CBC_ENC_BLOCK(__serpent_encrypt);
CBC_WALK_END();
}
static int cbc_decrypt(struct skcipher_request *req)
{
CBC_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS);
CBC_DEC_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_cbc_dec_8way_avx);
CBC_DEC_BLOCK(1, __serpent_decrypt);
CBC_WALK_END();
}
static struct skcipher_alg serpent_algs[] = {
{
.base.cra_name = "__ecb(serpent)",
.base.cra_driver_name = "__ecb-serpent-avx",
.base.cra_priority = 500,
.base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = SERPENT_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct serpent_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = SERPENT_MIN_KEY_SIZE,
.max_keysize = SERPENT_MAX_KEY_SIZE,
.setkey = serpent_setkey_skcipher,
.encrypt = ecb_encrypt,
.decrypt = ecb_decrypt,
}, {
.base.cra_name = "__cbc(serpent)",
.base.cra_driver_name = "__cbc-serpent-avx",
.base.cra_priority = 500,
.base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = SERPENT_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct serpent_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = SERPENT_MIN_KEY_SIZE,
.max_keysize = SERPENT_MAX_KEY_SIZE,
.ivsize = SERPENT_BLOCK_SIZE,
.setkey = serpent_setkey_skcipher,
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
},
};
static struct simd_skcipher_alg *serpent_simd_algs[ARRAY_SIZE(serpent_algs)];
static int __init serpent_init(void)
{
const char *feature_name;
if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
&feature_name)) {
pr_info("CPU feature '%s' is not supported.\n", feature_name);
return -ENODEV;
}
return simd_register_skciphers_compat(serpent_algs,
ARRAY_SIZE(serpent_algs),
serpent_simd_algs);
}
static void __exit serpent_exit(void)
{
simd_unregister_skciphers(serpent_algs, ARRAY_SIZE(serpent_algs),
serpent_simd_algs);
}
module_init(serpent_init);
module_exit(serpent_exit);
MODULE_DESCRIPTION("Serpent Cipher Algorithm, AVX optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("serpent");
| linux-master | arch/x86/crypto/serpent_avx_glue.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Glue Code for 3-way parallel assembler optimized version of Twofish
*
* Copyright (c) 2011 Jussi Kivilinna <[email protected]>
*/
#include <crypto/algapi.h>
#include <crypto/twofish.h>
#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include "twofish.h"
#include "ecb_cbc_helpers.h"
EXPORT_SYMBOL_GPL(__twofish_enc_blk_3way);
EXPORT_SYMBOL_GPL(twofish_dec_blk_3way);
static int twofish_setkey_skcipher(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen)
{
return twofish_setkey(&tfm->base, key, keylen);
}
static inline void twofish_enc_blk_3way(const void *ctx, u8 *dst, const u8 *src)
{
__twofish_enc_blk_3way(ctx, dst, src, false);
}
void twofish_dec_blk_cbc_3way(const void *ctx, u8 *dst, const u8 *src)
{
u8 buf[2][TF_BLOCK_SIZE];
const u8 *s = src;
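	/*
	 * For in-place decryption, save the last two ciphertext blocks
	 * before they are overwritten; CBC chaining XORs each of them into
	 * the plaintext block that follows.
	 */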
if (dst == src)
s = memcpy(buf, src, sizeof(buf));
twofish_dec_blk_3way(ctx, dst, src);
crypto_xor(dst + TF_BLOCK_SIZE, s, sizeof(buf));
}
EXPORT_SYMBOL_GPL(twofish_dec_blk_cbc_3way);
static int ecb_encrypt(struct skcipher_request *req)
{
ECB_WALK_START(req, TF_BLOCK_SIZE, -1);
ECB_BLOCK(3, twofish_enc_blk_3way);
ECB_BLOCK(1, twofish_enc_blk);
ECB_WALK_END();
}
static int ecb_decrypt(struct skcipher_request *req)
{
ECB_WALK_START(req, TF_BLOCK_SIZE, -1);
ECB_BLOCK(3, twofish_dec_blk_3way);
ECB_BLOCK(1, twofish_dec_blk);
ECB_WALK_END();
}
static int cbc_encrypt(struct skcipher_request *req)
{
CBC_WALK_START(req, TF_BLOCK_SIZE, -1);
CBC_ENC_BLOCK(twofish_enc_blk);
CBC_WALK_END();
}
static int cbc_decrypt(struct skcipher_request *req)
{
CBC_WALK_START(req, TF_BLOCK_SIZE, -1);
CBC_DEC_BLOCK(3, twofish_dec_blk_cbc_3way);
CBC_DEC_BLOCK(1, twofish_dec_blk);
CBC_WALK_END();
}
static struct skcipher_alg tf_skciphers[] = {
{
.base.cra_name = "ecb(twofish)",
.base.cra_driver_name = "ecb-twofish-3way",
.base.cra_priority = 300,
.base.cra_blocksize = TF_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct twofish_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = TF_MIN_KEY_SIZE,
.max_keysize = TF_MAX_KEY_SIZE,
.setkey = twofish_setkey_skcipher,
.encrypt = ecb_encrypt,
.decrypt = ecb_decrypt,
}, {
.base.cra_name = "cbc(twofish)",
.base.cra_driver_name = "cbc-twofish-3way",
.base.cra_priority = 300,
.base.cra_blocksize = TF_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct twofish_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = TF_MIN_KEY_SIZE,
.max_keysize = TF_MAX_KEY_SIZE,
.ivsize = TF_BLOCK_SIZE,
.setkey = twofish_setkey_skcipher,
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
},
};
static bool is_blacklisted_cpu(void)
{
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
return false;
if (boot_cpu_data.x86 == 0x06 &&
(boot_cpu_data.x86_model == 0x1c ||
boot_cpu_data.x86_model == 0x26 ||
boot_cpu_data.x86_model == 0x36)) {
		/*
		 * On Atom, twofish-3way is slower than the original
		 * assembler implementation. Twofish-3way trades away some
		 * per-block performance by storing blocks in 64-bit
		 * registers so that three blocks can be processed in
		 * parallel. On out-of-order CPUs that parallelism wins back
		 * more than was traded away, but Atom does not benefit from
		 * it and should be blacklisted.
		 */
return true;
}
if (boot_cpu_data.x86 == 0x0f) {
		/*
		 * On Pentium 4, twofish-3way is slower than the original
		 * assembler implementation because of the excessive use of
		 * 64-bit rotates and left-shifts (which are very slow on P4)
		 * needed to store and handle a 128-bit block in two 64-bit
		 * registers.
		 */
return true;
}
return false;
}
static int force;
module_param(force, int, 0);
MODULE_PARM_DESC(force, "Force module load, ignore CPU blacklist");
static int __init twofish_3way_init(void)
{
if (!force && is_blacklisted_cpu()) {
printk(KERN_INFO
"twofish-x86_64-3way: performance on this CPU "
"would be suboptimal: disabling "
"twofish-x86_64-3way.\n");
return -ENODEV;
}
return crypto_register_skciphers(tf_skciphers,
ARRAY_SIZE(tf_skciphers));
}
static void __exit twofish_3way_fini(void)
{
crypto_unregister_skciphers(tf_skciphers, ARRAY_SIZE(tf_skciphers));
}
module_init(twofish_3way_init);
module_exit(twofish_3way_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Twofish Cipher Algorithm, 3-way parallel asm optimized");
MODULE_ALIAS_CRYPTO("twofish");
MODULE_ALIAS_CRYPTO("twofish-asm");
| linux-master | arch/x86/crypto/twofish_glue_3way.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Glue Code for x86_64/AVX/AES-NI assembler optimized version of Camellia
*
* Copyright © 2012-2013 Jussi Kivilinna <[email protected]>
*/
#include <crypto/algapi.h>
#include <crypto/internal/simd.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/types.h>
#include "camellia.h"
#include "ecb_cbc_helpers.h"
#define CAMELLIA_AESNI_PARALLEL_BLOCKS 16
/* 16-way parallel cipher functions (avx/aes-ni) */
asmlinkage void camellia_ecb_enc_16way(const void *ctx, u8 *dst, const u8 *src);
EXPORT_SYMBOL_GPL(camellia_ecb_enc_16way);
asmlinkage void camellia_ecb_dec_16way(const void *ctx, u8 *dst, const u8 *src);
EXPORT_SYMBOL_GPL(camellia_ecb_dec_16way);
asmlinkage void camellia_cbc_dec_16way(const void *ctx, u8 *dst, const u8 *src);
EXPORT_SYMBOL_GPL(camellia_cbc_dec_16way);
static int camellia_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
return __camellia_setkey(crypto_skcipher_ctx(tfm), key, keylen);
}
static int ecb_encrypt(struct skcipher_request *req)
{
ECB_WALK_START(req, CAMELLIA_BLOCK_SIZE, CAMELLIA_AESNI_PARALLEL_BLOCKS);
ECB_BLOCK(CAMELLIA_AESNI_PARALLEL_BLOCKS, camellia_ecb_enc_16way);
ECB_BLOCK(2, camellia_enc_blk_2way);
ECB_BLOCK(1, camellia_enc_blk);
ECB_WALK_END();
}
static int ecb_decrypt(struct skcipher_request *req)
{
ECB_WALK_START(req, CAMELLIA_BLOCK_SIZE, CAMELLIA_AESNI_PARALLEL_BLOCKS);
ECB_BLOCK(CAMELLIA_AESNI_PARALLEL_BLOCKS, camellia_ecb_dec_16way);
ECB_BLOCK(2, camellia_dec_blk_2way);
ECB_BLOCK(1, camellia_dec_blk);
ECB_WALK_END();
}
static int cbc_encrypt(struct skcipher_request *req)
{
CBC_WALK_START(req, CAMELLIA_BLOCK_SIZE, -1);
CBC_ENC_BLOCK(camellia_enc_blk);
CBC_WALK_END();
}
static int cbc_decrypt(struct skcipher_request *req)
{
CBC_WALK_START(req, CAMELLIA_BLOCK_SIZE, CAMELLIA_AESNI_PARALLEL_BLOCKS);
CBC_DEC_BLOCK(CAMELLIA_AESNI_PARALLEL_BLOCKS, camellia_cbc_dec_16way);
CBC_DEC_BLOCK(2, camellia_decrypt_cbc_2way);
CBC_DEC_BLOCK(1, camellia_dec_blk);
CBC_WALK_END();
}
static struct skcipher_alg camellia_algs[] = {
{
.base.cra_name = "__ecb(camellia)",
.base.cra_driver_name = "__ecb-camellia-aesni",
.base.cra_priority = 400,
.base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = CAMELLIA_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct camellia_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = CAMELLIA_MIN_KEY_SIZE,
.max_keysize = CAMELLIA_MAX_KEY_SIZE,
.setkey = camellia_setkey,
.encrypt = ecb_encrypt,
.decrypt = ecb_decrypt,
}, {
.base.cra_name = "__cbc(camellia)",
.base.cra_driver_name = "__cbc-camellia-aesni",
.base.cra_priority = 400,
.base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = CAMELLIA_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct camellia_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = CAMELLIA_MIN_KEY_SIZE,
.max_keysize = CAMELLIA_MAX_KEY_SIZE,
.ivsize = CAMELLIA_BLOCK_SIZE,
.setkey = camellia_setkey,
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
}
};
static struct simd_skcipher_alg *camellia_simd_algs[ARRAY_SIZE(camellia_algs)];
static int __init camellia_aesni_init(void)
{
const char *feature_name;
if (!boot_cpu_has(X86_FEATURE_AVX) ||
!boot_cpu_has(X86_FEATURE_AES) ||
!boot_cpu_has(X86_FEATURE_OSXSAVE)) {
pr_info("AVX or AES-NI instructions are not detected.\n");
return -ENODEV;
}
if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
&feature_name)) {
pr_info("CPU feature '%s' is not supported.\n", feature_name);
return -ENODEV;
}
return simd_register_skciphers_compat(camellia_algs,
ARRAY_SIZE(camellia_algs),
camellia_simd_algs);
}
static void __exit camellia_aesni_fini(void)
{
simd_unregister_skciphers(camellia_algs, ARRAY_SIZE(camellia_algs),
camellia_simd_algs);
}
module_init(camellia_aesni_init);
module_exit(camellia_aesni_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Camellia Cipher Algorithm, AES-NI/AVX optimized");
MODULE_ALIAS_CRYPTO("camellia");
MODULE_ALIAS_CRYPTO("camellia-asm");
| linux-master | arch/x86/crypto/camellia_aesni_avx_glue.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* x64 SIMD accelerated ChaCha and XChaCha stream ciphers,
* including ChaCha20 (RFC7539)
*
* Copyright (C) 2015 Martin Willi
*/
#include <crypto/algapi.h>
#include <crypto/internal/chacha.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <asm/simd.h>
asmlinkage void chacha_block_xor_ssse3(u32 *state, u8 *dst, const u8 *src,
unsigned int len, int nrounds);
asmlinkage void chacha_4block_xor_ssse3(u32 *state, u8 *dst, const u8 *src,
unsigned int len, int nrounds);
asmlinkage void hchacha_block_ssse3(const u32 *state, u32 *out, int nrounds);
asmlinkage void chacha_2block_xor_avx2(u32 *state, u8 *dst, const u8 *src,
unsigned int len, int nrounds);
asmlinkage void chacha_4block_xor_avx2(u32 *state, u8 *dst, const u8 *src,
unsigned int len, int nrounds);
asmlinkage void chacha_8block_xor_avx2(u32 *state, u8 *dst, const u8 *src,
unsigned int len, int nrounds);
asmlinkage void chacha_2block_xor_avx512vl(u32 *state, u8 *dst, const u8 *src,
unsigned int len, int nrounds);
asmlinkage void chacha_4block_xor_avx512vl(u32 *state, u8 *dst, const u8 *src,
unsigned int len, int nrounds);
asmlinkage void chacha_8block_xor_avx512vl(u32 *state, u8 *dst, const u8 *src,
unsigned int len, int nrounds);
static __ro_after_init DEFINE_STATIC_KEY_FALSE(chacha_use_simd);
static __ro_after_init DEFINE_STATIC_KEY_FALSE(chacha_use_avx2);
static __ro_after_init DEFINE_STATIC_KEY_FALSE(chacha_use_avx512vl);
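/*
 * Number of blocks actually consumed by a tail call that was handed @len
 * bytes and a kernel processing up to @maxblocks blocks: a trailing
 * partial block still advances the block counter by one.
 */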
static unsigned int chacha_advance(unsigned int len, unsigned int maxblocks)
{
len = min(len, maxblocks * CHACHA_BLOCK_SIZE);
return round_up(len, CHACHA_BLOCK_SIZE) / CHACHA_BLOCK_SIZE;
}
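/*
 * Encrypt/decrypt @bytes of data, trying the widest available kernels
 * first (AVX-512VL, then AVX2, then SSSE3) and advancing the 32-bit
 * block counter in state[12] by the number of blocks processed.
 */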
static void chacha_dosimd(u32 *state, u8 *dst, const u8 *src,
unsigned int bytes, int nrounds)
{
if (IS_ENABLED(CONFIG_AS_AVX512) &&
static_branch_likely(&chacha_use_avx512vl)) {
while (bytes >= CHACHA_BLOCK_SIZE * 8) {
chacha_8block_xor_avx512vl(state, dst, src, bytes,
nrounds);
bytes -= CHACHA_BLOCK_SIZE * 8;
src += CHACHA_BLOCK_SIZE * 8;
dst += CHACHA_BLOCK_SIZE * 8;
state[12] += 8;
}
if (bytes > CHACHA_BLOCK_SIZE * 4) {
chacha_8block_xor_avx512vl(state, dst, src, bytes,
nrounds);
state[12] += chacha_advance(bytes, 8);
return;
}
if (bytes > CHACHA_BLOCK_SIZE * 2) {
chacha_4block_xor_avx512vl(state, dst, src, bytes,
nrounds);
state[12] += chacha_advance(bytes, 4);
return;
}
if (bytes) {
chacha_2block_xor_avx512vl(state, dst, src, bytes,
nrounds);
state[12] += chacha_advance(bytes, 2);
return;
}
}
if (static_branch_likely(&chacha_use_avx2)) {
while (bytes >= CHACHA_BLOCK_SIZE * 8) {
chacha_8block_xor_avx2(state, dst, src, bytes, nrounds);
bytes -= CHACHA_BLOCK_SIZE * 8;
src += CHACHA_BLOCK_SIZE * 8;
dst += CHACHA_BLOCK_SIZE * 8;
state[12] += 8;
}
if (bytes > CHACHA_BLOCK_SIZE * 4) {
chacha_8block_xor_avx2(state, dst, src, bytes, nrounds);
state[12] += chacha_advance(bytes, 8);
return;
}
if (bytes > CHACHA_BLOCK_SIZE * 2) {
chacha_4block_xor_avx2(state, dst, src, bytes, nrounds);
state[12] += chacha_advance(bytes, 4);
return;
}
if (bytes > CHACHA_BLOCK_SIZE) {
chacha_2block_xor_avx2(state, dst, src, bytes, nrounds);
state[12] += chacha_advance(bytes, 2);
return;
}
}
while (bytes >= CHACHA_BLOCK_SIZE * 4) {
chacha_4block_xor_ssse3(state, dst, src, bytes, nrounds);
bytes -= CHACHA_BLOCK_SIZE * 4;
src += CHACHA_BLOCK_SIZE * 4;
dst += CHACHA_BLOCK_SIZE * 4;
state[12] += 4;
}
if (bytes > CHACHA_BLOCK_SIZE) {
chacha_4block_xor_ssse3(state, dst, src, bytes, nrounds);
state[12] += chacha_advance(bytes, 4);
return;
}
if (bytes) {
chacha_block_xor_ssse3(state, dst, src, bytes, nrounds);
state[12]++;
}
}
void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds)
{
if (!static_branch_likely(&chacha_use_simd) || !crypto_simd_usable()) {
hchacha_block_generic(state, stream, nrounds);
} else {
kernel_fpu_begin();
hchacha_block_ssse3(state, stream, nrounds);
kernel_fpu_end();
}
}
EXPORT_SYMBOL(hchacha_block_arch);
void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv)
{
chacha_init_generic(state, key, iv);
}
EXPORT_SYMBOL(chacha_init_arch);
void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes,
int nrounds)
{
if (!static_branch_likely(&chacha_use_simd) || !crypto_simd_usable() ||
bytes <= CHACHA_BLOCK_SIZE)
return chacha_crypt_generic(state, dst, src, bytes, nrounds);
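	/*
	 * Process at most 4 KiB per kernel_fpu_begin()/kernel_fpu_end()
	 * section so that preemption is not disabled for too long.
	 */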
do {
unsigned int todo = min_t(unsigned int, bytes, SZ_4K);
kernel_fpu_begin();
chacha_dosimd(state, dst, src, todo, nrounds);
kernel_fpu_end();
bytes -= todo;
src += todo;
dst += todo;
} while (bytes);
}
EXPORT_SYMBOL(chacha_crypt_arch);
static int chacha_simd_stream_xor(struct skcipher_request *req,
const struct chacha_ctx *ctx, const u8 *iv)
{
u32 state[CHACHA_STATE_WORDS] __aligned(8);
struct skcipher_walk walk;
int err;
err = skcipher_walk_virt(&walk, req, false);
chacha_init_generic(state, ctx->key, iv);
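	/*
	 * Intermediate chunks are rounded down to a whole number of blocks
	 * so that the block counter in state[12] stays aligned across
	 * iterations of the walk.
	 */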
while (walk.nbytes > 0) {
unsigned int nbytes = walk.nbytes;
if (nbytes < walk.total)
nbytes = round_down(nbytes, walk.stride);
if (!static_branch_likely(&chacha_use_simd) ||
!crypto_simd_usable()) {
chacha_crypt_generic(state, walk.dst.virt.addr,
walk.src.virt.addr, nbytes,
ctx->nrounds);
} else {
kernel_fpu_begin();
chacha_dosimd(state, walk.dst.virt.addr,
walk.src.virt.addr, nbytes,
ctx->nrounds);
kernel_fpu_end();
}
err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
}
return err;
}
static int chacha_simd(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
return chacha_simd_stream_xor(req, ctx, req->iv);
}
static int xchacha_simd(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
u32 state[CHACHA_STATE_WORDS] __aligned(8);
struct chacha_ctx subctx;
u8 real_iv[16];
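	/*
	 * Derive the XChaCha subkey by keying a ChaCha state with the first
	 * 16 bytes of the IV and running the HChaCha permutation over it.
	 */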
chacha_init_generic(state, ctx->key, req->iv);
if (req->cryptlen > CHACHA_BLOCK_SIZE && crypto_simd_usable()) {
kernel_fpu_begin();
hchacha_block_ssse3(state, subctx.key, ctx->nrounds);
kernel_fpu_end();
} else {
hchacha_block_generic(state, subctx.key, ctx->nrounds);
}
subctx.nrounds = ctx->nrounds;
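	/*
	 * Build the inner ChaCha IV from the XChaCha IV: bytes 24-31 carry
	 * the stream position (block counter) and bytes 16-23 the remaining
	 * 64 nonce bits.
	 */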
memcpy(&real_iv[0], req->iv + 24, 8);
memcpy(&real_iv[8], req->iv + 16, 8);
return chacha_simd_stream_xor(req, &subctx, real_iv);
}
static struct skcipher_alg algs[] = {
{
.base.cra_name = "chacha20",
.base.cra_driver_name = "chacha20-simd",
.base.cra_priority = 300,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct chacha_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = CHACHA_KEY_SIZE,
.max_keysize = CHACHA_KEY_SIZE,
.ivsize = CHACHA_IV_SIZE,
.chunksize = CHACHA_BLOCK_SIZE,
.setkey = chacha20_setkey,
.encrypt = chacha_simd,
.decrypt = chacha_simd,
}, {
.base.cra_name = "xchacha20",
.base.cra_driver_name = "xchacha20-simd",
.base.cra_priority = 300,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct chacha_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = CHACHA_KEY_SIZE,
.max_keysize = CHACHA_KEY_SIZE,
.ivsize = XCHACHA_IV_SIZE,
.chunksize = CHACHA_BLOCK_SIZE,
.setkey = chacha20_setkey,
.encrypt = xchacha_simd,
.decrypt = xchacha_simd,
}, {
.base.cra_name = "xchacha12",
.base.cra_driver_name = "xchacha12-simd",
.base.cra_priority = 300,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct chacha_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = CHACHA_KEY_SIZE,
.max_keysize = CHACHA_KEY_SIZE,
.ivsize = XCHACHA_IV_SIZE,
.chunksize = CHACHA_BLOCK_SIZE,
.setkey = chacha12_setkey,
.encrypt = xchacha_simd,
.decrypt = xchacha_simd,
},
};
static int __init chacha_simd_mod_init(void)
{
if (!boot_cpu_has(X86_FEATURE_SSSE3))
return 0;
static_branch_enable(&chacha_use_simd);
if (boot_cpu_has(X86_FEATURE_AVX) &&
boot_cpu_has(X86_FEATURE_AVX2) &&
cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) {
static_branch_enable(&chacha_use_avx2);
if (IS_ENABLED(CONFIG_AS_AVX512) &&
boot_cpu_has(X86_FEATURE_AVX512VL) &&
boot_cpu_has(X86_FEATURE_AVX512BW)) /* kmovq */
static_branch_enable(&chacha_use_avx512vl);
}
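	/*
	 * The static keys above also serve the chacha_crypt_arch() library
	 * interface; the skcipher algorithms are registered only when the
	 * skcipher API is reachable from this module.
	 */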
return IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER) ?
crypto_register_skciphers(algs, ARRAY_SIZE(algs)) : 0;
}
static void __exit chacha_simd_mod_fini(void)
{
if (IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER) && boot_cpu_has(X86_FEATURE_SSSE3))
crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
}
module_init(chacha_simd_mod_init);
module_exit(chacha_simd_mod_fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Martin Willi <[email protected]>");
MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (x64 SIMD accelerated)");
MODULE_ALIAS_CRYPTO("chacha20");
MODULE_ALIAS_CRYPTO("chacha20-simd");
MODULE_ALIAS_CRYPTO("xchacha20");
MODULE_ALIAS_CRYPTO("xchacha20-simd");
MODULE_ALIAS_CRYPTO("xchacha12");
MODULE_ALIAS_CRYPTO("xchacha12-simd");
| linux-master | arch/x86/crypto/chacha_glue.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Accelerated GHASH implementation with Intel PCLMULQDQ-NI
* instructions. This file contains glue code.
*
* Copyright (c) 2009 Intel Corp.
* Author: Huang Ying <[email protected]>
*/
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/cryptd.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <asm/cpu_device_id.h>
#include <asm/simd.h>
#include <asm/unaligned.h>
#define GHASH_BLOCK_SIZE 16
#define GHASH_DIGEST_SIZE 16
void clmul_ghash_mul(char *dst, const le128 *shash);
void clmul_ghash_update(char *dst, const char *src, unsigned int srclen,
const le128 *shash);
struct ghash_async_ctx {
struct cryptd_ahash *cryptd_tfm;
};
struct ghash_ctx {
le128 shash;
};
struct ghash_desc_ctx {
u8 buffer[GHASH_BLOCK_SIZE];
u32 bytes;
};
static int ghash_init(struct shash_desc *desc)
{
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
memset(dctx, 0, sizeof(*dctx));
return 0;
}
static int ghash_setkey(struct crypto_shash *tfm,
const u8 *key, unsigned int keylen)
{
struct ghash_ctx *ctx = crypto_shash_ctx(tfm);
u64 a, b;
if (keylen != GHASH_BLOCK_SIZE)
return -EINVAL;
/*
* GHASH maps bits to polynomial coefficients backwards, which makes it
* hard to implement. But it can be shown that the GHASH multiplication
*
* D * K (mod x^128 + x^7 + x^2 + x + 1)
*
* (where D is a data block and K is the key) is equivalent to:
*
* bitreflect(D) * bitreflect(K) * x^(-127)
* (mod x^128 + x^127 + x^126 + x^121 + 1)
*
* So, the code below precomputes:
*
* bitreflect(K) * x^(-127) (mod x^128 + x^127 + x^126 + x^121 + 1)
*
* ... but in Montgomery form (so that Montgomery multiplication can be
* used), i.e. with an extra x^128 factor, which means actually:
*
* bitreflect(K) * x (mod x^128 + x^127 + x^126 + x^121 + 1)
*
* The within-a-byte part of bitreflect() cancels out GHASH's built-in
* reflection, and thus bitreflect() is actually a byteswap.
*/
a = get_unaligned_be64(key);
b = get_unaligned_be64(key + 8);
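	/*
	 * Compute K * x in the reflected representation: a 128-bit left
	 * rotation by one bit, folding in the reduction constant
	 * 0xc2 << 56 when a set bit wraps around the top.
	 */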
ctx->shash.a = cpu_to_le64((a << 1) | (b >> 63));
ctx->shash.b = cpu_to_le64((b << 1) | (a >> 63));
if (a >> 63)
ctx->shash.a ^= cpu_to_le64((u64)0xc2 << 56);
return 0;
}
static int ghash_update(struct shash_desc *desc,
const u8 *src, unsigned int srclen)
{
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
u8 *dst = dctx->buffer;
kernel_fpu_begin();
if (dctx->bytes) {
int n = min(srclen, dctx->bytes);
u8 *pos = dst + (GHASH_BLOCK_SIZE - dctx->bytes);
dctx->bytes -= n;
srclen -= n;
while (n--)
*pos++ ^= *src++;
if (!dctx->bytes)
clmul_ghash_mul(dst, &ctx->shash);
}
clmul_ghash_update(dst, src, srclen, &ctx->shash);
kernel_fpu_end();
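	/*
	 * Stash any trailing partial block in the buffer; it is completed
	 * by later data or implicitly zero-padded in ghash_flush().
	 */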
if (srclen & 0xf) {
src += srclen - (srclen & 0xf);
srclen &= 0xf;
dctx->bytes = GHASH_BLOCK_SIZE - srclen;
while (srclen--)
*dst++ ^= *src++;
}
return 0;
}
static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
{
u8 *dst = dctx->buffer;
if (dctx->bytes) {
u8 *tmp = dst + (GHASH_BLOCK_SIZE - dctx->bytes);
while (dctx->bytes--)
*tmp++ ^= 0;
kernel_fpu_begin();
clmul_ghash_mul(dst, &ctx->shash);
kernel_fpu_end();
}
dctx->bytes = 0;
}
static int ghash_final(struct shash_desc *desc, u8 *dst)
{
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
u8 *buf = dctx->buffer;
ghash_flush(ctx, dctx);
memcpy(dst, buf, GHASH_BLOCK_SIZE);
return 0;
}
static struct shash_alg ghash_alg = {
.digestsize = GHASH_DIGEST_SIZE,
.init = ghash_init,
.update = ghash_update,
.final = ghash_final,
.setkey = ghash_setkey,
.descsize = sizeof(struct ghash_desc_ctx),
.base = {
.cra_name = "__ghash",
.cra_driver_name = "__ghash-pclmulqdqni",
.cra_priority = 0,
.cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = GHASH_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct ghash_ctx),
.cra_module = THIS_MODULE,
},
};
static int ghash_async_init(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
struct ahash_request *cryptd_req = ahash_request_ctx(req);
struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm);
desc->tfm = child;
return crypto_shash_init(desc);
}
static int ghash_async_update(struct ahash_request *req)
{
struct ahash_request *cryptd_req = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
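	/*
	 * Fall back to cryptd when SIMD is unusable, or when cryptd already
	 * has requests queued while we are in atomic context, so that
	 * request ordering is preserved.
	 */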
if (!crypto_simd_usable() ||
(in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
memcpy(cryptd_req, req, sizeof(*req));
ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
return crypto_ahash_update(cryptd_req);
} else {
struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
return shash_ahash_update(req, desc);
}
}
static int ghash_async_final(struct ahash_request *req)
{
struct ahash_request *cryptd_req = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
if (!crypto_simd_usable() ||
(in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
memcpy(cryptd_req, req, sizeof(*req));
ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
return crypto_ahash_final(cryptd_req);
} else {
struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
return crypto_shash_final(desc, req->result);
}
}
static int ghash_async_import(struct ahash_request *req, const void *in)
{
struct ahash_request *cryptd_req = ahash_request_ctx(req);
struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
ghash_async_init(req);
memcpy(dctx, in, sizeof(*dctx));
return 0;
}
static int ghash_async_export(struct ahash_request *req, void *out)
{
struct ahash_request *cryptd_req = ahash_request_ctx(req);
struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
memcpy(out, dctx, sizeof(*dctx));
return 0;
}
static int ghash_async_digest(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
struct ahash_request *cryptd_req = ahash_request_ctx(req);
struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
if (!crypto_simd_usable() ||
(in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
memcpy(cryptd_req, req, sizeof(*req));
ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
return crypto_ahash_digest(cryptd_req);
} else {
struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm);
desc->tfm = child;
return shash_ahash_digest(req, desc);
}
}
static int ghash_async_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
struct crypto_ahash *child = &ctx->cryptd_tfm->base;
crypto_ahash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
crypto_ahash_set_flags(child, crypto_ahash_get_flags(tfm)
& CRYPTO_TFM_REQ_MASK);
return crypto_ahash_setkey(child, key, keylen);
}
static int ghash_async_init_tfm(struct crypto_tfm *tfm)
{
struct cryptd_ahash *cryptd_tfm;
struct ghash_async_ctx *ctx = crypto_tfm_ctx(tfm);
cryptd_tfm = cryptd_alloc_ahash("__ghash-pclmulqdqni",
CRYPTO_ALG_INTERNAL,
CRYPTO_ALG_INTERNAL);
if (IS_ERR(cryptd_tfm))
return PTR_ERR(cryptd_tfm);
ctx->cryptd_tfm = cryptd_tfm;
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct ahash_request) +
crypto_ahash_reqsize(&cryptd_tfm->base));
return 0;
}
static void ghash_async_exit_tfm(struct crypto_tfm *tfm)
{
struct ghash_async_ctx *ctx = crypto_tfm_ctx(tfm);
cryptd_free_ahash(ctx->cryptd_tfm);
}
static struct ahash_alg ghash_async_alg = {
.init = ghash_async_init,
.update = ghash_async_update,
.final = ghash_async_final,
.setkey = ghash_async_setkey,
.digest = ghash_async_digest,
.export = ghash_async_export,
.import = ghash_async_import,
.halg = {
.digestsize = GHASH_DIGEST_SIZE,
.statesize = sizeof(struct ghash_desc_ctx),
.base = {
.cra_name = "ghash",
.cra_driver_name = "ghash-clmulni",
.cra_priority = 400,
.cra_ctxsize = sizeof(struct ghash_async_ctx),
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = GHASH_BLOCK_SIZE,
.cra_module = THIS_MODULE,
.cra_init = ghash_async_init_tfm,
.cra_exit = ghash_async_exit_tfm,
},
},
};
static const struct x86_cpu_id pcmul_cpu_id[] = {
X86_MATCH_FEATURE(X86_FEATURE_PCLMULQDQ, NULL), /* Pickle-Mickle-Duck */
{}
};
MODULE_DEVICE_TABLE(x86cpu, pcmul_cpu_id);
static int __init ghash_pclmulqdqni_mod_init(void)
{
int err;
if (!x86_match_cpu(pcmul_cpu_id))
return -ENODEV;
err = crypto_register_shash(&ghash_alg);
if (err)
goto err_out;
err = crypto_register_ahash(&ghash_async_alg);
if (err)
goto err_shash;
return 0;
err_shash:
crypto_unregister_shash(&ghash_alg);
err_out:
return err;
}
static void __exit ghash_pclmulqdqni_mod_exit(void)
{
crypto_unregister_ahash(&ghash_async_alg);
crypto_unregister_shash(&ghash_alg);
}
module_init(ghash_pclmulqdqni_mod_init);
module_exit(ghash_pclmulqdqni_mod_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("GHASH hash function, accelerated by PCLMULQDQ-NI");
MODULE_ALIAS_CRYPTO("ghash");
| linux-master | arch/x86/crypto/ghash-clmulni-intel_glue.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Glue Code for x86_64/AVX2 assembler optimized version of Serpent
*
* Copyright © 2012-2013 Jussi Kivilinna <[email protected]>
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/internal/simd.h>
#include <crypto/serpent.h>
#include "serpent-avx.h"
#include "ecb_cbc_helpers.h"
#define SERPENT_AVX2_PARALLEL_BLOCKS 16
/* 16-way AVX2 parallel cipher functions */
asmlinkage void serpent_ecb_enc_16way(const void *ctx, u8 *dst, const u8 *src);
asmlinkage void serpent_ecb_dec_16way(const void *ctx, u8 *dst, const u8 *src);
asmlinkage void serpent_cbc_dec_16way(const void *ctx, u8 *dst, const u8 *src);
static int serpent_setkey_skcipher(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen)
{
return __serpent_setkey(crypto_skcipher_ctx(tfm), key, keylen);
}
static int ecb_encrypt(struct skcipher_request *req)
{
ECB_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS);
ECB_BLOCK(SERPENT_AVX2_PARALLEL_BLOCKS, serpent_ecb_enc_16way);
ECB_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_ecb_enc_8way_avx);
ECB_BLOCK(1, __serpent_encrypt);
ECB_WALK_END();
}
static int ecb_decrypt(struct skcipher_request *req)
{
ECB_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS);
ECB_BLOCK(SERPENT_AVX2_PARALLEL_BLOCKS, serpent_ecb_dec_16way);
ECB_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_ecb_dec_8way_avx);
ECB_BLOCK(1, __serpent_decrypt);
ECB_WALK_END();
}
static int cbc_encrypt(struct skcipher_request *req)
{
CBC_WALK_START(req, SERPENT_BLOCK_SIZE, -1);
CBC_ENC_BLOCK(__serpent_encrypt);
CBC_WALK_END();
}
static int cbc_decrypt(struct skcipher_request *req)
{
CBC_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS);
CBC_DEC_BLOCK(SERPENT_AVX2_PARALLEL_BLOCKS, serpent_cbc_dec_16way);
CBC_DEC_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_cbc_dec_8way_avx);
CBC_DEC_BLOCK(1, __serpent_decrypt);
CBC_WALK_END();
}
static struct skcipher_alg serpent_algs[] = {
{
.base.cra_name = "__ecb(serpent)",
.base.cra_driver_name = "__ecb-serpent-avx2",
.base.cra_priority = 600,
.base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = SERPENT_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct serpent_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = SERPENT_MIN_KEY_SIZE,
.max_keysize = SERPENT_MAX_KEY_SIZE,
.setkey = serpent_setkey_skcipher,
.encrypt = ecb_encrypt,
.decrypt = ecb_decrypt,
}, {
.base.cra_name = "__cbc(serpent)",
.base.cra_driver_name = "__cbc-serpent-avx2",
.base.cra_priority = 600,
.base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = SERPENT_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct serpent_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = SERPENT_MIN_KEY_SIZE,
.max_keysize = SERPENT_MAX_KEY_SIZE,
.ivsize = SERPENT_BLOCK_SIZE,
.setkey = serpent_setkey_skcipher,
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
},
};
static struct simd_skcipher_alg *serpent_simd_algs[ARRAY_SIZE(serpent_algs)];
static int __init serpent_avx2_init(void)
{
const char *feature_name;
if (!boot_cpu_has(X86_FEATURE_AVX2) || !boot_cpu_has(X86_FEATURE_OSXSAVE)) {
pr_info("AVX2 instructions are not detected.\n");
return -ENODEV;
}
if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
&feature_name)) {
pr_info("CPU feature '%s' is not supported.\n", feature_name);
return -ENODEV;
}
return simd_register_skciphers_compat(serpent_algs,
ARRAY_SIZE(serpent_algs),
serpent_simd_algs);
}
static void __exit serpent_avx2_fini(void)
{
simd_unregister_skciphers(serpent_algs, ARRAY_SIZE(serpent_algs),
serpent_simd_algs);
}
module_init(serpent_avx2_init);
module_exit(serpent_avx2_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Serpent Cipher Algorithm, AVX2 optimized");
MODULE_ALIAS_CRYPTO("serpent");
MODULE_ALIAS_CRYPTO("serpent-asm");
| linux-master | arch/x86/crypto/serpent_avx2_glue.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Glue Code for the AVX assembler implementation of the Cast5 Cipher
*
* Copyright (C) 2012 Johannes Goetzfried
* <[email protected]>
*/
#include <crypto/algapi.h>
#include <crypto/cast5.h>
#include <crypto/internal/simd.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/types.h>
#include "ecb_cbc_helpers.h"
#define CAST5_PARALLEL_BLOCKS 16
asmlinkage void cast5_ecb_enc_16way(struct cast5_ctx *ctx, u8 *dst,
const u8 *src);
asmlinkage void cast5_ecb_dec_16way(struct cast5_ctx *ctx, u8 *dst,
const u8 *src);
asmlinkage void cast5_cbc_dec_16way(struct cast5_ctx *ctx, u8 *dst,
const u8 *src);
static int cast5_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
return cast5_setkey(&tfm->base, key, keylen);
}
static int ecb_encrypt(struct skcipher_request *req)
{
ECB_WALK_START(req, CAST5_BLOCK_SIZE, CAST5_PARALLEL_BLOCKS);
ECB_BLOCK(CAST5_PARALLEL_BLOCKS, cast5_ecb_enc_16way);
ECB_BLOCK(1, __cast5_encrypt);
ECB_WALK_END();
}
static int ecb_decrypt(struct skcipher_request *req)
{
ECB_WALK_START(req, CAST5_BLOCK_SIZE, CAST5_PARALLEL_BLOCKS);
ECB_BLOCK(CAST5_PARALLEL_BLOCKS, cast5_ecb_dec_16way);
ECB_BLOCK(1, __cast5_decrypt);
ECB_WALK_END();
}
static int cbc_encrypt(struct skcipher_request *req)
{
CBC_WALK_START(req, CAST5_BLOCK_SIZE, -1);
CBC_ENC_BLOCK(__cast5_encrypt);
CBC_WALK_END();
}
static int cbc_decrypt(struct skcipher_request *req)
{
CBC_WALK_START(req, CAST5_BLOCK_SIZE, CAST5_PARALLEL_BLOCKS);
CBC_DEC_BLOCK(CAST5_PARALLEL_BLOCKS, cast5_cbc_dec_16way);
CBC_DEC_BLOCK(1, __cast5_decrypt);
CBC_WALK_END();
}
static struct skcipher_alg cast5_algs[] = {
{
.base.cra_name = "__ecb(cast5)",
.base.cra_driver_name = "__ecb-cast5-avx",
.base.cra_priority = 200,
.base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = CAST5_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct cast5_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = CAST5_MIN_KEY_SIZE,
.max_keysize = CAST5_MAX_KEY_SIZE,
.setkey = cast5_setkey_skcipher,
.encrypt = ecb_encrypt,
.decrypt = ecb_decrypt,
}, {
.base.cra_name = "__cbc(cast5)",
.base.cra_driver_name = "__cbc-cast5-avx",
.base.cra_priority = 200,
.base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = CAST5_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct cast5_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = CAST5_MIN_KEY_SIZE,
.max_keysize = CAST5_MAX_KEY_SIZE,
.ivsize = CAST5_BLOCK_SIZE,
.setkey = cast5_setkey_skcipher,
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
}
};
static struct simd_skcipher_alg *cast5_simd_algs[ARRAY_SIZE(cast5_algs)];
static int __init cast5_init(void)
{
const char *feature_name;
if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
&feature_name)) {
pr_info("CPU feature '%s' is not supported.\n", feature_name);
return -ENODEV;
}
return simd_register_skciphers_compat(cast5_algs,
ARRAY_SIZE(cast5_algs),
cast5_simd_algs);
}
static void __exit cast5_exit(void)
{
simd_unregister_skciphers(cast5_algs, ARRAY_SIZE(cast5_algs),
cast5_simd_algs);
}
module_init(cast5_init);
module_exit(cast5_exit);
MODULE_DESCRIPTION("Cast5 Cipher Algorithm, AVX optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("cast5");
| linux-master | arch/x86/crypto/cast5_avx_glue.c |
/*
* Cryptographic API.
*
* T10 Data Integrity Field CRC16 Crypto Transform using PCLMULQDQ Instructions
*
* Copyright (C) 2013 Intel Corporation
* Author: Tim Chen <[email protected]>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/crc-t10dif.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <asm/cpufeatures.h>
#include <asm/cpu_device_id.h>
#include <asm/simd.h>
asmlinkage u16 crc_t10dif_pcl(u16 init_crc, const u8 *buf, size_t len);
struct chksum_desc_ctx {
__u16 crc;
};
static int chksum_init(struct shash_desc *desc)
{
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
ctx->crc = 0;
return 0;
}
static int chksum_update(struct shash_desc *desc, const u8 *data,
unsigned int length)
{
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
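	/*
	 * crc_t10dif_pcl() assumes at least 16 bytes of data; fall back to
	 * the generic version for short buffers or when SIMD is unusable.
	 */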
if (length >= 16 && crypto_simd_usable()) {
kernel_fpu_begin();
ctx->crc = crc_t10dif_pcl(ctx->crc, data, length);
kernel_fpu_end();
} else
ctx->crc = crc_t10dif_generic(ctx->crc, data, length);
return 0;
}
static int chksum_final(struct shash_desc *desc, u8 *out)
{
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
*(__u16 *)out = ctx->crc;
return 0;
}
static int __chksum_finup(__u16 crc, const u8 *data, unsigned int len, u8 *out)
{
if (len >= 16 && crypto_simd_usable()) {
kernel_fpu_begin();
*(__u16 *)out = crc_t10dif_pcl(crc, data, len);
kernel_fpu_end();
} else
*(__u16 *)out = crc_t10dif_generic(crc, data, len);
return 0;
}
static int chksum_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
return __chksum_finup(ctx->crc, data, len, out);
}
static int chksum_digest(struct shash_desc *desc, const u8 *data,
unsigned int length, u8 *out)
{
return __chksum_finup(0, data, length, out);
}
static struct shash_alg alg = {
.digestsize = CRC_T10DIF_DIGEST_SIZE,
.init = chksum_init,
.update = chksum_update,
.final = chksum_final,
.finup = chksum_finup,
.digest = chksum_digest,
.descsize = sizeof(struct chksum_desc_ctx),
.base = {
.cra_name = "crct10dif",
.cra_driver_name = "crct10dif-pclmul",
.cra_priority = 200,
.cra_blocksize = CRC_T10DIF_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
};
static const struct x86_cpu_id crct10dif_cpu_id[] = {
X86_MATCH_FEATURE(X86_FEATURE_PCLMULQDQ, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, crct10dif_cpu_id);
static int __init crct10dif_intel_mod_init(void)
{
if (!x86_match_cpu(crct10dif_cpu_id))
return -ENODEV;
return crypto_register_shash(&alg);
}
static void __exit crct10dif_intel_mod_fini(void)
{
crypto_unregister_shash(&alg);
}
module_init(crct10dif_intel_mod_init);
module_exit(crct10dif_intel_mod_fini);
MODULE_AUTHOR("Tim Chen <[email protected]>");
MODULE_DESCRIPTION("T10 DIF CRC calculation accelerated with PCLMULQDQ.");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("crct10dif");
MODULE_ALIAS_CRYPTO("crct10dif-pclmul");
| linux-master | arch/x86/crypto/crct10dif-pclmul_glue.c |